diff --git a/controller/juju-2.9.42-linux-amd64.tgz b/controller/juju-2.9.42-linux-amd64.tgz
new file mode 100644
index 0000000..47613be
Binary files /dev/null and b/controller/juju-2.9.42-linux-amd64.tgz differ
diff --git a/controller/juju-db.assert b/controller/juju-db.assert
new file mode 100644
index 0000000..c6b588e
--- /dev/null
+++ b/controller/juju-db.assert
@@ -0,0 +1,112 @@
+type: account-key
+authority-id: canonical
+revision: 2
+public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+account-id: canonical
+name: store
+since: 2016-04-01T00:00:00.0Z
+body-length: 717
+sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswHNiEB9Lxk
+
+AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9ji
+qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482R
+vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJi
+UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuKL
+Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQGA
+o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl9
+VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9F
+2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7ant
+Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIcG
+vUvV7RjVzv17ut0AEQEAAQ==
+
+AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsMV
+WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/bP
+nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiLg
+3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kLe
+eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrYm
+inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ19
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+k
+rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWEY
+aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQI
+6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nOu
+haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpFo
+yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O96
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi7
+skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PKW
+CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjdeu
+ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OFq
+qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqRy
+IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3tr
+oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k
+
+type: account
+authority-id: canonical
+revision: 1
+account-id: yZLP8pbP8Cx3OCVg7cfq5H390RGDn8jP
+display-name: Canonical Juju QA Bot
+timestamp: 2017-03-08T16:37:12.237500Z
+username: juju-qa
+validation: unproven
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCWMAzOAAAe/EQAHJALpG3qHEljF2qQzQRdP8k+bwPQqFg1MOugDVZJODl6WuO
+VFqqOeCG1Gim5Ph8ww6tDqaxsiuc+S2pnzjY5ohQ8JkkKhXOLyktw4AVrdVzgRZfzWWRRW5Hlfeq
+r8WBz123odeGx0vZVBGJS3f/LgaY8w6MNsnujvBCW/BEjOX5XzBSVQJ5MZ6O/xeEdA06nChRW1Ji
+Mgn2ZxcfayKVEdYubYSCcNg3BjBjJ4Up1nOmEYoA0p+plcbnp4fHRIZkWS1OMvQlqWmWVzv33Nyd
+qtMHCuZMKy38nMZ06jHKaNby2ZksAgiIGXRiPPeVxDDwvi8KmFoDv8VEKyZ0m43rpOBVdtu/Y6+R
+rKYb4osDiQeynsLjAtB4nu/YC9RKJIiS8NSKc0Oytzk4lC8nCfk4OAxsuASEK5DoU5DsG+/1pgq4
+EUBiXCQfFfjRYKZOaj/OI/jcsuSXhutinT32kdxWLa2mMaictnqB94rhOXwNA68jJGBYEKJccxcz
+yUi73zAe8AYp6X7Y8awZXukWBdBnMpcto1uG0MSALMcOeNUVWSToOUpaMwBy4o/Tg9QAVt+O08IT
+r3KtgqXxf4Zwm6avh6ZbeHt0kpcjiBE+2ZJ5ycXzk1KiFExhrKnBF6MLj8B02sNuyg9tiJagABEX
+6e3TS6yu0/UxqMeb5jlG64lN3Tve
+
+type: snap-declaration
+format: 1
+authority-id: canonical
+revision: 2
+series: 16
+snap-id: KeESO5HF7y6lC8AyCLt93EBLvAHbbpjz
+plugs:
+ mount-observe:
+ allow-auto-connection: true
+ network-observe:
+ allow-auto-connection: true
+ system-observe:
+ allow-auto-connection: true
+publisher-id: yZLP8pbP8Cx3OCVg7cfq5H390RGDn8jP
+snap-name: juju-db
+timestamp: 2022-06-06T04:33:32.996288Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCYp2DnQAAL/YQAHiEGLP68egWND/YhYSJyQeWbbk5BYr6TzsSP65L8xKKNdRQ
+DuTztV3c0RLf++bUuB24BbcslAq1sEblRaCGNHe274WjMnm6SgSRigjIZ/bOSiel0anncLi9JxgP
+CEreGvnj3PixKfuCQSnO5RoC1O++xsyg/o/VbBeXL9REFJB95i9GwT/9L7cBEft8SOOKBWS5Dpr6
+xt5SVR8kGPuywgRf5G/Byc31KuED3MA3p7dPu0Ia0/W4OxxZlAdJ610S2CZRiHoIGBwUJ3/NDgKF
+lxfrvICbyT1pd8GMReeuu52wBVm95zNI+p1Tb0QuhExwPTpGJ0p8WkcPUKKV/Z8Pq4HUueJtylkC
+EPvP/oFtYwvhTAd4Xjb4DTgIyAG7ye3FGQgdoM27/NXz11X37ePeXsDvR/Qy5Gy4qfqA9ALFoVS8
+ok4fssm1oJWb/CaKeAlj+fuZ1IIra/RE5yexKgMEpc15IStnO0b+TbUJtXkhaF6xvEFtbj+9G+p+
+K6DU+T9a7FCYOq8e8kcu8Nixw0vgvQWs9oFRUMqv1bD3tsv3t+EGMKqzK20Au2U/KyPM9cTDqhcr
+TzVjEQ1PMccEbd/AndO2fLvpHE+oueAvEnWdqb3GhvaoVWRIPj6y3r527cmDGou/uhPODpbCtBKV
+VsflVdzV+blaKue6H8Po32QLLc7Z
+
+type: snap-revision
+authority-id: canonical
+snap-sha3-384: BjQEnrQHxDO8-k3f97njCv3ZEreOm3LTxcxYiUNOAjYrwfkOQKg5wNZ03RCnikFq
+developer-id: yZLP8pbP8Cx3OCVg7cfq5H390RGDn8jP
+provenance: global-upload
+snap-id: KeESO5HF7y6lC8AyCLt93EBLvAHbbpjz
+snap-revision: 160
+snap-size: 57831424
+timestamp: 2022-12-09T05:13:02.662522Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCY5LD3gAA/soQAMZ9CYivfeBsBV6TXERo0KA42jVprpZHlAkfIKIbw/YGOrcH
+tgF8qnb/1dMghFVKCajzWkB8MBNj/fNUOa3iM+EYbBk+0bZSOGoEBvYO7BOdwlNz+ldXZVx87xTD
+WBqvtv5lvBaiOUW+XgW1ubo6DZHm5F6MgzYMr2wRrSXOLdkYt4At6HPxO3527lIFE1EnYAeiDo/1
+gBlzpSh7ncKTCxCKhtKdDl0HzwKiA8tDv7KKqt8R4Sc6tpkjHffrDLaWPsjMhWeIzhIL+17dq2hM
+xNzKgtabCQ8Zp7Hx4axyPWwBcop1jbv8i9qa06t1rXsAcoEV1XoKsmWitrf7pfq9mXESu39Q3NGE
+l6JsTiMcG7gpy0s53RIncrZYVS0kgU+qDg7spxfXX3F7p32ksUhKJK6UpP8nK8DjWvwWoaDh/kaa
+XS7rYpqueTnJ66wXpW1RjFpX8QdiamDMZ3e+0OE8t2RIr+zjkQTMbJXwZKbdf93Rd+K5ISV5QYkW
+927im0ArDmTbZ8K/OwR4DwbQI4G6k6e1MhDm8jqBwsXpxIYMdFC5FpReNyeWUuJTxjDMijRI0kmC
+gJve0qQHK8ylX+FN/93Iv+GDvDzkffK+5Hi1tRgYsHL37pVLJdpJWe40nHijUE1gDNMXc8lrpuMr
+H9szwSFrQoU1VAHvmAiJpL6gg8zU
diff --git a/controller/juju-db.snap b/controller/juju-db.snap
new file mode 100644
index 0000000..26c58de
Binary files /dev/null and b/controller/juju-db.snap differ
diff --git a/coredns/.github/workflows/tests.yaml b/coredns/.github/workflows/tests.yaml
new file mode 100644
index 0000000..9b2b55e
--- /dev/null
+++ b/coredns/.github/workflows/tests.yaml
@@ -0,0 +1,92 @@
+name: Test Suite for CoreDNS
+
+on:
+ - pull_request
+
+jobs:
+ lint-and-unit-tests:
+ name: Lint & Unit tests
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+ - name: Install Tox
+ run: pip install tox
+ - name: Run lint & unit tests
+ run: tox
+
+ func-test:
+ name: Functional test with MicroK8s
+ runs-on: ubuntu-latest
+ timeout-minutes: 20
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v2
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+ - name: Fix global gitconfig for confined snap
+ run: |
+ # GH automatically includes the git-lfs plugin and configures it in
+ # /etc/gitconfig. However, the confinement of the charmcraft snap
+ # means that it can see that this file exists but cannot read it, even
+ # if the file permissions should allow it; this breaks git usage within
+ # the snap. To get around this, we move it from the global gitconfig to
+ # the user's .gitconfig file.
+ cat /etc/gitconfig >> $HOME/.gitconfig
+ sudo rm /etc/gitconfig
+ - name: Install MicroK8s
+ uses: balchua/microk8s-actions@v0.1.3
+ with:
+ rbac: 'true'
+ storage: 'true'
+ dns: 'true' # required for juju, will adjust later
+ - name: Install Dependencies
+ run: |
+ pip install tox
+ sudo snap install juju --classic
+ sudo snap install juju-wait --classic
+ sudo usermod -aG microk8s $USER
+ sudo snap install charmcraft --beta
+ sudo snap install yq
+ - name: Build charm
+ run: |
+ if ! charmcraft build; then
+ echo Build failed, full log:
+ cat "$(ls -1t "$HOME"/snap/charmcraft/common/charmcraft-log-* | head -n1)"
+ exit 1
+ fi
+ - name: Bootstrap MicroK8s with Juju
+ run: sg microk8s 'juju bootstrap microk8s microk8s'
+ - name: Add model
+ run: juju add-model coredns microk8s
+ - name: Deploy CoreDNS
+ run: |
+ upstream_image=$(yq eval '.resources.coredns-image.upstream-source' metadata.yaml)
+ juju deploy ./coredns.charm --resource coredns-image=$upstream_image --config forward=8.8.8.8
+ - name: Wait for stable environment
+ run: juju wait -wv
+ - name: Tell MicroK8s to use CoreDNS charm
+ run: |
+ cluster_ip=$(sudo microk8s.kubectl get svc -n coredns coredns -o jsonpath='{..spec.clusterIP}')
+ sudo sed -i -e "s/--cluster-dns=.*/--cluster-dns=$cluster_ip/" /var/snap/microk8s/current/args/kubelet
+ sudo systemctl restart snap.microk8s.daemon-kubelet
+ - name: Run functional test
+ run: tox -e func
+ - name: Juju Status
+ if: failure()
+ run: sudo juju status
+ - name: Juju Log
+ if: failure()
+ run: sudo juju debug-log --replay --no-tail -i coredns
+ - name: Microk8s Status
+ if: failure()
+ run: sudo microk8s.kubectl get all -A
+ - name: Microk8s Pod Log
+ if: failure()
+ run: sudo microk8s.kubectl logs -n coredns -l juju-app=coredns
diff --git a/coredns/.gitignore b/coredns/.gitignore
new file mode 100644
index 0000000..878f4f1
--- /dev/null
+++ b/coredns/.gitignore
@@ -0,0 +1,6 @@
+.tox/
+__pycache__/
+*.pyc
+placeholders/
+*.charm
+build/
diff --git a/coredns/CONTRIBUTING.md b/coredns/CONTRIBUTING.md
new file mode 100644
index 0000000..e8f19f1
--- /dev/null
+++ b/coredns/CONTRIBUTING.md
@@ -0,0 +1,34 @@
+# Contributor Guide
+
+This Juju charm is open source ([Apache License 2.0](./LICENSE)) and we actively seek any community contributions
+for code, suggestions and documentation.
+This page details a few notes, workflows and suggestions for how to make contributions most effective and help us
+all build a better charm - please give them a read before working on any contributions.
+
+## Licensing
+
+This charm has been created under the [Apache License 2.0](./LICENSE), which will cover any contributions you may
+make to this project. Please familiarise yourself with the terms of the license.
+
+Additionally, this charm uses the Harmony CLA agreement. It’s the easiest way for you to give us permission to
+use your contributions.
+In effect, you’re giving us a license, but you still own the copyright — so you retain the right to modify your
+code and use it in other projects. Please [sign the CLA here](https://ubuntu.com/legal/contributors/agreement) before
+making any contributions.
+
+## Code of conduct
+
+We have adopted the Ubuntu Code of Conduct. You can read this in full [here](https://ubuntu.com/community/code-of-conduct).
+
+## Contributing code
+
+To contribute code to this project, please use the following workflow:
+
+1. [Submit a bug](https://bugs.launchpad.net/charm-coredns/+filebug) to explain the need for and track the change.
+2. Create a branch on your fork of the repo with your changes, including a unit test covering the new or modified code.
+3. Submit a PR. The PR description should include a link to the bug on Launchpad.
+4. Update the Launchpad bug to include a link to the PR and the `review-needed` tag.
+5. Once reviewed and merged, the change will become available on the edge channel and assigned to an appropriate milestone
+ for further release according to priority.
+
+
diff --git a/coredns/LICENSE b/coredns/LICENSE
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/coredns/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/coredns/Pipfile b/coredns/Pipfile
new file mode 100644
index 0000000..466f522
--- /dev/null
+++ b/coredns/Pipfile
@@ -0,0 +1,16 @@
+[[source]]
+name = "pypi"
+url = "https://pypi.org/simple"
+verify_ssl = true
+
+[dev-packages]
+pytest = "*"
+flake8 = "*"
+ipdb = "*"
+
+[packages]
+ops = "*"
+oci-image = {git = "https://github.com/juju-solutions/resource-oci-image/"}
+
+[requires]
+python_version = "3.8"
diff --git a/coredns/Pipfile.lock b/coredns/Pipfile.lock
new file mode 100644
index 0000000..f6cd306
--- /dev/null
+++ b/coredns/Pipfile.lock
@@ -0,0 +1,246 @@
+{
+ "_meta": {
+ "hash": {
+ "sha256": "3a93ef1bf6ad71dacc9efebae3e194bb569d6bf8728161b19e95dbd7c407aa22"
+ },
+ "pipfile-spec": 6,
+ "requires": {
+ "python_version": "3.8"
+ },
+ "sources": [
+ {
+ "name": "pypi",
+ "url": "https://pypi.org/simple",
+ "verify_ssl": true
+ }
+ ]
+ },
+ "default": {
+ "oci-image": {
+ "git": "https://github.com/juju-solutions/resource-oci-image/",
+ "ref": "c5778285d332edf3d9a538f9d0c06154b7ec1b0b"
+ },
+ "ops": {
+ "hashes": [
+ "sha256:23556db47b2c97a1bb72845b7c8ec88aa7a3e27717402903b5fea7b659616ab8",
+ "sha256:d102359496584617a00f6f42525a01d1b60269a3d41788cf025738cbe3348c99"
+ ],
+ "index": "pypi",
+ "version": "==0.10.0"
+ },
+ "pyyaml": {
+ "hashes": [
+ "sha256:02c78d77281d8f8d07a255e57abdbf43b02257f59f50cc6b636937d68efa5dd0",
+ "sha256:0dc9f2eb2e3c97640928dec63fd8dc1dd91e6b6ed236bd5ac00332b99b5c2ff9",
+ "sha256:124fd7c7bc1e95b1eafc60825f2daf67c73ce7b33f1194731240d24b0d1bf628",
+ "sha256:26fcb33776857f4072601502d93e1a619f166c9c00befb52826e7b774efaa9db",
+ "sha256:31ba07c54ef4a897758563e3a0fcc60077698df10180abe4b8165d9895c00ebf",
+ "sha256:3c49e39ac034fd64fd576d63bb4db53cda89b362768a67f07749d55f128ac18a",
+ "sha256:52bf0930903818e600ae6c2901f748bc4869c0c406056f679ab9614e5d21a166",
+ "sha256:5a3f345acff76cad4aa9cb171ee76c590f37394186325d53d1aa25318b0d4a09",
+ "sha256:5e7ac4e0e79a53451dc2814f6876c2fa6f71452de1498bbe29c0b54b69a986f4",
+ "sha256:7242790ab6c20316b8e7bb545be48d7ed36e26bbe279fd56f2c4a12510e60b4b",
+ "sha256:737bd70e454a284d456aa1fa71a0b429dd527bcbf52c5c33f7c8eee81ac16b89",
+ "sha256:8635d53223b1f561b081ff4adecb828fd484b8efffe542edcfdff471997f7c39",
+ "sha256:8b818b6c5a920cbe4203b5a6b14256f0e5244338244560da89b7b0f1313ea4b6",
+ "sha256:8bf38641b4713d77da19e91f8b5296b832e4db87338d6aeffe422d42f1ca896d",
+ "sha256:a36a48a51e5471513a5aea920cdad84cbd56d70a5057cca3499a637496ea379c",
+ "sha256:b2243dd033fd02c01212ad5c601dafb44fbb293065f430b0d3dbf03f3254d615",
+ "sha256:cc547d3ead3754712223abb7b403f0a184e4c3eae18c9bb7fd15adef1597cc4b",
+ "sha256:cc552b6434b90d9dbed6a4f13339625dc466fd82597119897e9489c953acbc22",
+ "sha256:f3790156c606299ff499ec44db422f66f05a7363b39eb9d5b064f17bd7d7c47b",
+ "sha256:f7a21e3d99aa3095ef0553e7ceba36fb693998fbb1226f1392ce33681047465f",
+ "sha256:fdc6b2cb4b19e431994f25a9160695cc59a4e861710cc6fc97161c5e845fc579"
+ ],
+ "index": "pypi",
+ "version": "==5.4"
+ }
+ },
+ "develop": {
+ "attrs": {
+ "hashes": [
+ "sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6",
+ "sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700"
+ ],
+ "version": "==20.3.0"
+ },
+ "backcall": {
+ "hashes": [
+ "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e",
+ "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"
+ ],
+ "version": "==0.2.0"
+ },
+ "decorator": {
+ "hashes": [
+ "sha256:41fa54c2a0cc4ba648be4fd43cff00aedf5b9465c9bf18d64325bc225f08f760",
+ "sha256:e3a62f0520172440ca0dcc823749319382e377f37f140a0b99ef45fecb84bfe7"
+ ],
+ "version": "==4.4.2"
+ },
+ "flake8": {
+ "hashes": [
+ "sha256:15e351d19611c887e482fb960eae4d44845013cc142d42896e9862f775d8cf5c",
+ "sha256:f04b9fcbac03b0a3e58c0ab3a0ecc462e023a9faf046d57794184028123aa208"
+ ],
+ "index": "pypi",
+ "version": "==3.8.3"
+ },
+ "iniconfig": {
+ "hashes": [
+ "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3",
+ "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"
+ ],
+ "version": "==1.1.1"
+ },
+ "ipdb": {
+ "hashes": [
+ "sha256:d6f46d261c45a65e65a2f7ec69288a1c511e16206edb2875e7ec6b2f66997e78"
+ ],
+ "index": "pypi",
+ "version": "==0.13.3"
+ },
+ "ipython": {
+ "hashes": [
+ "sha256:04323f72d5b85b606330b6d7e2dc8d2683ad46c3905e955aa96ecc7a99388e70",
+ "sha256:34207ffb2f653bced2bc8e3756c1db86e7d93e44ed049daae9814fed66d408ec"
+ ],
+ "version": "==7.21.0"
+ },
+ "ipython-genutils": {
+ "hashes": [
+ "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8",
+ "sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8"
+ ],
+ "version": "==0.2.0"
+ },
+ "jedi": {
+ "hashes": [
+ "sha256:18456d83f65f400ab0c2d3319e48520420ef43b23a086fdc05dff34132f0fb93",
+ "sha256:92550a404bad8afed881a137ec9a461fed49eca661414be45059329614ed0707"
+ ],
+ "version": "==0.18.0"
+ },
+ "mccabe": {
+ "hashes": [
+ "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42",
+ "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"
+ ],
+ "version": "==0.6.1"
+ },
+ "packaging": {
+ "hashes": [
+ "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5",
+ "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"
+ ],
+ "version": "==20.9"
+ },
+ "parso": {
+ "hashes": [
+ "sha256:15b00182f472319383252c18d5913b69269590616c947747bc50bf4ac768f410",
+ "sha256:8519430ad07087d4c997fda3a7918f7cfa27cb58972a8c89c2a0295a1c940e9e"
+ ],
+ "version": "==0.8.1"
+ },
+ "pexpect": {
+ "hashes": [
+ "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937",
+ "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"
+ ],
+ "markers": "sys_platform != 'win32'",
+ "version": "==4.8.0"
+ },
+ "pickleshare": {
+ "hashes": [
+ "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca",
+ "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"
+ ],
+ "version": "==0.7.5"
+ },
+ "pluggy": {
+ "hashes": [
+ "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0",
+ "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"
+ ],
+ "version": "==0.13.1"
+ },
+ "prompt-toolkit": {
+ "hashes": [
+ "sha256:bf00f22079f5fadc949f42ae8ff7f05702826a97059ffcc6281036ad40ac6f04",
+ "sha256:e1b4f11b9336a28fa11810bc623c357420f69dfdb6d2dac41ca2c21a55c033bc"
+ ],
+ "version": "==3.0.18"
+ },
+ "ptyprocess": {
+ "hashes": [
+ "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35",
+ "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"
+ ],
+ "version": "==0.7.0"
+ },
+ "py": {
+ "hashes": [
+ "sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3",
+ "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a"
+ ],
+ "version": "==1.10.0"
+ },
+ "pycodestyle": {
+ "hashes": [
+ "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367",
+ "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e"
+ ],
+ "version": "==2.6.0"
+ },
+ "pyflakes": {
+ "hashes": [
+ "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92",
+ "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"
+ ],
+ "version": "==2.2.0"
+ },
+ "pygments": {
+ "hashes": [
+ "sha256:2656e1a6edcdabf4275f9a3640db59fd5de107d88e8663c5d4e9a0fa62f77f94",
+ "sha256:534ef71d539ae97d4c3a4cf7d6f110f214b0e687e92f9cb9d2a3b0d3101289c8"
+ ],
+ "version": "==2.8.1"
+ },
+ "pyparsing": {
+ "hashes": [
+ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
+ "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
+ ],
+ "version": "==2.4.7"
+ },
+ "pytest": {
+ "hashes": [
+ "sha256:1cd09785c0a50f9af72220dd12aa78cfa49cbffc356c61eab009ca189e018a33",
+ "sha256:d010e24666435b39a4cf48740b039885642b6c273a3f77be3e7e03554d2806b7"
+ ],
+ "index": "pypi",
+ "version": "==6.1.0"
+ },
+ "toml": {
+ "hashes": [
+ "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b",
+ "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"
+ ],
+ "version": "==0.10.2"
+ },
+ "traitlets": {
+ "hashes": [
+ "sha256:178f4ce988f69189f7e523337a3e11d91c786ded9360174a3d9ca83e79bc5396",
+ "sha256:69ff3f9d5351f31a7ad80443c2674b7099df13cc41fc5fa6e2f6d3b0330b0426"
+ ],
+ "version": "==5.0.5"
+ },
+ "wcwidth": {
+ "hashes": [
+ "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784",
+ "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"
+ ],
+ "version": "==0.2.5"
+ }
+ }
+}
diff --git a/coredns/README.md b/coredns/README.md
new file mode 100644
index 0000000..18f5691
--- /dev/null
+++ b/coredns/README.md
@@ -0,0 +1,15 @@
+# CoreDNS Operator
+
+[CoreDNS][] is a flexible, plugin-based DNS server, and is the recommended
+solution for providing DNS to Kubernetes services within the cluster.
+This operator enables integration with [Charmed Kubernetes][] via a
+cross-model relation and allows for more customization than provided by the
+deployment of CoreDNS provided by default by Charmed Kubernetes.
+
+More information on using this operator with Charmed Kubernetes can be found
+[here](https://ubuntu.com/kubernetes/docs/cdk-addons#coredns), and bugs should
+be filed [here](https://bugs.launchpad.net/charmed-kubernetes).
+
+
+[CoreDNS]: https://coredns.io/
+[Charmed Kubernetes]: https://ubuntu.com/kubernetes/docs
diff --git a/coredns/charmcraft.yaml b/coredns/charmcraft.yaml
new file mode 100644
index 0000000..8ddda8d
--- /dev/null
+++ b/coredns/charmcraft.yaml
@@ -0,0 +1,6 @@
+type: charm
+parts:
+ charm:
+ build-packages: [git]
+ prime:
+ - ./files/*
diff --git a/coredns/config.yaml b/coredns/config.yaml
new file mode 100644
index 0000000..d502510
--- /dev/null
+++ b/coredns/config.yaml
@@ -0,0 +1,38 @@
+options:
+ domain:
+ description: The local domain for cluster DNS.
+ type: string
+ default: cluster.local
+ forward:
+ description: Where to forward non-cluster addresses.
+ type: string
+ default: /etc/resolv.conf
+ extra_servers:
+ description: Any additional servers to add to the Corefile.
+ type: string
+ default: ''
+ corefile:
+ description: >-
+ Configuration file to use for CoreDNS. This is interpreted as a Python
+ string.Template, which will be given the `domain` and `forward` configs as
+ its context.
+ type: string
+ default: |
+ .:53 {
+ errors
+ health {
+ lameduck 5s
+ }
+ ready
+ kubernetes ${domain} in-addr.arpa ip6.arpa {
+ fallthrough in-addr.arpa ip6.arpa
+ pods insecure
+ }
+ prometheus :9153
+ forward . ${forward}
+ cache 30
+ loop
+ reload
+ loadbalance
+ }
+ ${extra_servers}
diff --git a/coredns/dispatch b/coredns/dispatch
new file mode 100755
index 0000000..fe31c05
--- /dev/null
+++ b/coredns/dispatch
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py
diff --git a/coredns/hooks/install b/coredns/hooks/install
new file mode 100755
index 0000000..fe31c05
--- /dev/null
+++ b/coredns/hooks/install
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py
diff --git a/coredns/hooks/start b/coredns/hooks/start
new file mode 100755
index 0000000..fe31c05
--- /dev/null
+++ b/coredns/hooks/start
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py
diff --git a/coredns/hooks/upgrade-charm b/coredns/hooks/upgrade-charm
new file mode 100755
index 0000000..fe31c05
--- /dev/null
+++ b/coredns/hooks/upgrade-charm
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py
diff --git a/coredns/icon.svg b/coredns/icon.svg
new file mode 100644
index 0000000..a5bac8f
--- /dev/null
+++ b/coredns/icon.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/coredns/metadata.yaml b/coredns/metadata.yaml
new file mode 100644
index 0000000..6db0b27
--- /dev/null
+++ b/coredns/metadata.yaml
@@ -0,0 +1,21 @@
+name: coredns
+summary: CoreDNS
+maintainers:
+ - Cory Johns
+description: |
+ CoreDNS provides DNS resolution for Kubernetes.
+tags:
+ - networking
+series:
+ - kubernetes
+provides:
+ dns-provider:
+ interface: kube-dns
+requires: {}
+peers: {}
+resources:
+ coredns-image:
+ type: oci-image
+ description: 'CoreDNS image'
+ upstream-source: coredns/coredns:1.6.7
+min-juju-version: 2.8.2
diff --git a/coredns/requirements.txt b/coredns/requirements.txt
new file mode 100644
index 0000000..d0569c4
--- /dev/null
+++ b/coredns/requirements.txt
@@ -0,0 +1,4 @@
+-i https://pypi.org/simple
+git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
+ops==0.10.0
+pyyaml==5.3.1
diff --git a/coredns/revision b/coredns/revision
new file mode 100644
index 0000000..c227083
--- /dev/null
+++ b/coredns/revision
@@ -0,0 +1 @@
+0
\ No newline at end of file
diff --git a/coredns/src/charm.py b/coredns/src/charm.py
new file mode 100755
index 0000000..6e4128e
--- /dev/null
+++ b/coredns/src/charm.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python3
+
+import logging
+from string import Template
+
+from ops.charm import CharmBase
+from ops.main import main
+from ops.model import ActiveStatus, MaintenanceStatus, WaitingStatus
+
+from oci_image import OCIImageResource, OCIImageResourceError
+
+
+class CoreDNSCharm(CharmBase):
+ def __init__(self, *args):
+ super().__init__(*args)
+ if not self.unit.is_leader():
+ # We can't do anything useful when not the leader, so do nothing.
+ self.model.unit.status = WaitingStatus('Waiting for leadership')
+ return
+ self.log = logging.getLogger(__name__)
+ self.image = OCIImageResource(self, 'coredns-image')
+ for event in [self.on.install,
+ self.on.leader_elected,
+ self.on.upgrade_charm,
+ self.on.config_changed]:
+ self.framework.observe(event, self.main)
+ self.framework.observe(self.on.dns_provider_relation_joined, self.provide_dns)
+
+ def main(self, event):
+ try:
+ image_details = self.image.fetch()
+ except OCIImageResourceError as e:
+ self.model.unit.status = e.status
+ return
+
+ self.model.unit.status = MaintenanceStatus('Setting pod spec')
+
+ corefile = Template(self.model.config['corefile'])
+ corefile = corefile.safe_substitute(self.model.config)
+
+ # Adapted from coredns.yaml.sed in https://github.com/coredns/ at 75a1cad
+ self.model.pod.set_spec({
+ 'version': 3,
+ 'service': {
+ 'updateStrategy': {
+ 'type': 'RollingUpdate',
+ 'rollingUpdate': {'maxUnavailable': 1},
+ },
+ 'annotations': {
+ 'prometheus.io/port': "9153",
+ 'prometheus.io/scrape': "true",
+ },
+ },
+ # Dropped by a regression; see:
+ # https://bugs.launchpad.net/juju/+bug/1895886
+ # 'priorityClassName': 'system-cluster-critical',
+ 'containers': [{
+ 'name': 'coredns',
+ 'imageDetails': image_details,
+ 'imagePullPolicy': 'IfNotPresent',
+ 'args': ['-conf', '/etc/coredns/Corefile'],
+ 'volumeConfig': [{
+ 'name': 'config-volume',
+ 'mountPath': '/etc/coredns',
+ # Not supported
+ # 'readOnly': True,
+ 'files': [{
+ 'path': 'Corefile',
+ 'mode': 0o444,
+ 'content': corefile,
+ }],
+ }],
+ 'ports': [
+ {
+ 'name': 'dns',
+ 'containerPort': 53,
+ 'protocol': 'UDP',
+ },
+ {
+ 'name': 'dns-tcp',
+ 'containerPort': 53,
+ 'protocol': 'TCP',
+ },
+ {
+ 'name': 'metrics',
+ 'containerPort': 9153,
+ 'protocol': 'TCP',
+ },
+ ],
+ # Can't be specified by the charm yet; see:
+ # https://bugs.launchpad.net/juju/+bug/1893123
+ # 'resources': {
+ # 'limits': {'memory': '170Mi'},
+ # 'requests': {'cpu': '100m', 'memory': '70Mi'},
+ # },
+ 'kubernetes': {
+ 'securityContext': {
+ 'allowPrivilegeEscalation': False,
+ 'capabilities': {
+ 'add': ['NET_BIND_SERVICE'],
+ 'drop': ['all'],
+ },
+ 'readOnlyRootFilesystem': True,
+ },
+ 'livenessProbe': {
+ 'httpGet': {
+ 'path': '/health',
+ 'port': 8080,
+ 'scheme': 'HTTP',
+ },
+ 'initialDelaySeconds': 60,
+ 'timeoutSeconds': 5,
+ 'successThreshold': 1,
+ 'failureThreshold': 5,
+ },
+ 'readinessProbe': {
+ 'httpGet': {
+ 'path': '/ready',
+ 'port': 8181,
+ 'scheme': 'HTTP',
+ },
+ },
+ },
+ }],
+ 'serviceAccount': {
+ 'roles': [{
+ 'global': True,
+ 'rules': [
+ {
+ 'apigroups': ['discovery.k8s.io'],
+ 'resources': [
+ 'endpointslices',
+ ],
+ 'verbs': ['list', 'watch'],
+ },
+ {
+ 'apigroups': [''],
+ 'resources': [
+ 'endpoints',
+ 'services',
+ 'pods',
+ 'namespaces',
+ ],
+ 'verbs': ['list', 'watch'],
+ },
+ {
+ 'apigroups': [''],
+ 'resources': ['nodes'],
+ 'verbs': ['get'],
+ },
+ ],
+ }],
+ },
+ 'kubernetesResources': {
+ 'pod': {
+ 'dnsPolicy': 'Default',
+ # Not yet supported by Juju; see:
+ # https://bugs.launchpad.net/juju/+bug/1895887
+ # 'tolerations': [{
+ # 'key': 'CriticalAddonsOnly',
+ # 'operator': 'Exists',
+ # }],
+ # 'affinity': {
+ # 'podAntiAffinity': {
+ # 'preferredDuringScheduling' +
+ # 'IgnoredDuringExecution': [{
+ # 'weight': 100,
+ # 'podAffinityTerm': {
+ # 'labelSelector': {
+ # 'matchExpressions': [{
+ # 'key': 'k8s-app',
+ # 'operator': 'In',
+ # 'values': ["kube-dns"],
+ # }],
+ # },
+ # 'topologyKey': 'kubernetes.io/hostname',
+ # },
+ # }],
+ # },
+ # },
+ # Can be done by the operator via placement (--to), but can't
+ # be specified by the charm yet, per same bug as above.
+ # 'nodeSelector': {
+ # 'kubernetes.io/os': 'linux',
+ # },
+ }
+ }
+ })
+ self.model.unit.status = ActiveStatus()
+
+ def provide_dns(self, event):
+ provided_data = event.relation.data[self.unit]
+ if not provided_data.get('ingress-address'):
+ event.defer()
+ return
+ provided_data.update({
+ 'domain': self.model.config['domain'],
+ 'sdn-ip': str(provided_data['ingress-address']),
+ 'port': "53",
+ })
+
+
+if __name__ == "__main__":
+ main(CoreDNSCharm)
diff --git a/coredns/tests/func/test_deploy.py b/coredns/tests/func/test_deploy.py
new file mode 100644
index 0000000..1497cc3
--- /dev/null
+++ b/coredns/tests/func/test_deploy.py
@@ -0,0 +1,51 @@
+import subprocess
+from pathlib import Path
+from time import sleep
+
+import pytest
+
+
+CHARM_DIR = Path(__file__).parent.parent.parent.resolve()
+SPEC_FILE = Path(__file__).parent / 'validate-dns-spec.yaml'
+
+
+def test_charm():
+ model = run('juju', 'switch').split('/')[-1]
+ coredns_ready = run(
+ 'kubectl', 'get', 'pod', '-n', model, '-l', 'juju-app=coredns',
+ '-o', 'jsonpath={..status.containerStatuses[0].ready}')
+ assert coredns_ready == 'true'
+ run('kubectl', 'apply', '-f', SPEC_FILE)
+ try:
+ wait_for_output('kubectl', 'get', 'pod/validate-dns',
+ expected='Running')
+ for name in ("www.ubuntu.com", "kubernetes.default.svc.cluster.local"):
+ run('kubectl', 'exec', 'validate-dns', '--', 'nslookup', name)
+ finally:
+ run('kubectl', 'delete', '-f', SPEC_FILE)
+
+
+def run(*args):
+ args = [str(a) for a in args]
+ try:
+ res = subprocess.run(args,
+ check=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ return res.stdout.decode('utf8').strip()
+ except subprocess.CalledProcessError as e:
+ pytest.fail(f'Command {args} failed ({e.returncode}):\n'
+ f'stdout:\n{e.stdout.decode("utf8")}\n'
+ f'stderr:\n{e.stderr.decode("utf8")}\n')
+
+
+def wait_for_output(*args, expected='', timeout=3 * 60):
+ args = [str(a) for a in args]
+ output = None
+ for attempt in range(int(timeout / 5)):
+ output = run(*args)
+ if expected in output:
+ break
+ sleep(5)
+ else:
+ pytest.fail(f'Timed out waiting for "{expected}" from {args}:\n{output}')
diff --git a/coredns/tests/func/validate-dns-spec.yaml b/coredns/tests/func/validate-dns-spec.yaml
new file mode 100644
index 0000000..cfe5d27
--- /dev/null
+++ b/coredns/tests/func/validate-dns-spec.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: validate-dns
+spec:
+ containers:
+ - name: busybox
+ image: busybox
+ imagePullPolicy: IfNotPresent
+ args: ['sleep', '3600']
+ restartPolicy: Always
diff --git a/coredns/tests/unit/test_charm.py b/coredns/tests/unit/test_charm.py
new file mode 100644
index 0000000..f0f95fd
--- /dev/null
+++ b/coredns/tests/unit/test_charm.py
@@ -0,0 +1,42 @@
+import pytest
+
+from ops.model import ActiveStatus, BlockedStatus, WaitingStatus
+from ops.testing import Harness
+import yaml
+
+from charm import CoreDNSCharm
+
+
+if yaml.__with_libyaml__:
+ _DefaultDumper = yaml.CSafeDumper
+else:
+ _DefaultDumper = yaml.SafeDumper
+
+
+@pytest.fixture
+def harness():
+ return Harness(CoreDNSCharm)
+
+
+def test_not_leader(harness):
+ harness.begin()
+ assert isinstance(harness.charm.model.unit.status, WaitingStatus)
+
+
+def test_missing_image(harness):
+ harness.set_leader(True)
+ harness.begin_with_initial_hooks()
+ assert isinstance(harness.charm.model.unit.status, BlockedStatus)
+
+
+def test_main(harness):
+ harness.set_leader(True)
+ harness.add_oci_resource('coredns-image', {
+ 'registrypath': 'coredns/coredns:1.6.7',
+ 'username': '',
+ 'password': '',
+ })
+ harness.begin_with_initial_hooks()
+ assert isinstance(harness.charm.model.unit.status, ActiveStatus)
+ # confirm that we can serialize the pod spec
+ yaml.dump(harness.get_pod_spec(), Dumper=_DefaultDumper)
diff --git a/coredns/tox.ini b/coredns/tox.ini
new file mode 100644
index 0000000..a3006a7
--- /dev/null
+++ b/coredns/tox.ini
@@ -0,0 +1,27 @@
+[flake8]
+max-line-length = 88
+
+[tox]
+skipsdist = True
+envlist = lint,unit
+
+[testenv]
+basepython = python3
+setenv =
+ PYTHONPATH={toxinidir}/src
+ PYTHONBREAKPOINT=ipdb.set_trace
+passenv = HOME
+deps = pipenv
+commands =
+ pipenv install --dev --ignore-pipfile
+ pipenv run pytest --tb native -s {posargs:tests/unit}
+
+[testenv:lint]
+commands =
+ pipenv install --dev --ignore-pipfile
+ pipenv run flake8 {toxinidir}/src {toxinidir}/tests
+
+[testenv:func]
+commands =
+ pipenv install --dev --ignore-pipfile
+ pipenv run pytest --tb native -s {posargs:tests/func}
diff --git a/coredns/venv/PyYAML-5.3.1.dist-info/INSTALLER b/coredns/venv/PyYAML-5.3.1.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/coredns/venv/PyYAML-5.3.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/coredns/venv/PyYAML-5.3.1.dist-info/LICENSE b/coredns/venv/PyYAML-5.3.1.dist-info/LICENSE
new file mode 100644
index 0000000..3d82c28
--- /dev/null
+++ b/coredns/venv/PyYAML-5.3.1.dist-info/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2017-2020 Ingy döt Net
+Copyright (c) 2006-2016 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/coredns/venv/PyYAML-5.3.1.dist-info/METADATA b/coredns/venv/PyYAML-5.3.1.dist-info/METADATA
new file mode 100644
index 0000000..a70dd20
--- /dev/null
+++ b/coredns/venv/PyYAML-5.3.1.dist-info/METADATA
@@ -0,0 +1,41 @@
+Metadata-Version: 2.1
+Name: PyYAML
+Version: 5.3.1
+Summary: YAML parser and emitter for Python
+Home-page: https://github.com/yaml/pyyaml
+Author: Kirill Simonov
+Author-email: xi@resolvent.net
+License: MIT
+Download-URL: https://pypi.org/project/PyYAML/
+Platform: Any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Cython
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+
+YAML is a data serialization format designed for human readability
+and interaction with scripting languages. PyYAML is a YAML parser
+and emitter for Python.
+
+PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
+support, capable extension API, and sensible error messages. PyYAML
+supports standard YAML tags and provides Python-specific tags that
+allow to represent an arbitrary Python object.
+
+PyYAML is applicable for a broad range of tasks from complex
+configuration files to object serialization and persistence.
+
diff --git a/coredns/venv/PyYAML-5.3.1.dist-info/RECORD b/coredns/venv/PyYAML-5.3.1.dist-info/RECORD
new file mode 100644
index 0000000..a01343d
--- /dev/null
+++ b/coredns/venv/PyYAML-5.3.1.dist-info/RECORD
@@ -0,0 +1,41 @@
+PyYAML-5.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+PyYAML-5.3.1.dist-info/LICENSE,sha256=xAESRJ8lS5dTBFklJIMT6ScO-jbSJrItgtTMbEPFfyk,1101
+PyYAML-5.3.1.dist-info/METADATA,sha256=xTsZFjd8T4M-5rC2M3BHgx_KTTpEPy5vFDIXrbzRXPQ,1758
+PyYAML-5.3.1.dist-info/RECORD,,
+PyYAML-5.3.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+PyYAML-5.3.1.dist-info/WHEEL,sha256=hzx2-39jWfx-No5BPGm7YN661ryRYBuLP8gZdbxDo8I,103
+PyYAML-5.3.1.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11
+yaml/__init__.py,sha256=XFUNbKTg4afAd0BETjGQ1mKQ97_g5jbE1C0WoKc74dc,13170
+yaml/__pycache__/__init__.cpython-38.pyc,,
+yaml/__pycache__/composer.cpython-38.pyc,,
+yaml/__pycache__/constructor.cpython-38.pyc,,
+yaml/__pycache__/cyaml.cpython-38.pyc,,
+yaml/__pycache__/dumper.cpython-38.pyc,,
+yaml/__pycache__/emitter.cpython-38.pyc,,
+yaml/__pycache__/error.cpython-38.pyc,,
+yaml/__pycache__/events.cpython-38.pyc,,
+yaml/__pycache__/loader.cpython-38.pyc,,
+yaml/__pycache__/nodes.cpython-38.pyc,,
+yaml/__pycache__/parser.cpython-38.pyc,,
+yaml/__pycache__/reader.cpython-38.pyc,,
+yaml/__pycache__/representer.cpython-38.pyc,,
+yaml/__pycache__/resolver.cpython-38.pyc,,
+yaml/__pycache__/scanner.cpython-38.pyc,,
+yaml/__pycache__/serializer.cpython-38.pyc,,
+yaml/__pycache__/tokens.cpython-38.pyc,,
+yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883
+yaml/constructor.py,sha256=O3Uaf0_J_5GQBoeI9ZNhpJAhtdagr_X2HzDgGbZOMnw,28627
+yaml/cyaml.py,sha256=LiMkvchNonfoy1F6ec9L2BiUz3r0bwF4hympASJX1Ic,3846
+yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837
+yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006
+yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533
+yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445
+yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061
+yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440
+yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495
+yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794
+yaml/representer.py,sha256=82UM3ZxUQKqsKAF4ltWOxCS6jGPIFtXpGs7mvqyv4Xs,14184
+yaml/resolver.py,sha256=DJCjpQr8YQCEYYjKEYqTl0GrsZil2H4aFOI9b0Oe-U4,8970
+yaml/scanner.py,sha256=KeQIKGNlSyPE8QDwionHxy9CgbqE5teJEz05FR9-nAg,51277
+yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165
+yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573
diff --git a/coredns/venv/PyYAML-5.3.1.dist-info/REQUESTED b/coredns/venv/PyYAML-5.3.1.dist-info/REQUESTED
new file mode 100644
index 0000000..e69de29
diff --git a/coredns/venv/PyYAML-5.3.1.dist-info/WHEEL b/coredns/venv/PyYAML-5.3.1.dist-info/WHEEL
new file mode 100644
index 0000000..bb3795f
--- /dev/null
+++ b/coredns/venv/PyYAML-5.3.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: false
+Tag: cp38-cp38-linux_x86_64
+
diff --git a/coredns/venv/PyYAML-5.3.1.dist-info/top_level.txt b/coredns/venv/PyYAML-5.3.1.dist-info/top_level.txt
new file mode 100644
index 0000000..e6475e9
--- /dev/null
+++ b/coredns/venv/PyYAML-5.3.1.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+_yaml
+yaml
diff --git a/coredns/venv/oci_image-1.0.0.dist-info/INSTALLER b/coredns/venv/oci_image-1.0.0.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/coredns/venv/oci_image-1.0.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/coredns/venv/oci_image-1.0.0.dist-info/METADATA b/coredns/venv/oci_image-1.0.0.dist-info/METADATA
new file mode 100644
index 0000000..b21b997
--- /dev/null
+++ b/coredns/venv/oci_image-1.0.0.dist-info/METADATA
@@ -0,0 +1,63 @@
+Metadata-Version: 2.1
+Name: oci-image
+Version: 1.0.0
+Summary: Helper for dealing with OCI Image resources in the charm operator framework
+Home-page: https://github.com/juju-solutions/resource-oci-image
+Author: Cory Johns
+Author-email: johnsca@gmail.com
+License: Apache License 2.0
+Platform: UNKNOWN
+
+# OCI Image Resource helper
+
+This is a helper for working with OCI image resources in the charm operator
+framework.
+
+## Installation
+
+Add it to your `requirements.txt`. Since it's not in PyPI, you'll need to use
+the GitHub archive URL (or `git+` URL, if you want to pin to a specific commit):
+
+```
+https://github.com/juju-solutions/resource-oci-image/archive/master.zip
+```
+
+## Usage
+
+The `OCIImageResource` class will wrap the framework resource for the given
+resource name, and calling `fetch` on it will either return the image info
+or raise an `OCIImageResourceError` if it can't fetch or parse the image
+info. The exception will have a `status` attribute you can use directly,
+or a `status_message` attribute if you just want that.
+
+Example usage:
+
+```python
+from ops.charm import CharmBase
+from ops.main import main
+from oci_image import OCIImageResource, OCIImageResourceError
+
+class MyCharm(CharmBase):
+ def __init__(self, *args):
+ super().__init__(*args)
+ self.image = OCIImageResource(self, 'resource-name')
+ self.framework.observe(self.on.start, self.on_start)
+
+ def on_start(self, event):
+ try:
+ image_info = self.image.fetch()
+ except OCIImageResourceError as e:
+ self.model.unit.status = e.status
+ event.defer()
+ return
+
+ self.model.pod.set_spec({'containers': [{
+ 'name': 'my-charm',
+ 'imageDetails': image_info,
+ }]})
+
+if __name__ == "__main__":
+ main(MyCharm)
+```
+
+
diff --git a/coredns/venv/oci_image-1.0.0.dist-info/RECORD b/coredns/venv/oci_image-1.0.0.dist-info/RECORD
new file mode 100644
index 0000000..487344e
--- /dev/null
+++ b/coredns/venv/oci_image-1.0.0.dist-info/RECORD
@@ -0,0 +1,9 @@
+__pycache__/oci_image.cpython-38.pyc,,
+oci_image-1.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+oci_image-1.0.0.dist-info/METADATA,sha256=QIpPa4JcSPa_Ci0n-DaCNp4PkKovZudFW8FnpnauJnQ,1808
+oci_image-1.0.0.dist-info/RECORD,,
+oci_image-1.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+oci_image-1.0.0.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92
+oci_image-1.0.0.dist-info/direct_url.json,sha256=sUsaIeKXs7oqCE-NdmqTsNJ8rmr97YMi0wuRNVObj0Y,215
+oci_image-1.0.0.dist-info/top_level.txt,sha256=M4dLaObLx7irI4EO-A4_VJP_b-A6dDD7hB5QyVKdHOY,10
+oci_image.py,sha256=c75VR2vSmOp9pPTP2cnsxo23CqhhFbRtnIOtMjzDyXY,1794
diff --git a/coredns/venv/oci_image-1.0.0.dist-info/REQUESTED b/coredns/venv/oci_image-1.0.0.dist-info/REQUESTED
new file mode 100644
index 0000000..e69de29
diff --git a/coredns/venv/oci_image-1.0.0.dist-info/WHEEL b/coredns/venv/oci_image-1.0.0.dist-info/WHEEL
new file mode 100644
index 0000000..385faab
--- /dev/null
+++ b/coredns/venv/oci_image-1.0.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/coredns/venv/oci_image-1.0.0.dist-info/direct_url.json b/coredns/venv/oci_image-1.0.0.dist-info/direct_url.json
new file mode 100644
index 0000000..56c97aa
--- /dev/null
+++ b/coredns/venv/oci_image-1.0.0.dist-info/direct_url.json
@@ -0,0 +1 @@
+{"url": "https://github.com/juju-solutions/resource-oci-image/", "vcs_info": {"commit_id": "c5778285d332edf3d9a538f9d0c06154b7ec1b0b", "requested_revision": "c5778285d332edf3d9a538f9d0c06154b7ec1b0b", "vcs": "git"}}
\ No newline at end of file
diff --git a/coredns/venv/oci_image-1.0.0.dist-info/top_level.txt b/coredns/venv/oci_image-1.0.0.dist-info/top_level.txt
new file mode 100644
index 0000000..cd69623
--- /dev/null
+++ b/coredns/venv/oci_image-1.0.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+oci_image
diff --git a/coredns/venv/oci_image.py b/coredns/venv/oci_image.py
new file mode 100644
index 0000000..f4d3818
--- /dev/null
+++ b/coredns/venv/oci_image.py
@@ -0,0 +1,53 @@
+from pathlib import Path
+
+import yaml
+from ops.framework import Object
+from ops.model import BlockedStatus, ModelError
+
+
+class OCIImageResource(Object):
+ def __init__(self, charm, resource_name):
+ super().__init__(charm, resource_name)
+ self.resource_name = resource_name
+
+ def fetch(self):
+ try:
+ resource_path = self.model.resources.fetch(self.resource_name)
+ except ModelError as e:
+ raise MissingResourceError(self.resource_name) from e
+ if not resource_path.exists():
+ raise MissingResourceError(self.resource_name)
+ resource_text = Path(resource_path).read_text()
+ if not resource_text:
+ raise MissingResourceError(self.resource_name)
+ try:
+ resource_data = yaml.safe_load(resource_text)
+ except yaml.YAMLError as e:
+ raise InvalidResourceError(self.resource_name) from e
+ else:
+ # Translate the data from the format used by the charm store to the
+ # format used by the Juju K8s pod spec, since that is how this is
+ # typically used.
+ return {
+ 'imagePath': resource_data['registrypath'],
+ 'username': resource_data['username'],
+ 'password': resource_data['password'],
+ }
+
+
+class OCIImageResourceError(ModelError):
+ status_type = BlockedStatus
+ status_message = 'Resource error'
+
+ def __init__(self, resource_name):
+ super().__init__(resource_name)
+ self.status = self.status_type(
+ f'{self.status_message}: {resource_name}')
+
+
+class MissingResourceError(OCIImageResourceError):
+ status_message = 'Missing resource'
+
+
+class InvalidResourceError(OCIImageResourceError):
+ status_message = 'Invalid resource'
diff --git a/coredns/venv/ops-0.10.0.dist-info/INSTALLER b/coredns/venv/ops-0.10.0.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/coredns/venv/ops-0.10.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/coredns/venv/ops-0.10.0.dist-info/LICENSE.txt b/coredns/venv/ops-0.10.0.dist-info/LICENSE.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/coredns/venv/ops-0.10.0.dist-info/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/coredns/venv/ops-0.10.0.dist-info/METADATA b/coredns/venv/ops-0.10.0.dist-info/METADATA
new file mode 100644
index 0000000..a8a0e8e
--- /dev/null
+++ b/coredns/venv/ops-0.10.0.dist-info/METADATA
@@ -0,0 +1,167 @@
+Metadata-Version: 2.1
+Name: ops
+Version: 0.10.0
+Summary: The Python library behind great charms
+Home-page: https://github.com/canonical/operator
+Author: The Charmcraft team at Canonical Ltd.
+Author-email: charmcraft@lists.launchpad.net
+License: Apache-2.0
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: System Administrators
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: POSIX :: Linux
+Requires-Python: >=3.5
+Description-Content-Type: text/markdown
+Requires-Dist: PyYAML
+
+# The Operator Framework
+
+The Operator Framework provides a simple, lightweight, and powerful way of
+writing Juju charms, the best way to encapsulate operational experience in code.
+
+The framework will help you to:
+
+* model the integration of your services
+* manage the lifecycle of your application
+* create reusable and scalable components
+* keep your code simple and readable
+
+## Getting Started
+
+Charms written using the operator framework are just Python code. The intention
+is for it to feel very natural for somebody used to coding in Python, and
+reasonably easy to pick up for somebody who might be a domain expert but not
+necessarily a pythonista themselves.
+
+The dependencies of the operator framework are kept as minimal as possible;
+currently that's Python 3.5 or greater, and `PyYAML` (both are included by
+default in Ubuntu's cloud images from 16.04 on).
+
+
+## A Quick Introduction
+
+Operator framework charms are just Python code. The entry point to your charm is
+a particular Python file. It could be anything that makes sense to your project,
+but let's assume this is `src/charm.py`. This file must be executable (and it
+must have the appropriate shebang line).
+
+You need the usual `metadata.yaml` and (probably) `config.yaml` files, and a
+`requirements.txt` for any Python dependencies. In other words, your project
+might look like this:
+
+```
+my-charm
+├── config.yaml
+├── metadata.yaml
+├── requirements.txt
+└── src/
+ └── charm.py
+```
+
+`src/charm.py` here is the entry point to your charm code. At a minimum, it
+needs to define a subclass of `CharmBase` and pass that into the framework's
+`main` function:
+
+```python
+from ops.charm import CharmBase
+from ops.main import main
+
+class MyCharm(CharmBase):
+ def __init__(self, *args):
+ super().__init__(*args)
+ self.framework.observe(self.on.start, self.on_start)
+
+ def on_start(self, event):
+ # Handle the start event here.
+
+if __name__ == "__main__":
+ main(MyCharm)
+```
+
+That should be enough for you to be able to run
+
+```
+$ charmcraft build
+Done, charm left in 'my-charm.charm'
+$ juju deploy ./my-charm.charm
+```
+
+> 🛈 More information on [`charmcraft`](https://pypi.org/project/charmcraft/) can
+> also be found on its [github page](https://github.com/canonical/charmcraft).
+
+Happy charming!
+
+## Testing your charms
+
+The operator framework provides a testing harness, so that you can test that
+your charm does the right thing when presented with different scenarios, without
+having to have a full deployment to do so. `pydoc3 ops.testing` has the details
+for that, including this example:
+
+```python
+harness = Harness(MyCharm)
+# Do initial setup here
+relation_id = harness.add_relation('db', 'postgresql')
+# Now instantiate the charm to see events as the model changes
+harness.begin()
+harness.add_relation_unit(relation_id, 'postgresql/0')
+harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+# Check that charm has properly handled the relation_joined event for postgresql/0
+self.assertEqual(harness.charm. ...)
+```
+
+## Talk to us
+
+If you need help, have ideas, or would just like to chat with us, reach out on
+IRC: we're in [#smooth-operator] on freenode (or try the [webchat]).
+
+We also pay attention to Juju's [discourse]; most discussion at this
+stage is on IRC, however.
+
+You can also deep dive into the [API docs] if that's your thing.
+
+[webchat]: https://webchat.freenode.net/#smooth-operator
+[#smooth-operator]: irc://chat.freenode.net/%23smooth-operator
+[discourse]: https://discourse.juju.is/c/charming
+[API docs]: https://ops.rtfd.io/
+
+## Operator Framework development
+
+If you want to work in the framework *itself* you will need Python >= 3.5 and
+the dependencies declared in `requirements-dev.txt` installed in your system.
+Or you can use a virtualenv:
+
+ virtualenv --python=python3 env
+ source env/bin/activate
+ pip install -r requirements-dev.txt
+
+Then you can try `./run_tests`, it should all go green.
+
+If you see the error `yaml does not have libyaml extensions, using slower pure
+Python yaml`, you need to reinstall pyyaml with the correct extensions:
+
+ apt-get install libyaml-dev
+ pip install --force-reinstall --no-cache-dir pyyaml
+
+If you want to build the documentation you'll need the requirements from
+`docs/requirements.txt`, or in your virtualenv
+
+ pip install -r docs/requirements.txt
+
+and then you can run `./build_docs`.
+
+
diff --git a/coredns/venv/ops-0.10.0.dist-info/RECORD b/coredns/venv/ops-0.10.0.dist-info/RECORD
new file mode 100644
index 0000000..fe067e0
--- /dev/null
+++ b/coredns/venv/ops-0.10.0.dist-info/RECORD
@@ -0,0 +1,29 @@
+ops-0.10.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ops-0.10.0.dist-info/LICENSE.txt,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ops-0.10.0.dist-info/METADATA,sha256=AI7mL-PWkkYQ4f_NCulM5VcIQrMskxPIYp108DZrOcA,5577
+ops-0.10.0.dist-info/RECORD,,
+ops-0.10.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ops-0.10.0.dist-info/WHEEL,sha256=g4nMs7d-Xl9-xC9XovUrsDHGXt-FT0E17Yqo92DEfvY,92
+ops-0.10.0.dist-info/top_level.txt,sha256=enC05wWafSg8iDKIvj3gvtAtEP2kYCyN5Gmd689q-_I,4
+ops/__init__.py,sha256=WaHb0dfp1KEe6jFV8Pm_mcdJ3ModiWujnQ6xLjNzPNQ,819
+ops/__pycache__/__init__.cpython-38.pyc,,
+ops/__pycache__/charm.cpython-38.pyc,,
+ops/__pycache__/framework.cpython-38.pyc,,
+ops/__pycache__/jujuversion.cpython-38.pyc,,
+ops/__pycache__/log.cpython-38.pyc,,
+ops/__pycache__/main.cpython-38.pyc,,
+ops/__pycache__/model.cpython-38.pyc,,
+ops/__pycache__/storage.cpython-38.pyc,,
+ops/__pycache__/testing.cpython-38.pyc,,
+ops/__pycache__/version.cpython-38.pyc,,
+ops/charm.py,sha256=i1fcd-pMzRV6f9AfMy0S_Jr_rZso3s9Xi-5GZWEs3nc,22512
+ops/framework.py,sha256=T9PWR4FXBI6Yd3XGwwNO51rJlyMUeO5vPdd4GmEjdzY,38298
+ops/jujuversion.py,sha256=T5KafqBHbQiHJ1OVoVbseUnZz7og4gPUz7CayXcHddk,3845
+ops/lib/__init__.py,sha256=7i2EN1jCUkVZT5NCi_q_ilBBzpCkWaW9mnBc3vBYCns,9188
+ops/lib/__pycache__/__init__.cpython-38.pyc,,
+ops/log.py,sha256=7jNn71--WpFngrZIwnJoaTRiaVrNVkLHK2enVu_VRA8,1860
+ops/main.py,sha256=TcOAS3VE1nMt-jF9uUzoyDWGTNl-OoAkS7XqQraWH3c,15375
+ops/model.py,sha256=katD2gQc35VArVMfGdI2AjPobFegQjShmDqVCKeLXZc,46796
+ops/storage.py,sha256=dal0athxe35cnWE8ol9N7nEUQDMcphDgRrQrmyGQDoA,11859
+ops/testing.py,sha256=HRjgq2ikVijGRMjVN2g-HJr8oQJ0ul8QEUUZv9D2_go,34727
+ops/version.py,sha256=6wsm0bsNX30wL9YmCZai2X5ISKQZYBIFJAbgmBn2Ri4,47
diff --git a/coredns/venv/ops-0.10.0.dist-info/REQUESTED b/coredns/venv/ops-0.10.0.dist-info/REQUESTED
new file mode 100644
index 0000000..e69de29
diff --git a/coredns/venv/ops-0.10.0.dist-info/WHEEL b/coredns/venv/ops-0.10.0.dist-info/WHEEL
new file mode 100644
index 0000000..b552003
--- /dev/null
+++ b/coredns/venv/ops-0.10.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/coredns/venv/ops-0.10.0.dist-info/top_level.txt b/coredns/venv/ops-0.10.0.dist-info/top_level.txt
new file mode 100644
index 0000000..2d81d3b
--- /dev/null
+++ b/coredns/venv/ops-0.10.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+ops
diff --git a/coredns/venv/ops/__init__.py b/coredns/venv/ops/__init__.py
new file mode 100644
index 0000000..f17b296
--- /dev/null
+++ b/coredns/venv/ops/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""The Operator Framework."""
+
+from .version import version as __version__ # noqa: F401 (imported but unused)
+
+# Import here the bare minimum to break the circular import between modules
+from . import charm # noqa: F401 (imported but unused)
diff --git a/coredns/venv/ops/charm.py b/coredns/venv/ops/charm.py
new file mode 100644
index 0000000..d898de8
--- /dev/null
+++ b/coredns/venv/ops/charm.py
@@ -0,0 +1,575 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum
+import os
+import pathlib
+import typing
+
+import yaml
+
+from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents
+from ops import model
+
+
+def _loadYaml(source):
+ if yaml.__with_libyaml__:
+ return yaml.load(source, Loader=yaml.CSafeLoader)
+ return yaml.load(source, Loader=yaml.SafeLoader)
+
+
+class HookEvent(EventBase):
+ """A base class for events that trigger because of a Juju hook firing."""
+
+
+class ActionEvent(EventBase):
+ """A base class for events that trigger when a user asks for an Action to be run.
+
+ To read the parameters for the action, see the instance variable `params`.
+ To respond with the result of the action, call `set_results`. To add progress
+ messages that are visible as the action is progressing use `log`.
+
+ :ivar params: The parameters passed to the action (read by action-get)
+ """
+
+ def defer(self):
+ """Action events are not deferable like other events.
+
+ This is because an action runs synchronously and the user is waiting for the result.
+ """
+ raise RuntimeError('cannot defer action events')
+
+ def restore(self, snapshot: dict) -> None:
+ """Used by the operator framework to record the action.
+
+ Not meant to be called directly by Charm code.
+ """
+ env_action_name = os.environ.get('JUJU_ACTION_NAME')
+ event_action_name = self.handle.kind[:-len('_action')].replace('_', '-')
+ if event_action_name != env_action_name:
+ # This could only happen if the dev manually emits the action, or from a bug.
+ raise RuntimeError('action event kind does not match current action')
+ # Params are loaded at restore rather than __init__ because
+ # the model is not available in __init__.
+ self.params = self.framework.model._backend.action_get()
+
+ def set_results(self, results: typing.Mapping) -> None:
+ """Report the result of the action.
+
+ Args:
+ results: The result of the action as a Dict
+ """
+ self.framework.model._backend.action_set(results)
+
+ def log(self, message: str) -> None:
+ """Send a message that a user will see while the action is running.
+
+ Args:
+ message: The message for the user.
+ """
+ self.framework.model._backend.action_log(message)
+
+ def fail(self, message: str = '') -> None:
+ """Report that this action has failed.
+
+ Args:
+ message: Optional message to record why it has failed.
+ """
+ self.framework.model._backend.action_fail(message)
+
+
+class InstallEvent(HookEvent):
+ """Represents the `install` hook from Juju."""
+
+
+class StartEvent(HookEvent):
+ """Represents the `start` hook from Juju."""
+
+
+class StopEvent(HookEvent):
+ """Represents the `stop` hook from Juju."""
+
+
+class RemoveEvent(HookEvent):
+ """Represents the `remove` hook from Juju. """
+
+
+class ConfigChangedEvent(HookEvent):
+ """Represents the `config-changed` hook from Juju."""
+
+
+class UpdateStatusEvent(HookEvent):
+ """Represents the `update-status` hook from Juju."""
+
+
+class UpgradeCharmEvent(HookEvent):
+ """Represents the `upgrade-charm` hook from Juju.
+
+ This will be triggered when a user has run `juju upgrade-charm`. It is run after Juju
+ has unpacked the upgraded charm code, and so this event will be handled with new code.
+ """
+
+
+class PreSeriesUpgradeEvent(HookEvent):
+ """Represents the `pre-series-upgrade` hook from Juju.
+
+ This happens when a user has run `juju upgrade-series MACHINE prepare` and
+ will fire for each unit that is running on the machine, telling them that
+ the user is preparing to upgrade the Machine's series (eg trusty->bionic).
+ The charm should take actions to prepare for the upgrade (a database charm
+ would want to write out a version-independent dump of the database, so that
+ when a new version of the database is available in a new series, it can be
+ used.)
+ Once all units on a machine have run `pre-series-upgrade`, the user will
+ initiate the steps to actually upgrade the machine (eg `do-release-upgrade`).
+ When the upgrade has been completed, the :class:`PostSeriesUpgradeEvent` will fire.
+ """
+
+
+class PostSeriesUpgradeEvent(HookEvent):
+ """Represents the `post-series-upgrade` hook from Juju.
+
+ This is run after the user has done a distribution upgrade (or rolled back
+ and kept the same series). It is called in response to
+ `juju upgrade-series MACHINE complete`. Charms are expected to do whatever
+ steps are necessary to reconfigure their applications for the new series.
+ """
+
+
+class LeaderElectedEvent(HookEvent):
+ """Represents the `leader-elected` hook from Juju.
+
+ Juju will trigger this when a new lead unit is chosen for a given application.
+ This represents the leader of the charm information (not necessarily the primary
+ of a running application). The main utility is that charm authors can know
+ that only one unit will be a leader at any given time, so they can do
+ configuration, etc, that would otherwise require coordination between units.
+ (eg, selecting a password for a new relation)
+ """
+
+
+class LeaderSettingsChangedEvent(HookEvent):
+ """Represents the `leader-settings-changed` hook from Juju.
+
+ Deprecated. This represents when a lead unit would call `leader-set` to inform
+ the other units of an application that they have new information to handle.
+ This has been deprecated in favor of using a Peer relation, and having the
+ leader set a value in the Application data bag for that peer relation.
+ (see :class:`RelationChangedEvent`).
+ """
+
+
+class CollectMetricsEvent(HookEvent):
+ """Represents the `collect-metrics` hook from Juju.
+
+ Note that events firing during a CollectMetricsEvent are currently
+ sandboxed in how they can interact with Juju. To report metrics
+ use :meth:`.add_metrics`.
+ """
+
+ def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None:
+ """Record metrics that have been gathered by the charm for this unit.
+
+ Args:
+ metrics: A collection of {key: float} pairs that contains the
+ metrics that have been gathered
+ labels: {key:value} strings that can be applied to the
+ metrics that are being gathered
+ """
+ self.framework.model._backend.add_metrics(metrics, labels)
+
+
+class RelationEvent(HookEvent):
+ """A base class representing the various relation lifecycle events.
+
+ Charmers should not be creating RelationEvents directly. The events will be
+ generated by the framework from Juju related events. Users can observe them
+ from the various `CharmBase.on[relation_name].relation_*` events.
+
+ Attributes:
+ relation: The Relation involved in this event
+ app: The remote application that has triggered this event
+ unit: The remote unit that has triggered this event. This may be None
+ if the relation event was triggered as an Application level event
+ """
+
+ def __init__(self, handle, relation, app=None, unit=None):
+ super().__init__(handle)
+
+ if unit is not None and unit.app != app:
+ raise RuntimeError(
+ 'cannot create RelationEvent with application {} and unit {}'.format(app, unit))
+
+ self.relation = relation
+ self.app = app
+ self.unit = unit
+
+ def snapshot(self) -> dict:
+ """Used by the framework to serialize the event to disk.
+
+ Not meant to be called by Charm code.
+ """
+ snapshot = {
+ 'relation_name': self.relation.name,
+ 'relation_id': self.relation.id,
+ }
+ if self.app:
+ snapshot['app_name'] = self.app.name
+ if self.unit:
+ snapshot['unit_name'] = self.unit.name
+ return snapshot
+
+ def restore(self, snapshot: dict) -> None:
+ """Used by the framework to deserialize the event from disk.
+
+ Not meant to be called by Charm code.
+ """
+ self.relation = self.framework.model.get_relation(
+ snapshot['relation_name'], snapshot['relation_id'])
+
+ app_name = snapshot.get('app_name')
+ if app_name:
+ self.app = self.framework.model.get_app(app_name)
+ else:
+ self.app = None
+
+ unit_name = snapshot.get('unit_name')
+ if unit_name:
+ self.unit = self.framework.model.get_unit(unit_name)
+ else:
+ self.unit = None
+
+
+class RelationCreatedEvent(RelationEvent):
+ """Represents the `relation-created` hook from Juju.
+
+ This is triggered when a new relation to another app is added in Juju. This
+ can occur before units for those applications have started. All existing
+ relations should be established before start.
+ """
+
+
+class RelationJoinedEvent(RelationEvent):
+ """Represents the `relation-joined` hook from Juju.
+
+ This is triggered whenever a new unit of a related application joins the relation.
+ (eg, a unit was added to an existing related app, or a new relation was established
+ with an application that already had units.)
+ """
+
+
+class RelationChangedEvent(RelationEvent):
+ """Represents the `relation-changed` hook from Juju.
+
+ This is triggered whenever there is a change to the data bucket for a related
+ application or unit. Look at `event.relation.data[event.unit/app]` to see the
+ new information.
+ """
+
+
+class RelationDepartedEvent(RelationEvent):
+ """Represents the `relation-departed` hook from Juju.
+
+ This is the inverse of the RelationJoinedEvent, representing when a unit
+ is leaving the relation (the unit is being removed, the app is being removed,
+ the relation is being removed). It is fired once for each unit that is
+ going away.
+ """
+
+
+class RelationBrokenEvent(RelationEvent):
+ """Represents the `relation-broken` hook from Juju.
+
+ If a relation is being removed (`juju remove-relation` or `juju remove-application`),
+ once all the units have been removed, RelationBrokenEvent will fire to signal
+ that the relationship has been fully terminated.
+ """
+
+
+class StorageEvent(HookEvent):
+ """Base class representing Storage related events."""
+
+
+class StorageAttachedEvent(StorageEvent):
+ """Represents the `storage-attached` hook from Juju.
+
+ Called when new storage is available for the charm to use.
+ """
+
+
+class StorageDetachingEvent(StorageEvent):
+ """Represents the `storage-detaching` hook from Juju.
+
+ Called when storage a charm has been using is going away.
+ """
+
+
+class CharmEvents(ObjectEvents):
+ """The events that are generated by Juju in response to the lifecycle of an application."""
+
+ install = EventSource(InstallEvent)
+ start = EventSource(StartEvent)
+ stop = EventSource(StopEvent)
+ remove = EventSource(RemoveEvent)
+ update_status = EventSource(UpdateStatusEvent)
+ config_changed = EventSource(ConfigChangedEvent)
+ upgrade_charm = EventSource(UpgradeCharmEvent)
+ pre_series_upgrade = EventSource(PreSeriesUpgradeEvent)
+ post_series_upgrade = EventSource(PostSeriesUpgradeEvent)
+ leader_elected = EventSource(LeaderElectedEvent)
+ leader_settings_changed = EventSource(LeaderSettingsChangedEvent)
+ collect_metrics = EventSource(CollectMetricsEvent)
+
+
+class CharmBase(Object):
+ """Base class that represents the Charm overall.
+
+ Usually this initialization is done by ops.main.main() rather than Charm authors
+ directly instantiating a Charm.
+
+ Args:
+ framework: The framework responsible for managing the Model and events for this
+ Charm.
+ key: Ignored; will remove after deprecation period of the signature change.
+ """
+
+ on = CharmEvents()
+
+ def __init__(self, framework: Framework, key: typing.Optional = None):
+ super().__init__(framework, None)
+
+ for relation_name in self.framework.meta.relations:
+ relation_name = relation_name.replace('-', '_')
+ self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent)
+ self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent)
+ self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent)
+ self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent)
+ self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent)
+
+ for storage_name in self.framework.meta.storages:
+ storage_name = storage_name.replace('-', '_')
+ self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent)
+ self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent)
+
+ for action_name in self.framework.meta.actions:
+ action_name = action_name.replace('-', '_')
+ self.on.define_event(action_name + '_action', ActionEvent)
+
+ @property
+ def app(self) -> model.Application:
+ """Application that this unit is part of."""
+ return self.framework.model.app
+
+ @property
+ def unit(self) -> model.Unit:
+ """Unit that this execution is responsible for."""
+ return self.framework.model.unit
+
+ @property
+ def meta(self) -> 'CharmMeta':
+ """CharmMeta of this charm.
+ """
+ return self.framework.meta
+
+ @property
+ def charm_dir(self) -> pathlib.Path:
+ """Root directory of the Charm as it is running.
+ """
+ return self.framework.charm_dir
+
+
+class CharmMeta:
+ """Object containing the metadata for the charm.
+
+ This is read from metadata.yaml and/or actions.yaml. Generally charms will
+ define this information, rather than reading it at runtime. This class is
+ mostly for the framework to understand what the charm has defined.
+
+ The maintainers, tags, terms, series, and extra_bindings attributes are all
+ lists of strings. The requires, provides, peers, relations, storage,
+ resources, and payloads attributes are all mappings of names to instances
+ of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta.
+
+ The relations attribute is a convenience accessor which includes all of the
+ requires, provides, and peers RelationMeta items. If needed, the role of
+ the relation definition can be obtained from its role attribute.
+
+ Attributes:
+ name: The name of this charm
+ summary: Short description of what this charm does
+ description: Long description for this charm
+ maintainers: A list of strings of the email addresses of the maintainers
+ of this charm.
+ tags: Charm store tag metadata for categories associated with this charm.
+ terms: Charm store terms that should be agreed to before this charm can
+ be deployed. (Used for things like licensing issues.)
+ series: The list of supported OS series that this charm can support.
+ The first entry in the list is the default series that will be
+ used by deploy if no other series is requested by the user.
+ subordinate: True/False whether this charm is intended to be used as a
+ subordinate charm.
+ min_juju_version: If supplied, indicates this charm needs features that
+ are not available in older versions of Juju.
+ requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation.
+ provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation.
+ peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation.
+ relations: A dict containing all :class:`RelationMeta` attributes (merged from other
+ sections)
+ storages: A dict of {name: :class:`StorageMeta`} for each defined storage.
+ resources: A dict of {name: :class:`ResourceMeta`} for each defined resource.
+ payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload.
+ extra_bindings: A dict of additional named bindings that a charm can use
+ for network configuration.
+ actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined.
+ Args:
+ raw: a mapping containing the contents of metadata.yaml
+ actions_raw: a mapping containing the contents of actions.yaml
+ """
+
+ def __init__(self, raw: dict = {}, actions_raw: dict = {}):
+ self.name = raw.get('name', '')
+ self.summary = raw.get('summary', '')
+ self.description = raw.get('description', '')
+ self.maintainers = []
+ if 'maintainer' in raw:
+ self.maintainers.append(raw['maintainer'])
+ if 'maintainers' in raw:
+ self.maintainers.extend(raw['maintainers'])
+ self.tags = raw.get('tags', [])
+ self.terms = raw.get('terms', [])
+ self.series = raw.get('series', [])
+ self.subordinate = raw.get('subordinate', False)
+ self.min_juju_version = raw.get('min-juju-version')
+ self.requires = {name: RelationMeta(RelationRole.requires, name, rel)
+ for name, rel in raw.get('requires', {}).items()}
+ self.provides = {name: RelationMeta(RelationRole.provides, name, rel)
+ for name, rel in raw.get('provides', {}).items()}
+ self.peers = {name: RelationMeta(RelationRole.peer, name, rel)
+ for name, rel in raw.get('peers', {}).items()}
+ self.relations = {}
+ self.relations.update(self.requires)
+ self.relations.update(self.provides)
+ self.relations.update(self.peers)
+ self.storages = {name: StorageMeta(name, storage)
+ for name, storage in raw.get('storage', {}).items()}
+ self.resources = {name: ResourceMeta(name, res)
+ for name, res in raw.get('resources', {}).items()}
+ self.payloads = {name: PayloadMeta(name, payload)
+ for name, payload in raw.get('payloads', {}).items()}
+ self.extra_bindings = raw.get('extra-bindings', {})
+ self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()}
+
+ @classmethod
+ def from_yaml(
+ cls, metadata: typing.Union[str, typing.TextIO],
+ actions: typing.Optional[typing.Union[str, typing.TextIO]] = None):
+ """Instantiate a CharmMeta from a YAML description of metadata.yaml.
+
+ Args:
+ metadata: A YAML description of charm metadata (name, relations, etc.)
+ This can be a simple string, or a file-like object. (passed to `yaml.safe_load`).
+ actions: YAML description of Actions for this charm (eg actions.yaml)
+ """
+ meta = _loadYaml(metadata)
+ raw_actions = {}
+ if actions is not None:
+ raw_actions = _loadYaml(actions)
+ return cls(meta, raw_actions)
+
+
+class RelationRole(enum.Enum):
+ peer = 'peer'
+ requires = 'requires'
+ provides = 'provides'
+
+ def is_peer(self) -> bool:
+ """Return whether the current role is peer.
+
+ A convenience to avoid having to import charm.
+ """
+ return self is RelationRole.peer
+
+
+class RelationMeta:
+ """Object containing metadata about a relation definition.
+
+ Should not be constructed directly by Charm code. Is gotten from one of
+ :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`,
+ or :attr:`CharmMeta.relations`.
+
+ Attributes:
+ role: This is one of peer/requires/provides
+ relation_name: Name of this relation from metadata.yaml
+ interface_name: Optional definition of the interface protocol.
+ scope: "global" or "container" scope based on how the relation should be used.
+ """
+
+ def __init__(self, role: RelationRole, relation_name: str, raw: dict):
+ if not isinstance(role, RelationRole):
+ raise TypeError("role should be a Role, not {!r}".format(role))
+ self.role = role
+ self.relation_name = relation_name
+ self.interface_name = raw['interface']
+ self.scope = raw.get('scope')
+
+
+class StorageMeta:
+ """Object containing metadata about a storage definition."""
+
+ def __init__(self, name, raw):
+ self.storage_name = name
+ self.type = raw['type']
+ self.description = raw.get('description', '')
+ self.shared = raw.get('shared', False)
+ self.read_only = raw.get('read-only', False)
+ self.minimum_size = raw.get('minimum-size')
+ self.location = raw.get('location')
+ self.multiple_range = None
+ if 'multiple' in raw:
+ range = raw['multiple']['range']
+ if '-' not in range:
+ self.multiple_range = (int(range), int(range))
+ else:
+ range = range.split('-')
+ self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None)
+
+
+class ResourceMeta:
+ """Object containing metadata about a resource definition."""
+
+ def __init__(self, name, raw):
+ self.resource_name = name
+ self.type = raw['type']
+ self.filename = raw.get('filename', None)
+ self.description = raw.get('description', '')
+
+
+class PayloadMeta:
+ """Object containing metadata about a payload definition."""
+
+ def __init__(self, name, raw):
+ self.payload_name = name
+ self.type = raw['type']
+
+
+class ActionMeta:
+ """Object containing metadata about an action's definition."""
+
+ def __init__(self, name, raw=None):
+ raw = raw or {}
+ self.name = name
+ self.title = raw.get('title', '')
+ self.description = raw.get('description', '')
+ self.parameters = raw.get('params', {}) # {: }
+ self.required = raw.get('required', []) # [, ...]
diff --git a/coredns/venv/ops/framework.py b/coredns/venv/ops/framework.py
new file mode 100644
index 0000000..ad500ca
--- /dev/null
+++ b/coredns/venv/ops/framework.py
@@ -0,0 +1,1073 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import collections.abc
+import inspect
+import keyword
+import logging
+import marshal
+import os
+import pathlib
+import pdb
+import re
+import sys
+import types
+import weakref
+
+from ops import charm
+from ops.storage import (
+ NoSnapshotError,
+ SQLiteStorage,
+)
+
+logger = logging.getLogger(__name__)
+
+
class Handle:
    """Handle defines a name for an object in the form of a hierarchical path.

    The provided parent is the object (or that object's handle) that this handle
    sits under, or None if the object identified by this handle stands by itself
    as the root of its own hierarchy.

    The handle kind is a string that defines a namespace so objects with the
    same parent and kind will have unique keys.

    The handle key is a string uniquely identifying the object. No other objects
    under the same parent and kind may have the same key.
    """

    def __init__(self, parent, kind, key):
        if parent and not isinstance(parent, Handle):
            parent = parent.handle
        self._parent = parent
        self._kind = kind
        self._key = key
        # Build this handle's segment ("kind" or "kind[key]"), then prefix
        # the parent path when there is one.
        segment = "{}[{}]".format(kind, key) if key else "{}".format(kind)
        self._path = "{}/{}".format(parent, segment) if parent else segment

    def nest(self, kind, key):
        """Return a new Handle that is a child of this one."""
        return Handle(self, kind, key)

    def __hash__(self):
        return hash((self.parent, self.kind, self.key))

    def __eq__(self, other):
        return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key)

    def __str__(self):
        return self.path

    @property
    def parent(self):
        """Parent Handle, or None for a root handle."""
        return self._parent

    @property
    def kind(self):
        """Namespace string for this handle."""
        return self._kind

    @property
    def key(self):
        """Key uniquely identifying the object under (parent, kind), or None."""
        return self._key

    @property
    def path(self):
        """Full slash-separated string form of this handle."""
        return self._path

    @classmethod
    def from_path(cls, path):
        """Rebuild a chain of Handles from its string path representation."""
        handle = None
        for segment in path.split("/"):
            parts = segment.split("[")
            if len(parts) == 1:
                # Bare kind with no key.
                kind, key = parts[0], None
            elif len(parts) == 2 and parts[1].endswith(']'):
                # "kind[key]" form; strip the closing bracket.
                kind, key = parts[0], parts[1][:-1]
            else:
                raise RuntimeError("attempted to restore invalid handle path {}".format(path))
            handle = Handle(handle, kind, key)
        return handle
+
+
class EventBase:
    """Base class for events; carries its Handle and a deferral flag."""

    def __init__(self, handle):
        self.handle = handle
        self.deferred = False

    def __repr__(self):
        return "<{} via {}>".format(self.__class__.__name__, self.handle)

    def defer(self):
        """Ask the framework to re-deliver this event on a later dispatch."""
        logger.debug("Deferring %s.", self)
        self.deferred = True

    def snapshot(self):
        """Return the snapshot data that should be persisted.

        Subclasses must override to save any custom state.
        """
        return None

    def restore(self, snapshot):
        """Restore the value state from the given snapshot.

        Subclasses must override to restore their custom state.
        """
        self.deferred = False
+
+
class EventSource:
    """EventSource wraps an event type with a descriptor to facilitate observing and emitting.

    It is generally used as:

        class SomethingHappened(EventBase):
            pass

        class SomeObject(Object):
            something_happened = EventSource(SomethingHappened)

    With that, instances of that type will offer the someobj.something_happened
    attribute which is a BoundEvent and may be used to emit and observe the event.
    """

    def __init__(self, event_type):
        if not isinstance(event_type, type) or not issubclass(event_type, EventBase):
            raise RuntimeError(
                'Event requires a subclass of EventBase as an argument, got {}'.format(event_type))
        self.event_type = event_type
        self.event_kind = None    # attribute name; set later by _set_name
        self.emitter_type = None  # owning class; set later by _set_name

    def _set_name(self, emitter_type, event_kind):
        # Equivalent of __set_name__ (see _Metaclass); a given EventSource
        # instance may be bound to exactly one class attribute.
        if self.event_kind is not None:
            raise RuntimeError(
                'EventSource({}) reused as {}.{} and {}.{}'.format(
                    self.event_type.__name__,
                    self.emitter_type.__name__,
                    self.event_kind,
                    emitter_type.__name__,
                    event_kind,
                ))
        self.event_kind = event_kind
        self.emitter_type = emitter_type

    def __get__(self, emitter, emitter_type=None):
        if emitter is None:
            # Accessed on the class: return the descriptor itself.
            return self
        # Framework might not be available if accessed as CharmClass.on.event
        # rather than charm_instance.on.event, but in that case it couldn't be
        # emitted anyway, so there's no point to registering it.
        framework = getattr(emitter, 'framework', None)
        if framework is not None:
            framework.register_type(self.event_type, emitter, self.event_kind)
        return BoundEvent(emitter, self.event_type, self.event_kind)
+
+
class BoundEvent:
    """The result of accessing an EventSource attribute on an emitter instance.

    Knows the emitter, the event type and the event kind, and can emit() the
    event to all registered observers through the emitter's framework.
    """

    def __repr__(self):
        # NOTE(review): the format string had been reduced to '' (apparent
        # copy/extraction loss) while still being given four arguments;
        # restored so repr() describes the binding instead of returning ''.
        return '<BoundEvent {} bound to {}.{} at {}>'.format(
            self.event_type.__name__,
            type(self.emitter).__name__,
            self.event_kind,
            hex(id(self)),
        )

    def __init__(self, emitter, event_type, event_kind):
        self.emitter = emitter
        self.event_type = event_type
        self.event_kind = event_kind

    def emit(self, *args, **kwargs):
        """Emit event to all registered observers.

        The current storage state is committed before and after each observer is notified.
        """
        framework = self.emitter.framework
        key = framework._next_event_key()
        event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs)
        framework._emit(event)
+
+
class HandleKind:
    """Helper descriptor to define the Object.handle_kind field.

    The handle_kind for an object defaults to its type name, but it may
    be explicitly overridden if desired.
    """

    def __get__(self, obj, obj_type):
        # Only honor an override set directly on obj_type's own __dict__
        # (not inherited); otherwise fall back to the class name.
        explicit = obj_type.__dict__.get("handle_kind")
        return explicit if explicit else obj_type.__name__
+
+
class _Metaclass(type):
    """Helper class to ensure proper instantiation of Object-derived classes.

    This class currently has a single purpose: events derived from EventSource
    that are class attributes of Object-derived classes need to be told what
    their name is in that class. For example, in

        class SomeObject(Object):
            something_happened = EventSource(SomethingHappened)

    the instance of EventSource needs to know it's called 'something_happened'.

    Starting from python 3.6 we could use __set_name__ on EventSource for this,
    but until then this (meta)class does the equivalent work.

    TODO: when we drop support for 3.5 drop this class, and rename _set_name in
    EventSource to __set_name__; everything should continue to work.

    """

    def __new__(typ, *a, **kw):
        # Create the class normally, then wire up any EventSource attributes.
        k = super().__new__(typ, *a, **kw)
        # k is now the Object-derived class; loop over its class attributes
        for n, v in vars(k).items():
            # we could do duck typing here if we want to support
            # non-EventSource-derived shenanigans. We don't.
            if isinstance(v, EventSource):
                # this is what 3.6+ does automatically for us:
                v._set_name(k, n)
        return k
+
+
class Object(metaclass=_Metaclass):
    """Base for framework-tracked objects, addressable by a hierarchical Handle."""

    handle_kind = HandleKind()

    def __init__(self, parent, key):
        kind = self.handle_kind
        if isinstance(parent, Framework):
            self.framework = parent
            # Avoid Framework instances having a circular reference to themselves.
            if self.framework is self:
                self.framework = weakref.proxy(self.framework)
            # Parented directly to the framework: this is a root handle.
            self.handle = Handle(None, kind, key)
        else:
            self.framework = parent.framework
            self.handle = Handle(parent, kind, key)
        self.framework._track(self)

        # TODO Detect conflicting handles here.

    @property
    def model(self):
        """Convenience accessor for the framework's model."""
        return self.framework.model
+
+
class ObjectEvents(Object):
    """Convenience type to allow defining .on attributes at class level."""

    handle_kind = "on"

    def __init__(self, parent=None, key=None):
        if parent is not None:
            super().__init__(parent, key)
        else:
            # Created as a class attribute: act as a descriptor, caching the
            # per-emitter instance built in __get__ below.
            self._cache = weakref.WeakKeyDictionary()

    def __get__(self, emitter, emitter_type):
        if emitter is None:
            return self
        instance = self._cache.get(emitter)
        if instance is None:
            # Same type, different instance, more data. Doing this unusual construct
            # means people can subclass just this one class to have their own 'on'.
            instance = self._cache[emitter] = type(self)(emitter)
        return instance

    @classmethod
    def define_event(cls, event_kind, event_type):
        """Define an event on this type at runtime.

        cls: a type to define an event on.

        event_kind: an attribute name that will be used to access the
        event. Must be a valid python identifier, not be a keyword
        or an existing attribute.

        event_type: a type of the event to define.

        """
        prefix = 'unable to define an event with event_kind that '
        if not event_kind.isidentifier():
            raise RuntimeError(prefix + 'is not a valid python identifier: ' + event_kind)
        elif keyword.iskeyword(event_kind):
            raise RuntimeError(prefix + 'is a python keyword: ' + event_kind)
        try:
            getattr(cls, event_kind)
            # getattr succeeded, so the name is already taken.
            raise RuntimeError(
                prefix + 'overlaps with an existing type {} attribute: {}'.format(cls, event_kind))
        except AttributeError:
            pass

        event_descriptor = EventSource(event_type)
        event_descriptor._set_name(cls, event_kind)
        setattr(cls, event_kind, event_descriptor)

    def events(self):
        """Return a mapping of event_kinds to bound_events for all available events.
        """
        events_map = {}
        # We have to iterate over the class rather than instance to allow for properties which
        # might call this method (e.g., event views), leading to infinite recursion.
        for attr_name, attr_value in inspect.getmembers(type(self)):
            if isinstance(attr_value, EventSource):
                # We actually care about the bound_event, however, since it
                # provides the most info for users of this method.
                event_kind = attr_name
                bound_event = getattr(self, event_kind)
                events_map[event_kind] = bound_event
        return events_map

    def __getitem__(self, key):
        # e.g. self.on['db'].relation_joined -> self.on.db_relation_joined
        return PrefixedEvents(self, key)
+
+
class PrefixedEvents:
    """Attribute-access proxy that prepends a normalized prefix to lookups."""

    def __init__(self, emitter, key):
        self._emitter = emitter
        # Dashes are invalid in attribute names, so normalize them to
        # underscores up front.
        self._prefix = '{}_'.format(key.replace('-', '_'))

    def __getattr__(self, name):
        return getattr(self._emitter, self._prefix + name)
+
+
class PreCommitEvent(EventBase):
    """Emitted by Framework.commit just before committing, so objects can persist state."""
    pass
+
+
class CommitEvent(EventBase):
    """Emitted by Framework.commit; StoredStateData saves its snapshot on this event."""
    pass
+
+
class FrameworkEvents(ObjectEvents):
    """The events the Framework itself emits (see Framework.commit)."""
    pre_commit = EventSource(PreCommitEvent)
    commit = EventSource(CommitEvent)
+
+
class NoTypeError(Exception):
    """Raised when restoring a snapshot whose handle has no registered class."""

    def __init__(self, handle_path):
        self.handle_path = handle_path

    def __str__(self):
        return "cannot restore %s since no class was registered for it" % self.handle_path
+
+
+# the message to show to the user when a pdb breakpoint goes active
+_BREAKPOINT_WELCOME_MESSAGE = """
+Starting pdb to debug charm operator.
+Run `h` for help, `c` to continue, or `exit`/CTRL-d to abort.
+Future breakpoints may interrupt execution again.
+More details at https://discourse.jujucharms.com/t/debugging-charm-hooks
+
+"""
+
+
+_event_regex = r'^(|.*/)on/[a-zA-Z_]+\[\d+\]$'
+
+
+class Framework(Object):
+
+ on = FrameworkEvents()
+
+ # Override properties from Object so that we can set them in __init__.
+ model = None
+ meta = None
+ charm_dir = None
+
    def __init__(self, storage, charm_dir, meta, model):
        """Set up the framework over a storage backend, charm dir, metadata and model."""

        super().__init__(self, None)

        self.charm_dir = charm_dir
        self.meta = meta
        self.model = model
        self._observers = []  # [(observer_path, method_name, parent_path, event_key)]
        self._observer = weakref.WeakValueDictionary()  # {observer_path: observer}
        self._objects = weakref.WeakValueDictionary()
        self._type_registry = {}  # {(parent_path, kind): cls}
        self._type_known = set()  # {cls}

        if isinstance(storage, (str, pathlib.Path)):
            logger.warning(
                "deprecated: Framework now takes a Storage not a path")
            storage = SQLiteStorage(storage)
        self._storage = storage

        # We can't use the higher-level StoredState because it relies on events.
        self.register_type(StoredStateData, None, StoredStateData.handle_kind)
        stored_handle = Handle(None, StoredStateData.handle_kind, '_stored')
        try:
            self._stored = self.load_snapshot(stored_handle)
        except NoSnapshotError:
            self._stored = StoredStateData(self, '_stored')
            self._stored['event_count'] = 0

        # Hook into builtin breakpoint, so if Python >= 3.7, devs will be able to just do
        # breakpoint(); if Python < 3.7, this doesn't affect anything
        sys.breakpointhook = self.breakpoint

        # Flag to indicate that we already presented the welcome message in a debugger breakpoint
        self._breakpoint_welcomed = False

        # Parse once the env var, which may be used multiple times later
        debug_at = os.environ.get('JUJU_DEBUG_AT')
        self._juju_debug_at = debug_at.split(',') if debug_at else ()
+
    def close(self):
        """Release resources held by the storage backend."""
        self._storage.close()
+
    def _track(self, obj):
        """Track object and ensure it is the only object created using its handle path."""
        if obj is self:
            # Framework objects don't track themselves
            return
        # NOTE(review): within Framework, self.framework is a weakref proxy of
        # self (see Object.__init__), so this reads the same _objects mapping
        # that is written below.
        if obj.handle.path in self.framework._objects:
            raise RuntimeError(
                'two objects claiming to be {} have been created'.format(obj.handle.path))
        self._objects[obj.handle.path] = obj
+
    def _forget(self, obj):
        """Stop tracking the given object. See also _track."""
        # pop with default: forgetting an untracked object is a no-op.
        self._objects.pop(obj.handle.path, None)
+
    def commit(self):
        """Persist all pending framework state and flush the storage backend."""
        # Give a chance for objects to persist data they want to before a commit is made.
        self.on.pre_commit.emit()
        # Make sure snapshots are saved by instances of StoredStateData. Any possible state
        # modifications in on_commit handlers of instances of other classes will not be persisted.
        self.on.commit.emit()
        # Save our event count after all events have been emitted.
        self.save_snapshot(self._stored)
        self._storage.commit()
+
    def register_type(self, cls, parent, kind=None):
        """Register cls as the class used to restore snapshots under (parent, kind).

        parent may be an object, a Handle, or None for root handles; kind
        defaults to cls.handle_kind.
        """
        if parent and not isinstance(parent, Handle):
            parent = parent.handle
        if parent:
            parent_path = parent.path
        else:
            parent_path = None
        if not kind:
            kind = cls.handle_kind
        self._type_registry[(parent_path, kind)] = cls
        self._type_known.add(cls)
+
    def save_snapshot(self, value):
        """Save a persistent snapshot of the provided value.

        The provided value must implement the following interface:

        value.handle = Handle(...)
        value.snapshot() => {...} # Simple builtin types only.
        value.restore(snapshot) # Restore custom state from prior snapshot.
        """
        if type(value) not in self._type_known:
            raise RuntimeError(
                'cannot save {} values before registering that type'.format(type(value).__name__))
        data = value.snapshot()

        # Use marshal as a validator, enforcing the use of simple types, as the
        # information is later really pickled, which is too error prone for
        # future evolution of the stored data (e.g. if the developer stores a
        # custom object and later changes its class name; when unpickling the
        # original class will not be there and event data loading will fail).
        try:
            marshal.dumps(data)
        except ValueError:
            msg = "unable to save the data for {}, it must contain only simple types: {!r}"
            raise ValueError(msg.format(value.__class__.__name__, data))

        self._storage.save_snapshot(value.handle.path, data)
+
    def load_snapshot(self, handle):
        """Reconstruct and track the object for handle from its stored snapshot.

        Raises NoTypeError when no class was registered for (parent, kind).
        """
        parent_path = None
        if handle.parent:
            parent_path = handle.parent.path
        cls = self._type_registry.get((parent_path, handle.kind))
        if not cls:
            raise NoTypeError(handle.path)
        data = self._storage.load_snapshot(handle.path)
        # Bypass __init__; restore() is responsible for rebuilding state.
        obj = cls.__new__(cls)
        obj.framework = self
        obj.handle = handle
        obj.restore(data)
        self._track(obj)
        return obj
+
    def drop_snapshot(self, handle):
        """Discard the persisted snapshot for the given handle."""
        self._storage.drop_snapshot(handle.path)
+
    def observe(self, bound_event: BoundEvent, observer: types.MethodType):
        """Register observer to be called when bound_event is emitted.

        The bound_event is generally provided as an attribute of the object that emits
        the event, and is created in this style:

            class SomeObject:
                something_happened = Event(SomethingHappened)

        That event may be observed as:

            framework.observe(someobj.something_happened, self._on_something_happened)

        Raises:
            RuntimeError: if bound_event or observer are the wrong type.
        """
        if not isinstance(bound_event, BoundEvent):
            raise RuntimeError(
                'Framework.observe requires a BoundEvent as second parameter, got {}'.format(
                    bound_event))
        if not isinstance(observer, types.MethodType):
            # help users of older versions of the framework
            if isinstance(observer, charm.CharmBase):
                raise TypeError(
                    'observer methods must now be explicitly provided;'
                    ' please replace observe(self.on.{0}, self)'
                    ' with e.g. observe(self.on.{0}, self._on_{0})'.format(
                        bound_event.event_kind))
            raise RuntimeError(
                'Framework.observe requires a method as third parameter, got {}'.format(observer))

        event_type = bound_event.event_type
        event_kind = bound_event.event_kind
        emitter = bound_event.emitter

        self.register_type(event_type, emitter, event_kind)

        if hasattr(emitter, "handle"):
            emitter_path = emitter.handle.path
        else:
            raise RuntimeError(
                'event emitter {} must have a "handle" attribute'.format(type(emitter).__name__))

        # Validate that the method has an acceptable call signature.
        sig = inspect.signature(observer)
        # Self isn't included in the params list, so the first arg will be the event.
        extra_params = list(sig.parameters.values())[1:]

        # Keep only the bound instance; the method is re-resolved by name at
        # dispatch time (see _reemit).
        method_name = observer.__name__
        observer = observer.__self__
        if not sig.parameters:
            raise TypeError(
                '{}.{} must accept event parameter'.format(type(observer).__name__, method_name))
        elif any(param.default is inspect.Parameter.empty for param in extra_params):
            # Allow for additional optional params, since there's no reason to exclude them, but
            # required params will break.
            raise TypeError(
                '{}.{} has extra required parameter'.format(type(observer).__name__, method_name))

        # TODO Prevent the exact same parameters from being registered more than once.

        self._observer[observer.handle.path] = observer
        self._observers.append((observer.handle.path, method_name, emitter_path, event_kind))
+
    def _next_event_key(self):
        """Return the next event key that should be used, incrementing the internal counter."""
        # Increment the count first; this means the keys will start at 1, and 0
        # means no events have been emitted.
        self._stored['event_count'] += 1
        return str(self._stored['event_count'])
+
    def _emit(self, event):
        """See BoundEvent.emit for the public way to call this."""

        saved = False
        event_path = event.handle.path
        event_kind = event.handle.kind
        parent_path = event.handle.parent.path
        # TODO Track observers by (parent_path, event_kind) rather than as a list of
        # all observers. Avoiding linear search through all observers for every event
        for observer_path, method_name, _parent_path, _event_kind in self._observers:
            if _parent_path != parent_path:
                continue
            if _event_kind and _event_kind != event_kind:
                continue
            if not saved:
                # Save the event for all known observers before the first notification
                # takes place, so that either everyone interested sees it, or nobody does.
                self.save_snapshot(event)
                saved = True
            # Again, only commit this after all notices are saved.
            self._storage.save_notice(event_path, observer_path, method_name)
        if saved:
            # Deliver the just-saved notices for this event immediately.
            self._reemit(event_path)
+
    def reemit(self):
        """Reemit previously deferred events to the observers that deferred them.

        Only the specific observers that have previously deferred the event will be
        notified again. Observers that asked to be notified about events after it's
        been first emitted won't be notified, as that would mean potentially observing
        events out of order.
        """
        # No event-path filter: replay every stored notice.
        self._reemit()
+
    def _reemit(self, single_event_path=None):
        """Dispatch stored notices: all of them, or only those for one event path.

        Each (event_path, observer_path, method_name) notice reloads the event
        snapshot and delivers it; a notice is dropped once handled without a
        defer(), and an event's snapshot is dropped once no observer defers it.
        """
        last_event_path = None
        deferred = True
        for event_path, observer_path, method_name in self._storage.notices(single_event_path):
            event_handle = Handle.from_path(event_path)

            if last_event_path != event_path:
                if not deferred and last_event_path is not None:
                    # Previous event was fully handled; its snapshot can go.
                    self._storage.drop_snapshot(last_event_path)
                last_event_path = event_path
                deferred = False

            try:
                event = self.load_snapshot(event_handle)
            except NoTypeError:
                # No class registered for this event anymore; drop the notice.
                self._storage.drop_notice(event_path, observer_path, method_name)
                continue

            event.deferred = False
            observer = self._observer.get(observer_path)
            if observer:
                if single_event_path is None:
                    logger.debug("Re-emitting %s.", event)
                custom_handler = getattr(observer, method_name, None)
                if custom_handler:
                    event_is_from_juju = isinstance(event, charm.HookEvent)
                    event_is_action = isinstance(event, charm.ActionEvent)
                    if (event_is_from_juju or event_is_action) and 'hook' in self._juju_debug_at:
                        # Present the welcome message and run under PDB.
                        self._show_debug_code_message()
                        pdb.runcall(custom_handler, event)
                    else:
                        # Regular call to the registered method.
                        custom_handler(event)

            if event.deferred:
                deferred = True
            else:
                self._storage.drop_notice(event_path, observer_path, method_name)
            # We intentionally consider this event to be dead and reload it from
            # scratch in the next path.
            self.framework._forget(event)

        if not deferred and last_event_path is not None:
            self._storage.drop_snapshot(last_event_path)
+
    def _show_debug_code_message(self):
        """Present the welcome message (only once!) when using debugger functionality."""
        if not self._breakpoint_welcomed:
            self._breakpoint_welcomed = True
            print(_BREAKPOINT_WELCOME_MESSAGE, file=sys.stderr, end='')
+
    def breakpoint(self, name=None):
        """Add breakpoint, optionally named, at the place where this method is called.

        For the breakpoint to be activated the JUJU_DEBUG_AT environment variable
        must be set to "all" or to the specific name parameter provided, if any. In every
        other situation calling this method does nothing.

        The framework also provides a standard breakpoint named "hook", that will
        stop execution when a hook event is about to be handled.

        For those reasons, the "all" and "hook" breakpoint names are reserved.
        """
        # If given, validate the name comply with all the rules
        if name is not None:
            if not isinstance(name, str):
                raise TypeError('breakpoint names must be strings')
            if name in ('hook', 'all'):
                raise ValueError('breakpoint names "all" and "hook" are reserved')
            if not re.match(r'^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$', name):
                raise ValueError('breakpoint names must look like "foo" or "foo-bar"')

        indicated_breakpoints = self._juju_debug_at
        if not indicated_breakpoints:
            # JUJU_DEBUG_AT unset: breakpoints are inert.
            return

        if 'all' in indicated_breakpoints or name in indicated_breakpoints:
            self._show_debug_code_message()

            # If we call set_trace() directly it will open the debugger *here*, so indicating
            # it to use our caller's frame
            code_frame = inspect.currentframe().f_back
            pdb.Pdb().set_trace(code_frame)
        else:
            logger.warning(
                "Breakpoint %r skipped (not found in the requested breakpoints: %s)",
                name, indicated_breakpoints)
+
    def remove_unreferenced_events(self):
        """Remove events from storage that are not referenced.

        In older versions of the framework, events that had no observers would get recorded but
        never deleted. This makes a best effort to find these events and remove them from the
        database.
        """
        event_regex = re.compile(_event_regex)
        to_remove = []
        for handle_path in self._storage.list_snapshots():
            if event_regex.match(handle_path):
                notices = self._storage.notices(handle_path)
                if next(notices, None) is None:
                    # There are no notices for this handle_path, it is valid to remove it
                    to_remove.append(handle_path)
        for handle_path in to_remove:
            self._storage.drop_snapshot(handle_path)
+
+
class StoredStateData(Object):
    """Dict-like holder of persisted state, snapshotted as a plain dict."""

    def __init__(self, parent, attr_name):
        super().__init__(parent, attr_name)
        self._cache = {}
        # Set whenever the cache changes; cleared after a save/restore.
        self.dirty = False

    def __getitem__(self, key):
        # Missing keys read as None rather than raising KeyError.
        return self._cache.get(key)

    def __setitem__(self, key, value):
        self._cache[key] = value
        self.dirty = True

    def __contains__(self, key):
        return key in self._cache

    def snapshot(self):
        """Return the data to persist (the cache dict itself)."""
        return self._cache

    def restore(self, snapshot):
        """Replace the cache with previously persisted data."""
        self._cache = snapshot
        self.dirty = False

    def on_commit(self, event):
        # Persist only when something actually changed since the last save.
        if self.dirty:
            self.framework.save_snapshot(self)
            self.dirty = False
+
+
class BoundStoredState:
    """Per-instance view of a StoredState, loading/saving data via the framework."""

    def __init__(self, parent, attr_name):
        parent.framework.register_type(StoredStateData, parent)

        handle = Handle(parent, StoredStateData.handle_kind, attr_name)
        try:
            data = parent.framework.load_snapshot(handle)
        except NoSnapshotError:
            # First use: start with an empty data object.
            data = StoredStateData(parent, attr_name)

        # __dict__ is used to avoid infinite recursion.
        self.__dict__["_data"] = data
        self.__dict__["_attr_name"] = attr_name

        parent.framework.observe(parent.framework.on.commit, self._data.on_commit)

    def __getattr__(self, key):
        # "on" is the only reserved key that can't be used in the data map.
        if key == "on":
            return self._data.on
        if key not in self._data:
            raise AttributeError("attribute '{}' is not stored".format(key))
        return _wrap_stored(self._data, self._data[key])

    def __setattr__(self, key, value):
        if key == "on":
            raise AttributeError("attribute 'on' is reserved and cannot be set")

        value = _unwrap_stored(self._data, value)

        if not isinstance(value, (type(None), int, float, str, bytes, list, dict, set)):
            raise AttributeError(
                'attribute {!r} cannot be a {}: must be int/float/dict/list/etc'.format(
                    key, type(value).__name__))

        # `value` was already unwrapped above; no need to unwrap a second time.
        self._data[key] = value

    def set_default(self, **kwargs):
        """Set the value of any given key if it has not already been set."""
        for k, v in kwargs.items():
            if k not in self._data:
                self._data[k] = v
+
+
class StoredState:
    """A class used to store data the charm needs persisted across invocations.

    Example::

        class MyClass(Object):
            _stored = StoredState()

    Instances of `MyClass` can transparently save state between invocations by
    setting attributes on `_stored`. Initial state should be set with
    `set_default` on the bound object, that is::

        class MyClass(Object):
            _stored = StoredState()

            def __init__(self, parent, key):
                super().__init__(parent, key)
                self._stored.set_default(seen=set())
                self.framework.observe(self.on.seen, self._on_seen)

            def _on_seen(self, event):
                self._stored.seen.add(event.uuid)

    """

    def __init__(self):
        # Both are discovered lazily on first attribute access (see __get__).
        self.parent_type = None
        self.attr_name = None

    def __get__(self, parent, parent_type=None):
        if self.parent_type is not None and self.parent_type not in parent_type.mro():
            # the StoredState instance is being shared between two unrelated classes
            # -> unclear what is expected of us -> bail out
            raise RuntimeError(
                'StoredState shared by {} and {}'.format(
                    self.parent_type.__name__, parent_type.__name__))

        if parent is None:
            # accessing via the class directly (e.g. MyClass.stored)
            return self

        bound = None
        if self.attr_name is not None:
            bound = parent.__dict__.get(self.attr_name)
            if bound is not None:
                # we already have the thing from a previous pass, huzzah
                return bound

        # need to find ourselves amongst the parent's bases
        for cls in parent_type.mro():
            for attr_name, attr_value in cls.__dict__.items():
                if attr_value is not self:
                    continue
                # we've found ourselves! is it the first time?
                if bound is not None:
                    # the StoredState instance is being stored in two different
                    # attributes -> unclear what is expected of us -> bail out
                    raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format(
                        cls.__name__, self.attr_name, attr_name))
                # we've found ourselves for the first time; save where, and bind the object
                self.attr_name = attr_name
                self.parent_type = cls
                bound = BoundStoredState(parent, attr_name)

        if bound is not None:
            # cache the bound object to avoid the expensive lookup the next time
            # (don't use setattr, to keep things symmetric with the fast-path lookup above)
            parent.__dict__[self.attr_name] = bound
            return bound

        raise AttributeError(
            'cannot find {} attribute in type {}'.format(
                self.__class__.__name__, parent_type.__name__))
+
+
+def _wrap_stored(parent_data, value):
+ t = type(value)
+ if t is dict:
+ return StoredDict(parent_data, value)
+ if t is list:
+ return StoredList(parent_data, value)
+ if t is set:
+ return StoredSet(parent_data, value)
+ return value
+
+
def _unwrap_stored(parent_data, value):
    """Return the plain container underlying a Stored* wrapper, or value as-is."""
    if type(value) in (StoredDict, StoredList, StoredSet):
        return value._under
    return value
+
+
class StoredDict(collections.abc.MutableMapping):
    """Mapping view over stored data that flags its StoredStateData dirty on mutation."""

    def __init__(self, stored_data, under):
        self._stored_data = stored_data
        self._under = under  # the plain dict actually persisted

    def __getitem__(self, key):
        # Wrap nested containers so their mutations are tracked too.
        return _wrap_stored(self._stored_data, self._under[key])

    def __setitem__(self, key, value):
        self._under[key] = _unwrap_stored(self._stored_data, value)
        self._stored_data.dirty = True

    def __delitem__(self, key):
        del self._under[key]
        self._stored_data.dirty = True

    def __iter__(self):
        return self._under.__iter__()

    def __len__(self):
        return len(self._under)

    def __eq__(self, other):
        if isinstance(other, StoredDict):
            return self._under == other._under
        elif isinstance(other, collections.abc.Mapping):
            return self._under == other
        else:
            return NotImplemented
+
+
class StoredList(collections.abc.MutableSequence):
    """Sequence view over stored data that flags its StoredStateData dirty on mutation."""

    def __init__(self, stored_data, under):
        self._stored_data = stored_data
        self._under = under  # the plain list actually persisted

    def __getitem__(self, index):
        # Wrap nested containers so their mutations are tracked too.
        return _wrap_stored(self._stored_data, self._under[index])

    def __setitem__(self, index, value):
        self._under[index] = _unwrap_stored(self._stored_data, value)
        self._stored_data.dirty = True

    def __delitem__(self, index):
        del self._under[index]
        self._stored_data.dirty = True

    def __len__(self):
        return len(self._under)

    def insert(self, index, value):
        self._under.insert(index, value)
        self._stored_data.dirty = True

    def append(self, value):
        self._under.append(value)
        self._stored_data.dirty = True

    # Comparisons delegate to the underlying list, accepting either another
    # StoredList or any plain sequence.
    def __eq__(self, other):
        if isinstance(other, StoredList):
            return self._under == other._under
        elif isinstance(other, collections.abc.Sequence):
            return self._under == other
        else:
            return NotImplemented

    def __lt__(self, other):
        if isinstance(other, StoredList):
            return self._under < other._under
        elif isinstance(other, collections.abc.Sequence):
            return self._under < other
        else:
            return NotImplemented

    def __le__(self, other):
        if isinstance(other, StoredList):
            return self._under <= other._under
        elif isinstance(other, collections.abc.Sequence):
            return self._under <= other
        else:
            return NotImplemented

    def __gt__(self, other):
        if isinstance(other, StoredList):
            return self._under > other._under
        elif isinstance(other, collections.abc.Sequence):
            return self._under > other
        else:
            return NotImplemented

    def __ge__(self, other):
        if isinstance(other, StoredList):
            return self._under >= other._under
        elif isinstance(other, collections.abc.Sequence):
            return self._under >= other
        else:
            return NotImplemented
+
+
class StoredSet(collections.abc.MutableSet):
    """Set view over stored data that flags its StoredStateData dirty on mutation."""

    def __init__(self, stored_data, under):
        self._stored_data = stored_data
        self._under = under  # the plain set actually persisted

    def add(self, key):
        self._under.add(key)
        self._stored_data.dirty = True

    def discard(self, key):
        self._under.discard(key)
        self._stored_data.dirty = True

    def __contains__(self, key):
        return key in self._under

    def __iter__(self):
        return self._under.__iter__()

    def __len__(self):
        return len(self._under)

    @classmethod
    def _from_iterable(cls, it):
        """Construct an instance of the class from any iterable input.

        Per https://docs.python.org/3/library/collections.abc.html
        if the Set mixin is being used in a class with a different constructor signature,
        you will need to override _from_iterable() with a classmethod that can construct
        new instances from an iterable argument.
        """
        return set(it)

    # Comparisons delegate to the underlying set, accepting either another
    # StoredSet or any plain set type.
    def __le__(self, other):
        if isinstance(other, StoredSet):
            return self._under <= other._under
        elif isinstance(other, collections.abc.Set):
            return self._under <= other
        else:
            return NotImplemented

    def __ge__(self, other):
        if isinstance(other, StoredSet):
            return self._under >= other._under
        elif isinstance(other, collections.abc.Set):
            return self._under >= other
        else:
            return NotImplemented

    def __eq__(self, other):
        if isinstance(other, StoredSet):
            return self._under == other._under
        elif isinstance(other, collections.abc.Set):
            return self._under == other
        else:
            return NotImplemented
diff --git a/coredns/venv/ops/jujuversion.py b/coredns/venv/ops/jujuversion.py
new file mode 100644
index 0000000..9837c50
--- /dev/null
+++ b/coredns/venv/ops/jujuversion.py
@@ -0,0 +1,106 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+from functools import total_ordering
+
+
+@total_ordering
+class JujuVersion:
+
+ PATTERN = r'''^
+ (?P\d{1,9})\.(?P\d{1,9}) # and numbers are always there
+ ((?:\.|-(?P[a-z]+))(?P\d{1,9}))? # sometimes with . or -
+ (\.(?P\d{1,9}))?$ # and sometimes with a number.
+ '''
+
+ def __init__(self, version):
+ m = re.match(self.PATTERN, version, re.VERBOSE)
+ if not m:
+ raise RuntimeError('"{}" is not a valid Juju version string'.format(version))
+
+ d = m.groupdict()
+ self.major = int(m.group('major'))
+ self.minor = int(m.group('minor'))
+ self.tag = d['tag'] or ''
+ self.patch = int(d['patch'] or 0)
+ self.build = int(d['build'] or 0)
+
+ def __repr__(self):
+ if self.tag:
+ s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch)
+ else:
+ s = '{}.{}.{}'.format(self.major, self.minor, self.patch)
+ if self.build > 0:
+ s += '.{}'.format(self.build)
+ return s
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if isinstance(other, str):
+ other = type(self)(other)
+ elif not isinstance(other, JujuVersion):
+ raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))
+ return (
+ self.major == other.major
+ and self.minor == other.minor
+ and self.tag == other.tag
+ and self.build == other.build
+ and self.patch == other.patch)
+
+ def __lt__(self, other):
+ if self is other:
+ return False
+ if isinstance(other, str):
+ other = type(self)(other)
+ elif not isinstance(other, JujuVersion):
+ raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))
+
+ if self.major != other.major:
+ return self.major < other.major
+ elif self.minor != other.minor:
+ return self.minor < other.minor
+ elif self.tag != other.tag:
+ if not self.tag:
+ return False
+ elif not other.tag:
+ return True
+ return self.tag < other.tag
+ elif self.patch != other.patch:
+ return self.patch < other.patch
+ elif self.build != other.build:
+ return self.build < other.build
+ return False
+
+ @classmethod
+ def from_environ(cls) -> 'JujuVersion':
+ """Build a JujuVersion from JUJU_VERSION."""
+ v = os.environ.get('JUJU_VERSION')
+ if v is None:
+ v = '0.0.0'
+ return cls(v)
+
+ def has_app_data(self) -> bool:
+ """Determine whether this juju version knows about app data."""
+ return (self.major, self.minor, self.patch) >= (2, 7, 0)
+
+ def is_dispatch_aware(self) -> bool:
+ """Determine whether this juju version knows about dispatch."""
+ return (self.major, self.minor, self.patch) >= (2, 8, 0)
+
+ def has_controller_storage(self) -> bool:
+ """Determine whether this juju version supports controller-side storage."""
+ return (self.major, self.minor, self.patch) >= (2, 8, 0)
diff --git a/coredns/venv/ops/lib/__init__.py b/coredns/venv/ops/lib/__init__.py
new file mode 100644
index 0000000..98c0cd1
--- /dev/null
+++ b/coredns/venv/ops/lib/__init__.py
@@ -0,0 +1,262 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import re
+import sys
+
+from ast import literal_eval
+from importlib.util import module_from_spec
+from importlib.machinery import ModuleSpec
+from pkgutil import get_importer
+from types import ModuleType
+from typing import List
+
+__all__ = ('use', 'autoimport')
+
+logger = logging.getLogger(__name__)
+
+# Registry of discovered libraries, keyed by (name, author); populated lazily
+# by autoimport() on the first call to use().
+_libraries = None
+
+# Matches metadata lines such as: LIBNAME = "foo" or LIBAPI = 2
+_libline_re = re.compile(r'''^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])''')
+_libname_re = re.compile(r'''^[a-z][a-z0-9]+$''')
+
+# Not perfect, but should do for now.
+_libauthor_re = re.compile(r'''^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$''')
+
+
+def use(name: str, api: int, author: str) -> ModuleType:
+    """Use a library from the ops libraries.
+
+    Args:
+        name: the name of the library requested.
+        api: the API version of the library.
+        author: the author of the library. If not given, requests the
+            one in the standard library.
+    Raises:
+        ImportError: if the library cannot be found.
+        TypeError: if the name, api, or author are the wrong type.
+        ValueError: if the name, api, or author are invalid.
+    """
+    # Validate argument types first, then their values.
+    if not isinstance(name, str):
+        raise TypeError("invalid library name: {!r} (must be a str)".format(name))
+    if not isinstance(author, str):
+        raise TypeError("invalid library author: {!r} (must be a str)".format(author))
+    if not isinstance(api, int):
+        raise TypeError("invalid library API: {!r} (must be an int)".format(api))
+    if api < 0:
+        raise ValueError('invalid library api: {} (must be ≥0)'.format(api))
+    if not _libname_re.match(name):
+        raise ValueError("invalid library name: {!r} (chars and digits only)".format(name))
+    if not _libauthor_re.match(author):
+        raise ValueError("invalid library author email: {!r}".format(author))
+
+    # Lazily scan sys.path for libraries on first use.
+    if _libraries is None:
+        autoimport()
+
+    # Candidates are kept sorted newest-first; take the first exact API match.
+    versions = _libraries.get((name, author), ())
+    for lib in versions:
+        if lib.api == api:
+            return lib.import_module()
+
+    # Not found: mention any other API versions we do have, to aid debugging.
+    others = ', '.join(str(lib.api) for lib in versions)
+    if others:
+        msg = 'cannot find "{}" from "{}" with API version {} (have {})'.format(
+            name, author, api, others)
+    else:
+        msg = 'cannot find library "{}" from "{}"'.format(name, author)
+
+    raise ImportError(msg, name=name)
+
+
+def autoimport():
+    """Find all libs in the path and enable use of them.
+
+    You only need to call this if you've installed a package or
+    otherwise changed sys.path in the current run, and need to see the
+    changes. Otherwise libraries are found on first call of `use`.
+    """
+    global _libraries
+    _libraries = {}
+    for spec in _find_all_specs(sys.path):
+        lib = _parse_lib(spec)
+        if lib is None:
+            # Not a valid opslib module; skip it.
+            continue
+
+        # Group by (name, author); keep each group sorted newest-first so
+        # `use` returns the highest patch level for a matching API.
+        versions = _libraries.setdefault((lib.name, lib.author), [])
+        versions.append(lib)
+        versions.sort(reverse=True)
+
+
+def _find_all_specs(path):
+    """Yield ModuleSpecs for candidate libraries under <entry>/<pkg>/opslib/.
+
+    path -- an iterable of directory names (typically sys.path).
+    """
+    for sys_dir in path:
+        # An empty path entry means the current directory.
+        if sys_dir == "":
+            sys_dir = "."
+        try:
+            top_dirs = os.listdir(sys_dir)
+        except (FileNotFoundError, NotADirectoryError):
+            continue
+        except OSError as e:
+            logger.debug("Tried to look for ops.lib packages under '%s': %s", sys_dir, e)
+            continue
+        logger.debug("Looking for ops.lib packages under '%s'", sys_dir)
+        for top_dir in top_dirs:
+            opslib = os.path.join(sys_dir, top_dir, 'opslib')
+            try:
+                lib_dirs = os.listdir(opslib)
+            except (FileNotFoundError, NotADirectoryError):
+                continue
+            except OSError as e:
+                logger.debug(" Tried '%s': %s", opslib, e)  # *lots* of things checked here
+                continue
+            else:
+                logger.debug(" Trying '%s'", opslib)
+            finder = get_importer(opslib)
+            if finder is None:
+                logger.debug(" Finder for '%s' is None", opslib)
+                continue
+            if not hasattr(finder, 'find_spec'):
+                # Finder cannot produce specs (e.g. legacy importer APIs).
+                logger.debug(" Finder for '%s' has no find_spec", opslib)
+                continue
+            for lib_dir in lib_dirs:
+                spec_name = "{}.opslib.{}".format(top_dir, lib_dir)
+                spec = finder.find_spec(spec_name)
+                if spec is None:
+                    logger.debug(" No spec for %r", spec_name)
+                    continue
+                if spec.loader is None:
+                    # a namespace package; not supported
+                    logger.debug(" No loader for %r (probably a namespace package)", spec_name)
+                    continue
+
+                logger.debug(" Found %r", spec_name)
+                yield spec
+
+
+# Only the first this many lines of a file are looked at for the LIB* constants.
+_MAX_LIB_LINES = 99
+# These keys, with these types, are all required for a module to be an opslib.
+_NEEDED_KEYS = {'NAME': str, 'AUTHOR': str, 'API': int, 'PATCH': int}
+
+
+def _join_and(keys: List[str]) -> str:
+    """Join strings with commas and a final ", and " (e.g. "a, b, and c")."""
+    if len(keys) == 0:
+        return ""
+    if len(keys) == 1:
+        return keys[0]
+    return ", ".join(keys[:-1]) + ", and " + keys[-1]
+
+
+class _Missing:
+    """A silly little helper to only work out the difference between
+    what was found and what was needed when logging"""
+
+    def __init__(self, found):
+        # found: mapping of metadata keys discovered so far.
+        self._found = found
+
+    def __str__(self):
+        """Describe which of the required metadata keys are missing."""
+        exp = set(_NEEDED_KEYS)
+        got = set(self._found)
+        if len(got) == 0:
+            return "missing {}".format(_join_and(sorted(exp)))
+        return "got {}, but missing {}".format(
+            _join_and(sorted(got)),
+            _join_and(sorted(exp - got)))
+
+
+def _parse_lib(spec):
+    """Extract opslib metadata (LIBNAME/LIBAUTHOR/LIBAPI/LIBPATCH) from a module.
+
+    Returns a _Lib on success, or None if the file cannot be read or the
+    required metadata is missing or of the wrong type.
+    """
+    if spec.origin is None:
+        # "can't happen"
+        logger.warning("No origin for %r (no idea why; please report)", spec.name)
+        return None
+
+    logger.debug(" Parsing %r", spec.name)
+
+    try:
+        with open(spec.origin, 'rt', encoding='utf-8') as f:
+            libinfo = {}
+            for n, line in enumerate(f):
+                if len(libinfo) == len(_NEEDED_KEYS):
+                    # All required keys found; stop reading early.
+                    break
+                if n > _MAX_LIB_LINES:
+                    logger.debug(
+                        " Missing opslib metadata after reading to line %d: %s",
+                        _MAX_LIB_LINES, _Missing(libinfo))
+                    return None
+                m = _libline_re.match(line)
+                if m is None:
+                    continue
+                key, value = m.groups()
+                if key in _NEEDED_KEYS:
+                    # literal_eval safely parses the int/str literal on the line.
+                    value = literal_eval(value)
+                    if not isinstance(value, _NEEDED_KEYS[key]):
+                        logger.debug(
+                            " Bad type for %s: expected %s, got %s",
+                            key, _NEEDED_KEYS[key].__name__, type(value).__name__)
+                        return None
+                    libinfo[key] = value
+            else:
+                # for/else: the loop ran off the end of the file without break,
+                # so check whether all required keys were actually found.
+                if len(libinfo) != len(_NEEDED_KEYS):
+                    logger.debug(
+                        " Missing opslib metadata after reading to end of file: %s",
+                        _Missing(libinfo))
+                    return None
+    except Exception as e:
+        logger.debug(" Failed: %s", e)
+        return None
+
+    lib = _Lib(spec, libinfo['NAME'], libinfo['AUTHOR'], libinfo['API'], libinfo['PATCH'])
+    logger.debug(" Success: found library %s", lib)
+
+    return lib
+
+
+class _Lib:
+    """Record of a discovered library: its module spec, identity, and version."""
+
+    def __init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: int):
+        self.spec = spec
+        self.name = name
+        self.author = author
+        self.api = api
+        self.patch = patch
+
+        # The imported module, cached after the first import_module() call.
+        self._module = None
+
+    def __repr__(self):
+        return "<_Lib {}>".format(self)
+
+    def __str__(self):
+        return "{0.name} by {0.author}, API {0.api}, patch {0.patch}".format(self)
+
+    def import_module(self) -> ModuleType:
+        """Import (at most once) and return this library's module."""
+        if self._module is None:
+            module = module_from_spec(self.spec)
+            self.spec.loader.exec_module(module)
+            self._module = module
+        return self._module
+
+    def __eq__(self, other):
+        if not isinstance(other, _Lib):
+            return NotImplemented
+        a = (self.name, self.author, self.api, self.patch)
+        b = (other.name, other.author, other.api, other.patch)
+        return a == b
+
+    def __lt__(self, other):
+        # Tuple comparison orders by name, author, then api and patch, so
+        # sorting with reverse=True (see autoimport) puts the newest first.
+        if not isinstance(other, _Lib):
+            return NotImplemented
+        a = (self.name, self.author, self.api, self.patch)
+        b = (other.name, other.author, other.api, other.patch)
+        return a < b
diff --git a/coredns/venv/ops/log.py b/coredns/venv/ops/log.py
new file mode 100644
index 0000000..4aac554
--- /dev/null
+++ b/coredns/venv/ops/log.py
@@ -0,0 +1,51 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import logging
+
+
+class JujuLogHandler(logging.Handler):
+    """A handler for sending logs to Juju via juju-log."""
+
+    def __init__(self, model_backend, level=logging.DEBUG):
+        # model_backend: provides the juju_log(level, message) hook-tool wrapper.
+        super().__init__(level)
+        self.model_backend = model_backend
+
+    def emit(self, record):
+        """Forward a formatted log record to Juju through the backend."""
+        self.model_backend.juju_log(record.levelname, self.format(record))
+
+
+def setup_root_logging(model_backend, debug=False):
+    """Setup python logging to forward messages to juju-log.
+
+    By default, logging is set to DEBUG level, and messages will be filtered by Juju.
+    Charmers can also set their own default log level with::
+
+        logging.getLogger().setLevel(logging.INFO)
+
+    model_backend -- a ModelBackend to use for juju-log
+    debug -- if True, write logs to stderr as well as to juju-log.
+    """
+    logger = logging.getLogger()
+    logger.setLevel(logging.DEBUG)
+    logger.addHandler(JujuLogHandler(model_backend))
+    if debug:
+        handler = logging.StreamHandler()
+        formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+
+    # Also report uncaught exceptions from charm code through juju-log.
+    sys.excepthook = lambda etype, value, tb: logger.error(
+        "Uncaught exception while in charm code:", exc_info=(etype, value, tb))
diff --git a/coredns/venv/ops/main.py b/coredns/venv/ops/main.py
new file mode 100644
index 0000000..3e1ea94
--- /dev/null
+++ b/coredns/venv/ops/main.py
@@ -0,0 +1,404 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import logging
+import os
+import shutil
+import subprocess
+import sys
+import typing
+import warnings
+from pathlib import Path
+
+import yaml
+
+import ops.charm
+import ops.framework
+import ops.model
+import ops.storage
+
+from ops.log import setup_root_logging
+from ops.jujuversion import JujuVersion
+
+# Filename (relative to the charm dir) of the local SQLite state database.
+CHARM_STATE_FILE = '.unit-state.db'
+
+
+logger = logging.getLogger()
+
+
+def _exe_path(path: Path) -> typing.Optional[Path]:
+    """Find and return the full path to the given binary.
+
+    Here path is the absolute path to a binary, but might be missing an extension.
+    """
+    # mode=os.F_OK makes which() require only existence (not executability),
+    # and searching just path.parent restricts it to that one directory —
+    # presumably so platform extensions (e.g. .bat on Windows) resolve; confirm.
+    p = shutil.which(path.name, mode=os.F_OK, path=str(path.parent))
+    if p is None:
+        return None
+    return Path(p)
+
+
+def _get_charm_dir():
+    """Return the charm directory from $JUJU_CHARM_DIR, or infer it from __file__."""
+    charm_dir = os.environ.get("JUJU_CHARM_DIR")
+    if charm_dir is None:
+        # Assume $JUJU_CHARM_DIR/lib/op/main.py structure.
+        charm_dir = Path('{}/../../..'.format(__file__)).resolve()
+    else:
+        charm_dir = Path(charm_dir).resolve()
+    return charm_dir
+
+
+def _create_event_link(charm, bound_event, link_to):
+    """Create a symlink for a particular event.
+
+    charm -- A charm object.
+    bound_event -- An event for which to create a symlink.
+    link_to -- What the event link should point to
+    """
+    # Hooks and actions live in different directories and name schemes.
+    if issubclass(bound_event.event_type, ops.charm.HookEvent):
+        event_dir = charm.framework.charm_dir / 'hooks'
+        event_path = event_dir / bound_event.event_kind.replace('_', '-')
+    elif issubclass(bound_event.event_type, ops.charm.ActionEvent):
+        if not bound_event.event_kind.endswith("_action"):
+            raise RuntimeError(
+                'action event name {} needs _action suffix'.format(bound_event.event_kind))
+        event_dir = charm.framework.charm_dir / 'actions'
+        # The event_kind is suffixed with "_action" while the executable is not.
+        event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-')
+    else:
+        raise RuntimeError(
+            'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type))
+
+    event_dir.mkdir(exist_ok=True)
+    if not event_path.exists():
+        # Link relative to the hooks/actions dir so the charm stays relocatable.
+        target_path = os.path.relpath(link_to, str(event_dir))
+
+        # Ignore the non-symlink files or directories
+        # assuming the charm author knows what they are doing.
+        logger.debug(
+            'Creating a new relative symlink at %s pointing to %s',
+            event_path, target_path)
+        event_path.symlink_to(target_path)
+
+
+def _setup_event_links(charm_dir, charm):
+    """Set up links for supported events that originate from Juju.
+
+    Whether a charm can handle an event or not can be determined by
+    introspecting which events are defined on it.
+
+    Hooks or actions are created as symlinks to the charm code file
+    which is determined by inspecting symlinks provided by the charm
+    author at hooks/install or hooks/start.
+
+    charm_dir -- A root directory of the charm.
+    charm -- An instance of the Charm class.
+
+    """
+    # XXX: on windows this function does not accomplish what it wants to:
+    # it creates symlinks with no extension pointing to a .py
+    # and juju only knows how to handle .exe, .bat, .cmd, and .ps1
+    # so it does its job, but does not accomplish anything as the
+    # hooks aren't 'callable'.
+    link_to = os.path.realpath(os.environ.get("JUJU_DISPATCH_PATH", sys.argv[0]))
+    for bound_event in charm.on.events().values():
+        # Only events that originate from Juju need symlinks.
+        if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)):
+            _create_event_link(charm, bound_event, link_to)
+
+
+def _emit_charm_event(charm, event_name):
+    """Emits a charm event based on a Juju event name.
+
+    charm -- A charm instance to emit an event from.
+    event_name -- A Juju event name to emit on a charm.
+    """
+    event_to_emit = None
+    try:
+        event_to_emit = getattr(charm.on, event_name)
+    except AttributeError:
+        logger.debug("Event %s not defined for %s.", event_name, charm)
+
+    # If the event is not supported by the charm implementation, do
+    # not error out or try to emit it. This is to support rollbacks.
+    if event_to_emit is not None:
+        args, kwargs = _get_event_args(charm, event_to_emit)
+        logger.debug('Emitting Juju event %s.', event_name)
+        event_to_emit.emit(*args, **kwargs)
+
+
+def _get_event_args(charm, bound_event):
+    """Build the positional/keyword arguments for an event from the Juju environment."""
+    event_type = bound_event.event_type
+    model = charm.framework.model
+
+    if issubclass(event_type, ops.charm.RelationEvent):
+        relation_name = os.environ['JUJU_RELATION']
+        # JUJU_RELATION_ID has the form "name:id"; keep only the numeric id.
+        relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1])
+        relation = model.get_relation(relation_name, relation_id)
+    else:
+        relation = None
+
+    remote_app_name = os.environ.get('JUJU_REMOTE_APP', '')
+    remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '')
+    if remote_app_name or remote_unit_name:
+        if not remote_app_name:
+            # Derive the app name from the unit name ("app/0" -> "app").
+            if '/' not in remote_unit_name:
+                raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name))
+            remote_app_name = remote_unit_name.split('/')[0]
+        args = [relation, model.get_app(remote_app_name)]
+        if remote_unit_name:
+            args.append(model.get_unit(remote_unit_name))
+        return args, {}
+    elif relation:
+        return [relation], {}
+    return [], {}
+
+
+class _Dispatcher:
+    """Encapsulate how to figure out what event Juju wants us to run.
+
+    Also knows how to run “legacy” hooks when Juju called us via a top-level
+    ``dispatch`` binary.
+
+    Args:
+        charm_dir: the toplevel directory of the charm
+
+    Attributes:
+        event_name: the name of the event to run
+        is_dispatch_aware: are we running under a Juju that knows about the
+            dispatch binary, and is that binary present?
+
+    """
+
+    def __init__(self, charm_dir: Path):
+        self._charm_dir = charm_dir
+        self._exec_path = Path(os.environ.get('JUJU_DISPATCH_PATH', sys.argv[0]))
+
+        # Use the dispatch mechanism only when Juju is new enough AND the
+        # charm actually ships a 'dispatch' executable.
+        dispatch = charm_dir / 'dispatch'
+        if JujuVersion.from_environ().is_dispatch_aware() and _exe_path(dispatch) is not None:
+            self._init_dispatch()
+        else:
+            self._init_legacy()
+
+    def ensure_event_links(self, charm):
+        """Make sure necessary symlinks are present on disk"""
+
+        if self.is_dispatch_aware:
+            # links aren't needed
+            return
+
+        # When a charm is force-upgraded and a unit is in an error state Juju
+        # does not run upgrade-charm and instead runs the failed hook followed
+        # by config-changed. Given the nature of force-upgrading the hook setup
+        # code is not triggered on config-changed.
+        #
+        # 'start' event is included as Juju does not fire the install event for
+        # K8s charms (see LP: #1854635).
+        if (self.event_name in ('install', 'start', 'upgrade_charm')
+                or self.event_name.endswith('_storage_attached')):
+            _setup_event_links(self._charm_dir, charm)
+
+    def run_any_legacy_hook(self):
+        """Run any extant legacy hook.
+
+        If there is both a dispatch file and a legacy hook for the
+        current event, run the wanted legacy hook.
+        """
+
+        if not self.is_dispatch_aware:
+            # we *are* the legacy hook
+            return
+
+        dispatch_path = _exe_path(self._charm_dir / self._dispatch_path)
+        if dispatch_path is None:
+            logger.debug("Legacy %s does not exist.", self._dispatch_path)
+            return
+
+        # super strange that there isn't an is_executable
+        if not os.access(str(dispatch_path), os.X_OK):
+            logger.warning("Legacy %s exists but is not executable.", self._dispatch_path)
+            return
+
+        # Avoid infinite recursion if the legacy hook is a link back to us.
+        if dispatch_path.resolve() == Path(sys.argv[0]).resolve():
+            logger.debug("Legacy %s is just a link to ourselves.", self._dispatch_path)
+            return
+
+        argv = sys.argv.copy()
+        argv[0] = str(dispatch_path)
+        logger.info("Running legacy %s.", self._dispatch_path)
+        try:
+            subprocess.run(argv, check=True)
+        except subprocess.CalledProcessError as e:
+            # Propagate the legacy hook's failure status to Juju.
+            logger.warning("Legacy %s exited with status %d.", self._dispatch_path, e.returncode)
+            sys.exit(e.returncode)
+        except OSError as e:
+            logger.warning("Unable to run legacy %s: %s", self._dispatch_path, e)
+            sys.exit(1)
+        else:
+            logger.debug("Legacy %s exited with status 0.", self._dispatch_path)
+
+    def _set_name_from_path(self, path: Path):
+        """Sets the name attribute to that which can be inferred from the given path."""
+        name = path.name.replace('-', '_')
+        if path.parent.name == 'actions':
+            name = '{}_action'.format(name)
+        self.event_name = name
+
+    def _init_legacy(self):
+        """Set up the 'legacy' dispatcher.
+
+        The current Juju doesn't know about 'dispatch' and calls hooks
+        explicitly.
+        """
+        self.is_dispatch_aware = False
+        self._set_name_from_path(self._exec_path)
+
+    def _init_dispatch(self):
+        """Set up the new 'dispatch' dispatcher.
+
+        The current Juju will run 'dispatch' if it exists, and otherwise fall
+        back to the old behaviour.
+
+        JUJU_DISPATCH_PATH will be set to the wanted hook, e.g. hooks/install,
+        in both cases.
+        """
+        self._dispatch_path = Path(os.environ['JUJU_DISPATCH_PATH'])
+
+        # Guard against dispatch re-invoking the charm recursively.
+        if 'OPERATOR_DISPATCH' in os.environ:
+            logger.debug("Charm called itself via %s.", self._dispatch_path)
+            sys.exit(0)
+        os.environ['OPERATOR_DISPATCH'] = '1'
+
+        self.is_dispatch_aware = True
+        self._set_name_from_path(self._dispatch_path)
+
+    def is_restricted_context(self):
+        """Return True if we are running in a restricted Juju context.
+
+        When in a restricted context, most commands (relation-get, config-get,
+        state-get) are not available. As such, we change how we interact with
+        Juju.
+        """
+        return self.event_name in ('collect_metrics',)
+
+
+def _should_use_controller_storage(db_path: Path, meta: 'ops.charm.CharmMeta') -> bool:
+    """Figure out whether we want to use controller storage or not.
+
+    db_path -- path where the local SQLite state database would live.
+    meta -- the charm's parsed metadata.
+    """
+    # if you've previously used local state, carry on using that
+    if db_path.exists():
+        logger.debug("Using local storage: %s already exists", db_path)
+        return False
+
+    # if you're not in k8s you don't need controller storage
+    if 'kubernetes' not in meta.series:
+        logger.debug("Using local storage: not a kubernetes charm")
+        return False
+
+    # are we in a new enough Juju?
+    cur_version = JujuVersion.from_environ()
+
+    if cur_version.has_controller_storage():
+        logger.debug("Using controller storage: JUJU_VERSION=%s", cur_version)
+        return True
+    else:
+        logger.debug("Using local storage: JUJU_VERSION=%s", cur_version)
+        return False
+
+
+def main(charm_class: typing.Type[ops.charm.CharmBase],
+         use_juju_for_storage: typing.Optional[bool] = None):
+    """Setup the charm and dispatch the observed event.
+
+    The event name is based on the way this executable was called (argv[0]).
+
+    Args:
+        charm_class: your charm class.
+        use_juju_for_storage: whether to use controller-side storage. If not specified
+            then kubernetes charms that haven't previously used local storage and that
+            are running on a new enough Juju default to controller-side storage,
+            otherwise local storage is used.
+    """
+    charm_dir = _get_charm_dir()
+
+    model_backend = ops.model._ModelBackend()
+    debug = ('JUJU_DEBUG' in os.environ)
+    setup_root_logging(model_backend, debug=debug)
+    logger.debug("Operator Framework %s up and running.", ops.__version__)
+
+    dispatcher = _Dispatcher(charm_dir)
+    dispatcher.run_any_legacy_hook()
+
+    metadata = (charm_dir / 'metadata.yaml').read_text()
+    actions_meta = charm_dir / 'actions.yaml'
+    if actions_meta.exists():
+        actions_metadata = actions_meta.read_text()
+    else:
+        actions_metadata = None
+
+    if not yaml.__with_libyaml__:
+        logger.debug('yaml does not have libyaml extensions, using slower pure Python yaml loader')
+    meta = ops.charm.CharmMeta.from_yaml(metadata, actions_metadata)
+    model = ops.model.Model(meta, model_backend)
+
+    charm_state_path = charm_dir / CHARM_STATE_FILE
+
+    if use_juju_for_storage and not ops.storage.juju_backend_available():
+        # raise an exception; the charm is broken and needs fixing.
+        msg = 'charm set use_juju_for_storage=True, but Juju version {} does not support it'
+        raise RuntimeError(msg.format(JujuVersion.from_environ()))
+
+    if use_juju_for_storage is None:
+        use_juju_for_storage = _should_use_controller_storage(charm_state_path, meta)
+
+    if use_juju_for_storage:
+        if dispatcher.is_restricted_context():
+            # TODO: jam 2020-06-30 This unconditionally avoids running a collect metrics event
+            # Though we eventually expect that juju will run collect-metrics in a
+            # non-restricted context. Once we can determine that we are running collect-metrics
+            # in a non-restricted context, we should fire the event as normal.
+            logger.debug('"%s" is not supported when using Juju for storage\n'
+                         'see: https://github.com/canonical/operator/issues/348',
+                         dispatcher.event_name)
+            # Note that we don't exit nonzero, because that would cause Juju to rerun the hook
+            return
+        store = ops.storage.JujuStorage()
+    else:
+        store = ops.storage.SQLiteStorage(charm_state_path)
+    framework = ops.framework.Framework(store, charm_dir, meta, model)
+    try:
+        # Older charms accepted a second 'key' argument; inspect the charm
+        # class's signature to decide which calling convention to use.
+        sig = inspect.signature(charm_class)
+        try:
+            sig.bind(framework)
+        except TypeError:
+            msg = (
+                "the second argument, 'key', has been deprecated and will be "
+                "removed after the 0.7 release")
+            warnings.warn(msg, DeprecationWarning)
+            charm = charm_class(framework, None)
+        else:
+            charm = charm_class(framework)
+        dispatcher.ensure_event_links(charm)
+
+        # TODO: Remove the collect_metrics check below as soon as the relevant
+        # Juju changes are made.
+        #
+        # Skip reemission of deferred events for collect-metrics events because
+        # they do not have the full access to all hook tools.
+        if not dispatcher.is_restricted_context():
+            framework.reemit()
+
+        _emit_charm_event(charm, dispatcher.event_name)
+
+        framework.commit()
+    finally:
+        framework.close()
diff --git a/coredns/venv/ops/model.py b/coredns/venv/ops/model.py
new file mode 100644
index 0000000..55addf5
--- /dev/null
+++ b/coredns/venv/ops/model.py
@@ -0,0 +1,1284 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import decimal
+import ipaddress
+import json
+import os
+import re
+import shutil
+import tempfile
+import time
+import typing
+import weakref
+
+from abc import ABC, abstractmethod
+from collections.abc import Mapping, MutableMapping
+from pathlib import Path
+from subprocess import run, PIPE, CalledProcessError
+import yaml
+
+import ops
+from ops.jujuversion import JujuVersion
+
+
+# Prefer the C-accelerated libyaml dumper when PyYAML was built against it;
+# fall back to the pure-Python SafeDumper otherwise.
+if yaml.__with_libyaml__:
+    _DefaultDumper = yaml.CSafeDumper
+else:
+    _DefaultDumper = yaml.SafeDumper
+
+
+class Model:
+    """Represents the Juju Model as seen from this unit.
+
+    This should not be instantiated directly by Charmers, but can be accessed as `self.model`
+    from any class that derives from Object.
+    """
+
+    def __init__(self, meta: 'ops.charm.CharmMeta', backend: '_ModelBackend'):
+        # The cache must exist before any entity lookup (get_unit below uses it).
+        self._cache = _ModelCache(backend)
+        self._backend = backend
+        # Resolve our own unit eagerly; everything else below is lazy.
+        self._unit = self.get_unit(self._backend.unit_name)
+        self._relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache)
+        self._config = ConfigData(self._backend)
+        self._resources = Resources(list(meta.resources), self._backend)
+        self._pod = Pod(self._backend)
+        self._storages = StorageMapping(list(meta.storages), self._backend)
+        self._bindings = BindingMapping(self._backend)
+
+    @property
+    def unit(self) -> 'Unit':
+        """A :class:`Unit` that represents the unit that is running this code (eg yourself)"""
+        return self._unit
+
+    @property
+    def app(self):
+        """A :class:`Application` that represents the application this unit is a part of."""
+        return self._unit.app
+
+    @property
+    def relations(self) -> 'RelationMapping':
+        """Mapping of endpoint to list of :class:`Relation`
+
+        Answers the question "what am I currently related to".
+        See also :meth:`.get_relation`.
+        """
+        return self._relations
+
+    @property
+    def config(self) -> 'ConfigData':
+        """Return a mapping of config for the current application."""
+        return self._config
+
+    @property
+    def resources(self) -> 'Resources':
+        """Access to resources for this charm.
+
+        Use ``model.resources.fetch(resource_name)`` to get the path on disk
+        where the resource can be found.
+        """
+        return self._resources
+
+    @property
+    def storages(self) -> 'StorageMapping':
+        """Mapping of storage_name to :class:`Storage` as defined in metadata.yaml"""
+        return self._storages
+
+    @property
+    def pod(self) -> 'Pod':
+        """Use ``model.pod.set_spec`` to set the container specification for Kubernetes charms."""
+        return self._pod
+
+    @property
+    def name(self) -> str:
+        """Return the name of the Model that this unit is running in.
+
+        This is read from the environment variable ``JUJU_MODEL_NAME``.
+        """
+        return self._backend.model_name
+
+    def get_unit(self, unit_name: str) -> 'Unit':
+        """Get an arbitrary unit by name.
+
+        Internally this uses a cache, so asking for the same unit two times will
+        return the same object.
+        """
+        return self._cache.get(Unit, unit_name)
+
+    def get_app(self, app_name: str) -> 'Application':
+        """Get an application by name.
+
+        Internally this uses a cache, so asking for the same application two times will
+        return the same object.
+        """
+        return self._cache.get(Application, app_name)
+
+    def get_relation(
+            self, relation_name: str,
+            relation_id: typing.Optional[int] = None) -> 'Relation':
+        """Get a specific Relation instance.
+
+        If relation_id is not given, this will return the Relation instance if the
+        relation is established only once or None if it is not established. If this
+        same relation is established multiple times the error TooManyRelatedAppsError is raised.
+
+        Args:
+            relation_name: The name of the endpoint for this charm
+            relation_id: An identifier for a specific relation. Used to disambiguate when a
+                given application has more than one relation on a given endpoint.
+        Raises:
+            TooManyRelatedAppsError: is raised if there is more than one relation to the
+                supplied relation_name and no relation_id was supplied
+        """
+        return self.relations._get_unique(relation_name, relation_id)
+
+    def get_binding(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding':
+        """Get a network space binding.
+
+        Args:
+            binding_key: The relation name or instance to obtain bindings for.
+        Returns:
+            If ``binding_key`` is a relation name, the method returns the default binding
+            for that relation. If a relation instance is provided, the method first looks
+            up a more specific binding for that specific relation ID, and if none is found
+            falls back to the default binding for the relation name.
+        """
+        return self._bindings.get(binding_key)
+
+
+class _ModelCache:
+    """Weak-value cache of model entities (units, applications).
+
+    Guarantees that asking for the same entity twice returns the same object,
+    while still allowing unreferenced entities to be garbage collected.
+    """
+
+    def __init__(self, backend):
+        self._backend = backend
+        self._weakrefs = weakref.WeakValueDictionary()
+
+    def get(self, entity_type, *args):
+        # The key includes the type, so Unit('x') and Application('x') cannot collide.
+        key = (entity_type,) + args
+        entity = self._weakrefs.get(key)
+        if entity is None:
+            entity = entity_type(*args, backend=self._backend, cache=self)
+            self._weakrefs[key] = entity
+        return entity
+
+
+class Application:
+ """Represents a named application in the model.
+
+ This might be your application, or might be an application that you are related to.
+ Charmers should not instantiate Application objects directly, but should use
+ :meth:`Model.get_app` if they need a reference to a given application.
+
+ Attributes:
+ name: The name of this application (eg, 'mysql'). This name may differ from the name of
+ the charm, if the user has deployed it to a different name.
+ """
+
+ def __init__(self, name, backend, cache):
+ self.name = name
+ self._backend = backend
+ self._cache = cache
+ self._is_our_app = self.name == self._backend.app_name
+ self._status = None
+
+ def _invalidate(self):
+ self._status = None
+
+ @property
+ def status(self) -> 'StatusBase':
+ """Used to report or read the status of the overall application.
+
+ Can only be read and set by the lead unit of the application.
+
+ The status of remote units is always Unknown.
+
+ Raises:
+ RuntimeError: if you try to set the status of another application, or if you try to
+ set the status of this application as a unit that is not the leader.
+ InvalidStatusError: if you try to set the status to something that is not a
+ :class:`StatusBase`
+
+ Example::
+
+ self.model.app.status = BlockedStatus('I need a human to come help me')
+ """
+ if not self._is_our_app:
+ return UnknownStatus()
+
+ if not self._backend.is_leader():
+ raise RuntimeError('cannot get application status as a non-leader unit')
+
+ if self._status:
+ return self._status
+
+ s = self._backend.status_get(is_app=True)
+ self._status = StatusBase.from_name(s['status'], s['message'])
+ return self._status
+
+ @status.setter
+ def status(self, value: 'StatusBase'):
+ if not isinstance(value, StatusBase):
+ raise InvalidStatusError(
+ 'invalid value provided for application {} status: {}'.format(self, value)
+ )
+
+ if not self._is_our_app:
+ raise RuntimeError('cannot to set status for a remote application {}'.format(self))
+
+ if not self._backend.is_leader():
+ raise RuntimeError('cannot set application status as a non-leader unit')
+
+ self._backend.status_set(value.name, value.message, is_app=True)
+ self._status = value
+
+ def __repr__(self):
+ return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+
+class Unit:
+    """Represents a named unit in the model.
+
+    This might be your unit, another unit of your application, or a unit of another application
+    that you are related to.
+
+    Attributes:
+        name: The name of the unit (eg, 'mysql/0')
+        app: The Application the unit is a part of.
+    """
+
+    def __init__(self, name, backend, cache):
+        self.name = name
+
+        # Juju unit names are '<app>/<number>'; the prefix identifies the app.
+        app_name = name.split('/')[0]
+        self.app = cache.get(Application, app_name)
+
+        self._backend = backend
+        self._cache = cache
+        self._is_our_unit = self.name == self._backend.unit_name
+        # Lazily fetched/set; None means "not yet read from status-get".
+        self._status = None
+
+    def _invalidate(self):
+        # Drop the cached status so the next read queries the backend again.
+        self._status = None
+
+    @property
+    def status(self) -> 'StatusBase':
+        """Used to report or read the status of a specific unit.
+
+        The status of any unit other than yourself is always Unknown.
+
+        Raises:
+            RuntimeError: if you try to set the status of a unit other than yourself.
+            InvalidStatusError: if you try to set the status to something other than
+                a :class:`StatusBase`
+        Example::
+
+            self.model.unit.status = MaintenanceStatus('reconfiguring the frobnicators')
+        """
+        if not self._is_our_unit:
+            return UnknownStatus()
+
+        if self._status:
+            return self._status
+
+        s = self._backend.status_get(is_app=False)
+        self._status = StatusBase.from_name(s['status'], s['message'])
+        return self._status
+
+    @status.setter
+    def status(self, value: 'StatusBase'):
+        if not isinstance(value, StatusBase):
+            raise InvalidStatusError(
+                'invalid value provided for unit {} status: {}'.format(self, value)
+            )
+
+        if not self._is_our_unit:
+            raise RuntimeError('cannot set status for a remote unit {}'.format(self))
+
+        self._backend.status_set(value.name, value.message, is_app=False)
+        self._status = value
+
+    def __repr__(self):
+        return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+    def is_leader(self) -> bool:
+        """Return whether this unit is the leader of its application.
+
+        This can only be called for your own unit.
+        Returns:
+            True if you are the leader, False otherwise
+        Raises:
+            RuntimeError: if called for a unit that is not yourself
+        """
+        if self._is_our_unit:
+            # This value is not cached as it is not guaranteed to persist for the whole duration
+            # of a hook execution.
+            return self._backend.is_leader()
+        else:
+            raise RuntimeError(
+                'leadership status of remote units ({}) is not visible to other'
+                ' applications'.format(self)
+            )
+
+    def set_workload_version(self, version: str) -> None:
+        """Record the version of the software running as the workload.
+
+        This shouldn't be confused with the revision of the charm. This is informative only;
+        shown in the output of 'juju status'.
+        """
+        if not isinstance(version, str):
+            raise TypeError("workload version must be a str, not {}: {!r}".format(
+                type(version).__name__, version))
+        self._backend.application_version_set(version)
+
+
+class LazyMapping(Mapping, ABC):
+    """Represents a dict that isn't populated until it is accessed.
+
+    Charm authors should generally never need to use this directly, but it forms
+    the basis for many of the dicts that the framework tracks.
+    """
+
+    # Sentinel: None means "not loaded yet". Subclasses must implement _load().
+    _lazy_data = None
+
+    @abstractmethod
+    def _load(self):
+        """Return the dict that backs this mapping; called once per (re)load."""
+        raise NotImplementedError()
+
+    @property
+    def _data(self):
+        # Load on first access and memoize until _invalidate() is called.
+        data = self._lazy_data
+        if data is None:
+            data = self._lazy_data = self._load()
+        return data
+
+    def _invalidate(self):
+        # Drop the cached data; the next access reloads via _load().
+        self._lazy_data = None
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, key):
+        return self._data[key]
+
+    def __repr__(self):
+        return repr(self._data)
+
+
+class RelationMapping(Mapping):
+    """Map of relation names to lists of :class:`Relation` instances."""
+
+    def __init__(self, relations_meta, our_unit, backend, cache):
+        # Names of peer relations; peers are treated specially in __getitem__.
+        self._peers = set()
+        for name, relation_meta in relations_meta.items():
+            if relation_meta.role.is_peer():
+                self._peers.add(name)
+        self._our_unit = our_unit
+        self._backend = backend
+        self._cache = cache
+        # None means "not loaded yet" for each relation name.
+        self._data = {relation_name: None for relation_name in relations_meta}
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, relation_name):
+        is_peer = relation_name in self._peers
+        relation_list = self._data[relation_name]
+        # Lazily build the Relation list from relation-ids on first access.
+        if relation_list is None:
+            relation_list = self._data[relation_name] = []
+            for rid in self._backend.relation_ids(relation_name):
+                relation = Relation(relation_name, rid, is_peer,
+                                    self._our_unit, self._backend, self._cache)
+                relation_list.append(relation)
+        return relation_list
+
+    def _invalidate(self, relation_name):
+        """Used to wipe the cache of a given relation_name.
+
+        Not meant to be used by Charm authors. The content of relation data is
+        static for the lifetime of a hook, so it is safe to cache in memory once
+        accessed.
+        """
+        self._data[relation_name] = None
+
+    def _get_unique(self, relation_name, relation_id=None):
+        """Return a single Relation for relation_name, or None.
+
+        See :meth:`Model.get_relation` for the full contract.
+        """
+        if relation_id is not None:
+            if not isinstance(relation_id, int):
+                raise ModelError('relation id {} must be int or None not {}'.format(
+                    relation_id,
+                    type(relation_id).__name__))
+            for relation in self[relation_name]:
+                if relation.id == relation_id:
+                    return relation
+            else:
+                # The relation may be dead, but it is not forgotten.
+                # (for/else: reached only when no live relation matched the id;
+                # a fresh Relation is built so its data can still be inspected.)
+                is_peer = relation_name in self._peers
+                return Relation(relation_name, relation_id, is_peer,
+                                self._our_unit, self._backend, self._cache)
+        num_related = len(self[relation_name])
+        if num_related == 0:
+            return None
+        elif num_related == 1:
+            return self[relation_name][0]
+        else:
+            # TODO: We need something in the framework to catch and gracefully handle
+            # errors, ideally integrating the error catching with Juju's mechanisms.
+            raise TooManyRelatedAppsError(relation_name, num_related, 1)
+
+
+class BindingMapping:
+    """Mapping of endpoints to network bindings.
+
+    Charm authors should not instantiate this directly, but access it via
+    :meth:`Model.get_binding`
+    """
+
+    def __init__(self, backend):
+        self._backend = backend
+        self._data = {}
+
+    def get(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding':
+        """Get a specific Binding for an endpoint/relation.
+
+        Not used directly by Charm authors. See :meth:`Model.get_binding`
+        """
+        if isinstance(binding_key, Relation):
+            binding_name = binding_key.name
+            relation_id = binding_key.id
+        elif isinstance(binding_key, str):
+            binding_name = binding_key
+            relation_id = None
+        else:
+            raise ModelError('binding key must be str or relation instance, not {}'
+                             ''.format(type(binding_key).__name__))
+        # NOTE(review): the cache is keyed on the raw binding_key (the str or
+        # Relation object itself), not on (binding_name, relation_id) — so the
+        # same relation looked up via different objects may yield distinct
+        # Binding instances; presumably acceptable since Binding is cheap.
+        binding = self._data.get(binding_key)
+        if binding is None:
+            binding = Binding(binding_name, relation_id, self._backend)
+            self._data[binding_key] = binding
+        return binding
+
+
+class Binding:
+    """Binding to a network space.
+
+    Attributes:
+        name: The name of the endpoint this binding represents (eg, 'db')
+    """
+
+    def __init__(self, name, relation_id, backend):
+        self.name = name
+        self._relation_id = relation_id
+        self._backend = backend
+        # Lazily populated by the `network` property.
+        self._network = None
+
+    @property
+    def network(self) -> 'Network':
+        """The network information for this binding."""
+        if self._network is None:
+            try:
+                self._network = Network(self._backend.network_get(self.name, self._relation_id))
+            except RelationNotFoundError:
+                if self._relation_id is None:
+                    raise
+                # If a relation is dead, we can still get network info associated with an
+                # endpoint itself
+                self._network = Network(self._backend.network_get(self.name))
+        return self._network
+
+
+class Network:
+    """Network space details.
+
+    Charm authors should not instantiate this directly, but should get access to the Network
+    definition from :meth:`Model.get_binding` and its ``network`` attribute.
+
+    Attributes:
+        interfaces: A list of :class:`NetworkInterface` details. This includes the
+            information about how your application should be configured (eg, what
+            IP addresses should you bind to.)
+            Note that multiple addresses for a single interface are represented as multiple
+            interfaces. (eg, ``[NetworkInfo('ens1', '10.1.1.1/32'),
+            NetworkInfo('ens1', '10.1.2.1/32'])``)
+        ingress_addresses: A list of :class:`ipaddress.ip_address` objects representing the IP
+            addresses that other units should use to get in touch with you.
+        egress_subnets: A list of :class:`ipaddress.ip_network` representing the subnets that
+            other units will see you connecting from. Due to things like NAT it isn't always
+            possible to narrow it down to a single address, but when it is clear, the CIDRs
+            will be constrained to a single address. (eg, 10.0.0.1/32)
+    Args:
+        network_info: A dict of network information as returned by ``network-get``.
+    """
+
+    def __init__(self, network_info: dict):
+        self.interfaces = []
+        # Treat multiple addresses on an interface as multiple logical
+        # interfaces with the same name.
+        for interface_info in network_info['bind-addresses']:
+            interface_name = interface_info['interface-name']
+            for address_info in interface_info['addresses']:
+                self.interfaces.append(NetworkInterface(interface_name, address_info))
+        self.ingress_addresses = []
+        for address in network_info['ingress-addresses']:
+            self.ingress_addresses.append(ipaddress.ip_address(address))
+        self.egress_subnets = []
+        for subnet in network_info['egress-subnets']:
+            self.egress_subnets.append(ipaddress.ip_network(subnet))
+
+    @property
+    def bind_address(self):
+        """A single address that your application should bind() to.
+
+        For the common case where there is a single answer. This represents a single
+        address from :attr:`.interfaces` that can be used to configure where your
+        application should bind() and listen().
+        """
+        # Raises IndexError if network-get reported no bind addresses.
+        return self.interfaces[0].address
+
+    @property
+    def ingress_address(self):
+        """The address other applications should use to connect to your unit.
+
+        Due to things like public/private addresses, NAT and tunneling, the address you bind()
+        to is not always the address other people can use to connect() to you.
+        This is just the first address from :attr:`.ingress_addresses`.
+        """
+        # Raises IndexError if network-get reported no ingress addresses.
+        return self.ingress_addresses[0]
+
+
+class NetworkInterface:
+    """Represents a single network interface that the charm needs to know about.
+
+    Charmers should not instantiate this type directly. Instead use :meth:`Model.get_binding`
+    to get the network information for a given endpoint.
+
+    Attributes:
+        name: The name of the interface (eg. 'eth0', or 'ens1')
+        subnet: An :class:`ipaddress.ip_network` representation of the IP for the network
+            interface. This may be a single address (eg '10.0.1.2/32')
+    """
+
+    def __init__(self, name: str, address_info: dict):
+        self.name = name
+        # TODO: expose a hardware address here, see LP: #1864070.
+        self.address = ipaddress.ip_address(address_info['value'])
+        cidr = address_info['cidr']
+        if not cidr:
+            # The cidr field may be empty, see LP: #1864102.
+            # In this case, make it a /32 or /128 IP network.
+            self.subnet = ipaddress.ip_network(address_info['value'])
+        else:
+            self.subnet = ipaddress.ip_network(cidr)
+        # TODO: expose a hostname/canonical name for the address here, see LP: #1864086.
+
+
+class Relation:
+    """Represents an established relation between this application and another application.
+
+    This class should not be instantiated directly, instead use :meth:`Model.get_relation`
+    or :attr:`RelationEvent.relation`.
+
+    Attributes:
+        name: The name of the local endpoint of the relation (eg 'db')
+        id: The identifier for a particular relation (integer)
+        app: An :class:`Application` representing the remote application of this relation.
+            For peer relations this will be the local application.
+        units: A set of :class:`Unit` for units that have started and joined this relation.
+        data: A :class:`RelationData` holding the data buckets for each entity
+            of a relation. Accessed via eg Relation.data[unit]['foo']
+    """
+
+    def __init__(
+            self, relation_name: str, relation_id: int, is_peer: bool, our_unit: Unit,
+            backend: '_ModelBackend', cache: '_ModelCache'):
+        self.name = relation_name
+        self.id = relation_id
+        # May remain None: for a dead relation with no remote units (and not a
+        # peer relation) there is no way to discover the remote application.
+        self.app = None
+        self.units = set()
+
+        # For peer relations, both the remote and the local app are the same.
+        if is_peer:
+            self.app = our_unit.app
+        try:
+            for unit_name in backend.relation_list(self.id):
+                unit = cache.get(Unit, unit_name)
+                self.units.add(unit)
+                if self.app is None:
+                    # Derive the remote app from the first remote unit seen.
+                    self.app = unit.app
+        except RelationNotFoundError:
+            # If the relation is dead, just treat it as if it has no remote units.
+            pass
+        self.data = RelationData(self, our_unit, backend)
+
+    def __repr__(self):
+        return '<{}.{} {}:{}>'.format(type(self).__module__,
+                                      type(self).__name__,
+                                      self.name,
+                                      self.id)
+
+
+class RelationData(Mapping):
+    """Represents the various data buckets of a given relation.
+
+    Each unit and application involved in a relation has their own data bucket.
+    Eg: ``{entity: RelationDataContent}``
+    where entity can be either a :class:`Unit` or a :class:`Application`.
+
+    Units can read and write their own data, and if they are the leader,
+    they can read and write their application data. They are allowed to read
+    remote unit and application data.
+
+    This class should not be created directly. It should be accessed via
+    :attr:`Relation.data`
+    """
+
+    def __init__(self, relation: Relation, our_unit: Unit, backend: '_ModelBackend'):
+        # weakref.proxy avoids a reference cycle (Relation also holds this object).
+        self.relation = weakref.proxy(relation)
+        self._data = {
+            our_unit: RelationDataContent(self.relation, our_unit, backend),
+            our_unit.app: RelationDataContent(self.relation, our_unit.app, backend),
+        }
+        self._data.update({
+            unit: RelationDataContent(self.relation, unit, backend)
+            for unit in self.relation.units})
+        # The relation might be dead so avoid a None key here.
+        if self.relation.app is not None:
+            self._data.update({
+                self.relation.app: RelationDataContent(self.relation, self.relation.app, backend),
+            })
+
+    # Read-only Mapping interface: the per-entity buckets themselves are
+    # (conditionally) mutable, but the set of buckets is fixed.
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, key):
+        return self._data[key]
+
+    def __repr__(self):
+        return repr(self._data)
+
+
+# We mix in MutableMapping here to get some convenience implementations, but whether it's actually
+# mutable or not is controlled by the flag.
+class RelationDataContent(LazyMapping, MutableMapping):
+    """The data bucket of a single entity (unit or application) in a relation.
+
+    Reads are lazy and cached (see :class:`LazyMapping`, loading via
+    ``relation-get``); writes go through ``relation-set`` and are only allowed
+    when :meth:`_is_mutable` returns True for this entity.
+    """
+
+    def __init__(self, relation, entity, backend):
+        self.relation = relation
+        self._entity = entity
+        self._backend = backend
+        self._is_app = isinstance(entity, Application)
+
+    def _load(self):
+        """Fetch this bucket's contents from Juju via relation-get."""
+        try:
+            return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app)
+        except RelationNotFoundError:
+            # Dead relations tell no tales (and have no data).
+            return {}
+
+    def _is_mutable(self):
+        """Return True if our unit may write to this bucket."""
+        if self._is_app:
+            is_our_app = self._backend.app_name == self._entity.name
+            if not is_our_app:
+                return False
+            # Whether the application data bag is mutable or not depends on
+            # whether this unit is a leader or not, but this is not guaranteed
+            # to be always true during the same hook execution.
+            return self._backend.is_leader()
+        else:
+            is_our_unit = self._backend.unit_name == self._entity.name
+            if is_our_unit:
+                return True
+        return False
+
+    def __setitem__(self, key, value):
+        if not self._is_mutable():
+            raise RelationDataError('cannot set relation data for {}'.format(self._entity.name))
+        if not isinstance(value, str):
+            raise RelationDataError('relation data values must be strings')
+
+        self._backend.relation_set(self.relation.id, key, value, self._is_app)
+
+        # Don't load data unnecessarily if we're only updating.
+        if self._lazy_data is not None:
+            if value == '':
+                # Match the behavior of Juju, which is that setting the value to an
+                # empty string will remove the key entirely from the relation data.
+                self._data.pop(key, None)
+            else:
+                self._data[key] = value
+
+    def __delitem__(self, key):
+        # Match the behavior of Juju, which is that setting the value to an empty
+        # string will remove the key entirely from the relation data.
+        self.__setitem__(key, '')
+
+
+class ConfigData(LazyMapping):
+    """Mapping of charm config options, lazily loaded via ``config-get``."""
+
+    def __init__(self, backend):
+        self._backend = backend
+
+    def _load(self):
+        return self._backend.config_get()
+
+
+class StatusBase:
+    """Status values specific to applications and units.
+
+    To access a status by name, see :meth:`StatusBase.from_name`, most use cases will just
+    directly use the child class to indicate their status.
+    """
+
+    # Registry of name -> subclass, populated by the `register` decorator.
+    _statuses = {}
+    # Overridden by each registered subclass (eg 'active', 'blocked').
+    name = None
+
+    def __init__(self, message: str):
+        self.message = message
+
+    def __new__(cls, *args, **kwargs):
+        # The base class is abstract by construction; only subclasses may be built.
+        if cls is StatusBase:
+            raise TypeError("cannot instantiate a base class")
+        return super().__new__(cls)
+
+    def __eq__(self, other):
+        if not isinstance(self, type(other)):
+            return False
+        return self.message == other.message
+
+    def __repr__(self):
+        return "{.__class__.__name__}({!r})".format(self, self.message)
+
+    @classmethod
+    def from_name(cls, name: str, message: str):
+        """Build a status instance from its Juju name and message.
+
+        Raises KeyError if `name` is not a registered status name.
+        """
+        if name == 'unknown':
+            # unknown is special
+            return UnknownStatus()
+        else:
+            return cls._statuses[name](message)
+
+    @classmethod
+    def register(cls, child):
+        """Class decorator: register `child` under its `name` for from_name()."""
+        if child.name is None:
+            raise AttributeError('cannot register a Status which has no name')
+        cls._statuses[child.name] = child
+        return child
+
+
+@StatusBase.register
+class UnknownStatus(StatusBase):
+    """The unit status is unknown.
+
+    A unit-agent has finished calling install, config-changed and start, but the
+    charm has not called status-set yet.
+
+    """
+    name = 'unknown'
+
+    def __init__(self):
+        # Unknown status cannot be set and does not have a message associated with it.
+        super().__init__('')
+
+    def __repr__(self):
+        return "UnknownStatus()"
+
+
+@StatusBase.register
+class ActiveStatus(StatusBase):
+    """The unit is ready.
+
+    The unit believes it is correctly offering all the services it has been asked to offer.
+    """
+    name = 'active'
+
+    def __init__(self, message: str = ''):
+        # Unlike other statuses, active commonly carries no message.
+        super().__init__(message)
+
+
+@StatusBase.register
+class BlockedStatus(StatusBase):
+    """The unit requires manual intervention.
+
+    An operator has to manually intervene to unblock the unit and let it proceed.
+    Construct with a message describing what intervention is needed.
+    """
+    name = 'blocked'
+
+
+@StatusBase.register
+class MaintenanceStatus(StatusBase):
+    """The unit is performing maintenance tasks.
+
+    The unit is not yet providing services, but is actively doing work in preparation
+    for providing those services. This is a "spinning" state, not an error state. It
+    reflects activity on the unit itself, not on peers or related units.
+
+    Construct with a message describing the maintenance activity.
+    """
+    name = 'maintenance'
+
+
+@StatusBase.register
+class WaitingStatus(StatusBase):
+    """A unit is unable to progress.
+
+    The unit is unable to progress to an active state because an application to which
+    it is related is not running.
+
+    Construct with a message describing what is being waited for.
+    """
+    name = 'waiting'
+
+
+class Resources:
+    """Object representing resources for the charm.
+    """
+
+    def __init__(self, names: typing.Iterable[str], backend: '_ModelBackend'):
+        self._backend = backend
+        # One cached Path per resource name; None means "not fetched yet".
+        self._paths = {name: None for name in names}
+
+    def fetch(self, name: str) -> Path:
+        """Fetch the resource from the controller or store.
+
+        If successfully fetched, this returns a Path object to where the resource is stored
+        on disk, otherwise it raises a ModelError.
+        """
+        if name not in self._paths:
+            raise RuntimeError('invalid resource name: {}'.format(name))
+        if self._paths[name] is None:
+            self._paths[name] = Path(self._backend.resource_get(name))
+        return self._paths[name]
+
+
+class Pod:
+    """Represents the definition of a pod spec in Kubernetes models.
+
+    Currently only supports simple access to setting the Juju pod spec via :meth:`.set_spec`.
+    """
+
+    def __init__(self, backend: '_ModelBackend'):
+        self._backend = backend
+
+    def set_spec(self, spec: typing.Mapping, k8s_resources: typing.Mapping = None):
+        """Set the specification for pods that Juju should start in kubernetes.
+
+        See `juju help-tool pod-spec-set` for details of what should be passed.
+
+        Args:
+            spec: The mapping defining the pod specification
+            k8s_resources: Additional kubernetes specific specification.
+
+        Returns:
+            None
+
+        Raises:
+            ModelError: if this unit is not the leader.
+        """
+        if not self._backend.is_leader():
+            raise ModelError('cannot set a pod spec as this unit is not a leader')
+        self._backend.pod_spec_set(spec, k8s_resources)
+
+
+class StorageMapping(Mapping):
+    """Map of storage names to lists of Storage instances."""
+
+    def __init__(self, storage_names: typing.Iterable[str], backend: '_ModelBackend'):
+        self._backend = backend
+        # None means "not loaded yet" for each storage name.
+        self._storage_map = {storage_name: None for storage_name in storage_names}
+
+    def __contains__(self, key: str):
+        return key in self._storage_map
+
+    def __len__(self):
+        return len(self._storage_map)
+
+    def __iter__(self):
+        return iter(self._storage_map)
+
+    def __getitem__(self, storage_name: str) -> typing.List['Storage']:
+        # Lazily build the Storage list from storage-list on first access.
+        storage_list = self._storage_map[storage_name]
+        if storage_list is None:
+            storage_list = self._storage_map[storage_name] = []
+            for storage_id in self._backend.storage_list(storage_name):
+                storage_list.append(Storage(storage_name, storage_id, self._backend))
+        return storage_list
+
+    def request(self, storage_name: str, count: int = 1):
+        """Requests new storage instances of a given name.
+
+        Uses storage-add tool to request additional storage. Juju will notify the unit
+        via <storage_name>-storage-attached events when it becomes available.
+        """
+        if storage_name not in self._storage_map:
+            raise ModelError(('cannot add storage {!r}:'
+                              ' it is not present in the charm metadata').format(storage_name))
+        self._backend.storage_add(storage_name, count)
+
+
+class Storage:
+    """Represents a storage as defined in metadata.yaml
+
+    Attributes:
+        name: Simple string name of the storage
+        id: The provider id for storage
+    """
+
+    def __init__(self, storage_name, storage_id, backend):
+        self.name = storage_name
+        self.id = storage_id
+        self._backend = backend
+        # Lazily resolved by the `location` property via storage-get.
+        self._location = None
+
+    @property
+    def location(self):
+        """The mount location of this storage, as a Path (fetched lazily)."""
+        if self._location is None:
+            raw = self._backend.storage_get('{}/{}'.format(self.name, self.id), "location")
+            self._location = Path(raw)
+        return self._location
+
+
+class ModelError(Exception):
+    """Base class for exceptions raised when interacting with the Model.
+
+    Also raised directly when a Juju hook tool exits with a nonzero status.
+    """
+    pass
+
+
+class TooManyRelatedAppsError(ModelError):
+    """Raised by :meth:`Model.get_relation` if there is more than one related application.
+
+    Attributes:
+        relation_name: The endpoint name that was queried.
+        num_related: How many relations were actually established.
+        max_supported: The maximum number the query can disambiguate (1).
+    """
+
+    def __init__(self, relation_name, num_related, max_supported):
+        super().__init__('Too many remote applications on {} ({} > {})'.format(
+            relation_name, num_related, max_supported))
+        self.relation_name = relation_name
+        self.num_related = num_related
+        self.max_supported = max_supported
+
+
+class RelationDataError(ModelError):
+    """Raised by ``Relation.data[entity][key] = 'foo'`` if the data is invalid.
+
+    This is raised if you're either trying to set a value to something that isn't a string,
+    or if you are trying to set a value in a bucket that you don't have access to. (eg,
+    another application/unit or setting your application data but you aren't the leader.)
+    See :meth:`RelationDataContent.__setitem__`.
+    """
+
+
+class RelationNotFoundError(ModelError):
+    """Backend error when querying juju for a given relation and that relation doesn't exist.
+
+    Raised by the backend wrappers when a hook tool reports 'relation not found'.
+    """
+
+
+class InvalidStatusError(ModelError):
+    """Raised if trying to set an Application or Unit status to something invalid.
+
+    Statuses must be instances of :class:`StatusBase` subclasses.
+    """
+
+
class _ModelBackend:
    """Represents the connection between the Model representation and talking to Juju.

    Charm authors should not directly interact with the ModelBackend, it is a private
    implementation of Model.
    """

    # Juju grants leadership in 30-second leases, so a cached is-leader result
    # is trustworthy for at most this long; see is_leader().
    LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30)

    def __init__(self, unit_name=None, model_name=None):
        # Fall back to the environment variables Juju sets for every hook
        # invocation when the names are not supplied explicitly.
        if unit_name is None:
            self.unit_name = os.environ['JUJU_UNIT_NAME']
        else:
            self.unit_name = unit_name
        if model_name is None:
            model_name = os.environ.get('JUJU_MODEL_NAME')
        self.model_name = model_name
        # Unit names have the form '<application>/<unit number>'.
        self.app_name = self.unit_name.split('/')[0]

        # Cached is-leader result and the monotonic time it was obtained.
        self._is_leader = None
        self._leader_check_time = None

    def _run(self, *args, return_output=False, use_json=False):
        """Invoke a Juju hook tool and optionally return its decoded output.

        Args:
            args: The hook-tool name followed by its command-line arguments.
            return_output: If True, return the tool's stdout ('' when empty).
            use_json: If True, append --format=json and JSON-decode stdout.
        Raises:
            ModelError: if the hook tool exits with a non-zero status.
        """
        kwargs = dict(stdout=PIPE, stderr=PIPE, check=True)
        # Resolve the tool name to an absolute path before executing.
        args = (shutil.which(args[0]),) + args[1:]
        if use_json:
            args += ('--format=json',)
        try:
            result = run(args, **kwargs)
        except CalledProcessError as e:
            # NOTE(review): e.stderr is bytes here, so str() of the error renders
            # as "b'...'"; substring checks such as 'relation not found' still work.
            raise ModelError(e.stderr)
        if return_output:
            if result.stdout is None:
                return ''
            else:
                text = result.stdout.decode('utf8')
                if use_json:
                    return json.loads(text)
                else:
                    return text

    def relation_ids(self, relation_name):
        """Return the integer ids of all relations on the named endpoint.

        relation-ids reports ids as '<relation name>:<id>' strings; only the
        numeric suffix is kept.
        """
        relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True)
        return [int(relation_id.split(':')[-1]) for relation_id in relation_ids]

    def relation_list(self, relation_id):
        """Return the remote unit names participating in the given relation.

        Raises:
            RelationNotFoundError: if the relation id is unknown to Juju.
        """
        try:
            return self._run('relation-list', '-r', str(relation_id),
                             return_output=True, use_json=True)
        except ModelError as e:
            if 'relation not found' in str(e):
                raise RelationNotFoundError() from e
            raise

    def relation_get(self, relation_id, member_name, is_app):
        """Read the relation data bag of a unit or application.

        Args:
            relation_id: Integer id of the relation to read.
            member_name: The unit or application whose data bag is read.
            is_app: True to read the application data bag rather than a unit's.
        Raises:
            TypeError: if is_app is not a bool.
            RuntimeError: if application data is requested on a Juju version
                that does not support it.
            RelationNotFoundError: if the relation id is unknown to Juju.
        """
        if not isinstance(is_app, bool):
            raise TypeError('is_app parameter to relation_get must be a boolean')

        if is_app:
            version = JujuVersion.from_environ()
            if not version.has_app_data():
                raise RuntimeError(
                    'getting application data is not supported on Juju version {}'.format(version))

        # '-' requests the full data bag rather than a single key.
        args = ['relation-get', '-r', str(relation_id), '-', member_name]
        if is_app:
            args.append('--app')

        try:
            return self._run(*args, return_output=True, use_json=True)
        except ModelError as e:
            if 'relation not found' in str(e):
                raise RelationNotFoundError() from e
            raise

    def relation_set(self, relation_id, key, value, is_app):
        """Write a single key into a unit or application relation data bag.

        Raises:
            TypeError: if is_app is not a bool.
            RuntimeError: if application data is requested on a Juju version
                that does not support it.
            RelationNotFoundError: if the relation id is unknown to Juju.
        """
        if not isinstance(is_app, bool):
            raise TypeError('is_app parameter to relation_set must be a boolean')

        if is_app:
            version = JujuVersion.from_environ()
            if not version.has_app_data():
                raise RuntimeError(
                    'setting application data is not supported on Juju version {}'.format(version))

        args = ['relation-set', '-r', str(relation_id), '{}={}'.format(key, value)]
        if is_app:
            args.append('--app')

        try:
            return self._run(*args)
        except ModelError as e:
            if 'relation not found' in str(e):
                raise RelationNotFoundError() from e
            raise

    def config_get(self):
        """Return the charm's current configuration (config-get, JSON-decoded)."""
        return self._run('config-get', return_output=True, use_json=True)

    def is_leader(self):
        """Obtain the current leadership status for the unit the charm code is executing on.

        The value is cached for the duration of a lease which is 30s in Juju.
        """
        now = time.monotonic()
        if self._leader_check_time is None:
            check = True
        else:
            time_since_check = datetime.timedelta(seconds=now - self._leader_check_time)
            check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None)
        if check:
            # Current time MUST be saved before running is-leader to ensure the cache
            # is only used inside the window that is-leader itself asserts.
            self._leader_check_time = now
            self._is_leader = self._run('is-leader', return_output=True, use_json=True)

        return self._is_leader

    def resource_get(self, resource_name):
        """Return the local filesystem path of a fetched resource (whitespace-stripped)."""
        return self._run('resource-get', resource_name, return_output=True).strip()

    def pod_spec_set(self, spec, k8s_resources):
        """Send a pod spec (and optional raw K8s resources) to Juju via pod-spec-set.

        The specs are written to temporary YAML files because pod-spec-set takes
        file arguments; the temporary directory is always removed afterwards.
        """
        tmpdir = Path(tempfile.mkdtemp('-pod-spec-set'))
        try:
            spec_path = tmpdir / 'spec.yaml'
            with spec_path.open("wt", encoding="utf8") as f:
                yaml.dump(spec, stream=f, Dumper=_DefaultDumper)
            args = ['--file', str(spec_path)]
            if k8s_resources:
                k8s_res_path = tmpdir / 'k8s-resources.yaml'
                with k8s_res_path.open("wt", encoding="utf8") as f:
                    yaml.dump(k8s_resources, stream=f, Dumper=_DefaultDumper)
                args.extend(['--k8s-resources', str(k8s_res_path)])
            self._run('pod-spec-set', *args)
        finally:
            shutil.rmtree(str(tmpdir))

    def status_get(self, *, is_app=False):
        """Get a status of a unit or an application.

        Args:
            is_app: A boolean indicating whether the status should be retrieved for a unit
                or an application.
        """
        content = self._run(
            'status-get', '--include-data', '--application={}'.format(is_app),
            use_json=True,
            return_output=True)
        # Unit status looks like (in YAML):
        # message: 'load: 0.28 0.26 0.26'
        # status: active
        # status-data: {}
        # Application status looks like (in YAML):
        # application-status:
        #   message: 'load: 0.28 0.26 0.26'
        #   status: active
        #   status-data: {}
        #   units:
        #     uo/0:
        #       message: 'load: 0.28 0.26 0.26'
        #       status: active
        #       status-data: {}

        if is_app:
            return {'status': content['application-status']['status'],
                    'message': content['application-status']['message']}
        else:
            return content

    def status_set(self, status, message='', *, is_app=False):
        """Set a status of a unit or an application.

        Args:
            status: The status name to set.
            message: An optional human-readable message accompanying the status.
            is_app: A boolean indicating whether the status should be set for a unit or an
                application.
        Raises:
            TypeError: if is_app is not a bool.
        """
        if not isinstance(is_app, bool):
            raise TypeError('is_app parameter must be boolean')
        return self._run('status-set', '--application={}'.format(is_app), status, message)

    def storage_list(self, name):
        """Return the integer instance ids of the named storage ('<name>/<id>')."""
        return [int(s.split('/')[1]) for s in self._run('storage-list', name,
                                                        return_output=True, use_json=True)]

    def storage_get(self, storage_name_id, attribute):
        """Return a single attribute of the given storage instance."""
        return self._run('storage-get', '-s', storage_name_id, attribute,
                         return_output=True, use_json=True)

    def storage_add(self, name, count=1):
        """Request `count` additional instances of the named storage.

        Raises:
            TypeError: if count is not an int (bools are explicitly rejected).
        """
        if not isinstance(count, int) or isinstance(count, bool):
            raise TypeError('storage count must be integer, got: {} ({})'.format(count,
                                                                                 type(count)))
        self._run('storage-add', '{}={}'.format(name, count))

    def action_get(self):
        """Return the parameters of the currently running action."""
        return self._run('action-get', return_output=True, use_json=True)

    def action_set(self, results):
        """Record action results as key=value pairs via action-set."""
        self._run('action-set', *["{}={}".format(k, v) for k, v in results.items()])

    def action_log(self, message):
        """Append a progress message to the currently running action."""
        self._run('action-log', message)

    def action_fail(self, message=''):
        """Mark the currently running action as failed."""
        self._run('action-fail', message)

    def application_version_set(self, version):
        """Report the workload version; '--' guards versions starting with a dash."""
        self._run('application-version-set', '--', version)

    def juju_log(self, level, message):
        """Write a message to the Juju log at the given level."""
        self._run('juju-log', '--log-level', level, message)

    def network_get(self, binding_name, relation_id=None):
        """Return network info provided by network-get for a given binding.

        Args:
            binding_name: A name of a binding (relation name or extra-binding name).
            relation_id: An optional relation id to get network info for.
        Raises:
            RelationNotFoundError: if a relation id was given but is unknown to Juju.
        """
        cmd = ['network-get', binding_name]
        if relation_id is not None:
            cmd.extend(['-r', str(relation_id)])
        try:
            return self._run(*cmd, return_output=True, use_json=True)
        except ModelError as e:
            if 'relation not found' in str(e):
                raise RelationNotFoundError() from e
            raise

    def add_metrics(self, metrics, labels=None):
        """Record metrics (and optional labels) via add-metric.

        Keys and label values are validated first, since add-metric uses ','
        and '=' as separators.
        """
        cmd = ['add-metric']

        if labels:
            label_args = []
            for k, v in labels.items():
                _ModelBackendValidator.validate_metric_label(k)
                _ModelBackendValidator.validate_label_value(k, v)
                label_args.append('{}={}'.format(k, v))
            cmd.extend(['--labels', ','.join(label_args)])

        metric_args = []
        for k, v in metrics.items():
            _ModelBackendValidator.validate_metric_key(k)
            metric_value = _ModelBackendValidator.format_metric_value(v)
            metric_args.append('{}={}'.format(k, metric_value))
        cmd.extend(metric_args)
        self._run(*cmd)
+
+
+class _ModelBackendValidator:
+ """Provides facilities for validating inputs and formatting them for model backends."""
+
+ METRIC_KEY_REGEX = re.compile(r'^[a-zA-Z](?:[a-zA-Z0-9-_]*[a-zA-Z0-9])?$')
+
+ @classmethod
+ def validate_metric_key(cls, key):
+ if cls.METRIC_KEY_REGEX.match(key) is None:
+ raise ModelError(
+ 'invalid metric key {!r}: must match {}'.format(
+ key, cls.METRIC_KEY_REGEX.pattern))
+
+ @classmethod
+ def validate_metric_label(cls, label_name):
+ if cls.METRIC_KEY_REGEX.match(label_name) is None:
+ raise ModelError(
+ 'invalid metric label name {!r}: must match {}'.format(
+ label_name, cls.METRIC_KEY_REGEX.pattern))
+
+ @classmethod
+ def format_metric_value(cls, value):
+ try:
+ decimal_value = decimal.Decimal.from_float(value)
+ except TypeError as e:
+ e2 = ModelError('invalid metric value {!r} provided:'
+ ' must be a positive finite float'.format(value))
+ raise e2 from e
+ if decimal_value.is_nan() or decimal_value.is_infinite() or decimal_value < 0:
+ raise ModelError('invalid metric value {!r} provided:'
+ ' must be a positive finite float'.format(value))
+ return str(decimal_value)
+
+ @classmethod
+ def validate_label_value(cls, label, value):
+ # Label values cannot be empty, contain commas or equal signs as those are
+ # used by add-metric as separators.
+ if not value:
+ raise ModelError(
+ 'metric label {} has an empty value, which is not allowed'.format(label))
+ v = str(value)
+ if re.search('[,=]', v) is not None:
+ raise ModelError(
+ 'metric label values must not contain "," or "=": {}={!r}'.format(label, value))
diff --git a/coredns/venv/ops/storage.py b/coredns/venv/ops/storage.py
new file mode 100644
index 0000000..ec82d64
--- /dev/null
+++ b/coredns/venv/ops/storage.py
@@ -0,0 +1,318 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from datetime import timedelta
+import pickle
+import shutil
+import subprocess
+import sqlite3
+import typing
+
+import yaml
+
+
+def _run(args, **kw):
+ cmd = shutil.which(args[0])
+ if cmd is None:
+ raise FileNotFoundError(args[0])
+ return subprocess.run([cmd, *args[1:]], **kw)
+
+
class SQLiteStorage:
    """Persist framework snapshots and event notices in a local SQLite database."""

    # Maximum time to wait for the database lock before giving up.
    DB_LOCK_TIMEOUT = timedelta(hours=1)

    def __init__(self, filename):
        # The isolation_level argument is set to None such that the implicit
        # transaction management behavior of the sqlite3 module is disabled.
        self._db = sqlite3.connect(str(filename),
                                   isolation_level=None,
                                   timeout=self.DB_LOCK_TIMEOUT.total_seconds())
        self._setup()

    def _setup(self):
        """Create the snapshot and notice tables on first use."""
        # Make sure that the database is locked until the connection is closed,
        # not until the transaction ends.
        self._db.execute("PRAGMA locking_mode=EXCLUSIVE")
        cursor = self._db.execute("BEGIN")
        cursor.execute(
            "SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'")
        if cursor.fetchone()[0] == 0:
            # Keep in mind what might happen if the process dies somewhere below.
            # The system must not be rendered permanently broken by that.
            self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)")
            self._db.execute('''
                CREATE TABLE notice (
                  sequence INTEGER PRIMARY KEY AUTOINCREMENT,
                  event_path TEXT,
                  observer_path TEXT,
                  method_name TEXT)
                ''')
        self._db.commit()

    def close(self):
        """Release the underlying database connection."""
        self._db.close()

    def commit(self):
        """Flush the current transaction."""
        self._db.commit()

    # There's commit but no rollback. For abort to be supported, we'll need logic that
    # can rollback decisions made by third-party code in terms of the internal state
    # of objects that have been snapshotted, and hooks to let them know about it and
    # take the needed actions to undo their logic until the last snapshot.
    # This is doable but will increase significantly the chances for mistakes.

    def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None:
        """Part of the Storage API, persist a snapshot data under the given handle.

        Args:
            handle_path: The string identifying the snapshot.
            snapshot_data: The data to be persisted (as returned by Object.snapshot()).
                This might be a dict/tuple/int, but must only contain 'simple'
                python types.
        """
        # Use pickle for serialization, so the value remains portable.
        serialized = pickle.dumps(snapshot_data)
        self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, serialized))

    def load_snapshot(self, handle_path: str) -> typing.Any:
        """Part of the Storage API, retrieve a snapshot that was previously saved.

        Args:
            handle_path: The string identifying the snapshot.
        Raises:
            NoSnapshotError: if there is no snapshot for the given handle_path.
        """
        cursor = self._db.cursor()
        cursor.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,))
        row = cursor.fetchone()
        if row is None:
            raise NoSnapshotError(handle_path)
        return pickle.loads(row[0])

    def drop_snapshot(self, handle_path: str):
        """Part of the Storage API, remove a snapshot that was previously saved.

        Dropping a snapshot that doesn't exist is treated as a no-op.
        """
        self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,))

    def list_snapshots(self) -> typing.Generator[str, None, None]:
        """Return the name of all snapshots that are currently saved."""
        cursor = self._db.cursor()
        cursor.execute("SELECT handle FROM snapshot")
        # Iterating the cursor streams rows without materializing them all.
        for row in cursor:
            yield row[0]

    def save_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
        """Part of the Storage API, record a notice (event and observer)."""
        # NULL lets AUTOINCREMENT assign the next sequence number.
        self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)',
                         (event_path, observer_path, method_name))

    def drop_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
        """Part of the Storage API, remove a notice that was previously recorded."""
        self._db.execute('''
            DELETE FROM notice
            WHERE event_path=?
            AND observer_path=?
            AND method_name=?
            ''', (event_path, observer_path, method_name))

    def notices(self, event_path: typing.Optional[str]) ->\
            typing.Generator[typing.Tuple[str, str, str], None, None]:
        """Part of the Storage API, return all notices that begin with event_path.

        Args:
            event_path: If supplied, will only yield events that match event_path. If not
                supplied (or None/'') will return all events.
        Returns:
            Iterable of (event_path, observer_path, method_name) tuples
        """
        if event_path:
            cursor = self._db.execute('''
                SELECT event_path, observer_path, method_name
                FROM notice
                WHERE event_path=?
                ORDER BY sequence
                ''', (event_path,))
        else:
            cursor = self._db.execute('''
                SELECT event_path, observer_path, method_name
                FROM notice
                ORDER BY sequence
                ''')
        for row in cursor:
            yield tuple(row)
+
+
class JujuStorage:
    """Storing the content tracked by the Framework in Juju.

    This uses :class:`_JujuStorageBackend` to interact with state-get/state-set
    as the way to store state for the framework and for components.
    """
    # NOTE(review): the original docstring opened with four quotes (""""),
    # leaking a stray '"' into the docstring text; fixed.

    # Key under which the full list of notices is kept in Juju state.
    NOTICE_KEY = "#notices#"

    def __init__(self, backend: typing.Optional['_JujuStorageBackend'] = None):
        # A backend may be injected (e.g. for tests); default to talking to Juju.
        self._backend = backend
        if backend is None:
            self._backend = _JujuStorageBackend()

    def close(self):
        """Part of the Storage API; state lives in Juju, nothing to release."""
        return

    def commit(self):
        """Part of the Storage API; writes are applied immediately by the backend."""
        return

    def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None:
        """Persist snapshot data under the given handle path."""
        self._backend.set(handle_path, snapshot_data)

    def load_snapshot(self, handle_path):
        """Return the snapshot stored under handle_path.

        Raises:
            NoSnapshotError: if nothing is stored under handle_path.
        """
        try:
            content = self._backend.get(handle_path)
        except KeyError:
            raise NoSnapshotError(handle_path)
        return content

    def drop_snapshot(self, handle_path):
        """Remove the snapshot stored under handle_path."""
        self._backend.delete(handle_path)

    def save_notice(self, event_path: str, observer_path: str, method_name: str):
        """Append an (event, observer, method) notice to the stored list."""
        notice_list = self._load_notice_list()
        notice_list.append([event_path, observer_path, method_name])
        self._save_notice_list(notice_list)

    def drop_notice(self, event_path: str, observer_path: str, method_name: str):
        """Remove the first matching notice from the stored list."""
        notice_list = self._load_notice_list()
        notice_list.remove([event_path, observer_path, method_name])
        self._save_notice_list(notice_list)

    def notices(self, event_path: typing.Optional[str] = None):
        """Yield stored notices as (event_path, observer_path, method_name) tuples.

        Matches SQLiteStorage.notices: a falsy event_path (None or '') yields
        every notice; otherwise only notices matching event_path are yielded.
        (The previous implementation ignored the falsy case and yielded nothing.)
        """
        for row in self._load_notice_list():
            if event_path and row[0] != event_path:
                continue
            yield tuple(row)

    def _load_notice_list(self) -> typing.List[typing.Tuple[str]]:
        """Return the stored notice list, or [] when absent or stored as None."""
        try:
            notice_list = self._backend.get(self.NOTICE_KEY)
        except KeyError:
            return []
        if notice_list is None:
            return []
        return notice_list

    def _save_notice_list(self, notices: typing.List[typing.Tuple[str]]) -> None:
        """Overwrite the stored notice list with the given list."""
        self._backend.set(self.NOTICE_KEY, notices)
+
+
class _SimpleLoader(getattr(yaml, 'CSafeLoader', yaml.SafeLoader)):
    """Handle a couple basic python types.

    yaml.SafeLoader can handle all the basic int/float/dict/set/etc that we want. The only one
    that it *doesn't* handle is tuples. We don't want to support arbitrary types, so we just
    subclass SafeLoader and add tuples back in.
    """
    # Taken from the example at:
    # https://stackoverflow.com/questions/9169025/how-can-i-add-a-python-tuple-to-a-yaml-file-using-pyyaml

    # Borrow the full (unsafe) Loader's tuple constructor; this is the single
    # non-safe construct re-enabled on top of SafeLoader.
    construct_python_tuple = yaml.Loader.construct_python_tuple


# Register the python/tuple tag so _SimpleLoader can load tuples dumped by
# _SimpleDumper below.
_SimpleLoader.add_constructor(
    u'tag:yaml.org,2002:python/tuple',
    _SimpleLoader.construct_python_tuple)
+
+
class _SimpleDumper(getattr(yaml, 'CSafeDumper', yaml.SafeDumper)):
    """Add types supported by 'marshal'.

    YAML can support arbitrary types, but that is generally considered unsafe (like pickle). So
    we want to only support dumping out types that are safe to load.
    """


# Tuples are emitted with the python/tuple tag, which _SimpleLoader above
# knows how to load back.
_SimpleDumper.represent_tuple = yaml.Dumper.represent_tuple
_SimpleDumper.add_representer(tuple, _SimpleDumper.represent_tuple)
+
+
def juju_backend_available() -> bool:
    """Report whether the Juju state-storage hook tool is available on PATH."""
    return shutil.which('state-get') is not None
+
+
class _JujuStorageBackend:
    """Implements the interface from the Operator framework to Juju's state-get/set/etc."""

    def set(self, key: str, value: typing.Any) -> None:
        """Set a key to a given value.

        Args:
            key: The string key that will be used to find the value later
            value: Arbitrary content that will be returned by get().
        Raises:
            CalledProcessError: if 'state-set' returns an error code.
        """
        # default_flow_style=None means that it can use Block for
        # complex types (types that have nested types) but use flow
        # for simple types (like an array). Not all versions of PyYAML
        # have the same default style.
        encoded_value = yaml.dump(value, Dumper=_SimpleDumper, default_flow_style=None)
        # The value is stored double-encoded: the inner dump turns it into a
        # YAML string, which the outer dump embeds (as a literal block, via
        # default_style='|') in the mapping sent to state-set.
        content = yaml.dump(
            {key: encoded_value}, encoding='utf8', default_style='|',
            default_flow_style=False,
            Dumper=_SimpleDumper)
        _run(["state-set", "--file", "-"], input=content, check=True)

    def get(self, key: str) -> typing.Any:
        """Get the bytes value associated with a given key.

        Args:
            key: The string key that will be used to find the value
        Raises:
            CalledProcessError: if 'state-get' returns an error code.
            KeyError: if state-get printed nothing (no value stored for key).
        """
        # We don't capture stderr here so it can end up in debug logs.
        p = _run(["state-get", key], stdout=subprocess.PIPE, check=True, universal_newlines=True)
        if p.stdout == '' or p.stdout == '\n':
            raise KeyError(key)
        return yaml.load(p.stdout, Loader=_SimpleLoader)

    def delete(self, key: str) -> None:
        """Remove a key from being tracked.

        Args:
            key: The key to stop storing
        Raises:
            CalledProcessError: if 'state-delete' returns an error code.
        """
        _run(["state-delete", key], check=True)
+
+
class NoSnapshotError(Exception):
    """Raised when no snapshot data is stored for a requested handle path."""

    def __init__(self, handle_path):
        # Kept as an attribute so callers can tell which handle was missing.
        self.handle_path = handle_path

    def __str__(self):
        return 'no snapshot data found for {} object'.format(self.handle_path)
diff --git a/coredns/venv/ops/testing.py b/coredns/venv/ops/testing.py
new file mode 100644
index 0000000..416dced
--- /dev/null
+++ b/coredns/venv/ops/testing.py
@@ -0,0 +1,818 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import pathlib
+import random
+import tempfile
+import typing
+import yaml
+from contextlib import contextmanager
+from textwrap import dedent
+
+from ops import (
+ charm,
+ framework,
+ model,
+ storage,
+)
+
+
# OptionalYAML is something like metadata.yaml or actions.yaml: either the
# YAML text itself (a str), a file-like object to read it from, or None.
OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]]
+
+
+# noinspection PyProtectedMember
+class Harness:
+ """This class represents a way to build up the model that will drive a test suite.
+
+ The model that is created is from the viewpoint of the charm that you are testing.
+
+ Example::
+
+ harness = Harness(MyCharm)
+ # Do initial setup here
+ relation_id = harness.add_relation('db', 'postgresql')
+ # Now instantiate the charm to see events as the model changes
+ harness.begin()
+ harness.add_relation_unit(relation_id, 'postgresql/0')
+ harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+ # Check that charm has properly handled the relation_joined event for postgresql/0
+ self.assertEqual(harness.charm. ...)
+
+ Args:
+ charm_cls: The Charm class that you'll be testing.
+ meta: A string or file-like object containing the contents of
+ metadata.yaml. If not supplied, we will look for a 'metadata.yaml' file in the
+ parent directory of the Charm, and if not found fall back to a trivial
+ 'name: test-charm' metadata.
+ actions: A string or file-like object containing the contents of
+ actions.yaml. If not supplied, we will look for a 'actions.yaml' file in the
+ parent directory of the Charm.
+ config: A string or file-like object containing the contents of
+ config.yaml. If not supplied, we will look for a 'config.yaml' file in the
+ parent directory of the Charm.
+ """
+
    def __init__(
            self,
            charm_cls: typing.Type[charm.CharmBase],
            *,
            meta: OptionalYAML = None,
            actions: OptionalYAML = None,
            config: OptionalYAML = None):
        self._charm_cls = charm_cls
        # No charm instance exists until begin() is called.
        self._charm = None
        self._charm_dir = 'no-disk-path'  # this may be updated by _create_meta
        self._meta = self._create_meta(meta, actions)
        self._unit_name = self._meta.name + '/0'
        self._framework = None
        self._hooks_enabled = True
        self._relation_id_counter = 0
        # The testing backend stands in for the real Juju hook tools.
        self._backend = _TestingModelBackend(self._unit_name, self._meta)
        self._model = model.Model(self._meta, self._backend)
        # An in-memory SQLite DB keeps framework state isolated per Harness.
        self._storage = storage.SQLiteStorage(':memory:')
        self._oci_resources = {}
        self._framework = framework.Framework(
            self._storage, self._charm_dir, self._meta, self._model)
        # Seed the config with the defaults declared in config.yaml (or snippet).
        self._update_config(key_values=self._load_config_defaults(config))
+
+ @property
+ def charm(self) -> charm.CharmBase:
+ """Return the instance of the charm class that was passed to __init__.
+
+ Note that the Charm is not instantiated until you have called
+ :meth:`.begin()`.
+ """
+ return self._charm
+
+ @property
+ def model(self) -> model.Model:
+ """Return the :class:`~ops.model.Model` that is being driven by this Harness."""
+ return self._model
+
+ @property
+ def framework(self) -> framework.Framework:
+ """Return the Framework that is being driven by this Harness."""
+ return self._framework
+
    def begin(self) -> None:
        """Instantiate the Charm and start handling events.

        Before calling :meth:`.begin`(), there is no Charm instance, so changes to the Model won't
        emit events. You must call :meth:`.begin` before :attr:`.charm` is valid.

        Raises:
            RuntimeError: if called more than once on the same Harness.
        """
        if self._charm is not None:
            raise RuntimeError('cannot call the begin method on the harness more than once')

        # The Framework adds attributes to class objects for events, etc. As such, we can't re-use
        # the original class against multiple Frameworks. So create a locally defined class
        # and register it.
        # TODO: jam 2020-03-16 We are looking to changes this to Instance attributes instead of
        # Class attributes which should clean up this ugliness. The API can stay the same
        class TestEvents(self._charm_cls.on.__class__):
            pass

        TestEvents.__name__ = self._charm_cls.on.__class__.__name__

        class TestCharm(self._charm_cls):
            on = TestEvents()

        # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo,
        # rather than TestCharm has no attribute foo.
        TestCharm.__name__ = self._charm_cls.__name__
        self._charm = TestCharm(self._framework)
+
    def begin_with_initial_hooks(self) -> None:
        """Called when you want the Harness to fire the same hooks that Juju would fire at startup.

        This triggers install, relation-created, config-changed, start, and any relation-joined
        hooks. Based on what relations have been defined before you called begin().
        Note that all of these are fired before returning control to the test suite, so if you
        want to introspect what happens at each step, you need to fire them directly
        (eg Charm.on.install.emit()).

        To use this with all the normal hooks, you should instantiate the harness, setup any
        relations that you want active when the charm starts, and then call this method.

        Example::

            harness = Harness(MyCharm)
            # Do initial setup here
            relation_id = harness.add_relation('db', 'postgresql')
            harness.add_relation_unit(relation_id, 'postgresql/0')
            harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
            harness.set_leader(True)
            harness.update_config({'initial': 'config'})
            harness.begin_with_initial_hooks()
            # This will cause
            # install, db-relation-created('postgresql'), leader-elected, config-changed, start
            # db-relation-joined('postgresql/0'), db-relation-changed('postgresql/0')
            # To be fired.
        """
        self.begin()
        # TODO: jam 2020-08-03 This should also handle storage-attached hooks once we have support
        # for dealing with storage.
        self._charm.on.install.emit()
        # Juju itself iterates what relation to fire based on a map[int]relation, so it doesn't
        # guarantee a stable ordering between relation events. It *does* give a stable ordering
        # of joined units for a given relation.
        items = list(self._meta.relations.items())
        random.shuffle(items)
        this_app_name = self._meta.name
        for relname, rel_meta in items:
            if rel_meta.role == charm.RelationRole.peer:
                # If the user has directly added a relation, leave it be, but otherwise ensure
                # that peer relations are always established at before leader-elected.
                rel_ids = self._backend._relation_ids_map.get(relname)
                if rel_ids is None:
                    self.add_relation(relname, self._meta.name)
                else:
                    random.shuffle(rel_ids)
                    for rel_id in rel_ids:
                        self._emit_relation_created(relname, rel_id, this_app_name)
            else:
                rel_ids = self._backend._relation_ids_map.get(relname, [])
                random.shuffle(rel_ids)
                for rel_id in rel_ids:
                    app_name = self._backend._relation_app_and_units[rel_id]["app"]
                    self._emit_relation_created(relname, rel_id, app_name)
        if self._backend._is_leader:
            self._charm.on.leader_elected.emit()
        else:
            self._charm.on.leader_settings_changed.emit()
        self._charm.on.config_changed.emit()
        self._charm.on.start.emit()
        all_ids = list(self._backend._relation_names.items())
        random.shuffle(all_ids)
        for rel_id, rel_name in all_ids:
            rel_app_and_units = self._backend._relation_app_and_units[rel_id]
            app_name = rel_app_and_units["app"]
            # Note: Juju *does* fire relation events for a given relation in the sorted order of
            # the unit names. It also always fires relation-changed immediately after
            # relation-joined for the same unit.
            # Juju only fires relation-changed (app) if there is data for the related application
            relation = self._model.get_relation(rel_name, rel_id)
            if self._backend._relation_data[rel_id].get(app_name):
                app = self._model.get_app(app_name)
                self._charm.on[rel_name].relation_changed.emit(
                    relation, app, None)
            for unit_name in sorted(rel_app_and_units["units"]):
                remote_unit = self._model.get_unit(unit_name)
                self._charm.on[rel_name].relation_joined.emit(
                    relation, remote_unit.app, remote_unit)
                self._charm.on[rel_name].relation_changed.emit(
                    relation, remote_unit.app, remote_unit)
+
+ def cleanup(self) -> None:
+ """Called by your test infrastructure to cleanup any temporary directories/files/etc.
+
+ Currently this only needs to be called if you test with resources. But it is reasonable
+ to always include a `testcase.addCleanup(harness.cleanup)` just in case.
+ """
+ self._backend._cleanup()
+
+ def _create_meta(self, charm_metadata, action_metadata):
+ """Create a CharmMeta object.
+
+ Handle the cases where a user doesn't supply explicit metadata snippets.
+ """
+ filename = inspect.getfile(self._charm_cls)
+ charm_dir = pathlib.Path(filename).parents[1]
+
+ if charm_metadata is None:
+ metadata_path = charm_dir / 'metadata.yaml'
+ if metadata_path.is_file():
+ charm_metadata = metadata_path.read_text()
+ self._charm_dir = charm_dir
+ else:
+ # The simplest of metadata that the framework can support
+ charm_metadata = 'name: test-charm'
+ elif isinstance(charm_metadata, str):
+ charm_metadata = dedent(charm_metadata)
+
+ if action_metadata is None:
+ actions_path = charm_dir / 'actions.yaml'
+ if actions_path.is_file():
+ action_metadata = actions_path.read_text()
+ self._charm_dir = charm_dir
+ elif isinstance(action_metadata, str):
+ action_metadata = dedent(action_metadata)
+
+ return charm.CharmMeta.from_yaml(charm_metadata, action_metadata)
+
+ def _load_config_defaults(self, charm_config):
+ """Load default values from config.yaml
+
+ Handle the case where a user doesn't supply explicit config snippets.
+ """
+ filename = inspect.getfile(self._charm_cls)
+ charm_dir = pathlib.Path(filename).parents[1]
+
+ if charm_config is None:
+ config_path = charm_dir / 'config.yaml'
+ if config_path.is_file():
+ charm_config = config_path.read_text()
+ self._charm_dir = charm_dir
+ else:
+ # The simplest of config that the framework can support
+ charm_config = '{}'
+ elif isinstance(charm_config, str):
+ charm_config = dedent(charm_config)
+ charm_config = yaml.load(charm_config, Loader=yaml.SafeLoader)
+ charm_config = charm_config.get('options', {})
+ return {key: value['default'] for key, value in charm_config.items()
+ if 'default' in value}
+
+ def add_oci_resource(self, resource_name: str,
+ contents: typing.Mapping[str, str] = None) -> None:
+ """Add oci resources to the backend.
+
+ This will register an oci resource and create a temporary file for processing metadata
+ about the resource. A default set of values will be used for all the file contents
+ unless a specific contents dict is provided.
+
+ Args:
+ resource_name: Name of the resource to add custom contents to.
+ contents: Optional custom dict to write for the named resource.
+ """
+ if not contents:
+ contents = {'registrypath': 'registrypath',
+ 'username': 'username',
+ 'password': 'password',
+ }
+ if resource_name not in self._meta.resources.keys():
+ raise RuntimeError('Resource {} is not a defined resources'.format(resource_name))
+ if self._meta.resources[resource_name].type != "oci-image":
+ raise RuntimeError('Resource {} is not an OCI Image'.format(resource_name))
+
+ as_yaml = yaml.dump(contents, Dumper=yaml.SafeDumper)
+ self._backend._resources_map[resource_name] = ('contents.yaml', as_yaml)
+
+ def add_resource(self, resource_name: str, content: typing.AnyStr) -> None:
+ """Add content for a resource to the backend.
+
+ This will register the content, so that a call to `Model.resources.fetch(resource_name)`
+ will return a path to a file containing that content.
+
+ Args:
+ resource_name: The name of the resource being added
+ contents: Either string or bytes content, which will be the content of the filename
+ returned by resource-get. If contents is a string, it will be encoded in utf-8
+ """
+ if resource_name not in self._meta.resources.keys():
+ raise RuntimeError('Resource {} is not a defined resources'.format(resource_name))
+ record = self._meta.resources[resource_name]
+ if record.type != "file":
+ raise RuntimeError(
+ 'Resource {} is not a file, but actually {}'.format(resource_name, record.type))
+ filename = record.filename
+ if filename is None:
+ filename = resource_name
+
+ self._backend._resources_map[resource_name] = (filename, content)
+
+ def populate_oci_resources(self) -> None:
+ """Populate all OCI resources."""
+ for name, data in self._meta.resources.items():
+ if data.type == "oci-image":
+ self.add_oci_resource(name)
+
+ def disable_hooks(self) -> None:
+ """Stop emitting hook events when the model changes.
+
+ This can be used by developers to stop changes to the model from emitting events that
+ the charm will react to. Call :meth:`.enable_hooks`
+ to re-enable them.
+ """
+ self._hooks_enabled = False
+
+ def enable_hooks(self) -> None:
+ """Re-enable hook events from charm.on when the model is changed.
+
+ By default hook events are enabled once you call :meth:`.begin`,
+ but if you have used :meth:`.disable_hooks`, this can be used to
+ enable them again.
+ """
+ self._hooks_enabled = True
+
+ @contextmanager
+ def hooks_disabled(self):
+ """A context manager to run code with hooks disabled.
+
+ Example::
+
+ with harness.hooks_disabled():
+ # things in here don't fire events
+ harness.set_leader(True)
+ harness.update_config(unset=['foo', 'bar'])
+ # things here will again fire events
+ """
+ self.disable_hooks()
+ try:
+ yield None
+ finally:
+ self.enable_hooks()
+
+ def _next_relation_id(self):
+ rel_id = self._relation_id_counter
+ self._relation_id_counter += 1
+ return rel_id
+
    def add_relation(self, relation_name: str, remote_app: str) -> int:
        """Declare that there is a new relation between this app and `remote_app`.

        Args:
            relation_name: The relation on Charm that is being related to
            remote_app: The name of the application that is being related to

        Return:
            The relation_id created by this add_relation.
        """
        rel_id = self._next_relation_id()
        # Register the new id under its relation name and remember the reverse mapping.
        self._backend._relation_ids_map.setdefault(relation_name, []).append(rel_id)
        self._backend._relation_names[rel_id] = relation_name
        # No remote units have joined yet; see add_relation_unit().
        self._backend._relation_list_map[rel_id] = []
        # Seed empty data bags for the remote app, our unit, and our app.
        self._backend._relation_data[rel_id] = {
            remote_app: {},
            self._backend.unit_name: {},
            self._backend.app_name: {},
        }
        self._backend._relation_app_and_units[rel_id] = {
            "app": remote_app,
            "units": [],
        }
        # Reload the relation_ids list
        if self._model is not None:
            self._model.relations._invalidate(relation_name)
        self._emit_relation_created(relation_name, rel_id, remote_app)
        return rel_id
+
+ def _emit_relation_created(self, relation_name: str, relation_id: int,
+ remote_app: str) -> None:
+ """Trigger relation-created for a given relation with a given remote application."""
+ if self._charm is None or not self._hooks_enabled:
+ return
+ if self._charm is None or not self._hooks_enabled:
+ return
+ relation = self._model.get_relation(relation_name, relation_id)
+ app = self._model.get_app(remote_app)
+ self._charm.on[relation_name].relation_created.emit(
+ relation, app)
+
    def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None:
        """Add a new unit to a relation.

        Example::

            rel_id = harness.add_relation('db', 'postgresql')
            harness.add_relation_unit(rel_id, 'postgresql/0')

        This will trigger a `relation_joined` event. This would naturally be
        followed by a `relation_changed` event, which you can trigger with
        :meth:`.update_relation_data`. This separation is artificial in the
        sense that Juju will always fire the two, but is intended to make
        testing relations and their data bags slightly more natural.

        Args:
            relation_id: The integer relation identifier (as returned by add_relation).
            remote_unit_name: A string representing the remote unit that is being added.
        Return:
            None
        """
        self._backend._relation_list_map[relation_id].append(remote_unit_name)
        # The new unit starts with an empty data bag.
        self._backend._relation_data[relation_id][remote_unit_name] = {}
        # TODO: jam 2020-08-03 This is where we could assert that the unit name matches the
        # application name (eg you don't have a relation to 'foo' but add units of 'bar/0'
        self._backend._relation_app_and_units[relation_id]["units"].append(remote_unit_name)
        relation_name = self._backend._relation_names[relation_id]
        # Make sure that the Model reloads the relation_list for this relation_id, as well as
        # reloading the relation data for this unit.
        if self._model is not None:
            remote_unit = self._model.get_unit(remote_unit_name)
            relation = self._model.get_relation(relation_name, relation_id)
            unit_cache = relation.data.get(remote_unit, None)
            if unit_cache is not None:
                unit_cache._invalidate()
            self._model.relations._invalidate(relation_name)
        if self._charm is None or not self._hooks_enabled:
            return
        # NOTE(review): `relation`/`remote_unit` are only bound inside the
        # `self._model is not None` branch above; emitting here presumably
        # implies begin() ran and _model exists — confirm.
        self._charm.on[relation_name].relation_joined.emit(
            relation, remote_unit.app, remote_unit)
+
+ def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping:
+ """Get the relation data bucket for a single app or unit in a given relation.
+
+ This ignores all of the safety checks of who can and can't see data in relations (eg,
+ non-leaders can't read their own application's relation data because there are no events
+ that keep that data up-to-date for the unit).
+
+ Args:
+ relation_id: The relation whose content we want to look at.
+ app_or_unit: The name of the application or unit whose data we want to read
+ Return:
+ a dict containing the relation data for `app_or_unit` or None.
+ Raises:
+ KeyError: if relation_id doesn't exist
+ """
+ return self._backend._relation_data[relation_id].get(app_or_unit, None)
+
+ def get_pod_spec(self) -> (typing.Mapping, typing.Mapping):
+ """Return the content of the pod spec as last set by the charm.
+
+ This returns both the pod spec and any k8s_resources that were supplied.
+ See the signature of Model.pod.set_spec
+ """
+ return self._backend._pod_spec
+
+ def get_workload_version(self) -> str:
+ """Read the workload version that was set by the unit."""
+ return self._backend._workload_version
+
+ def set_model_name(self, name: str) -> None:
+ """Set the name of the Model that this is representing.
+
+ This cannot be called once begin() has been called. But it lets you set the value that
+ will be returned by Model.name.
+ """
+ if self._charm is not None:
+ raise RuntimeError('cannot set the Model name after begin()')
+ self._backend.model_name = name
+
    def update_relation_data(
            self,
            relation_id: int,
            app_or_unit: str,
            key_values: typing.Mapping,
    ) -> None:
        """Update the relation data for a given unit or application in a given relation.

        This also triggers the `relation_changed` event for this relation_id.

        Args:
            relation_id: The integer relation_id representing this relation.
            app_or_unit: The unit or application name that is being updated.
                This can be the local or remote application.
            key_values: Each key/value will be updated in the relation data.
        """
        relation_name = self._backend._relation_names[relation_id]
        relation = self._model.get_relation(relation_name, relation_id)
        # A '/' distinguishes unit names ('app/0') from application names.
        if '/' in app_or_unit:
            entity = self._model.get_unit(app_or_unit)
        else:
            entity = self._model.get_app(app_or_unit)
        rel_data = relation.data.get(entity, None)
        if rel_data is not None:
            # rel_data may have cached now-stale data, so _invalidate() it.
            # Note, this won't cause the data to be loaded if it wasn't already.
            rel_data._invalidate()

        # Rebuild the bag from a copy; setting a value to '' deletes the key,
        # mirroring Juju's relation-set semantics.
        new_values = self._backend._relation_data[relation_id][app_or_unit].copy()
        for k, v in key_values.items():
            if v == '':
                new_values.pop(k, None)
            else:
                new_values[k] = v
        self._backend._relation_data[relation_id][app_or_unit] = new_values

        if app_or_unit == self._model.unit.name:
            # No events for our own unit
            return
        if app_or_unit == self._model.app.name:
            # updating our own app only generates an event if it is a peer relation and we
            # aren't the leader
            is_peer = self._meta.relations[relation_name].role.is_peer()
            if not is_peer:
                return
            if self._model.unit.is_leader():
                return
        self._emit_relation_changed(relation_id, app_or_unit)
+
+ def _emit_relation_changed(self, relation_id, app_or_unit):
+ if self._charm is None or not self._hooks_enabled:
+ return
+ rel_name = self._backend._relation_names[relation_id]
+ relation = self.model.get_relation(rel_name, relation_id)
+ if '/' in app_or_unit:
+ app_name = app_or_unit.split('/')[0]
+ unit_name = app_or_unit
+ app = self.model.get_app(app_name)
+ unit = self.model.get_unit(unit_name)
+ args = (relation, app, unit)
+ else:
+ app_name = app_or_unit
+ app = self.model.get_app(app_name)
+ args = (relation, app)
+ self._charm.on[rel_name].relation_changed.emit(*args)
+
+ def _update_config(
+ self,
+ key_values: typing.Mapping[str, str] = None,
+ unset: typing.Iterable[str] = (),
+ ) -> None:
+ """Update the config as seen by the charm.
+
+ This will *not* trigger a `config_changed` event, and is intended for internal use.
+
+ Note that the `key_values` mapping will only add or update configuration items.
+ To remove existing ones, see the `unset` parameter.
+
+ Args:
+ key_values: A Mapping of key:value pairs to update in config.
+ unset: An iterable of keys to remove from Config. (Note that this does
+ not currently reset the config values to the default defined in config.yaml.)
+ """
+ # NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config
+ # is a LazyMapping, but its _load returns a dict and this method mutates
+ # the dict that Config is caching. Arguably we should be doing some sort
+ # of charm.framework.model.config._invalidate()
+ config = self._backend._config
+ if key_values is not None:
+ for key, value in key_values.items():
+ config[key] = value
+ for key in unset:
+ config.pop(key, None)
+
+ def update_config(
+ self,
+ key_values: typing.Mapping[str, str] = None,
+ unset: typing.Iterable[str] = (),
+ ) -> None:
+ """Update the config as seen by the charm.
+
+ This will trigger a `config_changed` event.
+
+ Note that the `key_values` mapping will only add or update configuration items.
+ To remove existing ones, see the `unset` parameter.
+
+ Args:
+ key_values: A Mapping of key:value pairs to update in config.
+ unset: An iterable of keys to remove from Config. (Note that this does
+ not currently reset the config values to the default defined in config.yaml.)
+ """
+ self._update_config(key_values, unset)
+ if self._charm is None or not self._hooks_enabled:
+ return
+ self._charm.on.config_changed.emit()
+
+ def set_leader(self, is_leader: bool = True) -> None:
+ """Set whether this unit is the leader or not.
+
+ If this charm becomes a leader then `leader_elected` will be triggered.
+
+ Args:
+ is_leader: True/False as to whether this unit is the leader.
+ """
+ was_leader = self._backend._is_leader
+ self._backend._is_leader = is_leader
+ # Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in
+ # the Model objects, so this automatically gets noticed.
+ if is_leader and not was_leader and self._charm is not None and self._hooks_enabled:
+ self._charm.on.leader_elected.emit()
+
+ def _get_backend_calls(self, reset: bool = True) -> list:
+ """Return the calls that we have made to the TestingModelBackend.
+
+ This is useful mostly for testing the framework itself, so that we can assert that we
+ do/don't trigger extra calls.
+
+ Args:
+ reset: If True, reset the calls list back to empty, if false, the call list is
+ preserved.
+ Return:
+ ``[(call1, args...), (call2, args...)]``
+ """
+ calls = self._backend._calls.copy()
+ if reset:
+ self._backend._calls.clear()
+ return calls
+
+
+def _record_calls(cls):
+ """Replace methods on cls with methods that record that they have been called.
+
+ Iterate all attributes of cls, and for public methods, replace them with a wrapped method
+ that records the method called along with the arguments and keyword arguments.
+ """
+ for meth_name, orig_method in cls.__dict__.items():
+ if meth_name.startswith('_'):
+ continue
+
+ def decorator(orig_method):
+ def wrapped(self, *args, **kwargs):
+ full_args = (orig_method.__name__,) + args
+ if kwargs:
+ full_args = full_args + (kwargs,)
+ self._calls.append(full_args)
+ return orig_method(self, *args, **kwargs)
+ return wrapped
+
+ setattr(cls, meth_name, decorator(orig_method))
+ return cls
+
+
+class _ResourceEntry:
+ """Tracks the contents of a Resource."""
+
+ def __init__(self, resource_name):
+ self.name = resource_name
+
+
@_record_calls
class _TestingModelBackend:
    """This conforms to the interface for ModelBackend but provides canned data.

    DO NOT use this class directly, it is used by `Harness`_ to drive the model.
    `Harness`_ is responsible for maintaining the internal consistency of the values here,
    as the only public methods of this type are for implementing ModelBackend.
    """

    def __init__(self, unit_name, meta):
        self.unit_name = unit_name
        self.app_name = self.unit_name.split('/')[0]
        self.model_name = None
        self._calls = []  # call records appended by the @_record_calls wrappers
        self._meta = meta
        self._relation_ids_map = {}  # relation name to [relation_ids,...]
        self._relation_names = {}  # reverse map from relation_id to relation_name
        self._relation_list_map = {}  # relation_id: [unit_name,...]
        self._relation_data = {}  # {relation_id: {name: data}}
        # {relation_id: {"app": app_name, "units": ["app/0",...]}
        self._relation_app_and_units = {}
        self._config = {}
        # Fix: previously assigned twice (None, then False); a single initial
        # False is the effective value.
        self._is_leader = False
        self._resources_map = {}  # {resource_name: (filename, content)}
        self._pod_spec = None
        self._app_status = {'status': 'unknown', 'message': ''}
        self._unit_status = {'status': 'maintenance', 'message': ''}
        self._workload_version = None
        self._resource_dir = None  # lazily-created TemporaryDirectory

    def _cleanup(self):
        # Remove the temporary resource directory, if one was created.
        if self._resource_dir is not None:
            self._resource_dir.cleanup()
            self._resource_dir = None

    def _get_resource_dir(self) -> pathlib.Path:
        if self._resource_dir is None:
            # In actual Juju, the resource path for a charm's resource is
            # $AGENT_DIR/resources/$RESOURCE_NAME/$RESOURCE_FILENAME
            # However, charms shouldn't depend on this.
            self._resource_dir = tempfile.TemporaryDirectory(prefix='tmp-ops-test-resource-')
        return pathlib.Path(self._resource_dir.name)

    def relation_ids(self, relation_name):
        try:
            return self._relation_ids_map[relation_name]
        except KeyError as e:
            if relation_name not in self._meta.relations:
                raise model.ModelError('{} is not a known relation'.format(relation_name)) from e
            # A declared relation with no instances yet.
            return []

    def relation_list(self, relation_id):
        try:
            return self._relation_list_map[relation_id]
        except KeyError as e:
            raise model.RelationNotFoundError from e

    def relation_get(self, relation_id, member_name, is_app):
        # App data bags are keyed by application name; derive it from a unit name.
        if is_app and '/' in member_name:
            member_name = member_name.split('/')[0]
        if relation_id not in self._relation_data:
            raise model.RelationNotFoundError()
        # Return a copy so callers cannot mutate backend state.
        return self._relation_data[relation_id][member_name].copy()

    def relation_set(self, relation_id, key, value, is_app):
        relation = self._relation_data[relation_id]
        if is_app:
            bucket_key = self.app_name
        else:
            bucket_key = self.unit_name
        if bucket_key not in relation:
            relation[bucket_key] = {}
        bucket = relation[bucket_key]
        # Per Juju relation-set semantics, '' deletes the key.
        if value == '':
            bucket.pop(key, None)
        else:
            bucket[key] = value

    def config_get(self):
        return self._config

    def is_leader(self):
        return self._is_leader

    def application_version_set(self, version):
        self._workload_version = version

    def resource_get(self, resource_name):
        if resource_name not in self._resources_map:
            # Mimic the error Juju's resource-get reports for a missing resource.
            raise model.ModelError(
                "ERROR could not download resource: HTTP request failed: "
                "Get https://.../units/unit-{}/resources/{}: resource#{}/{} not found".format(
                    self.unit_name.replace('/', '-'), resource_name, self.app_name, resource_name
                ))
        filename, contents = self._resources_map[resource_name]
        resource_dir = self._get_resource_dir()
        resource_filename = resource_dir / resource_name / filename
        if not resource_filename.exists():
            # Write text or bytes depending on the registered content type.
            if isinstance(contents, bytes):
                mode = 'wb'
            else:
                mode = 'wt'
            resource_filename.parent.mkdir(exist_ok=True)
            with resource_filename.open(mode=mode) as resource_file:
                resource_file.write(contents)
        return resource_filename

    def pod_spec_set(self, spec, k8s_resources):
        self._pod_spec = (spec, k8s_resources)

    def status_get(self, *, is_app=False):
        if is_app:
            return self._app_status
        else:
            return self._unit_status

    def status_set(self, status, message='', *, is_app=False):
        if is_app:
            self._app_status = {'status': status, 'message': message}
        else:
            self._unit_status = {'status': status, 'message': message}

    # The remaining ModelBackend hooks are not supported by the testing
    # backend; each raises NotImplementedError naming the method.

    def storage_list(self, name):
        raise NotImplementedError(self.storage_list)

    def storage_get(self, storage_name_id, attribute):
        raise NotImplementedError(self.storage_get)

    def storage_add(self, name, count=1):
        raise NotImplementedError(self.storage_add)

    def action_get(self):
        raise NotImplementedError(self.action_get)

    def action_set(self, results):
        raise NotImplementedError(self.action_set)

    def action_log(self, message):
        raise NotImplementedError(self.action_log)

    def action_fail(self, message=''):
        raise NotImplementedError(self.action_fail)

    def network_get(self, endpoint_name, relation_id=None):
        raise NotImplementedError(self.network_get)
diff --git a/coredns/venv/ops/version.py b/coredns/venv/ops/version.py
new file mode 100644
index 0000000..9ecc830
--- /dev/null
+++ b/coredns/venv/ops/version.py
@@ -0,0 +1,3 @@
+# this is a generated file
+
+version = '0.10.0'
diff --git a/coredns/venv/yaml/__init__.py b/coredns/venv/yaml/__init__.py
new file mode 100644
index 0000000..13d687c
--- /dev/null
+++ b/coredns/venv/yaml/__init__.py
@@ -0,0 +1,427 @@
+
+from .error import *
+
+from .tokens import *
+from .events import *
+from .nodes import *
+
+from .loader import *
+from .dumper import *
+
+__version__ = '5.3.1'
+try:
+ from .cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+import io
+
+#------------------------------------------------------------------------------
+# Warnings control
+#------------------------------------------------------------------------------
+
# 'Global' warnings state:
_warnings_enabled = {
    'YAMLLoadWarning': True,
}

# Get or set global warnings' state
def warnings(settings=None):
    """With no argument, return the module's warnings-state dict.

    With a dict argument, update the known warning flags from it (unknown
    keys are ignored) and return None.
    """
    if settings is None:
        return _warnings_enabled
    if type(settings) is dict:
        for key, value in settings.items():
            if key in _warnings_enabled:
                _warnings_enabled[key] = value
+
# Warn when load() is called without Loader=...
class YAMLLoadWarning(RuntimeWarning):
    """Warning issued when yaml.load() is called without an explicit Loader."""
+
def load_warning(method):
    """Emit a YAMLLoadWarning telling the caller of yaml.<method>() to pass Loader=..."""
    if _warnings_enabled['YAMLLoadWarning'] is False:
        return

    import warnings

    template = (
        "calling yaml.%s() without Loader=... is deprecated, as the "
        "default Loader is unsafe. Please read "
        "https://msg.pyyaml.org/load for full details."
    )
    # stacklevel=3 points the warning at the user's call site.
    warnings.warn(template % method, YAMLLoadWarning, stacklevel=3)
+
+#------------------------------------------------------------------------------
def scan(stream, Loader=Loader):
    """Scan a YAML stream, yielding its scanning tokens one at a time."""
    scanner = Loader(stream)
    try:
        while scanner.check_token():
            yield scanner.get_token()
    finally:
        scanner.dispose()
+
def parse(stream, Loader=Loader):
    """Parse a YAML stream, yielding its parsing events one at a time."""
    parser = Loader(stream)
    try:
        while parser.check_event():
            yield parser.get_event()
    finally:
        parser.dispose()
+
def compose(stream, Loader=Loader):
    """Parse the first YAML document in *stream* and return its representation tree."""
    composer = Loader(stream)
    try:
        return composer.get_single_node()
    finally:
        composer.dispose()
+
def compose_all(stream, Loader=Loader):
    """Parse all YAML documents in *stream*, yielding one representation tree each."""
    composer = Loader(stream)
    try:
        while composer.check_node():
            yield composer.get_node()
    finally:
        composer.dispose()
+
def load(stream, Loader=None):
    """Parse the first YAML document in *stream* into a Python object.

    SECURITY NOTE: without an explicit Loader this falls back to FullLoader
    (after emitting a YAMLLoadWarning); prefer safe_load() for untrusted input.
    """
    if Loader is None:
        load_warning('load')
        Loader = FullLoader

    document_loader = Loader(stream)
    try:
        return document_loader.get_single_data()
    finally:
        document_loader.dispose()
+
def load_all(stream, Loader=None):
    """Parse all YAML documents in *stream*, yielding one Python object each.

    SECURITY NOTE: without an explicit Loader this falls back to FullLoader
    (after emitting a YAMLLoadWarning); prefer safe_load_all() for untrusted input.
    """
    if Loader is None:
        load_warning('load_all')
        Loader = FullLoader

    document_loader = Loader(stream)
    try:
        while document_loader.check_data():
            yield document_loader.get_data()
    finally:
        document_loader.dispose()
+
def full_load(stream):
    """Parse the first YAML document in *stream* into a Python object,
    resolving all tags except those known to be unsafe on untrusted input.
    """
    return load(stream, FullLoader)
+
def full_load_all(stream):
    """Parse every YAML document in *stream* into Python objects,
    resolving all tags except those known to be unsafe on untrusted input.
    """
    return load_all(stream, FullLoader)
+
def safe_load(stream):
    """Parse the first YAML document in *stream* into a Python object,
    resolving only basic YAML tags (safe for untrusted input).
    """
    return load(stream, SafeLoader)
+
def safe_load_all(stream):
    """Parse every YAML document in *stream* into Python objects,
    resolving only basic YAML tags (safe for untrusted input).
    """
    return load_all(stream, SafeLoader)
+
def unsafe_load(stream):
    """Parse the first YAML document in *stream* into a Python object,
    resolving ALL tags — including those unsafe on untrusted input.
    """
    return load(stream, UnsafeLoader)
+
def unsafe_load_all(stream):
    """Parse every YAML document in *stream* into Python objects,
    resolving ALL tags — including those unsafe on untrusted input.
    """
    return load_all(stream, UnsafeLoader)
+
def emit(events, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None):
    """Emit YAML parsing events into a stream.
    If stream is None, the produced string is returned instead.
    """
    getvalue = None
    if stream is None:
        # Buffer into an in-memory text stream and hand the result back.
        stream = io.StringIO()
        getvalue = stream.getvalue
    emitter = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break)
    try:
        for event in events:
            emitter.emit(event)
    finally:
        emitter.dispose()
    if getvalue:
        return getvalue()
+
def serialize_all(nodes, stream=None, Dumper=Dumper,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding=None, explicit_start=None, explicit_end=None,
        version=None, tags=None):
    """Serialize a sequence of representation trees into a YAML stream.
    If stream is None, the produced string (or bytes, when an encoding is
    given) is returned instead.
    """
    getvalue = None
    if stream is None:
        # Text buffer when no encoding is requested, bytes buffer otherwise.
        stream = io.StringIO() if encoding is None else io.BytesIO()
        getvalue = stream.getvalue
    serializer = Dumper(stream, canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        serializer.open()
        for node in nodes:
            serializer.serialize(node)
        serializer.close()
    finally:
        serializer.dispose()
    if getvalue:
        return getvalue()
+
def serialize(node, stream=None, Dumper=Dumper, **kwds):
    """Serialize one representation tree into a YAML stream.
    If stream is None, the produced string is returned instead.
    """
    return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
def dump_all(documents, stream=None, Dumper=Dumper,
        default_style=None, default_flow_style=False,
        canonical=None, indent=None, width=None,
        allow_unicode=None, line_break=None,
        encoding=None, explicit_start=None, explicit_end=None,
        version=None, tags=None, sort_keys=True):
    """Serialize a sequence of Python objects into a YAML stream.
    If stream is None, the produced string (or bytes, when an encoding is
    given) is returned instead.
    """
    getvalue = None
    if stream is None:
        # Text buffer when no encoding is requested, bytes buffer otherwise.
        stream = io.StringIO() if encoding is None else io.BytesIO()
        getvalue = stream.getvalue
    representer = Dumper(stream, default_style=default_style,
            default_flow_style=default_flow_style,
            canonical=canonical, indent=indent, width=width,
            allow_unicode=allow_unicode, line_break=line_break,
            encoding=encoding, version=version, tags=tags,
            explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys)
    try:
        representer.open()
        for data in documents:
            representer.represent(data)
        representer.close()
    finally:
        representer.dispose()
    if getvalue:
        return getvalue()
+
def dump(data, stream=None, Dumper=Dumper, **kwds):
    """Serialize one Python object into a YAML stream.
    If stream is None, the produced string is returned instead.
    """
    return dump_all([data], stream, Dumper=Dumper, **kwds)
+
def safe_dump_all(documents, stream=None, **kwds):
    """Serialize a sequence of Python objects into a YAML stream using only
    basic YAML tags. If stream is None, the produced string is returned.
    """
    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
def safe_dump(data, stream=None, **kwds):
    """Serialize one Python object into a YAML stream using only basic YAML
    tags. If stream is None, the produced string is returned.
    """
    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
def add_implicit_resolver(tag, regexp, first=None,
        Loader=None, Dumper=Dumper):
    """
    Add an implicit scalar detector.
    If an implicit scalar value matches the given regexp,
    the corresponding tag is assigned to the scalar.
    first is a sequence of possible initial characters or None.
    """
    if Loader is None:
        # No specific Loader: register on all three default loader classes.
        loader.Loader.add_implicit_resolver(tag, regexp, first)
        loader.FullLoader.add_implicit_resolver(tag, regexp, first)
        loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first)
    else:
        Loader.add_implicit_resolver(tag, regexp, first)
    # The Dumper must be updated unconditionally (upstream PyYAML 5.3.1
    # behavior); as written above it was only reached in the `else` branch,
    # so dumpers never learned resolvers registered for the default loaders.
    Dumper.add_implicit_resolver(tag, regexp, first)
+
def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper):
    """
    Add a path based resolver for the given tag.
    A path is a list of keys that forms a path
    to a node in the representation tree.
    Keys can be string values, integers, or None.
    """
    if Loader is None:
        # No specific Loader: register on all three default loader classes.
        loader.Loader.add_path_resolver(tag, path, kind)
        loader.FullLoader.add_path_resolver(tag, path, kind)
        loader.UnsafeLoader.add_path_resolver(tag, path, kind)
    else:
        Loader.add_path_resolver(tag, path, kind)
    # Update the Dumper unconditionally (upstream PyYAML 5.3.1 behavior);
    # previously this was only reached in the `else` branch.
    Dumper.add_path_resolver(tag, path, kind)
+
def add_constructor(tag, constructor, Loader=None):
    """
    Add a constructor for the given tag.
    Constructor is a function that accepts a Loader instance
    and a node object and produces the corresponding Python object.
    When Loader is None, registration is applied to all default loaders.
    """
    if Loader is not None:
        Loader.add_constructor(tag, constructor)
    else:
        loader.Loader.add_constructor(tag, constructor)
        loader.FullLoader.add_constructor(tag, constructor)
        loader.UnsafeLoader.add_constructor(tag, constructor)
+
def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
    """
    Add a multi-constructor for the given tag prefix.
    Multi-constructor is called for a node if its tag starts with tag_prefix.
    Multi-constructor accepts a Loader instance, a tag suffix,
    and a node object and produces the corresponding Python object.
    When Loader is None, registration is applied to all default loaders.
    """
    if Loader is not None:
        Loader.add_multi_constructor(tag_prefix, multi_constructor)
    else:
        loader.Loader.add_multi_constructor(tag_prefix, multi_constructor)
        loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor)
        loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor)
+
def add_representer(data_type, representer, Dumper=Dumper):
    """
    Register *representer* for instances of exactly *data_type*.
    A representer accepts a Dumper instance and a value of the given type
    and produces the corresponding representation node.
    """
    Dumper.add_representer(data_type, representer)
+
def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
    """
    Register *multi_representer* for instances of *data_type* or any subtype.
    A multi-representer accepts a Dumper instance and a value of the given
    type (or subtype) and produces the corresponding representation node.
    """
    Dumper.add_multi_representer(data_type, multi_representer)
+
class YAMLObjectMetaclass(type):
    """
    The metaclass for YAMLObject.

    Automatically registers the subclass's from_yaml constructor and to_yaml
    representer whenever a subclass defines a non-None yaml_tag.
    """
    def __init__(cls, name, bases, kwds):
        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
        # Only subclasses that declare a concrete yaml_tag get registered.
        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
            if isinstance(cls.yaml_loader, list):
                # Register the constructor with every configured loader class.
                for loader in cls.yaml_loader:
                    loader.add_constructor(cls.yaml_tag, cls.from_yaml)
            else:
                cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)

            cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
class YAMLObject(metaclass=YAMLObjectMetaclass):
    """
    An object that can dump itself to a YAML stream
    and load itself from a YAML stream.

    Subclasses set yaml_tag (and optionally yaml_loader/yaml_dumper/
    yaml_flow_style); the metaclass performs the registration.
    """

    __slots__ = ()  # no direct instantiation, so allow immutable subclasses

    # Loader class(es) the constructor is registered with, and the Dumper
    # class the representer is registered with.
    yaml_loader = [Loader, FullLoader, UnsafeLoader]
    yaml_dumper = Dumper

    # The YAML tag for this class; None means "do not register".
    yaml_tag = None
    yaml_flow_style = None

    @classmethod
    def from_yaml(cls, loader, node):
        """
        Convert a representation node to a Python object.
        """
        return loader.construct_yaml_object(node, cls)

    @classmethod
    def to_yaml(cls, dumper, data):
        """
        Convert a Python object to a representation node.
        """
        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
                flow_style=cls.yaml_flow_style)
+
diff --git a/coredns/venv/yaml/composer.py b/coredns/venv/yaml/composer.py
new file mode 100644
index 0000000..6d15cb4
--- /dev/null
+++ b/coredns/venv/yaml/composer.py
@@ -0,0 +1,139 @@
+
+__all__ = ['Composer', 'ComposerError']
+
+from .error import MarkedYAMLError
+from .events import *
+from .nodes import *
+
class ComposerError(MarkedYAMLError):
    """Raised when parser events cannot be composed into a valid node tree."""
+
class Composer:
    """Builds representation nodes (ScalarNode/SequenceNode/MappingNode) from
    parser events.

    Designed to be mixed into a Loader that also provides the parser interface
    (check_event/get_event/peek_event) and the resolver interface
    (resolve/descend_resolver/ascend_resolver).
    """

    def __init__(self):
        # Maps anchor name -> already-composed node, for resolving aliases.
        self.anchors = {}

    def check_node(self):
        # Drop the STREAM-START event.
        if self.check_event(StreamStartEvent):
            self.get_event()

        # If there are more documents available?
        return not self.check_event(StreamEndEvent)

    def get_node(self):
        # Get the root node of the next document.
        if not self.check_event(StreamEndEvent):
            return self.compose_document()

    def get_single_node(self):
        # Drop the STREAM-START event.
        self.get_event()

        # Compose a document if the stream is not empty.
        document = None
        if not self.check_event(StreamEndEvent):
            document = self.compose_document()

        # Ensure that the stream contains no more documents.
        if not self.check_event(StreamEndEvent):
            event = self.get_event()
            raise ComposerError("expected a single document in the stream",
                    document.start_mark, "but found another document",
                    event.start_mark)

        # Drop the STREAM-END event.
        self.get_event()

        return document

    def compose_document(self):
        # Drop the DOCUMENT-START event.
        self.get_event()

        # Compose the root node.
        node = self.compose_node(None, None)

        # Drop the DOCUMENT-END event.
        self.get_event()

        # Anchors are document-scoped; reset for the next document.
        self.anchors = {}
        return node

    def compose_node(self, parent, index):
        if self.check_event(AliasEvent):
            # An alias refers back to a previously anchored node.
            event = self.get_event()
            anchor = event.anchor
            if anchor not in self.anchors:
                raise ComposerError(None, None, "found undefined alias %r"
                        % anchor, event.start_mark)
            return self.anchors[anchor]
        event = self.peek_event()
        anchor = event.anchor
        if anchor is not None:
            if anchor in self.anchors:
                raise ComposerError("found duplicate anchor %r; first occurrence"
                        % anchor, self.anchors[anchor].start_mark,
                        "second occurrence", event.start_mark)
        self.descend_resolver(parent, index)
        # NOTE(review): presumably the parser only emits scalar/sequence/
        # mapping starts at this point, so one branch always matches — confirm.
        if self.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.ascend_resolver()
        return node

    def compose_scalar_node(self, anchor):
        event = self.get_event()
        tag = event.tag
        if tag is None or tag == '!':
            # Non-specific tag: let the resolver pick one from the value.
            tag = self.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(tag, event.value,
                event.start_mark, event.end_mark, style=event.style)
        if anchor is not None:
            self.anchors[anchor] = node
        return node

    def compose_sequence_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        if anchor is not None:
            # Record the anchor before composing children, so aliases inside
            # the sequence can refer back to it.
            self.anchors[anchor] = node
        index = 0
        while not self.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node

    def compose_mapping_node(self, anchor):
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        if anchor is not None:
            # Record the anchor before composing children (see above).
            self.anchors[anchor] = node
        while not self.check_event(MappingEndEvent):
            #key_event = self.peek_event()
            item_key = self.compose_node(node, None)
            #if item_key in node.value:
            #    raise ComposerError("while composing a mapping", start_event.start_mark,
            #            "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            #node.value[item_key] = item_value
            node.value.append((item_key, item_value))
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
+
diff --git a/coredns/venv/yaml/constructor.py b/coredns/venv/yaml/constructor.py
new file mode 100644
index 0000000..1948b12
--- /dev/null
+++ b/coredns/venv/yaml/constructor.py
@@ -0,0 +1,748 @@
+
+__all__ = [
+ 'BaseConstructor',
+ 'SafeConstructor',
+ 'FullConstructor',
+ 'UnsafeConstructor',
+ 'Constructor',
+ 'ConstructorError'
+]
+
+from .error import *
+from .nodes import *
+
+import collections.abc, datetime, base64, binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor:
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+ # If there are more documents available?
+ return self.check_node()
+
+ def check_state_key(self, key):
+ """Block special attributes/methods from being set in a newly created
+ object, to prevent user-controlled methods from being called during
+ deserialization"""
+ if self.get_state_keys_blacklist_regexp().match(key):
+ raise ConstructorError(None, None,
+ "blacklisted key '%s' in instance state found" % (key,), None)
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if tag_prefix is not None and node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = next(generator)
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ if not isinstance(key, collections.abc.Hashable):
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unhashable key", key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ @classmethod
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+
+ @classmethod
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == 'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return super().construct_scalar(node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == 'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == 'tag:yaml.org,2002:value':
+ key_node.tag = 'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return super().construct_mapping(node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ 'yes': True,
+ 'no': False,
+ 'true': True,
+ 'false': False,
+ 'on': True,
+ 'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ r'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ tzinfo = None
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ tzinfo = datetime.timezone(delta)
+ elif values['tz']:
+ tzinfo = datetime.timezone.utc
+ return datetime.datetime(year, month, day, hour, minute, second, fraction,
+ tzinfo=tzinfo)
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag,
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class FullConstructor(SafeConstructor):
+ # 'extend' is blacklisted because it is used by
+ # construct_python_object_apply to add `listitems` to a newly generate
+ # python instance
+ def get_state_keys_blacklist(self):
+ return ['^extend$', '^__.*__$']
+
+ def get_state_keys_blacklist_regexp(self):
+ if not hasattr(self, 'state_keys_blacklist_regexp'):
+ self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')')
+ return self.state_keys_blacklist_regexp
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_bytes(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ def construct_python_long(self, node):
+ return self.construct_yaml_int(node)
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark, unsafe=False):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ if unsafe:
+ try:
+ __import__(name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name, exc), mark)
+ if name not in sys.modules:
+ raise ConstructorError("while constructing a Python module", mark,
+ "module %r is not imported" % name, mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark, unsafe=False):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if '.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = 'builtins'
+ object_name = name
+ if unsafe:
+ try:
+ __import__(module_name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name, exc), mark)
+ if module_name not in sys.modules:
+ raise ConstructorError("while constructing a Python object", mark,
+ "module %r is not imported" % module_name, mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r"
+ % (object_name, module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False, unsafe=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if not (unsafe or isinstance(cls, type)):
+ raise ConstructorError("while constructing a Python instance", node.start_mark,
+ "expected a class, but found %r" % type(cls),
+ node.start_mark)
+ if newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state, unsafe=False):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ if not unsafe and state:
+ for key in state.keys():
+ self.check_state_key(key)
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ if not unsafe:
+ self.check_state_key(key)
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/none',
+ FullConstructor.construct_yaml_null)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/bool',
+ FullConstructor.construct_yaml_bool)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/str',
+ FullConstructor.construct_python_str)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/unicode',
+ FullConstructor.construct_python_unicode)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/bytes',
+ FullConstructor.construct_python_bytes)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/int',
+ FullConstructor.construct_yaml_int)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/long',
+ FullConstructor.construct_python_long)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/float',
+ FullConstructor.construct_yaml_float)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/complex',
+ FullConstructor.construct_python_complex)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/list',
+ FullConstructor.construct_yaml_seq)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/tuple',
+ FullConstructor.construct_python_tuple)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/dict',
+ FullConstructor.construct_yaml_map)
+
+FullConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/name:',
+ FullConstructor.construct_python_name)
+
+FullConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/module:',
+ FullConstructor.construct_python_module)
+
+FullConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object:',
+ FullConstructor.construct_python_object)
+
+FullConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/new:',
+ FullConstructor.construct_python_object_new)
+
+class UnsafeConstructor(FullConstructor):
+
+ def find_python_module(self, name, mark):
+ return super(UnsafeConstructor, self).find_python_module(name, mark, unsafe=True)
+
+ def find_python_name(self, name, mark):
+ return super(UnsafeConstructor, self).find_python_name(name, mark, unsafe=True)
+
+ def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
+ return super(UnsafeConstructor, self).make_python_instance(
+ suffix, node, args, kwds, newobj, unsafe=True)
+
+ def set_python_instance_state(self, instance, state):
+ return super(UnsafeConstructor, self).set_python_instance_state(
+ instance, state, unsafe=True)
+
+UnsafeConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/apply:',
+ UnsafeConstructor.construct_python_object_apply)
+
+# Constructor is same as UnsafeConstructor. Need to leave this in place in case
+# people have extended it directly.
+class Constructor(UnsafeConstructor):
+ pass
diff --git a/coredns/venv/yaml/cyaml.py b/coredns/venv/yaml/cyaml.py
new file mode 100644
index 0000000..1e606c7
--- /dev/null
+++ b/coredns/venv/yaml/cyaml.py
@@ -0,0 +1,101 @@
+
+__all__ = [
+ 'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper'
+]
+
+from _yaml import CParser, CEmitter
+
+from .constructor import *
+
+from .serializer import *
+from .representer import *
+
+from .resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CFullLoader(CParser, FullConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ FullConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CUnsafeLoader(CParser, UnsafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ UnsafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
diff --git a/coredns/venv/yaml/dumper.py b/coredns/venv/yaml/dumper.py
new file mode 100644
index 0000000..6aadba5
--- /dev/null
+++ b/coredns/venv/yaml/dumper.py
@@ -0,0 +1,62 @@
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from .emitter import *
+from .serializer import *
+from .representer import *
+from .resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
diff --git a/coredns/venv/yaml/emitter.py b/coredns/venv/yaml/emitter.py
new file mode 100644
index 0000000..a664d01
--- /dev/null
+++ b/coredns/venv/yaml/emitter.py
@@ -0,0 +1,1137 @@
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
+
# Public API of this module.
__all__ = ['Emitter', 'EmitterError']
+
+from .error import YAMLError
+from .events import *
+
class EmitterError(YAMLError):
    """Raised when the emitter receives an event that violates the event
    grammar, or data (tags, anchors, versions) that cannot be emitted."""
    pass
+
class ScalarAnalysis:
    """Value object describing a scalar's content and which output styles
    (plain / single-quoted / double-quoted / block) may represent it.

    Produced by Emitter.analyze_scalar and consumed by
    choose_scalar_style / check_simple_key.
    """
    def __init__(self, scalar, empty, multiline,
            allow_flow_plain, allow_block_plain,
            allow_single_quoted, allow_double_quoted,
            allow_block):
        self.scalar = scalar                        # the raw scalar text
        self.empty = empty                          # True if zero-length
        self.multiline = multiline                  # contains line breaks
        self.allow_flow_plain = allow_flow_plain    # plain style, flow context
        self.allow_block_plain = allow_block_plain  # plain style, block context
        self.allow_single_quoted = allow_single_quoted
        self.allow_double_quoted = allow_double_quoted
        self.allow_block = allow_block              # literal/folded styles
+
class Emitter:
    """Turns a stream of YAML events into text written to `stream`.

    Implemented as a pushdown state machine: `self.state` is a bound
    method that consumes `self.event`, and `self.states` is the stack of
    pending continuations for nested nodes.
    """

    # Tag shorthands that are always available without a %TAG directive.
    DEFAULT_TAG_PREFIXES = {
        '!' : '!',
        'tag:yaml.org,2002:' : '!!',
    }

    def __init__(self, stream, canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None):
        """Create an emitter writing to `stream`.

        indent is honored only in 2..9; width only if larger than twice
        the indent; line_break only if one of '\\r', '\\n', '\\r\\n'.
        """

        # The stream should have the methods `write` and possibly `flush`.
        self.stream = stream

        # Encoding can be overridden by STREAM-START.
        self.encoding = None

        # Emitter is a state machine with a stack of states to handle nested
        # structures.
        self.states = []
        self.state = self.expect_stream_start

        # Current event and the event queue.
        self.events = []
        self.event = None

        # The current indentation level and the stack of previous indents.
        self.indents = []
        self.indent = None

        # Flow level.
        self.flow_level = 0

        # Contexts.
        self.root_context = False
        self.sequence_context = False
        self.mapping_context = False
        self.simple_key_context = False

        # Characteristics of the last emitted character:
        #  - current position.
        #  - is it a whitespace?
        #  - is it an indention character
        #    (indentation space, '-', '?', or ':')?
        self.line = 0
        self.column = 0
        self.whitespace = True
        self.indention = True

        # Whether the document requires an explicit document indicator
        self.open_ended = False

        # Formatting details.
        self.canonical = canonical
        self.allow_unicode = allow_unicode
        self.best_indent = 2
        if indent and 1 < indent < 10:
            self.best_indent = indent
        self.best_width = 80
        if width and width > self.best_indent*2:
            self.best_width = width
        self.best_line_break = '\n'
        if line_break in ['\r', '\n', '\r\n']:
            self.best_line_break = line_break

        # Tag prefixes.
        self.tag_prefixes = None

        # Prepared anchor and tag.
        self.prepared_anchor = None
        self.prepared_tag = None

        # Scalar analysis and style.
        self.analysis = None
        self.style = None
+
    def dispose(self):
        """Break reference cycles (state/states hold bound methods that
        reference self) so the emitter can be garbage-collected promptly."""
        # Reset the state attributes (to clear self-references)
        self.states = []
        self.state = None
+
    def emit(self, event):
        """Queue `event` and process queued events as soon as enough
        lookahead is available (see need_more_events)."""
        self.events.append(event)
        while not self.need_more_events():
            self.event = self.events.pop(0)
            self.state()
            self.event = None
+
    # In some cases, we wait for a few next events before emitting.

    def need_more_events(self):
        """Return True if the head event cannot be processed yet.

        Document/sequence/mapping starts need 1/2/3 events of lookahead
        (e.g. to detect an empty collection or document).
        """
        if not self.events:
            return True
        event = self.events[0]
        if isinstance(event, DocumentStartEvent):
            return self.need_events(1)
        elif isinstance(event, SequenceStartEvent):
            return self.need_events(2)
        elif isinstance(event, MappingStartEvent):
            return self.need_events(3)
        else:
            return False
+
    def need_events(self, count):
        """Return True while fewer than `count` lookahead events are
        buffered after the head event.

        Stops requesting more as soon as the lookahead closes the current
        structure (nesting level drops below zero) or the stream ends.
        """
        level = 0
        for event in self.events[1:]:
            if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
                level += 1
            elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
                level -= 1
            elif isinstance(event, StreamEndEvent):
                level = -1
            if level < 0:
                return False
        return (len(self.events) < count+1)
+
    def increase_indent(self, flow=False, indentless=False):
        """Push the current indent and compute the next level.

        The top level starts at best_indent in flow context and at column
        0 in block context; `indentless` keeps the indent unchanged (used
        for block sequences nested in mappings).
        """
        self.indents.append(self.indent)
        if self.indent is None:
            if flow:
                self.indent = self.best_indent
            else:
                self.indent = 0
        elif not indentless:
            self.indent += self.best_indent
+
    # States.

    # Stream handlers.

    def expect_stream_start(self):
        """Initial state: consume STREAM-START and latch its encoding
        (unless the stream already encodes for us)."""
        if isinstance(self.event, StreamStartEvent):
            if self.event.encoding and not hasattr(self.stream, 'encoding'):
                self.encoding = self.event.encoding
            self.write_stream_start()
            self.state = self.expect_first_document_start
        else:
            raise EmitterError("expected StreamStartEvent, but got %s"
                    % self.event)
+
    def expect_nothing(self):
        """Terminal state after STREAM-END: any further event is an error."""
        raise EmitterError("expected nothing, but got %s" % self.event)
+
    # Document handlers.

    def expect_first_document_start(self):
        """State for the first document; only it may omit the '---'."""
        return self.expect_document_start(first=True)
+
    def expect_document_start(self, first=False):
        """Emit a document prologue (%YAML/%TAG directives, '---') for a
        DocumentStartEvent, or finish the stream on StreamEndEvent."""
        if isinstance(self.event, DocumentStartEvent):
            if (self.event.version or self.event.tags) and self.open_ended:
                # The previous document was left open-ended; close it with
                # '...' before any directive.
                self.write_indicator('...', True)
                self.write_indent()
            if self.event.version:
                version_text = self.prepare_version(self.event.version)
                self.write_version_directive(version_text)
            self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
            if self.event.tags:
                handles = sorted(self.event.tags.keys())
                for handle in handles:
                    prefix = self.event.tags[handle]
                    self.tag_prefixes[prefix] = handle
                    handle_text = self.prepare_tag_handle(handle)
                    prefix_text = self.prepare_tag_prefix(prefix)
                    self.write_tag_directive(handle_text, prefix_text)
            # '---' may be omitted only for the first document, when not
            # canonical, with no directives and a non-empty root.
            implicit = (first and not self.event.explicit and not self.canonical
                    and not self.event.version and not self.event.tags
                    and not self.check_empty_document())
            if not implicit:
                self.write_indent()
                self.write_indicator('---', True)
                if self.canonical:
                    self.write_indent()
            self.state = self.expect_document_root
        elif isinstance(self.event, StreamEndEvent):
            if self.open_ended:
                self.write_indicator('...', True)
                self.write_indent()
            self.write_stream_end()
            self.state = self.expect_nothing
        else:
            raise EmitterError("expected DocumentStartEvent, but got %s"
                    % self.event)
+
    def expect_document_end(self):
        """Finish a document: write '...' when the end is explicit, then
        flush and go back to expecting the next document."""
        if isinstance(self.event, DocumentEndEvent):
            self.write_indent()
            if self.event.explicit:
                self.write_indicator('...', True)
                self.write_indent()
            self.flush_stream()
            self.state = self.expect_document_start
        else:
            raise EmitterError("expected DocumentEndEvent, but got %s"
                    % self.event)
+
    def expect_document_root(self):
        """Emit the document's single root node, then expect DOCUMENT-END."""
        self.states.append(self.expect_document_end)
        self.expect_node(root=True)
+
    # Node handlers.

    def expect_node(self, root=False, sequence=False, mapping=False,
            simple_key=False):
        """Dispatch on the current event to the matching node handler,
        recording the syntactic context (root/sequence/mapping/simple-key)
        for style decisions further down."""
        self.root_context = root
        self.sequence_context = sequence
        self.mapping_context = mapping
        self.simple_key_context = simple_key
        if isinstance(self.event, AliasEvent):
            self.expect_alias()
        elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
            self.process_anchor('&')
            self.process_tag()
            if isinstance(self.event, ScalarEvent):
                self.expect_scalar()
            elif isinstance(self.event, SequenceStartEvent):
                # Flow style is forced inside flow context, in canonical
                # mode, on request, and for empty collections.
                if self.flow_level or self.canonical or self.event.flow_style \
                        or self.check_empty_sequence():
                    self.expect_flow_sequence()
                else:
                    self.expect_block_sequence()
            elif isinstance(self.event, MappingStartEvent):
                if self.flow_level or self.canonical or self.event.flow_style \
                        or self.check_empty_mapping():
                    self.expect_flow_mapping()
                else:
                    self.expect_block_mapping()
        else:
            raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
    def expect_alias(self):
        """Emit '*anchor' for an alias node and return to the parent state."""
        if self.event.anchor is None:
            raise EmitterError("anchor is not specified for alias")
        self.process_anchor('*')
        self.state = self.states.pop()
+
    def expect_scalar(self):
        """Emit a scalar node (always at flow indent) and return to the
        parent state."""
        self.increase_indent(flow=True)
        self.process_scalar()
        self.indent = self.indents.pop()
        self.state = self.states.pop()
+
    # Flow sequence handlers.

    def expect_flow_sequence(self):
        """Open a flow sequence: write '[', enter flow context, indent."""
        self.write_indicator('[', True, whitespace=True)
        self.flow_level += 1
        self.increase_indent(flow=True)
        self.state = self.expect_first_flow_sequence_item
+
    def expect_first_flow_sequence_item(self):
        """First flow-sequence item: no leading ',' is written; an
        immediate SequenceEndEvent closes '[]'."""
        if isinstance(self.event, SequenceEndEvent):
            self.indent = self.indents.pop()
            self.flow_level -= 1
            self.write_indicator(']', False)
            self.state = self.states.pop()
        else:
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            self.states.append(self.expect_flow_sequence_item)
            self.expect_node(sequence=True)
+
    def expect_flow_sequence_item(self):
        """Subsequent flow-sequence items: prefix ',' (canonical mode also
        puts the trailing ',' and ']' on their own line)."""
        if isinstance(self.event, SequenceEndEvent):
            self.indent = self.indents.pop()
            self.flow_level -= 1
            if self.canonical:
                self.write_indicator(',', False)
                self.write_indent()
            self.write_indicator(']', False)
            self.state = self.states.pop()
        else:
            self.write_indicator(',', False)
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            self.states.append(self.expect_flow_sequence_item)
            self.expect_node(sequence=True)
+
    # Flow mapping handlers.

    def expect_flow_mapping(self):
        """Open a flow mapping: write '{', enter flow context, indent."""
        self.write_indicator('{', True, whitespace=True)
        self.flow_level += 1
        self.increase_indent(flow=True)
        self.state = self.expect_first_flow_mapping_key
+
    def expect_first_flow_mapping_key(self):
        """First flow-mapping key: no leading ','; an immediate
        MappingEndEvent closes '{}'. Short keys are emitted inline,
        long/complex keys use the explicit '?' form."""
        if isinstance(self.event, MappingEndEvent):
            self.indent = self.indents.pop()
            self.flow_level -= 1
            self.write_indicator('}', False)
            self.state = self.states.pop()
        else:
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            if not self.canonical and self.check_simple_key():
                self.states.append(self.expect_flow_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                self.write_indicator('?', True)
                self.states.append(self.expect_flow_mapping_value)
                self.expect_node(mapping=True)
+
    def expect_flow_mapping_key(self):
        """Subsequent flow-mapping keys: prefix ',', then same simple/
        explicit key choice as for the first key."""
        if isinstance(self.event, MappingEndEvent):
            self.indent = self.indents.pop()
            self.flow_level -= 1
            if self.canonical:
                self.write_indicator(',', False)
                self.write_indent()
            self.write_indicator('}', False)
            self.state = self.states.pop()
        else:
            self.write_indicator(',', False)
            if self.canonical or self.column > self.best_width:
                self.write_indent()
            if not self.canonical and self.check_simple_key():
                self.states.append(self.expect_flow_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                self.write_indicator('?', True)
                self.states.append(self.expect_flow_mapping_value)
                self.expect_node(mapping=True)
+
    def expect_flow_mapping_simple_value(self):
        """Value after an inline (simple) key: ':' directly after the key."""
        self.write_indicator(':', False)
        self.states.append(self.expect_flow_mapping_key)
        self.expect_node(mapping=True)
+
    def expect_flow_mapping_value(self):
        """Value after an explicit '?' key: ':' preceded by whitespace."""
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.write_indicator(':', True)
        self.states.append(self.expect_flow_mapping_key)
        self.expect_node(mapping=True)
+
    # Block sequence handlers.

    def expect_block_sequence(self):
        """Open a block sequence; when directly inside a mapping value the
        '-' entries stay at the parent indent (indentless)."""
        indentless = (self.mapping_context and not self.indention)
        self.increase_indent(flow=False, indentless=indentless)
        self.state = self.expect_first_block_sequence_item
+
    def expect_first_block_sequence_item(self):
        """First block-sequence item (cannot be SequenceEnd: empty
        sequences are forced to flow style)."""
        return self.expect_block_sequence_item(first=True)
+
    def expect_block_sequence_item(self, first=False):
        """Emit one '- item' line, or pop the indent on SequenceEndEvent."""
        if not first and isinstance(self.event, SequenceEndEvent):
            self.indent = self.indents.pop()
            self.state = self.states.pop()
        else:
            self.write_indent()
            self.write_indicator('-', True, indention=True)
            self.states.append(self.expect_block_sequence_item)
            self.expect_node(sequence=True)
+
    # Block mapping handlers.

    def expect_block_mapping(self):
        """Open a block mapping at the next indentation level."""
        self.increase_indent(flow=False)
        self.state = self.expect_first_block_mapping_key
+
    def expect_first_block_mapping_key(self):
        """First block-mapping key (cannot be MappingEnd: empty mappings
        are forced to flow style)."""
        return self.expect_block_mapping_key(first=True)
+
    def expect_block_mapping_key(self, first=False):
        """Emit a mapping key — inline if it qualifies as a simple key,
        otherwise in the explicit '?' form — or pop on MappingEndEvent."""
        if not first and isinstance(self.event, MappingEndEvent):
            self.indent = self.indents.pop()
            self.state = self.states.pop()
        else:
            self.write_indent()
            if self.check_simple_key():
                self.states.append(self.expect_block_mapping_simple_value)
                self.expect_node(mapping=True, simple_key=True)
            else:
                self.write_indicator('?', True, indention=True)
                self.states.append(self.expect_block_mapping_value)
                self.expect_node(mapping=True)
+
    def expect_block_mapping_simple_value(self):
        """Value after a simple key: ':' directly after the key text."""
        self.write_indicator(':', False)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)
+
    def expect_block_mapping_value(self):
        """Value after an explicit '?' key: ':' on its own indented line."""
        self.write_indent()
        self.write_indicator(':', True, indention=True)
        self.states.append(self.expect_block_mapping_key)
        self.expect_node(mapping=True)
+
    # Checkers.

    def check_empty_sequence(self):
        """True if the current event starts a sequence that the lookahead
        shows to be empty (next queued event ends it)."""
        return (isinstance(self.event, SequenceStartEvent) and self.events
                and isinstance(self.events[0], SequenceEndEvent))
+
    def check_empty_mapping(self):
        """True if the current event starts a mapping that the lookahead
        shows to be empty (next queued event ends it)."""
        return (isinstance(self.event, MappingStartEvent) and self.events
                and isinstance(self.events[0], MappingEndEvent))
+
    def check_empty_document(self):
        """True if the document about to start contains only a bare,
        implicit, empty scalar (used to decide whether '---' is needed)."""
        if not isinstance(self.event, DocumentStartEvent) or not self.events:
            return False
        event = self.events[0]
        return (isinstance(event, ScalarEvent) and event.anchor is None
                and event.tag is None and event.implicit and event.value == '')
+
    def check_simple_key(self):
        """Return True if the current node may be emitted as a simple key
        (inline before ':'): an alias, a short non-empty single-line
        scalar, or an empty collection — including its anchor/tag text.

        Side effect: caches prepared_anchor / prepared_tag / analysis for
        reuse by the subsequent process_* calls.
        """
        length = 0
        if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
            if self.prepared_anchor is None:
                self.prepared_anchor = self.prepare_anchor(self.event.anchor)
            length += len(self.prepared_anchor)
        if isinstance(self.event, (ScalarEvent, CollectionStartEvent))  \
                and self.event.tag is not None:
            if self.prepared_tag is None:
                self.prepared_tag = self.prepare_tag(self.event.tag)
            length += len(self.prepared_tag)
        if isinstance(self.event, ScalarEvent):
            if self.analysis is None:
                self.analysis = self.analyze_scalar(self.event.value)
            length += len(self.analysis.scalar)
        # 128 characters is the length limit for a simple key.
        return (length < 128 and (isinstance(self.event, AliasEvent)
            or (isinstance(self.event, ScalarEvent)
                    and not self.analysis.empty and not self.analysis.multiline)
            or self.check_empty_sequence() or self.check_empty_mapping()))
+
    # Anchor, Tag, and Scalar processors.

    def process_anchor(self, indicator):
        """Write '&anchor' or '*anchor' (per `indicator`) for the current
        event, reusing a value cached by check_simple_key if present."""
        if self.event.anchor is None:
            self.prepared_anchor = None
            return
        if self.prepared_anchor is None:
            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
        if self.prepared_anchor:
            self.write_indicator(indicator+self.prepared_anchor, True)
        self.prepared_anchor = None
+
    def process_tag(self):
        """Write the node's tag when required, or omit it when the chosen
        style already implies the right resolution."""
        tag = self.event.tag
        if isinstance(self.event, ScalarEvent):
            if self.style is None:
                self.style = self.choose_scalar_style()
            # The tag can be dropped when the value resolves to it
            # implicitly: implicit[0] covers the plain style, implicit[1]
            # covers the quoted/block styles.
            if ((not self.canonical or tag is None) and
                ((self.style == '' and self.event.implicit[0])
                        or (self.style != '' and self.event.implicit[1]))):
                self.prepared_tag = None
                return
            if self.event.implicit[0] and tag is None:
                # A plain-implicit value emitted non-plain: force the
                # non-specific '!' tag to preserve its meaning.
                tag = '!'
                self.prepared_tag = None
        else:
            if (not self.canonical or tag is None) and self.event.implicit:
                self.prepared_tag = None
                return
        if tag is None:
            raise EmitterError("tag is not specified")
        if self.prepared_tag is None:
            self.prepared_tag = self.prepare_tag(tag)
        if self.prepared_tag:
            self.write_indicator(self.prepared_tag, True)
        self.prepared_tag = None
+
    def choose_scalar_style(self):
        """Pick the output style for the current scalar: '' (plain), "'",
        '"', '|' or '>' — honoring the event's requested style where the
        scalar analysis allows it, falling back to double-quoted."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.event.style == '"' or self.canonical:
            return '"'
        if not self.event.style and self.event.implicit[0]:
            if (not (self.simple_key_context and
                    (self.analysis.empty or self.analysis.multiline))
                and (self.flow_level and self.analysis.allow_flow_plain
                    or (not self.flow_level and self.analysis.allow_block_plain))):
                return ''
        if self.event.style and self.event.style in '|>':
            # Block styles are illegal in flow context and for simple keys.
            if (not self.flow_level and not self.simple_key_context
                    and self.analysis.allow_block):
                return self.event.style
        if not self.event.style or self.event.style == '\'':
            if (self.analysis.allow_single_quoted and
                    not (self.simple_key_context and self.analysis.multiline)):
                return '\''
        return '"'
+
    def process_scalar(self):
        """Write the current scalar using the chosen style, then clear the
        cached analysis/style for the next node."""
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        if self.style is None:
            self.style = self.choose_scalar_style()
        # Line splitting is disabled inside simple keys.
        split = (not self.simple_key_context)
        #if self.analysis.multiline and split    \
        #        and (not self.style or self.style in '\'\"'):
        #    self.write_indent()
        if self.style == '"':
            self.write_double_quoted(self.analysis.scalar, split)
        elif self.style == '\'':
            self.write_single_quoted(self.analysis.scalar, split)
        elif self.style == '>':
            self.write_folded(self.analysis.scalar)
        elif self.style == '|':
            self.write_literal(self.analysis.scalar)
        else:
            self.write_plain(self.analysis.scalar, split)
        self.analysis = None
        self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return '%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != '!' or handle[-1] != '!':
+ raise EmitterError("tag handle must start and end with '!': %r" % handle)
+ for ch in handle[1:-1]:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch, handle))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == '!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append('%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return ''.join(chunks)
+
    def prepare_tag(self, tag):
        """Render a full tag for output: shorthand via the longest
        registered prefix (e.g. '!!str') when one matches, verbatim
        '!<...>' otherwise.  Non URI-safe suffix characters are UTF-8
        percent-escaped."""
        if not tag:
            raise EmitterError("tag must not be empty")
        if tag == '!':
            return tag
        handle = None
        suffix = tag
        prefixes = sorted(self.tag_prefixes.keys())
        for prefix in prefixes:
            if tag.startswith(prefix)   \
                    and (prefix == '!' or len(prefix) < len(tag)):
                handle = self.tag_prefixes[prefix]
                suffix = tag[len(prefix):]
        chunks = []
        start = end = 0
        while end < len(suffix):
            ch = suffix[end]
            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
                    or ch in '-;/?:@&=+$,_.~*\'()[]'   \
                    or (ch == '!' and handle != '!'):
                end += 1
            else:
                if start < end:
                    chunks.append(suffix[start:end])
                start = end = end+1
                data = ch.encode('utf-8')
                # bytes iteration yields ints, formatted directly.
                for ch in data:
                    chunks.append('%%%02X' % ch)
        if start < end:
            chunks.append(suffix[start:end])
        suffix_text = ''.join(chunks)
        if handle:
            return '%s%s' % (handle, suffix_text)
        else:
            return '!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch, anchor))
+ return anchor
+
    def analyze_scalar(self, scalar):
        """Scan `scalar` once and return a ScalarAnalysis describing its
        content (breaks, indicators, special characters, whitespace
        patterns) and which output styles may legally represent it."""

        # Empty scalar is a special case.
        if not scalar:
            return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
                    allow_flow_plain=False, allow_block_plain=True,
                    allow_single_quoted=True, allow_double_quoted=True,
                    allow_block=False)

        # Indicators and special characters.
        block_indicators = False
        flow_indicators = False
        line_breaks = False
        special_characters = False

        # Important whitespace combinations.
        leading_space = False
        leading_break = False
        trailing_space = False
        trailing_break = False
        break_space = False
        space_break = False

        # Check document indicators.
        if scalar.startswith('---') or scalar.startswith('...'):
            block_indicators = True
            flow_indicators = True

        # First character or preceded by a whitespace.
        preceded_by_whitespace = True

        # Last character or followed by a whitespace.
        followed_by_whitespace = (len(scalar) == 1 or
                scalar[1] in '\0 \t\r\n\x85\u2028\u2029')

        # The previous character is a space.
        previous_space = False

        # The previous character is a break.
        previous_break = False

        index = 0
        while index < len(scalar):
            ch = scalar[index]

            # Check for indicators.
            if index == 0:
                # Leading indicators are special characters.
                if ch in '#,[]{}&*!|>\'\"%@`':
                    flow_indicators = True
                    block_indicators = True
                if ch in '?:':
                    flow_indicators = True
                    if followed_by_whitespace:
                        block_indicators = True
                if ch == '-' and followed_by_whitespace:
                    flow_indicators = True
                    block_indicators = True
            else:
                # Some indicators cannot appear within a scalar as well.
                if ch in ',?[]{}':
                    flow_indicators = True
                if ch == ':':
                    flow_indicators = True
                    if followed_by_whitespace:
                        block_indicators = True
                if ch == '#' and preceded_by_whitespace:
                    flow_indicators = True
                    block_indicators = True

            # Check for line breaks, special, and unicode characters.
            if ch in '\n\x85\u2028\u2029':
                line_breaks = True
            if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
                if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
                        or '\uE000' <= ch <= '\uFFFD'
                        or '\U00010000' <= ch < '\U0010ffff') and ch != '\uFEFF':
                    # NOTE(review): this local is assigned but never read
                    # in this method.
                    unicode_characters = True
                    if not self.allow_unicode:
                        special_characters = True
                else:
                    special_characters = True

            # Detect important whitespace combinations.
            if ch == ' ':
                if index == 0:
                    leading_space = True
                if index == len(scalar)-1:
                    trailing_space = True
                if previous_break:
                    break_space = True
                previous_space = True
                previous_break = False
            elif ch in '\n\x85\u2028\u2029':
                if index == 0:
                    leading_break = True
                if index == len(scalar)-1:
                    trailing_break = True
                if previous_space:
                    space_break = True
                previous_space = False
                previous_break = True
            else:
                previous_space = False
                previous_break = False

            # Prepare for the next character.
            index += 1
            preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
            followed_by_whitespace = (index+1 >= len(scalar) or
                    scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')

        # Let's decide what styles are allowed.
        allow_flow_plain = True
        allow_block_plain = True
        allow_single_quoted = True
        allow_double_quoted = True
        allow_block = True

        # Leading and trailing whitespaces are bad for plain scalars.
        if (leading_space or leading_break
                or trailing_space or trailing_break):
            allow_flow_plain = allow_block_plain = False

        # We do not permit trailing spaces for block scalars.
        if trailing_space:
            allow_block = False

        # Spaces at the beginning of a new line are only acceptable for block
        # scalars.
        if break_space:
            allow_flow_plain = allow_block_plain = allow_single_quoted = False

        # Spaces followed by breaks, as well as special character are only
        # allowed for double quoted scalars.
        if space_break or special_characters:
            allow_flow_plain = allow_block_plain =  \
            allow_single_quoted = allow_block = False

        # Although the plain scalar writer supports breaks, we never emit
        # multiline plain scalars.
        if line_breaks:
            allow_flow_plain = allow_block_plain = False

        # Flow indicators are forbidden for flow plain scalars.
        if flow_indicators:
            allow_flow_plain = False

        # Block indicators are forbidden for block plain scalars.
        if block_indicators:
            allow_block_plain = False

        return ScalarAnalysis(scalar=scalar,
                empty=False, multiline=line_breaks,
                allow_flow_plain=allow_flow_plain,
                allow_block_plain=allow_block_plain,
                allow_single_quoted=allow_single_quoted,
                allow_double_quoted=allow_double_quoted,
                allow_block=allow_block)
+
    # Writers.

    def flush_stream(self):
        """Flush the underlying stream if it supports flushing."""
        if hasattr(self.stream, 'flush'):
            self.stream.flush()
+
    def write_stream_start(self):
        """Start the character stream; UTF-16 output requires an explicit
        byte-order mark."""
        # Write BOM if needed.
        if self.encoding and self.encoding.startswith('utf-16'):
            self.stream.write('\uFEFF'.encode(self.encoding))
+
    def write_stream_end(self):
        """End the character stream: just flush pending output."""
        self.flush_stream()
+
    def write_indicator(self, indicator, need_whitespace,
            whitespace=False, indention=False):
        """Write an indicator token, inserting a separating space when
        `need_whitespace` is set and the last character was not one.

        `whitespace`/`indention` describe the state after this indicator
        for subsequent writes.
        """
        if self.whitespace or not need_whitespace:
            data = indicator
        else:
            data = ' '+indicator
        self.whitespace = whitespace
        self.indention = self.indention and indention
        self.column += len(data)
        self.open_ended = False
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
+
    def write_indent(self):
        """Move to a fresh line (when needed) and pad with spaces up to
        the current indentation level."""
        indent = self.indent or 0
        if not self.indention or self.column > indent  \
                or (self.column == indent and not self.whitespace):
            self.write_line_break()
        if self.column < indent:
            self.whitespace = True
            data = ' '*(indent-self.column)
            self.column = indent
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
+
    def write_line_break(self, data=None):
        """Write a line break (`data`, or the configured best_line_break)
        and reset the column/whitespace bookkeeping."""
        if data is None:
            data = self.best_line_break
        self.whitespace = True
        self.indention = True
        self.line += 1
        self.column = 0
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
+
    def write_version_directive(self, version_text):
        """Write a '%YAML <version>' directive on its own line."""
        data = '%%YAML %s' % version_text
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
        self.write_line_break()
+
    def write_tag_directive(self, handle_text, prefix_text):
        """Write a '%TAG <handle> <prefix>' directive on its own line."""
        data = '%%TAG %s %s' % (handle_text, prefix_text)
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
        self.write_line_break()
+
    # Scalar streams.

    def write_single_quoted(self, text, split=True):
        """Write `text` as a single-quoted scalar.

        Runs of text are emitted chunk-wise; "'" is doubled, line breaks
        are written out, and (when `split` is on) long lines are folded at
        interior spaces.
        """
        self.write_indicator('\'', True)
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # End of a run of spaces: either fold the line here or
                # flush the spaces verbatim.
                if ch is None or ch != ' ':
                    if start+1 == end and self.column > self.best_width and split   \
                            and start != 0 and end != len(text):
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                # End of a run of breaks: write them out and re-indent.
                if ch is None or ch not in '\n\x85\u2028\u2029':
                    if text[start] == '\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    start = end
            else:
                # End of a run of ordinary characters (or an escape point).
                if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
                    if start < end:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                        start = end
            if ch == '\'':
                # A quote inside a single-quoted scalar is doubled.
                data = '\'\''
                self.column += 2
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end + 1
            if ch is not None:
                spaces = (ch == ' ')
                breaks = (ch in '\n\x85\u2028\u2029')
            end += 1
        self.write_indicator('\'', False)
+
    # Characters that have a short escape form in double-quoted scalars;
    # anything else unprintable falls back to \xXX / \uXXXX / \UXXXXXXXX.
    ESCAPE_REPLACEMENTS = {
        '\0':       '0',
        '\x07':     'a',
        '\x08':     'b',
        '\x09':     't',
        '\x0A':     'n',
        '\x0B':     'v',
        '\x0C':     'f',
        '\x0D':     'r',
        '\x1B':     'e',
        '\"':       '\"',
        '\\':       '\\',
        '\x85':     'N',
        '\xA0':     '_',
        '\u2028':   'L',
        '\u2029':   'P',
    }
+
    def write_double_quoted(self, text, split=True):
        """Write `text` as a double-quoted scalar.

        Printable runs are copied verbatim; everything else is escaped via
        ESCAPE_REPLACEMENTS or \\x/\\u/\\U forms.  When `split` is on,
        long lines are folded with a trailing '\\'.
        """
        self.write_indicator('"', True)
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
                    or not ('\x20' <= ch <= '\x7E'
                        or (self.allow_unicode
                            and ('\xA0' <= ch <= '\uD7FF'
                                or '\uE000' <= ch <= '\uFFFD'))):
                # Flush the pending printable run, then escape `ch`.
                if start < end:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
                if ch is not None:
                    if ch in self.ESCAPE_REPLACEMENTS:
                        data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
                    elif ch <= '\xFF':
                        data = '\\x%02X' % ord(ch)
                    elif ch <= '\uFFFF':
                        data = '\\u%04X' % ord(ch)
                    else:
                        data = '\\U%08X' % ord(ch)
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end+1
            if 0 < end < len(text)-1 and (ch == ' ' or start >= end)   \
                    and self.column+(end-start) > self.best_width and split:
                # Fold the line: emit the pending run plus a trailing '\'.
                data = text[start:end]+'\\'
                if start < end:
                    start = end
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                self.write_indent()
                self.whitespace = False
                self.indention = False
                if text[start] == ' ':
                    # A continuation line starting with a space needs a
                    # leading '\' so the space is not folded away.
                    data = '\\'
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
            end += 1
        self.write_indicator('"', False)
+
+ def determine_block_hints(self, text):
+ hints = ''
+ if text:
+ if text[0] in ' \n\x85\u2028\u2029':
+ hints += str(self.best_indent)
+ if text[-1] not in '\n\x85\u2028\u2029':
+ hints += '-'
+ elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
+ hints += '+'
+ return hints
+
    def write_folded(self, text):
        """Write `text` as a folded ('>') block scalar: line breaks between
        words become folds, runs of breaks and indented lines are kept."""
        hints = self.determine_block_hints(text)
        self.write_indicator('>'+hints, True)
        if hints[-1:] == '+':
            # Kept trailing breaks leave the document open-ended.
            self.open_ended = True
        self.write_line_break()
        leading_space = True
        spaces = False
        breaks = True
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                # End of a run of breaks: an extra break is needed so a
                # single source break folds back into one on load.
                if ch is None or ch not in '\n\x85\u2028\u2029':
                    if not leading_space and ch is not None and ch != ' '   \
                            and text[start] == '\n':
                        self.write_line_break()
                    leading_space = (ch == ' ')
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        self.write_indent()
                    start = end
            elif spaces:
                # End of a run of spaces: fold here if the line got long.
                if ch != ' ':
                    if start+1 == end and self.column > self.best_width:
                        self.write_indent()
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            else:
                # End of a run of ordinary characters.
                if ch is None or ch in ' \n\x85\u2028\u2029':
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = (ch in '\n\x85\u2028\u2029')
                spaces = (ch == ' ')
            end += 1
+
    def write_literal(self, text):
        """Write `text` as a literal ('|') block scalar: content is copied
        verbatim line by line, no folding."""
        hints = self.determine_block_hints(text)
        self.write_indicator('|'+hints, True)
        if hints[-1:] == '+':
            # Kept trailing breaks leave the document open-ended.
            self.open_ended = True
        self.write_line_break()
        breaks = True
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if breaks:
                # End of a run of breaks: write them out, then re-indent
                # for the next content line.
                if ch is None or ch not in '\n\x85\u2028\u2029':
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    if ch is not None:
                        self.write_indent()
                    start = end
            else:
                # End of a run of ordinary characters.
                if ch is None or ch in '\n\x85\u2028\u2029':
                    data = text[start:end]
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    if ch is None:
                        self.write_line_break()
                    start = end
            if ch is not None:
                breaks = (ch in '\n\x85\u2028\u2029')
            end += 1
+
    def write_plain(self, text, split=True):
        """Write `text` as a plain (unquoted) scalar, folding long lines
        at spaces when `split` is on."""
        if self.root_context:
            # A plain root scalar may need '...' before the next document.
            self.open_ended = True
        if not text:
            return
        if not self.whitespace:
            # Separate from the previous token with a single space.
            data = ' '
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
        self.whitespace = False
        self.indention = False
        spaces = False
        breaks = False
        start = end = 0
        while end <= len(text):
            ch = None
            if end < len(text):
                ch = text[end]
            if spaces:
                # End of a run of spaces: fold here if the line got long.
                if ch != ' ':
                    if start+1 == end and self.column > self.best_width and split:
                        self.write_indent()
                        self.whitespace = False
                        self.indention = False
                    else:
                        data = text[start:end]
                        self.column += len(data)
                        if self.encoding:
                            data = data.encode(self.encoding)
                        self.stream.write(data)
                    start = end
            elif breaks:
                # End of a run of breaks: write them out and re-indent.
                if ch not in '\n\x85\u2028\u2029':
                    if text[start] == '\n':
                        self.write_line_break()
                    for br in text[start:end]:
                        if br == '\n':
                            self.write_line_break()
                        else:
                            self.write_line_break(br)
                    self.write_indent()
                    self.whitespace = False
                    self.indention = False
                    start = end
            else:
                # End of a run of ordinary characters.
                if ch is None or ch in ' \n\x85\u2028\u2029':
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
            if ch is not None:
                spaces = (ch == ' ')
                breaks = (ch in '\n\x85\u2028\u2029')
            end += 1
diff --git a/coredns/venv/yaml/error.py b/coredns/venv/yaml/error.py
new file mode 100644
index 0000000..b796b4d
--- /dev/null
+++ b/coredns/venv/yaml/error.py
@@ -0,0 +1,75 @@
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark:
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end]
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/coredns/venv/yaml/events.py b/coredns/venv/yaml/events.py
new file mode 100644
index 0000000..f79ad38
--- /dev/null
+++ b/coredns/venv/yaml/events.py
@@ -0,0 +1,86 @@
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
diff --git a/coredns/venv/yaml/loader.py b/coredns/venv/yaml/loader.py
new file mode 100644
index 0000000..e90c112
--- /dev/null
+++ b/coredns/venv/yaml/loader.py
@@ -0,0 +1,63 @@
+
+__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader']
+
+from .reader import *
+from .scanner import *
+from .parser import *
+from .composer import *
+from .constructor import *
+from .resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ FullConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+# UnsafeLoader is the same as Loader (which is and was always unsafe on
+# untrusted input). Use of either Loader or UnsafeLoader should be rare, since
+# FullLoad should be able to load almost all YAML safely. Loader is left intact
+# to ensure backwards compatibility.
+class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
diff --git a/coredns/venv/yaml/nodes.py b/coredns/venv/yaml/nodes.py
new file mode 100644
index 0000000..c4f070c
--- /dev/null
+++ b/coredns/venv/yaml/nodes.py
@@ -0,0 +1,49 @@
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = ''
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
diff --git a/coredns/venv/yaml/parser.py b/coredns/venv/yaml/parser.py
new file mode 100644
index 0000000..13a5995
--- /dev/null
+++ b/coredns/venv/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+from .events import *
+from .scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser:
+ # Since writing a recursive-descendant parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ '!': '!',
+ '!!': 'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == 'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == 'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle,
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle,
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == '!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == '!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == '!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), '',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generate an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), '', mark, mark)
+
diff --git a/coredns/venv/yaml/reader.py b/coredns/venv/yaml/reader.py
new file mode 100644
index 0000000..774b021
--- /dev/null
+++ b/coredns/venv/yaml/reader.py
@@ -0,0 +1,185 @@
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position to `length` characters.
+# reader.index - the number of the current character.
+# reader.line, stream.column - the line and the column of the current character.
+
+__all__ = ['Reader', 'ReaderError']
+
+from .error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, bytes):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to a unicode string,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `bytes` object,
+ # - a `str` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = ''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, str):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+'\0'
+ elif isinstance(stream, bytes):
+ self.name = "<byte string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = None
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in '\n\x85\u2028\u2029' \
+ or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != '\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+ self.update_raw()
+ if isinstance(self.raw_buffer, bytes):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError as exc:
+ character = self.raw_buffer[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += '\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=4096):
+ data = self.stream.read(size)
+ if self.raw_buffer is None:
+ self.raw_buffer = data
+ else:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ if not data:
+ self.eof = True
diff --git a/coredns/venv/yaml/representer.py b/coredns/venv/yaml/representer.py
new file mode 100644
index 0000000..3b0b192
--- /dev/null
+++ b/coredns/venv/yaml/representer.py
@@ -0,0 +1,389 @@
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from .error import *
+from .nodes import *
+
+import datetime, copyreg, types, base64, collections
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter:
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=False, sort_keys=True):
+ self.default_style = default_style
+ self.sort_keys = sort_keys
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, str(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ @classmethod
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+
+ @classmethod
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = list(mapping.items())
+ if self.sort_keys:
+ try:
+ mapping = sorted(mapping)
+ except TypeError:
+ pass
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data is None:
+ return True
+ if isinstance(data, tuple) and data == ():
+ return True
+ if isinstance(data, (str, bytes, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
+ def represent_str(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+ def represent_binary(self, data):
+ if hasattr(base64, 'encodebytes'):
+ data = base64.encodebytes(data).decode('ascii')
+ else:
+ data = base64.encodestring(data).decode('ascii')
+ return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
+
+ def represent_bool(self, data):
+ if data:
+ value = 'true'
+ else:
+ value = 'false'
+ return self.represent_scalar('tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:int', str(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ if data != data or (data == 0.0 and data == 1.0):
+ value = '.nan'
+ elif data == self.inf_value:
+ value = '.inf'
+ elif data == -self.inf_value:
+ value = '-.inf'
+ else:
+ value = repr(data).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if '.' not in value and 'e' in value:
+ value = value.replace('e', '.0e', 1)
+ return self.represent_scalar('tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence('tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping('tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping('tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = data.isoformat()
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = data.isoformat(' ')
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object", data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(bytes,
+ SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = '%r' % data.real
+ elif data.real == 0.0:
+ data = '%rj' % data.imag
+ elif data.imag > 0:
+ data = '%r+%rj' % (data.real, data.imag)
+ else:
+ data = '%r%rj' % (data.real, data.imag)
+ return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = '%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ 'tag:yaml.org,2002:python/module:'+data.__name__, '')
+
+ def represent_object(self, data):
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we calls function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copyreg.dispatch_table:
+ reduce = copyreg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent an object", data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = 'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = 'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = '%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ 'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+ def represent_ordered_dict(self, data):
+ # Provide uniform representation across different Python versions.
+ data_type = type(data)
+ tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \
+ % (data_type.__module__, data_type.__name__)
+ items = [[key, value] for key, value in data.items()]
+ return self.represent_sequence(tag, [items])
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(collections.OrderedDict,
+ Representer.represent_ordered_dict)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/coredns/venv/yaml/resolver.py b/coredns/venv/yaml/resolver.py
new file mode 100644
index 0000000..02b82e7
--- /dev/null
+++ b/coredns/venv/yaml/resolver.py
@@ -0,0 +1,227 @@
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from .error import *
+from .nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver:
+
+ DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ @classmethod
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ implicit_resolvers = {}
+ for key in cls.yaml_implicit_resolvers:
+ implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
+ cls.yaml_implicit_resolvers = implicit_resolvers
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+
+ @classmethod
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `new_path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. `node_path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of a node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key which content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, str) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (str, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, str):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, str):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == '':
+ resolvers = self.yaml_implicit_resolvers.get('', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ resolvers += self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:bool',
+ re.compile(r'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list('yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:float',
+ re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list('-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:int',
+ re.compile(r'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list('-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:merge',
+ re.compile(r'^(?:<<)$'),
+ ['<'])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:null',
+ re.compile(r'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ ['~', 'n', 'N', ''])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:timestamp',
+ re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list('0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:value',
+ re.compile(r'^(?:=)$'),
+ ['='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:yaml',
+ re.compile(r'^(?:!|&|\*)$'),
+ list('!&*'))
+
diff --git a/coredns/venv/yaml/scanner.py b/coredns/venv/yaml/scanner.py
new file mode 100644
index 0000000..7437ede
--- /dev/null
+++ b/coredns/venv/yaml/scanner.py
@@ -0,0 +1,1435 @@
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey:
+ # See below simple keys treatment.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner:
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader do the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Had we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more that one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete if from the queue.
+ # Return None if no more tokens.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+ else:
+ return None
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == '\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == '%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == '-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == '.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == '\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == '[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == '{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == ']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == '}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == ',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == '-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == '?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == ':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == '*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == '&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == '!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == '|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == '>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == '\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == '\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token" % ch,
+ self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in list(self.possible_simple_keys):
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not find expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # The next token might be a simple key. Let's save it's number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not find expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid indentation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive then specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The steam is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there could not be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '---' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '...' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
+ and (ch == '-' or (not self.flow_level and ch in '?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if :
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == '\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == 'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == 'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r" % self.peek(),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not ('0' <= ch <= '9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch, self.get_mark())
+ length = 0
+ while '0' <= self.peek(length) <= '9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == ' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != ' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch, self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == '*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == '<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != '>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ elif ch in '\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = '!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in '\0 \r\n\x85\u2028\u2029':
+ if ch == '!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = '!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = '!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = ''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != '\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in ' \t'
+ length = 0
+ while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != '\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == '\n' \
+ and leading_non_space and self.peek() not in ' \t':
+ if not breaks:
+ chunks.append(' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == '\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch, self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r" % ch,
+ self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() != ' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ while self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+ # Note that we lose indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere indentation because " and ' clearly
+ # mark the beginning and the end of them. Therefore we are less
+ # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ '0': '\0',
+ 'a': '\x07',
+ 'b': '\x08',
+ 't': '\x09',
+ '\t': '\x09',
+ 'n': '\x0A',
+ 'v': '\x0B',
+ 'f': '\x0C',
+ 'r': '\x0D',
+ 'e': '\x1B',
+ ' ': '\x20',
+ '\"': '\"',
+ '\\': '\\',
+ '/': '/',
+ 'N': '\x85',
+ '_': '\xA0',
+ 'L': '\u2028',
+ 'P': '\u2029',
+ }
+
+ ESCAPE_CODES = {
+ 'x': 2,
+ 'u': 4,
+ 'U': 8,
+ }
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == '\'' and self.peek(1) == '\'':
+ chunks.append('\'')
+ self.forward(2)
+ elif (double and ch == '\'') or (not double and ch in '\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == '\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexdecimal numbers, but found %r" %
+ (length, self.peek(k)), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(chr(code))
+ self.forward(length)
+ elif ch in '\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch, self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in ' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == '\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in ' \t':
+ self.forward()
+ if self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',' or '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == '#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in '\0 \t\r\n\x85\u2028\u2029' \
+ or (ch == ':' and
+ self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029'
+ + (u',[]{}' if self.flow_level else u''))\
+ or (self.flow_level and ch in ',?[]{}'):
+ break
+ length += 1
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == '#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in ' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reasons, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != '!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != ' ':
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != '!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == '%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch, self.get_mark())
+ return ''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ codes = []
+ mark = self.get_mark()
+ while self.peek() == '%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
+ % self.peek(k), self.get_mark())
+ codes.append(int(self.prefix(2), 16))
+ self.forward(2)
+ try:
+ value = bytes(codes).decode('utf-8')
+ except UnicodeDecodeError as exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029' : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in '\r\n\x85':
+ if self.prefix(2) == '\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.forward()
+ return ch
+ return ''
diff --git a/coredns/venv/yaml/serializer.py b/coredns/venv/yaml/serializer.py
new file mode 100644
index 0000000..fe911e6
--- /dev/null
+++ b/coredns/venv/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from .error import YAMLError
+from .events import *
+from .nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer:
+
+ ANCHOR_TEMPLATE = 'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
diff --git a/coredns/venv/yaml/tokens.py b/coredns/venv/yaml/tokens.py
new file mode 100644
index 0000000..4d0b48a
--- /dev/null
+++ b/coredns/venv/yaml/tokens.py
@@ -0,0 +1,104 @@
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
diff --git a/deploy.sh b/deploy.sh
new file mode 100755
index 0000000..fea7c49
--- /dev/null
+++ b/deploy.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+set -x
+
+juju scp resources/core.snap 0:
+juju run --machine 0 "sudo snap install --dangerous /home/ubuntu/core.snap"
+juju scp resources/core.snap 1:
+juju run --machine 1 "sudo snap install --dangerous /home/ubuntu/core.snap"
+juju scp resources/core.snap 2:
+juju run --machine 2 "sudo snap install --dangerous /home/ubuntu/core.snap"
+
+juju deploy --to 0 ./easyrsa
+juju deploy --to 0 ./etcd \
+ --config bind_to_all_interfaces=false \
+ --config channel=3.4/stable
+juju deploy --to 1 ./kubernetes-master \
+ --config channel=1.21/stable \
+ --config service-cidr=172.31.192.0/21 \
+ --config enable-dashboard-addons=false \
+ --config proxy-extra-args='bind-address=0.0.0.0 proxy-mode=ipvs'
+juju deploy --to 2 ./kubernetes-worker \
+ --config channel=1.21/stable \
+ --config ingress=false \
+ --config proxy-extra-args='bind-address=0.0.0.0 proxy-mode=ipvs'
+juju deploy ./containerd
+juju deploy ./calico \
+ --config cidr=172.31.128.0/18 \
+ --config vxlan=Always \
+ --config ignore-loose-rpf=true
+
+juju attach easyrsa easyrsa=./resources/easyrsa/easyrsa.tgz
+juju attach etcd snapshot=./resources/etcd/snapshot.gz
+juju attach kubernetes-worker cni-amd64=./resources/kubernetes-worker/cni-amd64.tgz
+juju attach calico calico=./resources/calico/calico.gz
+juju attach calico calico-node-image=./resources/calico/calico-node-image.gz
+juju attach calico calico-upgrade=./resources/calico/calico-upgrade.gz
+
+juju attach etcd etcd=./resources/etcd/etcd.snap
+juju attach kubernetes-master cdk-addons=./resources/kubernetes-master/cdk-addons.snap
+juju attach kubernetes-master kube-apiserver=./resources/kubernetes-master/kube-apiserver.snap
+juju attach kubernetes-master kube-controller-manager=./resources/kubernetes-master/kube-controller-manager.snap
+juju attach kubernetes-master kube-scheduler=./resources/kubernetes-master/kube-scheduler.snap
+juju attach kubernetes-master kube-proxy=./resources/kubernetes-master/kube-proxy.snap
+juju attach kubernetes-master kubectl=./resources/kubernetes-master/kubectl.snap
+juju attach kubernetes-worker kube-proxy=./resources/kubernetes-worker/kube-proxy.snap
+juju attach kubernetes-worker kubectl=./resources/kubernetes-worker/kubectl.snap
+juju attach kubernetes-worker kubelet=./resources/kubernetes-worker/kubelet.snap
+
+juju relate etcd:certificates easyrsa:client
+juju relate kubernetes-master:kube-control kubernetes-worker:kube-control
+juju relate kubernetes-master:certificates easyrsa:client
+juju relate kubernetes-worker:certificates easyrsa:client
+juju relate kubernetes-master:etcd etcd:db
+juju relate containerd:containerd kubernetes-worker:container-runtime
+juju relate containerd:containerd kubernetes-master:container-runtime
+juju relate kubernetes-master:kube-api-endpoint kubernetes-worker:kube-api-endpoint
+
+juju relate calico:etcd etcd:db
+juju relate calico:cni kubernetes-master:cni
+juju relate calico:cni kubernetes-worker:cni
+
+juju deploy --to 1 ./kubeapi-load-balancer
+juju remove-relation kubernetes-master:kube-api-endpoint kubernetes-worker:kube-api-endpoint
+juju relate kubernetes-master:kube-api-endpoint kubeapi-load-balancer:apiserver
+juju relate kubernetes-worker:kube-api-endpoint kubeapi-load-balancer:website
+juju relate kubernetes-master:loadbalancer kubeapi-load-balancer:loadbalancer
+juju relate kubeapi-load-balancer:certificates easyrsa:client
+
+# CoreDNS
+juju config -m controller kubernetes-master dns-provider=none
+juju add-k8s k8s-cloud --controller infra-demo
+
+juju add-model k8s-model k8s-cloud
+juju deploy ./coredns
+juju offer coredns:dns-provider
+
+juju consume -m controller k8s-model.coredns
+juju relate -m controller coredns kubernetes-master
diff --git a/juju.assert b/juju.assert
new file mode 100644
index 0000000..786656e
--- /dev/null
+++ b/juju.assert
@@ -0,0 +1,181 @@
+type: account-key
+authority-id: canonical
+revision: 2
+public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+account-id: canonical
+name: store
+since: 2016-04-01T00:00:00.0Z
+body-length: 717
+sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswHNiEB9Lxk
+
+AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9ji
+qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482R
+vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJi
+UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuKL
+Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQGA
+o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl9
+VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9F
+2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7ant
+Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIcG
+vUvV7RjVzv17ut0AEQEAAQ==
+
+AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsMV
+WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/bP
+nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiLg
+3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kLe
+eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrYm
+inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ19
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+k
+rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWEY
+aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQI
+6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nOu
+haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpFo
+yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O96
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi7
+skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PKW
+CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjdeu
+ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OFq
+qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqRy
+IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3tr
+oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k
+
+type: snap-declaration
+format: 4
+authority-id: canonical
+revision: 14
+series: 16
+snap-id: e2CPHpB1fUxcKtCyJTsm5t3hN9axJ0yj
+plugs:
+ lxd:
+ allow-auto-connection: true
+ allow-installation: true
+ personal-files:
+ allow-auto-connection:
+ -
+ plug-attributes:
+ write: \$HOME/\.local/share/juju
+ -
+ plug-attributes:
+ read: \$HOME/snap/lxd/common/config
+ -
+ plug-attributes:
+ read: \$HOME/\.aws
+ plug-names:
+ - dot-aws
+ -
+ plug-attributes:
+ read: \$HOME/\.azure
+ plug-names:
+ - dot-azure
+ -
+ plug-attributes:
+ read: \$HOME/\.config/gcloud
+ plug-names:
+ - dot-google
+ -
+ plug-attributes:
+ read: \$HOME/\.kube
+ plug-names:
+ - dot-kubernetes
+ -
+ plug-attributes:
+ read: \$HOME/\.maasrc
+ plug-names:
+ - dot-maas
+ -
+ plug-attributes:
+ read: \$HOME/\.oci
+ plug-names:
+ - dot-oracle
+ -
+ plug-attributes:
+ read: \$HOME/\.novarc
+ plug-names:
+ - dot-openstack
+ allow-installation:
+ -
+ plug-attributes:
+ write: \$HOME/\.local/share/juju
+ plug-names:
+ - dot-local-share-juju
+ -
+ plug-attributes:
+ read: \$HOME/snap/lxd/common/config
+ plug-names:
+ - config-lxd
+ -
+ plug-attributes:
+ read: \$HOME/\.aws
+ plug-names:
+ - dot-aws
+ -
+ plug-attributes:
+ read: \$HOME/\.azure
+ plug-names:
+ - dot-azure
+ -
+ plug-attributes:
+ read: \$HOME/\.config/gcloud
+ plug-names:
+ - dot-google
+ -
+ plug-attributes:
+ read: \$HOME/\.kube
+ plug-names:
+ - dot-kubernetes
+ -
+ plug-attributes:
+ read: \$HOME/\.maasrc
+ plug-names:
+ - dot-maas
+ -
+ plug-attributes:
+ read: \$HOME/\.oci
+ plug-names:
+ - dot-oracle
+ -
+ plug-attributes:
+ read: \$HOME/\.novarc
+ plug-names:
+ - dot-openstack
+ ssh-keys:
+ allow-auto-connection: true
+ ssh-public-keys:
+ allow-auto-connection: true
+publisher-id: canonical
+snap-name: juju
+timestamp: 2022-10-26T07:29:51.180410Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCY1jh8gAAXHcQAJpI5PQlnkcgO6O8PqmrT6Iqhp4fNnZpBC4QTAgms+l0zmkO
+EF3wZEDpRXODWN7iZugakGAiLackmvr+W+nB4njBFkWS7vlxBqhxy44NuHRxcuQ/R56hPBkDPKzO
+PliyenZNuo97bExHmVF9f0fpcmzn0grNslYDplhQbFYrt2Z99KB3+wyg2vsx/RPEX88yOnAuG7f7
+oTWn6v9gXHsj+PuIE73ccVE0GlXGhCqJ98xenXxTBdk0CqjLUJyrvy0jZvLAJYRNA+R8zDXYjMQW
+oCNmg1cEJHAfaDhEp3IRQsK8n3cOGzwGEyxGtYSIQGtlPSc9kswDrQcI0MmVqqLaZp9MvojymtSZ
+y4lVsYSVzKzuPlUy+O9Ta/TIidVVzwmlqfeB6FxvatKC25nemgxRFcMUhwJc3LMq8oErSfb3bE0q
+cTmJ//OOGB8P93IJdNF6G1vDczQN4pysmFmMyLqz+qzBwHV1ZIjg8yiee7Ds7UBmr0GJyhN+OPzg
+mMhd6/FhSVlui4/b+K1kYl4UOOEy/LaFmmOZ+lT9pWTRKPl6PJULRiZrNfFLsDM7MQS1vGxLEjzD
+3pmA7q8JYk37+1xt6HBm1Cr/fZNt0VSKsGYLLJF8DmiTPEyAS2mLYziuNDyrgwz0v5P2x6xqw/w6
+zWxFMCTa3JM6Huie5fxz4HtwcaEm
+
+type: snap-revision
+authority-id: canonical
+snap-sha3-384: alG0MJImoc49osVZ8p-iZXF3vpcY9G8QpM-c55miUhSX8YEm-7l7fwUZN66KpWqQ
+developer-id: canonical
+provenance: global-upload
+snap-id: e2CPHpB1fUxcKtCyJTsm5t3hN9axJ0yj
+snap-revision: 22345
+snap-size: 97341440
+timestamp: 2023-03-01T05:36:11.373973Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCY/7kSwAAYq4QABV7B3pacnEPzxSMbOQkKsoeAX4T8iWm05xT6hKM/kGcWR5n
+UzmVRCaGqNWjveHvckkbYLFT4E+rw8I2gBntZ8P68iA7Rj08Zn7v9lWGE5bJktaDe2GGjnC3ewI4
+vWp3GkgMVvYXIpRk46nhBjarSdFbF0xbxcFFqqNvzqK53T/7YNkF3A1p3ZUwlGtlDR6oGBxZiYba
+q5uVQZ/Xii8olXxrXNth36O3MtZcNZPp9DyQRKjgEtGI0gT7YVv1JUYf/ziZTfXHPRNCo2OM+0at
+wqs/4Lsbz+UoZ2o8YgVypywDYO0wSKvII+/VuOe6/b5thWHoG28Lk1gnC+sOcQWpRlD9vNmlpaeK
+DKhHozqOX2V4AtR/PWcLOJdCEQnk7jQIkLihMjtOMR2mOZwYs3/iyWZoi0VNrxFVmOo0yB4Uwl96
+PNvu8KBpdgfgWY5k05+YF6bwUGRx+mTaKSHsAmpb+csJ+26sGo5J7fSS/gZcUfbUt8HthEz82AG8
+27N27hvnVC+bxAYQpSQ2RrU96raW9y/Mkmj5EahGJIQJK3rTW1m5OGC9HBLAJs1gi0hVXf6yQAnA
+W7CN8lhq1xc9rhBC9PURzJxqhvJ4suQFKuoCgvx6JcYlIgYgVnqtxADBkrh09CNrE9YblgiFIJa7
+AhswMDksHC36NGzUW9rHaBvtZLq6
diff --git a/juju.snap b/juju.snap
new file mode 100644
index 0000000..310e2e2
Binary files /dev/null and b/juju.snap differ
diff --git a/keepalived/._.build.manifest b/keepalived/._.build.manifest
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._.build.manifest differ
diff --git a/keepalived/._.gitignore b/keepalived/._.gitignore
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._.gitignore differ
diff --git a/keepalived/._.travis b/keepalived/._.travis
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._.travis differ
diff --git a/keepalived/._LICENSE b/keepalived/._LICENSE
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._LICENSE differ
diff --git a/keepalived/._Makefile b/keepalived/._Makefile
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._Makefile differ
diff --git a/keepalived/._README.md b/keepalived/._README.md
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._README.md differ
diff --git a/keepalived/._bin b/keepalived/._bin
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._bin differ
diff --git a/keepalived/._config.yaml b/keepalived/._config.yaml
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._config.yaml differ
diff --git a/keepalived/._copyright b/keepalived/._copyright
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._copyright differ
diff --git a/keepalived/._copyright.layer-basic b/keepalived/._copyright.layer-basic
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._copyright.layer-basic differ
diff --git a/keepalived/._copyright.layer-options b/keepalived/._copyright.layer-options
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._copyright.layer-options differ
diff --git a/keepalived/._docs b/keepalived/._docs
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._docs differ
diff --git a/keepalived/._hooks b/keepalived/._hooks
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._hooks differ
diff --git a/keepalived/._icon.svg b/keepalived/._icon.svg
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._icon.svg differ
diff --git a/keepalived/._layer.yaml b/keepalived/._layer.yaml
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._layer.yaml differ
diff --git a/keepalived/._lib b/keepalived/._lib
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._lib differ
diff --git a/keepalived/._make_docs b/keepalived/._make_docs
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._make_docs differ
diff --git a/keepalived/._metadata.yaml b/keepalived/._metadata.yaml
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._metadata.yaml differ
diff --git a/keepalived/._pydocmd.yml b/keepalived/._pydocmd.yml
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._pydocmd.yml differ
diff --git a/keepalived/._reactive b/keepalived/._reactive
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._reactive differ
diff --git a/keepalived/._requirements.txt b/keepalived/._requirements.txt
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._requirements.txt differ
diff --git a/keepalived/._revision b/keepalived/._revision
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._revision differ
diff --git a/keepalived/._templates b/keepalived/._templates
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._templates differ
diff --git a/keepalived/._tox.ini b/keepalived/._tox.ini
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._tox.ini differ
diff --git a/keepalived/._version b/keepalived/._version
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._version differ
diff --git a/keepalived/._wheelhouse b/keepalived/._wheelhouse
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._wheelhouse differ
diff --git a/keepalived/._wheelhouse.txt b/keepalived/._wheelhouse.txt
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/._wheelhouse.txt differ
diff --git a/keepalived/.build.manifest b/keepalived/.build.manifest
new file mode 100644
index 0000000..07c063f
--- /dev/null
+++ b/keepalived/.build.manifest
@@ -0,0 +1,546 @@
+{
+ "layers": [
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "fcdcea4e5de3e1556c24e6704607862d0ba00a56",
+ "url": "layer:options"
+ },
+ {
+ "branch": "refs/heads/stable",
+ "rev": "0d10732a6e14ea2f940a35ab61425a97c5db6a16",
+ "url": "layer:basic"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "a7d7b6423db37a47611310039e6ed1929c0a2eab",
+ "url": "layer:status"
+ },
+ {
+ "branch": "refs/heads/stable",
+ "rev": "348a4a770068a42afec8230ea167346689baafd2",
+ "url": "keepalived"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "4e2f90052b3c02031d09f10900c9e9cb22565dee",
+ "url": "interface:juju-info"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "5021f8a23f6e6e4cc449d2d02f2d8cb99763ec27",
+ "url": "interface:public-address"
+ },
+ {
+ "branch": "refs/heads/master\nrefs/heads/stable",
+ "rev": "632131b1f122daf6fb601fd4c9f1e4dbb1a92e09",
+ "url": "interface:http"
+ }
+ ],
+ "signatures": {
+ ".build.manifest": [
+ "build",
+ "dynamic",
+ "unchecked"
+ ],
+ ".gitignore": [
+ "layer:status",
+ "static",
+ "315971ad9cc5d6ada2391f0940e1800149b211a18be3c7a8f396735d7978702b"
+ ],
+ ".travis/profile-update.yaml": [
+ "layer:basic",
+ "static",
+ "731e20aa59bf61c024d317ad630e478301a9386ccc0afe56e6c1c09db07ac83b"
+ ],
+ "LICENSE": [
+ "layer:status",
+ "static",
+ "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"
+ ],
+ "Makefile": [
+ "layer:basic",
+ "static",
+ "b7ab3a34e5faf79b96a8632039a0ad0aa87f2a9b5f0ba604e007cafb22190301"
+ ],
+ "README.md": [
+ "keepalived",
+ "static",
+ "c65fa846ba9a75aab5a5d9f1a38818ed806df4246d58fc1034c7f21cb168f470"
+ ],
+ "bin/charm-env": [
+ "layer:basic",
+ "static",
+ "fb6a20fac4102a6a4b6ffe903fcf666998f9a95a3647e6f9af7a1eeb44e58fd5"
+ ],
+ "bin/layer_option": [
+ "layer:options",
+ "static",
+ "e959bf29da4c5edff28b2602c24113c4df9e25cdc9f2aa3b5d46c8577b2a40cc"
+ ],
+ "config.yaml": [
+ "keepalived",
+ "dynamic",
+ "d04d990bb872982a66430cf14b74a864df38eb0e96dd54b80d7f28c83b599249"
+ ],
+ "copyright": [
+ "layer:status",
+ "static",
+ "7c0e36e618a8544faaaa3f8e0533c2f1f4a18bcacbdd8b99b537742e6b587d58"
+ ],
+ "copyright.layer-basic": [
+ "layer:basic",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "copyright.layer-options": [
+ "layer:options",
+ "static",
+ "f6740d66fd60b60f2533d9fcb53907078d1e20920a0219afce7182e2a1c97629"
+ ],
+ "docs/status.md": [
+ "layer:status",
+ "static",
+ "975dec9f8c938196e102e954a80226bda293407c4e5ae857c118bf692154702a"
+ ],
+ "hooks/config-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/hook.template": [
+ "layer:basic",
+ "static",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/install": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/juju-info-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/juju-info-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/juju-info-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/juju-info-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/juju-info-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/lb-sink-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/lb-sink-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/lb-sink-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/lb-sink-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/lb-sink-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-elected": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/leader-settings-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/loadbalancer-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/post-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/pre-series-upgrade": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/relations/http/.gitignore": [
+ "interface:http",
+ "static",
+ "83b4ca18cc39800b1d260b5633cd0252e21501b21e7c33e718db44f1a68a09b8"
+ ],
+ "hooks/relations/http/README.md": [
+ "interface:http",
+ "static",
+ "9c95320ad040745374fc03e972077f52c27e07eb0386ec93ae19bd50dca24c0d"
+ ],
+ "hooks/relations/http/__init__.py": [
+ "interface:http",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/http/interface.yaml": [
+ "interface:http",
+ "static",
+ "d0b64038b85b7791ee4f3a42d73ffc8c208f206f73f899cbf33a519d12f9ad13"
+ ],
+ "hooks/relations/http/provides.py": [
+ "interface:http",
+ "static",
+ "8c72cd8a5a6ea24f53b6dba11f4353c75265bfa7d3ecc2dd096c8963eab8c877"
+ ],
+ "hooks/relations/http/requires.py": [
+ "interface:http",
+ "static",
+ "76cc886368eaf9c2403a6dc46b40531c3f4eaf67b08829f890c57cb645430abd"
+ ],
+ "hooks/relations/juju-info/.gitignore": [
+ "interface:juju-info",
+ "static",
+ "315971ad9cc5d6ada2391f0940e1800149b211a18be3c7a8f396735d7978702b"
+ ],
+ "hooks/relations/juju-info/README.md": [
+ "interface:juju-info",
+ "static",
+ "745aade1bda4e3cb7b07109c8c7560b2f56e076fcd6e8298465c7f66a970cfa0"
+ ],
+ "hooks/relations/juju-info/__init__.py": [
+ "interface:juju-info",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/juju-info/docs/peers.md": [
+ "interface:juju-info",
+ "static",
+ "710be74f2673dd71a8a41d51417b109bfe68782cf527f100138b628fce531c87"
+ ],
+ "hooks/relations/juju-info/docs/provides.md": [
+ "interface:juju-info",
+ "static",
+ "f986015a7c6790057f694d91ccda2c281cf07634a2dfbe6607e67768a9d9f7b0"
+ ],
+ "hooks/relations/juju-info/docs/requires.md": [
+ "interface:juju-info",
+ "static",
+ "bbe0d5b69bfee4992adcfcbf974703e351027f23062dda20845616cb49878010"
+ ],
+ "hooks/relations/juju-info/interface.yaml": [
+ "interface:juju-info",
+ "static",
+ "0d0057ed0bda75157e8314a1259e1a8bb883d2f738ad1b731177e821b5b1b542"
+ ],
+ "hooks/relations/juju-info/make_docs": [
+ "interface:juju-info",
+ "static",
+ "136818e53eb2ee7c5a178c5793d1a06811bebfbeb8875b9fc8e425d93ad9b433"
+ ],
+ "hooks/relations/juju-info/peers.py": [
+ "interface:juju-info",
+ "static",
+ "3a778e2d89f736caf4e1e743ebc96d9ce53af7f3e0bef4b706dd74e5eb373b6e"
+ ],
+ "hooks/relations/juju-info/provides.py": [
+ "interface:juju-info",
+ "static",
+ "3a778e2d89f736caf4e1e743ebc96d9ce53af7f3e0bef4b706dd74e5eb373b6e"
+ ],
+ "hooks/relations/juju-info/pydocmd.yml": [
+ "interface:juju-info",
+ "static",
+ "78f3a1376cc4ef1c297a239104203f70701da29d089fefceb5323503422ee848"
+ ],
+ "hooks/relations/juju-info/requires.py": [
+ "interface:juju-info",
+ "static",
+ "3a778e2d89f736caf4e1e743ebc96d9ce53af7f3e0bef4b706dd74e5eb373b6e"
+ ],
+ "hooks/relations/juju-info/tox.ini": [
+ "interface:juju-info",
+ "static",
+ "caeace713d04686331d19b2d466e066e4123bd9197152ab99deb1a29a1501502"
+ ],
+ "hooks/relations/public-address/README.md": [
+ "interface:public-address",
+ "static",
+ "7225effe61bfd8571447b8b685a2ecb52be17431b3066a5306330954c4cb064d"
+ ],
+ "hooks/relations/public-address/__init__.py": [
+ "interface:public-address",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "hooks/relations/public-address/interface.yaml": [
+ "interface:public-address",
+ "static",
+ "49d6777a54aa84c7d3be8d531be237564e90f2e4cb2be05ef5617a372a382340"
+ ],
+ "hooks/relations/public-address/provides.py": [
+ "interface:public-address",
+ "static",
+ "7c99b0fe987d38773ed3e67c0378fdb78748c04d6895489cd4bca40aaeb051b2"
+ ],
+ "hooks/relations/public-address/requires.py": [
+ "interface:public-address",
+ "static",
+ "d6a7c6c0762d29a5db19afb4cf82af50812988d5e19a3a48fcbe8b0f6fec12a5"
+ ],
+ "hooks/start": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/stop": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/update-status": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/upgrade-charm": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/website-relation-broken": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/website-relation-changed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/website-relation-created": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/website-relation-departed": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "hooks/website-relation-joined": [
+ "layer:basic",
+ "dynamic",
+ "2b693cb2a11594a80cc91235c2dc219a0a6303ae62bee8aa87eb35781f7158f7"
+ ],
+ "icon.svg": [
+ "keepalived",
+ "static",
+ "7c51f3a3274f85f40de905124d32227711d3eea8e81bc9b14d6962471026f6af"
+ ],
+ "layer.yaml": [
+ "keepalived",
+ "dynamic",
+ "900ed82afe183b924d8ebe49ad3d886b1967b35094e2e6431359619c19e6b9cc"
+ ],
+ "lib/charms/layer/__init__.py": [
+ "layer:basic",
+ "static",
+ "dfe0d26c6bf409767de6e2546bc648f150e1b396243619bad3aa0553ab7e0e6f"
+ ],
+ "lib/charms/layer/basic.py": [
+ "layer:basic",
+ "static",
+ "3126b5754ad39402ee27e64527044ddd231ed1cd137fcedaffb51e63a635f108"
+ ],
+ "lib/charms/layer/execd.py": [
+ "layer:basic",
+ "static",
+ "fda8bd491032db1db8ddaf4e99e7cc878c6fb5432efe1f91cadb5b34765d076d"
+ ],
+ "lib/charms/layer/options.py": [
+ "layer:options",
+ "static",
+ "8ae7a07d22542fc964f2d2bee8219d1c78a68dace70a1b38d36d4aea47b1c3b2"
+ ],
+ "lib/charms/layer/status.py": [
+ "layer:status",
+ "static",
+ "d560a5e07b2e5f2b0f25f30e1f0278b06f3f90c01e4dbad5c83d71efc79018c6"
+ ],
+ "make_docs": [
+ "layer:status",
+ "static",
+ "c990f55c8e879793a62ed8464ee3d7e0d7d2225fdecaf17af24b0df0e2daa8c1"
+ ],
+ "metadata.yaml": [
+ "keepalived",
+ "dynamic",
+ "dac11c4ab89f07c6202bde9cd8a88843df4b4f130c74a24cebb2e1806c733094"
+ ],
+ "pydocmd.yml": [
+ "layer:status",
+ "static",
+ "11d9293901f32f75f4256ae4ac2073b92ce1d7ef7b6c892ba9fbb98690a0b330"
+ ],
+ "reactive/__init__.py": [
+ "layer:basic",
+ "static",
+ "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ ],
+ "reactive/keepalived.py": [
+ "keepalived",
+ "static",
+ "496473d95ce5f261a7119431236a68675a93ab2f35db9c355591f77d598e8caf"
+ ],
+ "reactive/status.py": [
+ "layer:status",
+ "static",
+ "30207fc206f24e91def5252f1c7f7c8e23c0aed0e93076babf5e03c05296d207"
+ ],
+ "requirements.txt": [
+ "layer:basic",
+ "static",
+ "a00f75d80849e5b4fc5ad2e7536f947c25b1a4044b341caa8ee87a92d3a4c804"
+ ],
+ "templates/50-keepalived.conf": [
+ "keepalived",
+ "static",
+ "0feef827d3edbbdb047f4efa9fcb81c8b88600c07856f00eedeebf6d43010e96"
+ ],
+ "templates/keepalived.conf": [
+ "keepalived",
+ "static",
+ "a8a28fb2ad0195f78dece2f6594e5c2e826340e33d01bd71c1b9ededd6bb0955"
+ ],
+ "tox.ini": [
+ "layer:status",
+ "static",
+ "2669a78e8e51c1606874e1cc97ca99e660ff547a79592572a38a268d99b25b67"
+ ],
+ "version": [
+ "keepalived",
+ "dynamic",
+ "a3bff55840dcf7d1866186038c890d78b43261e77ecf1e0f6378daf4a7fe3e21"
+ ],
+ "wheelhouse.txt": [
+ "layer:basic",
+ "dynamic",
+ "44b8a3ab6ccaf3a81c8a96526a285462e01964e6090fd40104f3a087bab43c0c"
+ ],
+ "wheelhouse/Jinja2-2.10.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "065c4f02ebe7f7cf559e49ee5a95fb800a9e4528727aec6f24402a5374c65013"
+ ],
+ "wheelhouse/MarkupSafe-1.1.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"
+ ],
+ "wheelhouse/PyYAML-5.2.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0ee8eca2c582d29c3c2ec6e2c4f703d1b7f1fb10bc72317355a746057e7346c"
+ ],
+ "wheelhouse/Tempita-0.5.2.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "cacecf0baa674d356641f1d406b8bff1d756d739c46b869a54de515d08e6fc9c"
+ ],
+ "wheelhouse/charmhelpers-0.20.22.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "b7550108118ce4f87488343384441797777d0da746e1346ed4e6361b4eab0ddb"
+ ],
+ "wheelhouse/charms.reactive-1.4.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "bba21b4fd40b26c240c9ef2aa10c6fdf73592031c68591da4e7ccc46ca9cb616"
+ ],
+ "wheelhouse/netaddr-0.7.19.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd"
+ ],
+ "wheelhouse/pbr-5.6.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "42df03e7797b796625b1029c0400279c7c34fd7df24a7d7818a1abb5b38710dd"
+ ],
+ "wheelhouse/pip-18.1.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "c0a292bd977ef590379a3f05d7b7f65135487b67470f6281289a94e015650ea1"
+ ],
+ "wheelhouse/pyaml-20.4.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "29a5c2a68660a799103d6949167bd6c7953d031449d08802386372de1db6ad71"
+ ],
+ "wheelhouse/setuptools-41.6.0.zip": [
+ "layer:basic",
+ "dynamic",
+ "6afa61b391dcd16cb8890ec9f66cc4015a8a31a6e1c2b4e0c464514be1a3d722"
+ ],
+ "wheelhouse/setuptools_scm-1.17.0.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "70a4cf5584e966ae92f54a764e6437af992ba42ac4bca7eb37cc5d02b98ec40a"
+ ],
+ "wheelhouse/six-1.16.0.tar.gz": [
+ "__pip__",
+ "dynamic",
+ "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"
+ ],
+ "wheelhouse/wheel-0.33.6.tar.gz": [
+ "layer:basic",
+ "dynamic",
+ "10c9da68765315ed98850f8e048347c3eb06dd81822dc2ab1d4fde9dc9702646"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/keepalived/.gitignore b/keepalived/.gitignore
new file mode 100644
index 0000000..ba1431e
--- /dev/null
+++ b/keepalived/.gitignore
@@ -0,0 +1,2 @@
+.tox
+__pycache__
diff --git a/keepalived/.travis/._profile-update.yaml b/keepalived/.travis/._profile-update.yaml
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/.travis/._profile-update.yaml differ
diff --git a/keepalived/.travis/profile-update.yaml b/keepalived/.travis/profile-update.yaml
new file mode 100644
index 0000000..57f96eb
--- /dev/null
+++ b/keepalived/.travis/profile-update.yaml
@@ -0,0 +1,12 @@
+config: {}
+description: Default LXD profile - updated
+devices:
+ eth0:
+ name: eth0
+ parent: lxdbr0
+ nictype: bridged
+ type: nic
+ root:
+ path: /
+ pool: default
+ type: disk
diff --git a/keepalived/LICENSE b/keepalived/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/keepalived/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/keepalived/Makefile b/keepalived/Makefile
new file mode 100644
index 0000000..a1ad3a5
--- /dev/null
+++ b/keepalived/Makefile
@@ -0,0 +1,24 @@
+#!/usr/bin/make
+
+all: lint unit_test
+
+
+.PHONY: clean
+clean:
+ @rm -rf .tox
+
+.PHONY: apt_prereqs
+apt_prereqs:
+ @# Need tox, but don't install the apt version unless we have to (don't want to conflict with pip)
+ @which tox >/dev/null || (sudo apt-get install -y python-pip && sudo pip install tox)
+
+.PHONY: lint
+lint: apt_prereqs
+ @tox --notest
+ @PATH=.tox/py34/bin:.tox/py35/bin flake8 $(wildcard hooks reactive lib unit_tests tests)
+ @charm proof
+
+.PHONY: unit_test
+unit_test: apt_prereqs
+ @echo Starting tests...
+ tox
diff --git a/keepalived/README.md b/keepalived/README.md
new file mode 100644
index 0000000..e332ef4
--- /dev/null
+++ b/keepalived/README.md
@@ -0,0 +1,8 @@
+# Keepalived Charm
+
+[Keepalived](http://www.keepalived.org/) is software which provides high
+availability by assigning two or more nodes a virtual IP and monitoring
+those nodes, failing over when one goes down.
+
+This charm is maintained along with the components of Charmed Kubernetes. For full information,
+please visit the [official Charmed Kubernetes docs](https://www.ubuntu.com/kubernetes/docs/charm-keepalived).
diff --git a/keepalived/bin/._charm-env b/keepalived/bin/._charm-env
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/bin/._charm-env differ
diff --git a/keepalived/bin/._layer_option b/keepalived/bin/._layer_option
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/bin/._layer_option differ
diff --git a/keepalived/bin/charm-env b/keepalived/bin/charm-env
new file mode 100755
index 0000000..d211ce9
--- /dev/null
+++ b/keepalived/bin/charm-env
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+VERSION="1.0.0"
+
+
+find_charm_dirs() {
+ # Hopefully, $JUJU_CHARM_DIR is set so which venv to use in unambiguous.
+ if [[ -n "$JUJU_CHARM_DIR" || -n "$CHARM_DIR" ]]; then
+ if [[ -z "$JUJU_CHARM_DIR" ]]; then
+ # accept $CHARM_DIR to be more forgiving
+ export JUJU_CHARM_DIR="$CHARM_DIR"
+ fi
+ if [[ -z "$CHARM_DIR" ]]; then
+ # set CHARM_DIR as well to help with backwards compatibility
+ export CHARM_DIR="$JUJU_CHARM_DIR"
+ fi
+ return
+ fi
+ # Try to guess the value for JUJU_CHARM_DIR by looking for a non-subordinate
+ # (because there's got to be at least one principle) charm directory;
+ # if there are several, pick the first by alpha order.
+ agents_dir="/var/lib/juju/agents"
+ if [[ -d "$agents_dir" ]]; then
+ desired_charm="$1"
+ found_charm_dir=""
+ if [[ -n "$desired_charm" ]]; then
+ for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+ charm_name="$(grep -o '^['\''"]\?name['\''"]\?:.*' $charm_dir/metadata.yaml 2> /dev/null | sed -e 's/.*: *//' -e 's/['\''"]//g')"
+ if [[ "$charm_name" == "$desired_charm" ]]; then
+ if [[ -n "$found_charm_dir" ]]; then
+ >&2 echo "Ambiguous possibilities for JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+ exit 1
+ fi
+ found_charm_dir="$charm_dir"
+ fi
+ done
+ if [[ -z "$found_charm_dir" ]]; then
+ >&2 echo "Unable to determine JUJU_CHARM_DIR matching '$desired_charm'; please run within a Juju hook context"
+ exit 1
+ fi
+ export JUJU_CHARM_DIR="$found_charm_dir"
+ export CHARM_DIR="$found_charm_dir"
+ return
+ fi
+ # shellcheck disable=SC2126
+ non_subordinates="$(grep -L 'subordinate"\?:.*true' "$agents_dir"/unit-*/charm/metadata.yaml | wc -l)"
+ if [[ "$non_subordinates" -gt 1 ]]; then
+ >&2 echo 'Ambiguous possibilities for JUJU_CHARM_DIR; please use --charm or run within a Juju hook context'
+ exit 1
+ elif [[ "$non_subordinates" -eq 1 ]]; then
+ for charm_dir in $(/bin/ls -d "$agents_dir"/unit-*/charm); do
+ if grep -q 'subordinate"\?:.*true' "$charm_dir/metadata.yaml"; then
+ continue
+ fi
+ export JUJU_CHARM_DIR="$charm_dir"
+ export CHARM_DIR="$charm_dir"
+ return
+ done
+ fi
+ fi
+ >&2 echo 'Unable to determine JUJU_CHARM_DIR; please run within a Juju hook context'
+ exit 1
+}
+
+try_activate_venv() {
+ if [[ -d "$JUJU_CHARM_DIR/../.venv" ]]; then
+ . "$JUJU_CHARM_DIR/../.venv/bin/activate"
+ fi
+}
+
+find_wrapped() {
+ PATH="${PATH/\/usr\/local\/sbin:}" which "$(basename "$0")"
+}
+
+
+if [[ "$1" == "--version" || "$1" == "-v" ]]; then
+ echo "$VERSION"
+ exit 0
+fi
+
+
+# allow --charm option to hint which JUJU_CHARM_DIR to choose when ambiguous
+# NB: --charm option must come first
+# NB: option must be processed outside find_charm_dirs to modify $@
+charm_name=""
+if [[ "$1" == "--charm" ]]; then
+ charm_name="$2"
+ shift; shift
+fi
+
+find_charm_dirs "$charm_name"
+try_activate_venv
+export PYTHONPATH="$JUJU_CHARM_DIR/lib:$PYTHONPATH"
+
+if [[ "$(basename "$0")" == "charm-env" ]]; then
+ # being used as a shebang
+ exec "$@"
+elif [[ "$0" == "$BASH_SOURCE" ]]; then
+ # being invoked as a symlink wrapping something to find in the venv
+ exec "$(find_wrapped)" "$@"
+elif [[ "$(basename "$BASH_SOURCE")" == "charm-env" ]]; then
+ # being sourced directly; do nothing
+ /bin/true
+else
+ # being sourced for wrapped bash helpers
+ . "$(find_wrapped)"
+fi
diff --git a/keepalived/bin/layer_option b/keepalived/bin/layer_option
new file mode 100755
index 0000000..3253ef8
--- /dev/null
+++ b/keepalived/bin/layer_option
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+import sys
+import argparse
+from charms import layer
+
+
+parser = argparse.ArgumentParser(description='Access layer options.')
+parser.add_argument('section',
+ help='the section, or layer, the option is from')
+parser.add_argument('option',
+ help='the option to access')
+
+args = parser.parse_args()
+value = layer.options.get(args.section, args.option)
+if isinstance(value, bool):
+ sys.exit(0 if value else 1)
+elif isinstance(value, list):
+ for val in value:
+ print(val)
+else:
+ print(value)
diff --git a/keepalived/config.yaml b/keepalived/config.yaml
new file mode 100644
index 0000000..9e32946
--- /dev/null
+++ b/keepalived/config.yaml
@@ -0,0 +1,35 @@
+"options":
+ "virtual_ip":
+ "type": "string"
+ "default": ""
+ "description": |
+ Virtual IP/netmask that will be moved between instances,
+ e.g.: 10.1.2.3/16
+ "vip_hostname":
+ "type": "string"
+ "default": ""
+ "description": |
+ A VIP hostname to pass to clients.
+ "port":
+ "type": "int"
+ "default": !!int "443"
+ "description": |
+ A port to pass to clients.
+ "router_id":
+ "type": "int"
+ "default": !!int "23"
+ "description": |
+ Virtual router identifier - a number between 1 and 255
+ that's unique within the network segment
+ "network_interface":
+ "type": "string"
+ "default": ""
+ "description": |
+ Network interface name for the VIP. The default value is
+ the result of running the following command:
+ `route | grep default | head -n 1 | awk {'print $8'}`.
+ "healthcheck_interval":
+ "type": "int"
+ "default": !!int "2"
+ "description": |
+ vrrp_script-based health-check interval, in seconds
diff --git a/keepalived/copyright b/keepalived/copyright
new file mode 100644
index 0000000..a91bdf1
--- /dev/null
+++ b/keepalived/copyright
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2018, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/keepalived/copyright.layer-basic b/keepalived/copyright.layer-basic
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/keepalived/copyright.layer-basic
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/keepalived/copyright.layer-options b/keepalived/copyright.layer-options
new file mode 100644
index 0000000..d4fdd18
--- /dev/null
+++ b/keepalived/copyright.layer-options
@@ -0,0 +1,16 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015-2017, Canonical Ltd., All Rights Reserved.
+License: Apache License 2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/keepalived/docs/._status.md b/keepalived/docs/._status.md
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/docs/._status.md differ
diff --git a/keepalived/docs/status.md b/keepalived/docs/status.md
new file mode 100644
index 0000000..c6cceab
--- /dev/null
+++ b/keepalived/docs/status.md
@@ -0,0 +1,91 @@
+
+
+```python
+maintenance(message)
+```
+
+Set the status to the `MAINTENANCE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+<a name="charms.layer.status.maint"></a>
+
+#### maint
+
+```python
+maint(message)
+```
+
+Shorthand alias for
+[maintenance](status.md#charms.layer.status.maintenance).
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+<a name="charms.layer.status.blocked"></a>
+
+#### blocked
+
+```python
+blocked(message)
+```
+
+Set the status to the `BLOCKED` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+<a name="charms.layer.status.waiting"></a>
+
+#### waiting
+
+```python
+waiting(message)
+```
+
+Set the status to the `WAITING` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+<a name="charms.layer.status.active"></a>
+
+#### active
+
+```python
+active(message)
+```
+
+Set the status to the `ACTIVE` state with the given operator message.
+
+__Parameters__
+
+- __`message` (str)__: Message to convey to the operator.
+
+
+<a name="charms.layer.status.status_set"></a>
+
+#### status_set
+
+```python
+status_set(workload_state, message)
+```
+
+Set the status to the given workload state with a message.
+
+__Parameters__
+
+- __`workload_state` (WorkloadState or str)__: State of the workload. Should be
+ a [WorkloadState](status.md#charms.layer.status.WorkloadState) enum
+ member, or the string value of one of those members.
+- __`message` (str)__: Message to convey to the operator.
+
diff --git a/keepalived/hooks/._config-changed b/keepalived/hooks/._config-changed
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._config-changed differ
diff --git a/keepalived/hooks/._hook.template b/keepalived/hooks/._hook.template
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._hook.template differ
diff --git a/keepalived/hooks/._install b/keepalived/hooks/._install
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._install differ
diff --git a/keepalived/hooks/._juju-info-relation-broken b/keepalived/hooks/._juju-info-relation-broken
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._juju-info-relation-broken differ
diff --git a/keepalived/hooks/._juju-info-relation-changed b/keepalived/hooks/._juju-info-relation-changed
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._juju-info-relation-changed differ
diff --git a/keepalived/hooks/._juju-info-relation-created b/keepalived/hooks/._juju-info-relation-created
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._juju-info-relation-created differ
diff --git a/keepalived/hooks/._juju-info-relation-departed b/keepalived/hooks/._juju-info-relation-departed
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._juju-info-relation-departed differ
diff --git a/keepalived/hooks/._juju-info-relation-joined b/keepalived/hooks/._juju-info-relation-joined
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._juju-info-relation-joined differ
diff --git a/keepalived/hooks/._lb-sink-relation-broken b/keepalived/hooks/._lb-sink-relation-broken
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._lb-sink-relation-broken differ
diff --git a/keepalived/hooks/._lb-sink-relation-changed b/keepalived/hooks/._lb-sink-relation-changed
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._lb-sink-relation-changed differ
diff --git a/keepalived/hooks/._lb-sink-relation-created b/keepalived/hooks/._lb-sink-relation-created
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._lb-sink-relation-created differ
diff --git a/keepalived/hooks/._lb-sink-relation-departed b/keepalived/hooks/._lb-sink-relation-departed
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._lb-sink-relation-departed differ
diff --git a/keepalived/hooks/._lb-sink-relation-joined b/keepalived/hooks/._lb-sink-relation-joined
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._lb-sink-relation-joined differ
diff --git a/keepalived/hooks/._leader-elected b/keepalived/hooks/._leader-elected
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._leader-elected differ
diff --git a/keepalived/hooks/._leader-settings-changed b/keepalived/hooks/._leader-settings-changed
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._leader-settings-changed differ
diff --git a/keepalived/hooks/._loadbalancer-relation-broken b/keepalived/hooks/._loadbalancer-relation-broken
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._loadbalancer-relation-broken differ
diff --git a/keepalived/hooks/._loadbalancer-relation-changed b/keepalived/hooks/._loadbalancer-relation-changed
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._loadbalancer-relation-changed differ
diff --git a/keepalived/hooks/._loadbalancer-relation-created b/keepalived/hooks/._loadbalancer-relation-created
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._loadbalancer-relation-created differ
diff --git a/keepalived/hooks/._loadbalancer-relation-departed b/keepalived/hooks/._loadbalancer-relation-departed
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._loadbalancer-relation-departed differ
diff --git a/keepalived/hooks/._loadbalancer-relation-joined b/keepalived/hooks/._loadbalancer-relation-joined
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._loadbalancer-relation-joined differ
diff --git a/keepalived/hooks/._post-series-upgrade b/keepalived/hooks/._post-series-upgrade
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._post-series-upgrade differ
diff --git a/keepalived/hooks/._pre-series-upgrade b/keepalived/hooks/._pre-series-upgrade
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._pre-series-upgrade differ
diff --git a/keepalived/hooks/._relations b/keepalived/hooks/._relations
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._relations differ
diff --git a/keepalived/hooks/._start b/keepalived/hooks/._start
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._start differ
diff --git a/keepalived/hooks/._stop b/keepalived/hooks/._stop
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._stop differ
diff --git a/keepalived/hooks/._update-status b/keepalived/hooks/._update-status
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._update-status differ
diff --git a/keepalived/hooks/._upgrade-charm b/keepalived/hooks/._upgrade-charm
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._upgrade-charm differ
diff --git a/keepalived/hooks/._website-relation-broken b/keepalived/hooks/._website-relation-broken
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._website-relation-broken differ
diff --git a/keepalived/hooks/._website-relation-changed b/keepalived/hooks/._website-relation-changed
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._website-relation-changed differ
diff --git a/keepalived/hooks/._website-relation-created b/keepalived/hooks/._website-relation-created
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._website-relation-created differ
diff --git a/keepalived/hooks/._website-relation-departed b/keepalived/hooks/._website-relation-departed
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._website-relation-departed differ
diff --git a/keepalived/hooks/._website-relation-joined b/keepalived/hooks/._website-relation-joined
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/._website-relation-joined differ
diff --git a/keepalived/hooks/config-changed b/keepalived/hooks/config-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/config-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/hook.template b/keepalived/hooks/hook.template
new file mode 100644
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/hook.template
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/install b/keepalived/hooks/install
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/install
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/juju-info-relation-broken b/keepalived/hooks/juju-info-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/juju-info-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/juju-info-relation-changed b/keepalived/hooks/juju-info-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/juju-info-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/juju-info-relation-created b/keepalived/hooks/juju-info-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/juju-info-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/juju-info-relation-departed b/keepalived/hooks/juju-info-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/juju-info-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/juju-info-relation-joined b/keepalived/hooks/juju-info-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/juju-info-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/lb-sink-relation-broken b/keepalived/hooks/lb-sink-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/lb-sink-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/lb-sink-relation-changed b/keepalived/hooks/lb-sink-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/lb-sink-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/lb-sink-relation-created b/keepalived/hooks/lb-sink-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/lb-sink-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/lb-sink-relation-departed b/keepalived/hooks/lb-sink-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/lb-sink-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/lb-sink-relation-joined b/keepalived/hooks/lb-sink-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/lb-sink-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/leader-elected b/keepalived/hooks/leader-elected
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/leader-elected
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/leader-settings-changed b/keepalived/hooks/leader-settings-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/leader-settings-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/loadbalancer-relation-broken b/keepalived/hooks/loadbalancer-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/loadbalancer-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/loadbalancer-relation-changed b/keepalived/hooks/loadbalancer-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/loadbalancer-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/loadbalancer-relation-created b/keepalived/hooks/loadbalancer-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/loadbalancer-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/loadbalancer-relation-departed b/keepalived/hooks/loadbalancer-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/loadbalancer-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/loadbalancer-relation-joined b/keepalived/hooks/loadbalancer-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/loadbalancer-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/post-series-upgrade b/keepalived/hooks/post-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/post-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/pre-series-upgrade b/keepalived/hooks/pre-series-upgrade
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/pre-series-upgrade
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/relations/._http b/keepalived/hooks/relations/._http
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/._http differ
diff --git a/keepalived/hooks/relations/._juju-info b/keepalived/hooks/relations/._juju-info
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/._juju-info differ
diff --git a/keepalived/hooks/relations/._public-address b/keepalived/hooks/relations/._public-address
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/._public-address differ
diff --git a/keepalived/hooks/relations/http/._.gitignore b/keepalived/hooks/relations/http/._.gitignore
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/http/._.gitignore differ
diff --git a/keepalived/hooks/relations/http/._README.md b/keepalived/hooks/relations/http/._README.md
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/http/._README.md differ
diff --git a/keepalived/hooks/relations/http/.___init__.py b/keepalived/hooks/relations/http/.___init__.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/http/.___init__.py differ
diff --git a/keepalived/hooks/relations/http/._interface.yaml b/keepalived/hooks/relations/http/._interface.yaml
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/http/._interface.yaml differ
diff --git a/keepalived/hooks/relations/http/._provides.py b/keepalived/hooks/relations/http/._provides.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/http/._provides.py differ
diff --git a/keepalived/hooks/relations/http/._requires.py b/keepalived/hooks/relations/http/._requires.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/http/._requires.py differ
diff --git a/keepalived/hooks/relations/http/.gitignore b/keepalived/hooks/relations/http/.gitignore
new file mode 100644
index 0000000..3374ec2
--- /dev/null
+++ b/keepalived/hooks/relations/http/.gitignore
@@ -0,0 +1,5 @@
+# Emacs save files
+*~
+\#*\#
+.\#*
+
diff --git a/keepalived/hooks/relations/http/README.md b/keepalived/hooks/relations/http/README.md
new file mode 100644
index 0000000..3d7822a
--- /dev/null
+++ b/keepalived/hooks/relations/http/README.md
@@ -0,0 +1,68 @@
+# Overview
+
+This interface layer implements the basic form of the `http` interface protocol,
+which is used for things such as reverse-proxies, load-balanced servers, REST
+service discovery, et cetera.
+
+# Usage
+
+## Provides
+
+By providing the `http` interface, your charm is providing an HTTP server that
+can be load-balanced, reverse-proxied, used as a REST endpoint, etc.
+
+Your charm need only provide the port on which it is serving its content, as
+soon as the `{relation_name}.available` state is set:
+
+```python
+@when('website.available')
+def configure_website(website):
+ website.configure(port=hookenv.config('port'))
+```
+
+## Requires
+
+By requiring the `http` interface, your charm is consuming one or more HTTP
+servers, as a REST endpoint, to load-balance a set of servers, etc.
+
+Your charm should respond to the `{relation_name}.available` state, which
+indicates that there is at least one HTTP server connected.
+
+The `services()` method returns a list of available HTTP services and their
+associated hosts and ports.
+
+The return value is a list of dicts of the following form:
+
+```python
+[
+ {
+ 'service_name': name_of_service,
+ 'hosts': [
+ {
+ 'hostname': address_of_host,
+ 'port': port_for_host,
+ },
+ # ...
+ ],
+ },
+ # ...
+]
+```
+
+A trivial example of handling this interface would be:
+
+```python
+from charms.reactive.helpers import data_changed
+
+@when('reverseproxy.available')
+def update_reverse_proxy_config(reverseproxy):
+ services = reverseproxy.services()
+ if not data_changed('reverseproxy.services', services):
+ return
+ for service in services:
+ for host in service['hosts']:
+ hookenv.log('{} has a unit {}:{}'.format(
+                services['service_name'],
+ host['hostname'],
+ host['port']))
+```
diff --git a/keepalived/hooks/relations/http/__init__.py b/keepalived/hooks/relations/http/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/keepalived/hooks/relations/http/interface.yaml b/keepalived/hooks/relations/http/interface.yaml
new file mode 100644
index 0000000..54e7748
--- /dev/null
+++ b/keepalived/hooks/relations/http/interface.yaml
@@ -0,0 +1,4 @@
+name: http
+summary: Basic HTTP interface
+version: 1
+repo: https://git.launchpad.net/~bcsaller/charms/+source/http
diff --git a/keepalived/hooks/relations/http/provides.py b/keepalived/hooks/relations/http/provides.py
new file mode 100644
index 0000000..86fa9b3
--- /dev/null
+++ b/keepalived/hooks/relations/http/provides.py
@@ -0,0 +1,67 @@
+import json
+
+from charmhelpers.core import hookenv
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import Endpoint
+
+
+class HttpProvides(Endpoint):
+
+ @when('endpoint.{endpoint_name}.joined')
+ def joined(self):
+ set_flag(self.expand_name('{endpoint_name}.available'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+
+ def get_ingress_address(self, rel_id=None):
+ # If no rel_id is provided, we fallback to the first one
+ if rel_id is None:
+ rel_id = self.relations[0].relation_id
+ return hookenv.ingress_address(rel_id, hookenv.local_unit())
+
+ def configure(self, port, private_address=None, hostname=None):
+ ''' configure the address(es). private_address and hostname can
+ be None, a single string address/hostname, or a list of addresses
+ and hostnames. Note that if a list is passed, it is assumed both
+ private_address and hostname are either lists or None '''
+ for relation in self.relations:
+ ingress_address = self.get_ingress_address(relation.relation_id)
+ if type(private_address) is list or type(hostname) is list:
+ # build 3 lists to zip together that are the same length
+ length = max(len(private_address), len(hostname))
+ p = [port] * length
+ a = private_address + [ingress_address] *\
+ (length - len(private_address))
+ h = hostname + [ingress_address] * (length - len(hostname))
+ zipped_list = zip(p, a, h)
+ # now build an array of dictionaries from that in the desired
+ # format for the interface
+ data_list = [{'hostname': h, 'port': p, 'private-address': a}
+ for p, a, h in zipped_list]
+ # for backwards compatibility, we just send a single entry
+ # and have an array of dictionaries in a field of that
+ # entry for the other entries.
+ data = data_list.pop(0)
+ data['extended_data'] = json.dumps(data_list)
+
+ relation.to_publish_raw.update(data)
+ else:
+ relation.to_publish_raw.update({
+ 'hostname': hostname or ingress_address,
+ 'private-address': private_address or ingress_address,
+ 'port': port,
+ })
+
+ def set_remote(self, **kwargs):
+ # NB: This method provides backwards compatibility for charms that
+ # called RelationBase.set_remote. Most commonly, this was done by
+ # charms that needed to pass reverse proxy stanzas to http proxies.
+ # This type of interaction with base relation classes is discouraged,
+ # and should be handled with logic encapsulated in appropriate
+ # interfaces. Eventually, this method will be deprecated in favor of
+ # that behavior.
+ for relation in self.relations:
+ relation.to_publish_raw.update(kwargs)
diff --git a/keepalived/hooks/relations/http/requires.py b/keepalived/hooks/relations/http/requires.py
new file mode 100644
index 0000000..17ea6b7
--- /dev/null
+++ b/keepalived/hooks/relations/http/requires.py
@@ -0,0 +1,76 @@
+import json
+
+from charms.reactive import when, when_not
+from charms.reactive import set_flag, clear_flag
+from charms.reactive import Endpoint
+
+
+class HttpRequires(Endpoint):
+
+ @when('endpoint.{endpoint_name}.changed')
+ def changed(self):
+ if any(unit.received_raw['port'] for unit in self.all_joined_units):
+ set_flag(self.expand_name('{endpoint_name}.available'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+
+ def services(self):
+ """
+ Returns a list of available HTTP services and their associated hosts
+ and ports.
+
+ The return value is a list of dicts of the following form::
+
+ [
+ {
+ 'service_name': name_of_service,
+ 'hosts': [
+ {
+ 'hostname': address_of_host,
+ 'private-address': private_address_of_host,
+ 'port': port_for_host,
+ },
+ # ...
+ ],
+ },
+ # ...
+ ]
+ """
+ def build_service_host(data):
+ private_address = data['private-address']
+ host = data['hostname'] or private_address
+ if host and data['port']:
+ return (host, private_address, data['port'])
+ else:
+ return None
+
+ services = {}
+ for relation in self.relations:
+ service_name = relation.application_name
+ service = services.setdefault(service_name, {
+ 'service_name': service_name,
+ 'hosts': [],
+ })
+ host_set = set()
+ for unit in relation.joined_units:
+ data = unit.received_raw
+ host = build_service_host(data)
+ if host:
+ host_set.add(host)
+
+ # if we have extended data, add it
+ if 'extended_data' in data:
+ for ed in json.loads(data['extended_data']):
+ host = build_service_host(ed)
+ if host:
+ host_set.add(host)
+
+ service['hosts'] = [
+ {'hostname': h, 'private-address': pa, 'port': p}
+ for h, pa, p in sorted(host_set)
+ ]
+
+ ret = [s for s in services.values() if s['hosts']]
+ return ret
diff --git a/keepalived/hooks/relations/juju-info/._.gitignore b/keepalived/hooks/relations/juju-info/._.gitignore
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/._.gitignore differ
diff --git a/keepalived/hooks/relations/juju-info/._README.md b/keepalived/hooks/relations/juju-info/._README.md
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/._README.md differ
diff --git a/keepalived/hooks/relations/juju-info/.___init__.py b/keepalived/hooks/relations/juju-info/.___init__.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/.___init__.py differ
diff --git a/keepalived/hooks/relations/juju-info/._docs b/keepalived/hooks/relations/juju-info/._docs
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/._docs differ
diff --git a/keepalived/hooks/relations/juju-info/._interface.yaml b/keepalived/hooks/relations/juju-info/._interface.yaml
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/._interface.yaml differ
diff --git a/keepalived/hooks/relations/juju-info/._make_docs b/keepalived/hooks/relations/juju-info/._make_docs
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/._make_docs differ
diff --git a/keepalived/hooks/relations/juju-info/._peers.py b/keepalived/hooks/relations/juju-info/._peers.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/._peers.py differ
diff --git a/keepalived/hooks/relations/juju-info/._provides.py b/keepalived/hooks/relations/juju-info/._provides.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/._provides.py differ
diff --git a/keepalived/hooks/relations/juju-info/._pydocmd.yml b/keepalived/hooks/relations/juju-info/._pydocmd.yml
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/._pydocmd.yml differ
diff --git a/keepalived/hooks/relations/juju-info/._requires.py b/keepalived/hooks/relations/juju-info/._requires.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/._requires.py differ
diff --git a/keepalived/hooks/relations/juju-info/._tox.ini b/keepalived/hooks/relations/juju-info/._tox.ini
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/._tox.ini differ
diff --git a/keepalived/hooks/relations/juju-info/.gitignore b/keepalived/hooks/relations/juju-info/.gitignore
new file mode 100644
index 0000000..ba1431e
--- /dev/null
+++ b/keepalived/hooks/relations/juju-info/.gitignore
@@ -0,0 +1,2 @@
+.tox
+__pycache__
diff --git a/keepalived/hooks/relations/juju-info/README.md b/keepalived/hooks/relations/juju-info/README.md
new file mode 100644
index 0000000..d19e435
--- /dev/null
+++ b/keepalived/hooks/relations/juju-info/README.md
@@ -0,0 +1,39 @@
+# Juju-Info Interface
+
+The juju info interface is a special and implicit relationship that works with
+any charm. It is mainly useful for subordinate charms that can add
+functionality to any existing machine without the host charm being aware of
+it.
+
+
+### Flags
+
+`{{endpoint_name}}.connected`
+
+Note: This flag keys off of what the charm author names the relationship
+endpoint, which should *not* be the name of the interface:
+
+An example of a properly implemented relationship would resemble the following:
+
+
+```yaml
+requires:
+ host-system:
+ interface: juju-info
+```
+
+This might then be used in your charm like so:
+
+```python
+@when_any('host-system.connected')
+def handle_host():
+ host = endpoint_from_flag('host-system.connected')
+ for address in host.addresses:
+ hookenv.log('Connected to: {}'.format(address))
+```
+
+## Reference
+
+* [Requires API documentation](docs/requires.md)
+* [Provides API documentation](docs/provides.md)
+* [Peers API documentation](docs/peers.md)
diff --git a/keepalived/hooks/relations/juju-info/__init__.py b/keepalived/hooks/relations/juju-info/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/keepalived/hooks/relations/juju-info/docs/._peers.md b/keepalived/hooks/relations/juju-info/docs/._peers.md
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/docs/._peers.md differ
diff --git a/keepalived/hooks/relations/juju-info/docs/._provides.md b/keepalived/hooks/relations/juju-info/docs/._provides.md
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/docs/._provides.md differ
diff --git a/keepalived/hooks/relations/juju-info/docs/._requires.md b/keepalived/hooks/relations/juju-info/docs/._requires.md
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/juju-info/docs/._requires.md differ
diff --git a/keepalived/hooks/relations/juju-info/docs/peers.md b/keepalived/hooks/relations/juju-info/docs/peers.md
new file mode 100644
index 0000000..39c9601
--- /dev/null
+++ b/keepalived/hooks/relations/juju-info/docs/peers.md
@@ -0,0 +1,52 @@
+
+
+
+A flat list of all addresses received from related apps / units.
+
+This list is de-duplicated and sorted by address, so it will be stable
+for change comparison. If you need to know which app / unit an address
+comes from, see `received_addresses_map`.
+
+Note: This uses ingress-address, so it will work with cross-model
+relations.
+
+
+addresses_map
+
+
+A nested dictionary of all addresses received from related apps / units
+by app name then unit name.
+
+For example::
+
+ {
+ 'app1': {
+ 'app1/0': '10.0.0.1',
+ 'app1/1': '10.0.0.2',
+ }
+ }
+
+Note: This uses ingress-address, so it will work with cross-model
+relations.
+
+
+
+
+A flat list of all addresses received from related apps / units.
+
+This list is de-duplicated and sorted by address, so it will be stable
+for change comparison. If you need to know which app / unit an address
+comes from, see `received_addresses_map`.
+
+Note: This uses ingress-address, so it will work with cross-model
+relations.
+
+
+addresses_map
+
+
+A nested dictionary of all addresses received from related apps / units
+by app name then unit name.
+
+For example::
+
+ {
+ 'app1': {
+ 'app1/0': '10.0.0.1',
+ 'app1/1': '10.0.0.2',
+ }
+ }
+
+Note: This uses ingress-address, so it will work with cross-model
+relations.
+
+
+
+
+A flat list of all addresses received from related apps / units.
+
+This list is de-duplicated and sorted by address, so it will be stable
+for change comparison. If you need to know which app / unit an address
+comes from, see `received_addresses_map`.
+
+Note: This uses ingress-address, so it will work with cross-model
+relations.
+
+
+addresses_map
+
+
+A nested dictionary of all addresses received from related apps / units
+by app name then unit name.
+
+For example::
+
+ {
+ 'app1': {
+ 'app1/0': '10.0.0.1',
+ 'app1/1': '10.0.0.2',
+ }
+ }
+
+Note: This uses ingress-address, so it will work with cross-model
+relations.
+
+
+unit_count
+
+
+Number of joined units.
+
+
+get_private_address
+
+```python
+JujuInfoClient.get_private_address()
+```
+
+Deprecated.
+
diff --git a/keepalived/hooks/relations/juju-info/interface.yaml b/keepalived/hooks/relations/juju-info/interface.yaml
new file mode 100644
index 0000000..b98f1fc
--- /dev/null
+++ b/keepalived/hooks/relations/juju-info/interface.yaml
@@ -0,0 +1,4 @@
+name: juju-info
+summary: Used in subordinate charms (rarely)
+version: 1
+maintainer: "Charles Butler "
diff --git a/keepalived/hooks/relations/juju-info/make_docs b/keepalived/hooks/relations/juju-info/make_docs
new file mode 100644
index 0000000..7f382d9
--- /dev/null
+++ b/keepalived/hooks/relations/juju-info/make_docs
@@ -0,0 +1,21 @@
+#!.tox/py3/bin/python
+
+import sys
+from shutil import rmtree
+from unittest.mock import patch
+
+import pydocmd.__main__
+
+
+with patch('charmhelpers.core.hookenv.metadata') as metadata:
+ metadata.return_value = {
+ 'requires': {'juju-info': {'interface': 'juju-info'}},
+ 'provides': {'juju-info': {'interface': 'juju-info'}},
+ 'peers': {'juju-info': {'interface': 'juju-info'}},
+ }
+ sys.path.insert(0, '.')
+ print(sys.argv)
+ if len(sys.argv) == 1:
+ sys.argv.extend(['build'])
+ pydocmd.__main__.main()
+ rmtree('_build')
diff --git a/keepalived/hooks/relations/juju-info/peers.py b/keepalived/hooks/relations/juju-info/peers.py
new file mode 100644
index 0000000..9c637f5
--- /dev/null
+++ b/keepalived/hooks/relations/juju-info/peers.py
@@ -0,0 +1,81 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not, set_flag, clear_flag
+
+
+class JujuInfoClient(Endpoint):
+ @when('endpoint.{endpoint_name}.joined')
+ def changed(self):
+ set_flag(self.expand_name('{endpoint_name}.connected'))
+ set_flag(self.expand_name('{endpoint_name}.available'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+ clear_flag(self.expand_name('{endpoint_name}.connected'))
+
+ def get_private_address(self):
+ """
+ Deprecated.
+ """
+ return self.all_joined_units[0]['private-address']
+
+ @property
+ def unit_count(self):
+ """
+ Number of joined units.
+ """
+ return len(self.all_joined_units)
+
+ @property
+ def addresses(self):
+ """
+ A flat list of all addresses received from related apps / units.
+
+ This list is de-duplicated and sorted by address, so it will be stable
+ for change comparison. If you need to know which app / unit an address
+ comes from, see `received_addresses_map`.
+
+ Note: This uses ingress-address, so it will work with cross-model
+ relations.
+ """
+ addrs = {u.received_raw['ingress-address']
+ for u in self.all_joined_units}
+ return list(sorted(addrs))
+
+ @property
+ def addresses_map(self):
+ """
+ A nested dictionary of all addresses received from related apps / units
+ by app name then unit name.
+
+ For example::
+
+ {
+ 'app1': {
+ 'app1/0': '10.0.0.1',
+ 'app1/1': '10.0.0.2',
+ }
+ }
+
+ Note: This uses ingress-address, so it will work with cross-model
+ relations.
+ """
+ return {
+ r.application_name: {
+ u.unit_name: u.received_raw['ingress-address']
+ for u in r.joined_units
+ } for r in self.relations
+ }
diff --git a/keepalived/hooks/relations/juju-info/provides.py b/keepalived/hooks/relations/juju-info/provides.py
new file mode 100644
index 0000000..9c637f5
--- /dev/null
+++ b/keepalived/hooks/relations/juju-info/provides.py
@@ -0,0 +1,81 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not, set_flag, clear_flag
+
+
+class JujuInfoClient(Endpoint):
+ @when('endpoint.{endpoint_name}.joined')
+ def changed(self):
+ set_flag(self.expand_name('{endpoint_name}.connected'))
+ set_flag(self.expand_name('{endpoint_name}.available'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+ clear_flag(self.expand_name('{endpoint_name}.connected'))
+
+ def get_private_address(self):
+ """
+ Deprecated.
+ """
+ return self.all_joined_units[0]['private-address']
+
+ @property
+ def unit_count(self):
+ """
+ Number of joined units.
+ """
+ return len(self.all_joined_units)
+
+ @property
+ def addresses(self):
+ """
+ A flat list of all addresses received from related apps / units.
+
+ This list is de-duplicated and sorted by address, so it will be stable
+ for change comparison. If you need to know which app / unit an address
+ comes from, see `received_addresses_map`.
+
+ Note: This uses ingress-address, so it will work with cross-model
+ relations.
+ """
+ addrs = {u.received_raw['ingress-address']
+ for u in self.all_joined_units}
+ return list(sorted(addrs))
+
+ @property
+ def addresses_map(self):
+ """
+ A nested dictionary of all addresses received from related apps / units
+ by app name then unit name.
+
+ For example::
+
+ {
+ 'app1': {
+ 'app1/0': '10.0.0.1',
+ 'app1/1': '10.0.0.2',
+ }
+ }
+
+ Note: This uses ingress-address, so it will work with cross-model
+ relations.
+ """
+ return {
+ r.application_name: {
+ u.unit_name: u.received_raw['ingress-address']
+ for u in r.joined_units
+ } for r in self.relations
+ }
diff --git a/keepalived/hooks/relations/juju-info/pydocmd.yml b/keepalived/hooks/relations/juju-info/pydocmd.yml
new file mode 100644
index 0000000..7d95883
--- /dev/null
+++ b/keepalived/hooks/relations/juju-info/pydocmd.yml
@@ -0,0 +1,19 @@
+site_name: 'Juju Info Interface'
+
+generate:
+ - requires.md:
+ - requires
+ - requires.JujuInfoClient+
+ - provides.md:
+ - provides
+ - provides.JujuInfoClient+
+ - peers.md:
+ - peers
+ - peers.JujuInfoClient+
+
+pages:
+ - Requires: requires.md
+ - Provides: provides.md
+ - Peers: peers.md
+
+gens_dir: docs
diff --git a/keepalived/hooks/relations/juju-info/requires.py b/keepalived/hooks/relations/juju-info/requires.py
new file mode 100644
index 0000000..9c637f5
--- /dev/null
+++ b/keepalived/hooks/relations/juju-info/requires.py
@@ -0,0 +1,81 @@
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charms.reactive import Endpoint
+from charms.reactive import when, when_not, set_flag, clear_flag
+
+
+class JujuInfoClient(Endpoint):
+ @when('endpoint.{endpoint_name}.joined')
+ def changed(self):
+ set_flag(self.expand_name('{endpoint_name}.connected'))
+ set_flag(self.expand_name('{endpoint_name}.available'))
+
+ @when_not('endpoint.{endpoint_name}.joined')
+ def broken(self):
+ clear_flag(self.expand_name('{endpoint_name}.available'))
+ clear_flag(self.expand_name('{endpoint_name}.connected'))
+
+ def get_private_address(self):
+ """
+ Deprecated.
+ """
+ return self.all_joined_units[0]['private-address']
+
+ @property
+ def unit_count(self):
+ """
+ Number of joined units.
+ """
+ return len(self.all_joined_units)
+
+ @property
+ def addresses(self):
+ """
+ A flat list of all addresses received from related apps / units.
+
+ This list is de-duplicated and sorted by address, so it will be stable
+ for change comparison. If you need to know which app / unit an address
+ comes from, see `received_addresses_map`.
+
+ Note: This uses ingress-address, so it will work with cross-model
+ relations.
+ """
+ addrs = {u.received_raw['ingress-address']
+ for u in self.all_joined_units}
+ return list(sorted(addrs))
+
+ @property
+ def addresses_map(self):
+ """
+ A nested dictionary of all addresses received from related apps / units
+ by app name then unit name.
+
+ For example::
+
+ {
+ 'app1': {
+ 'app1/0': '10.0.0.1',
+ 'app1/1': '10.0.0.2',
+ }
+ }
+
+ Note: This uses ingress-address, so it will work with cross-model
+ relations.
+ """
+ return {
+ r.application_name: {
+ u.unit_name: u.received_raw['ingress-address']
+ for u in r.joined_units
+ } for r in self.relations
+ }
diff --git a/keepalived/hooks/relations/juju-info/tox.ini b/keepalived/hooks/relations/juju-info/tox.ini
new file mode 100644
index 0000000..fcec9c1
--- /dev/null
+++ b/keepalived/hooks/relations/juju-info/tox.ini
@@ -0,0 +1,14 @@
+[tox]
+envlist = py3
+skipsdist = true
+
+[testenv]
+basepython=python3
+envdir={toxworkdir}/py3
+deps=
+ pytest
+ charms.reactive
+ git+https://github.com/NiklasRosenstein/pydoc-markdown#egg=pydoc-markdown
+
+[testenv:docs]
+commands=python make_docs
diff --git a/keepalived/hooks/relations/public-address/._README.md b/keepalived/hooks/relations/public-address/._README.md
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/public-address/._README.md differ
diff --git a/keepalived/hooks/relations/public-address/.___init__.py b/keepalived/hooks/relations/public-address/.___init__.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/public-address/.___init__.py differ
diff --git a/keepalived/hooks/relations/public-address/._interface.yaml b/keepalived/hooks/relations/public-address/._interface.yaml
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/public-address/._interface.yaml differ
diff --git a/keepalived/hooks/relations/public-address/._provides.py b/keepalived/hooks/relations/public-address/._provides.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/public-address/._provides.py differ
diff --git a/keepalived/hooks/relations/public-address/._requires.py b/keepalived/hooks/relations/public-address/._requires.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/hooks/relations/public-address/._requires.py differ
diff --git a/keepalived/hooks/relations/public-address/README.md b/keepalived/hooks/relations/public-address/README.md
new file mode 100644
index 0000000..06be3ae
--- /dev/null
+++ b/keepalived/hooks/relations/public-address/README.md
@@ -0,0 +1,59 @@
+# Overview
+
+This interface layer implements a public address protocol useful for load
+balancers and their subordinates. The load balancers (providers) set their
+own public address and port, which is then available to the subordinates
+(requirers).
+
+# Usage
+
+## Provides
+
+By providing the `public-address` interface, your charm is providing an HTTP
+server that can load-balance for another HTTP based service.
+
+Your charm need only provide the address and port on which it is serving its
+content, as soon as the `{relation_name}.available` state is set:
+
+```python
+from charmhelpers.core import hookenv
+@when('website.available')
+def configure_website(website):
+ website.set_address_port(hookenv.unit_get('public-address'), hookenv.config('port'))
+```
+
+## Requires
+
+By requiring the `public-address` interface, your charm is consuming one or
+more HTTP servers, to load-balance a set of servers, etc.
+
+Your charm should respond to the `{relation_name}.available` state, which
+indicates that there is at least one HTTP server connected.
+
+The `get_addresses_ports()` method returns a list of available addresses and
+ports.
+
+The return value is a list of dicts of the following form:
+
+```python
+[
+ {
+ 'public-address': address_of_host,
+ 'port': port_for_host,
+ },
+ # ...
+]
+```
+
+A trivial example of handling this interface would be:
+
+```python
+from charmhelpers.core import hookenv
+@when('loadbalancer.available')
+def update_reverse_proxy_config(loadbalancer):
+ hosts = loadbalancer.get_addresses_ports()
+ for host in hosts:
+ hookenv.log('The loadbalancer for this unit is {}:{}'.format(
+ host['public-address'],
+ host['port']))
+```
diff --git a/keepalived/hooks/relations/public-address/__init__.py b/keepalived/hooks/relations/public-address/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/keepalived/hooks/relations/public-address/interface.yaml b/keepalived/hooks/relations/public-address/interface.yaml
new file mode 100644
index 0000000..c9849e4
--- /dev/null
+++ b/keepalived/hooks/relations/public-address/interface.yaml
@@ -0,0 +1,4 @@
+name: public-address
+summary: A basic interface to provide the public address for load balancers.
+version: 1
+repo: https://github.com/juju-solutions/interface-public-address.git
diff --git a/keepalived/hooks/relations/public-address/provides.py b/keepalived/hooks/relations/public-address/provides.py
new file mode 100644
index 0000000..09b9915
--- /dev/null
+++ b/keepalived/hooks/relations/public-address/provides.py
@@ -0,0 +1,60 @@
+import json
+
+from charms.reactive import toggle_flag
+from charms.reactive import Endpoint
+
+
+class PublicAdddressProvides(Endpoint):
+
+ def manage_flags(self):
+ toggle_flag(self.expand_name('{endpoint_name}.available'),
+ self.is_joined)
+
+ def set_address_port(self, address, port, relation=None):
+ if relation is None:
+ # no relation specified, so send the same data to everyone
+ relations = self.relations
+ else:
+ # specific relation given, so only send the data to that one
+ relations = [relation]
+ if type(address) is list:
+ # build 2 lists to zip together that are the same length
+ length = len(address)
+ p = [port] * length
+ combined = zip(address, p)
+ clients = [{'public-address': a, 'port': p}
+ for a, p in combined]
+ # for backwards compatibility, we just send a single entry
+ # and have an array of dictionaries in a field of that
+ # entry for the other entries.
+ first = clients.pop(0)
+ first['extended_data'] = json.dumps(clients)
+ for relation in relations:
+ relation.to_publish_raw.update(first)
+ else:
+ for relation in relations:
+ relation.to_publish_raw.update({'public-address': address,
+ 'port': port})
+
+ @property
+ def requests(self):
+ return [Request(rel) for rel in self.relations]
+
+
+class Request:
+ def __init__(self, rel):
+ self.rel = rel
+
+ @property
+ def application_name(self):
+ return self.rel.application_name
+
+ @property
+ def members(self):
+ return [(u.received_raw.get('ingress-address',
+ u.received_raw['private-address']),
+ u.received_raw.get('port', '6443'))
+ for u in self.rel.joined_units]
+
+ def set_address_port(self, address, port):
+ self.rel.endpoint.set_address_port(address, port, self.rel)
diff --git a/keepalived/hooks/relations/public-address/requires.py b/keepalived/hooks/relations/public-address/requires.py
new file mode 100644
index 0000000..467d129
--- /dev/null
+++ b/keepalived/hooks/relations/public-address/requires.py
@@ -0,0 +1,44 @@
+import json
+
+from charms.reactive import toggle_flag, Endpoint
+
+
+class PublicAddressRequires(Endpoint):
+ def manage_flags(self):
+ toggle_flag(self.expand_name('{endpoint_name}.available'),
+ len(self.get_addresses_ports()) > 0)
+
+ def set_backend_port(self, port):
+ """
+ Set the port that the backend service is listening on.
+
+ Defaults to 6443 if not set.
+ """
+ for rel in self.relations:
+ rel.to_publish_raw['port'] = str(port)
+
+ def get_addresses_ports(self):
+ '''Returns a list of available HTTP providers and their associated
+ public addresses and ports.
+
+ The return value is a list of dicts of the following form::
+ [
+ {
+ 'public-address': address_for_frontend,
+ 'port': port_for_frontend,
+ },
+ # ...
+ ]
+ '''
+ hosts = set()
+ for relation in self.relations:
+ for unit in relation.joined_units:
+ data = unit.received_raw
+ hosts.add((data['public-address'], data['port']))
+ if 'extended_data' in data:
+ for ed in json.loads(data['extended_data']):
+ hosts.add((ed['public-address'], ed['port']))
+
+ return [{'public-address': pa, 'port': p}
+ for pa, p in sorted(host for host in hosts
+ if None not in host)]
diff --git a/keepalived/hooks/start b/keepalived/hooks/start
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/start
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/stop b/keepalived/hooks/stop
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/stop
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/update-status b/keepalived/hooks/update-status
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/update-status
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/upgrade-charm b/keepalived/hooks/upgrade-charm
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/upgrade-charm
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/website-relation-broken b/keepalived/hooks/website-relation-broken
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/website-relation-broken
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/website-relation-changed b/keepalived/hooks/website-relation-changed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/website-relation-changed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/website-relation-created b/keepalived/hooks/website-relation-created
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/website-relation-created
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/website-relation-departed b/keepalived/hooks/website-relation-departed
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/website-relation-departed
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/hooks/website-relation-joined b/keepalived/hooks/website-relation-joined
new file mode 100755
index 0000000..9858c6b
--- /dev/null
+++ b/keepalived/hooks/website-relation-joined
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Load modules from $JUJU_CHARM_DIR/lib
+import sys
+sys.path.append('lib')
+
+from charms.layer import basic # noqa
+basic.bootstrap_charm_deps()
+
+from charmhelpers.core import hookenv # noqa
+hookenv.atstart(basic.init_config_states)
+hookenv.atexit(basic.clear_config_states)
+
+
+# This will load and run the appropriate @hook and other decorated
+# handlers from $JUJU_CHARM_DIR/reactive, $JUJU_CHARM_DIR/hooks/reactive,
+# and $JUJU_CHARM_DIR/hooks/relations.
+#
+# See https://jujucharms.com/docs/stable/authors-charm-building
+# for more information on this pattern.
+from charms.reactive import main # noqa
+main()
diff --git a/keepalived/icon.svg b/keepalived/icon.svg
new file mode 100644
index 0000000..7b82b7a
--- /dev/null
+++ b/keepalived/icon.svg
@@ -0,0 +1,293 @@
+
+
+
+
diff --git a/keepalived/layer.yaml b/keepalived/layer.yaml
new file mode 100644
index 0000000..5a202c9
--- /dev/null
+++ b/keepalived/layer.yaml
@@ -0,0 +1,19 @@
+"includes":
+- "layer:options"
+- "layer:basic"
+- "interface:juju-info"
+- "interface:public-address"
+- "interface:http"
+- "layer:status"
+"exclude": [".travis.yml", "tests", "tox.ini", "test-requirements.txt", "unit_tests"]
+"options":
+ "basic":
+ "use_venv": !!bool "true"
+ "packages": []
+ "python_packages": []
+ "include_system_packages": !!bool "false"
+ "status":
+ "patch-hookenv": !!bool "true"
+ "keepalived": {}
+"repo": "https://github.com/juju-solutions/charm-keepalived.git"
+"is": "keepalived"
diff --git a/keepalived/lib/._charms b/keepalived/lib/._charms
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/lib/._charms differ
diff --git a/keepalived/lib/charms/._layer b/keepalived/lib/charms/._layer
new file mode 100755
index 0000000..279bb65
Binary files /dev/null and b/keepalived/lib/charms/._layer differ
diff --git a/keepalived/lib/charms/layer/.___init__.py b/keepalived/lib/charms/layer/.___init__.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/lib/charms/layer/.___init__.py differ
diff --git a/keepalived/lib/charms/layer/._basic.py b/keepalived/lib/charms/layer/._basic.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/lib/charms/layer/._basic.py differ
diff --git a/keepalived/lib/charms/layer/._execd.py b/keepalived/lib/charms/layer/._execd.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/lib/charms/layer/._execd.py differ
diff --git a/keepalived/lib/charms/layer/._options.py b/keepalived/lib/charms/layer/._options.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/lib/charms/layer/._options.py differ
diff --git a/keepalived/lib/charms/layer/._status.py b/keepalived/lib/charms/layer/._status.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/lib/charms/layer/._status.py differ
diff --git a/keepalived/lib/charms/layer/__init__.py b/keepalived/lib/charms/layer/__init__.py
new file mode 100644
index 0000000..a8e0c64
--- /dev/null
+++ b/keepalived/lib/charms/layer/__init__.py
@@ -0,0 +1,60 @@
+import sys
+from importlib import import_module
+from pathlib import Path
+
+
+def import_layer_libs():
+ """
+ Ensure that all layer libraries are imported.
+
+ This makes it possible to do the following:
+
+ from charms import layer
+
+ layer.foo.do_foo_thing()
+
+ Note: This function must be called after bootstrap.
+ """
+ for module_file in Path('lib/charms/layer').glob('*'):
+ module_name = module_file.stem
+ if module_name in ('__init__', 'basic', 'execd') or not (
+ module_file.suffix == '.py' or module_file.is_dir()
+ ):
+ continue
+ import_module('charms.layer.{}'.format(module_name))
+
+
+# Terrible hack to support the old terrible interface.
+# Try to get people to call layer.options.get() instead so
+# that we can remove this garbage.
+# Cribbed from https://stackoverflow.com/a/48100440/4941864
+class OptionsBackwardsCompatibilityHack(sys.modules[__name__].__class__):
+ def __call__(self, section=None, layer_file=None):
+ if layer_file is None:
+ return self.get(section=section)
+ else:
+ return self.get(section=section,
+ layer_file=Path(layer_file))
+
+
+def patch_options_interface():
+ from charms.layer import options
+ if sys.version_info.minor >= 5:
+ options.__class__ = OptionsBackwardsCompatibilityHack
+ else:
+ # Py 3.4 doesn't support changing the __class__, so we have to do it
+ # another way. The last line is needed because we already have a
+ # reference that doesn't get updated with sys.modules.
+ name = options.__name__
+ hack = OptionsBackwardsCompatibilityHack(name)
+ hack.get = options.get
+ sys.modules[name] = hack
+ sys.modules[__name__].options = hack
+
+
+try:
+ patch_options_interface()
+except ImportError:
+ # This may fail if pyyaml hasn't been installed yet. But in that
+ # case, the bootstrap logic will try it again once it has.
+ pass
diff --git a/keepalived/lib/charms/layer/basic.py b/keepalived/lib/charms/layer/basic.py
new file mode 100644
index 0000000..7507203
--- /dev/null
+++ b/keepalived/lib/charms/layer/basic.py
@@ -0,0 +1,446 @@
+import os
+import sys
+import re
+import shutil
+from distutils.version import LooseVersion
+from pkg_resources import Requirement
+from glob import glob
+from subprocess import check_call, check_output, CalledProcessError
+from time import sleep
+
+from charms import layer
+from charms.layer.execd import execd_preinstall
+
+
+def _get_subprocess_env():
+    """Return a copy of the environment with LANG defaulted to C.UTF-8,
+    so subprocess output decodes consistently as UTF-8."""
+    env = os.environ.copy()
+    env['LANG'] = env.get('LANG', 'C.UTF-8')
+    return env
+
+
+def get_series():
+    """
+    Return series for a few known OS:es.
+    Tested as of 2019 november:
+    * centos6, centos7, rhel6.
+    * bionic
+
+    Returns e.g. "ubuntu18.04", "bionic" or "centos7"; "unknown" when
+    the OS cannot be identified.
+    """
+    series = ""
+
+    # Looking for content in /etc/os-release
+    # works for ubuntu + some centos
+    if os.path.isfile('/etc/os-release'):
+        d = {}
+        with open('/etc/os-release', 'r') as rel:
+            for l in rel:
+                # Skip blank and comment lines; split on the FIRST '='
+                # only, since values (e.g. PRETTY_NAME) may themselves
+                # contain '=' (the old split('=') raised ValueError).
+                if re.match(r'^\s*(#|$)', l):
+                    continue
+                k, v = l.split('=', 1)
+                d[k.strip()] = v.strip().replace('"', '')
+        # Some distros omit VERSION_ID (e.g. rolling releases); degrade
+        # gracefully instead of raising KeyError.
+        series = "{}{}".format(d.get('ID', ''), d.get('VERSION_ID', ''))
+
+    # Looking for content in /etc/redhat-release
+    # works for redhat enterprise systems
+    elif os.path.isfile('/etc/redhat-release'):
+        with open('/etc/redhat-release', 'r') as redhatlsb:
+            # CentOS Linux release 7.7.1908 (Core)
+            line = redhatlsb.readline()
+            release = int(line.split("release")[1].split()[0][0])
+            series = "centos" + str(release)
+
+    # Looking for content in /etc/lsb-release
+    # works for ubuntu
+    elif os.path.isfile('/etc/lsb-release'):
+        d = {}
+        with open('/etc/lsb-release', 'r') as lsb:
+            for l in lsb:
+                # skip blank lines; split on the first '=' only
+                if re.match(r'^\s*$', l):
+                    continue
+                k, v = l.split('=', 1)
+                d[k.strip()] = v.strip()
+        series = d.get('DISTRIB_CODENAME', 'unknown')
+
+    # This is what happens if we cant figure out the OS.
+    else:
+        series = "unknown"
+    return series
+
+
+def bootstrap_charm_deps():
+    """
+    Set up the base charm dependencies so that the reactive system can run.
+
+    Installs distro packages, builds the (optional) venv, installs the
+    bundled wheelhouse with pip, sets up the charm-env wrappers, and
+    finally re-execs the interpreter so the new libs are importable.
+    Idempotent via the wheelhouse/.bootstrapped sentinel file.
+    """
+    # execd must happen first, before any attempt to install packages or
+    # access the network, because sites use this hook to do bespoke
+    # configuration and install secrets so the rest of this bootstrap
+    # and the charm itself can actually succeed. This call does nothing
+    # unless the operator has created and populated $JUJU_CHARM_DIR/exec.d.
+    execd_preinstall()
+    # ensure that $JUJU_CHARM_DIR/bin is on the path, for helper scripts
+
+    series = get_series()
+
+    # OMG?! is build-essentials needed?
+    ubuntu_packages = ['python3-pip',
+                       'python3-setuptools',
+                       'python3-yaml',
+                       'python3-dev',
+                       'python3-wheel',
+                       'build-essential']
+
+    # I'm not going to "yum group info "Development Tools"
+    # omitting above madness
+    centos_packages = ['python3-pip',
+                       'python3-setuptools',
+                       'python3-devel',
+                       'python3-wheel']
+
+    packages_needed = []
+    if 'centos' in series:
+        packages_needed = centos_packages
+    else:
+        packages_needed = ubuntu_packages
+
+    charm_dir = os.environ['JUJU_CHARM_DIR']
+    os.environ['PATH'] += ':%s' % os.path.join(charm_dir, 'bin')
+    venv = os.path.abspath('../.venv')
+    vbin = os.path.join(venv, 'bin')
+    vpip = os.path.join(vbin, 'pip')
+    vpy = os.path.join(vbin, 'python')
+    # the hook name tells us whether this dispatch is a charm or series
+    # upgrade, which forces a full re-bootstrap
+    hook_name = os.path.basename(sys.argv[0])
+    is_bootstrapped = os.path.exists('wheelhouse/.bootstrapped')
+    is_charm_upgrade = hook_name == 'upgrade-charm'
+    is_series_upgrade = hook_name == 'post-series-upgrade'
+    is_post_upgrade = os.path.exists('wheelhouse/.upgraded')
+    is_upgrade = (not is_post_upgrade and
+                  (is_charm_upgrade or is_series_upgrade))
+    if is_bootstrapped and not is_upgrade:
+        # older subordinates might have downgraded charm-env, so we should
+        # restore it if necessary
+        install_or_update_charm_env()
+        activate_venv()
+        # the .upgrade file prevents us from getting stuck in a loop
+        # when re-execing to activate the venv; at this point, we've
+        # activated the venv, so it's safe to clear it
+        if is_post_upgrade:
+            os.unlink('wheelhouse/.upgraded')
+        return
+    if os.path.exists(venv):
+        try:
+            # focal installs or upgrades prior to PR 160 could leave the venv
+            # in a broken state which would prevent subsequent charm upgrades
+            _load_installed_versions(vpip)
+        except CalledProcessError:
+            is_broken_venv = True
+        else:
+            is_broken_venv = False
+        if is_upgrade or is_broken_venv:
+            # All upgrades should do a full clear of the venv, rather than
+            # just updating it, to bring in updates to Python itself
+            shutil.rmtree(venv)
+    if is_upgrade:
+        if os.path.exists('wheelhouse/.bootstrapped'):
+            os.unlink('wheelhouse/.bootstrapped')
+    # bootstrap wheelhouse
+    if os.path.exists('wheelhouse'):
+        # releases before eoan need the stricter easy_install lockdown
+        pre_eoan = series in ('ubuntu12.04', 'precise',
+                              'ubuntu14.04', 'trusty',
+                              'ubuntu16.04', 'xenial',
+                              'ubuntu18.04', 'bionic')
+        pydistutils_lines = [
+            "[easy_install]\n",
+            "find_links = file://{}/wheelhouse/\n".format(charm_dir),
+            "no_index=True\n",
+            "index_url=\n",  # deliberately nothing here; disables it.
+        ]
+        if pre_eoan:
+            pydistutils_lines.append("allow_hosts = ''\n")
+        with open('/root/.pydistutils.cfg', 'w') as fp:
+            # make sure that easy_install also only uses the wheelhouse
+            # (see https://github.com/pypa/pip/issues/410)
+            fp.writelines(pydistutils_lines)
+        if 'centos' in series:
+            yum_install(packages_needed)
+        else:
+            apt_install(packages_needed)
+        # deferred import: pyyaml (needed by options) may only exist now
+        from charms.layer import options
+        cfg = options.get('basic')
+        # include packages defined in layer.yaml
+        if 'centos' in series:
+            yum_install(cfg.get('packages', []))
+        else:
+            apt_install(cfg.get('packages', []))
+        # if we're using a venv, set it up
+        if cfg.get('use_venv'):
+            if not os.path.exists(venv):
+                series = get_series()
+                # virtualenv package name differs by series
+                if series in ('ubuntu12.04', 'precise',
+                              'ubuntu14.04', 'trusty'):
+                    apt_install(['python-virtualenv'])
+                elif 'centos' in series:
+                    yum_install(['python-virtualenv'])
+                else:
+                    apt_install(['virtualenv'])
+                cmd = ['virtualenv', '-ppython3', '--never-download', venv]
+                if cfg.get('include_system_packages'):
+                    cmd.append('--system-site-packages')
+                check_call(cmd, env=_get_subprocess_env())
+            os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
+            pip = vpip
+        else:
+            pip = 'pip3'
+            # save a copy of system pip to prevent `pip3 install -U pip`
+            # from changing it
+            if os.path.exists('/usr/bin/pip'):
+                shutil.copy2('/usr/bin/pip', '/usr/bin/pip.save')
+        # pip/setuptools must be upgraded first, before the rest of the
+        # wheelhouse is installed with them
+        pre_install_pkgs = ['pip', 'setuptools', 'setuptools-scm']
+        # we bundle these packages to work around bugs in older versions (such
+        # as https://github.com/pypa/pip/issues/56), but if the system already
+        # provided a newer version, downgrading it can cause other problems
+        _update_if_newer(pip, pre_install_pkgs)
+        # install the rest of the wheelhouse deps (extract the pkg names into
+        # a set so that we can ignore the pre-install packages and let pip
+        # choose the best version in case there are multiple from layer
+        # conflicts)
+        pkgs = _load_wheelhouse_versions().keys() - set(pre_install_pkgs)
+        reinstall_flag = '--force-reinstall'
+        if not cfg.get('use_venv', True) and pre_eoan:
+            reinstall_flag = '--ignore-installed'
+        check_call([pip, 'install', '-U', reinstall_flag, '--no-index',
+                    '--no-cache-dir', '-f', 'wheelhouse'] + list(pkgs),
+                   env=_get_subprocess_env())
+        # re-enable installation from pypi
+        os.remove('/root/.pydistutils.cfg')
+
+        # install pyyaml for centos7, since, unlike the ubuntu image, the
+        # default image for centos doesn't include pyyaml; see the discussion:
+        # https://discourse.jujucharms.com/t/charms-for-centos-lets-begin
+        if 'centos' in series:
+            check_call([pip, 'install', '-U', 'pyyaml'],
+                       env=_get_subprocess_env())
+
+        # install python packages from layer options
+        if cfg.get('python_packages'):
+            check_call([pip, 'install', '-U'] + cfg.get('python_packages'),
+                       env=_get_subprocess_env())
+        if not cfg.get('use_venv'):
+            # restore system pip to prevent `pip3 install -U pip`
+            # from changing it
+            if os.path.exists('/usr/bin/pip.save'):
+                shutil.copy2('/usr/bin/pip.save', '/usr/bin/pip')
+                os.remove('/usr/bin/pip.save')
+        # setup wrappers to ensure envs are used for scripts
+        install_or_update_charm_env()
+        for wrapper in ('charms.reactive', 'charms.reactive.sh',
+                        'chlp', 'layer_option'):
+            src = os.path.join('/usr/local/sbin', 'charm-env')
+            dst = os.path.join('/usr/local/sbin', wrapper)
+            if not os.path.exists(dst):
+                os.symlink(src, dst)
+        if cfg.get('use_venv'):
+            shutil.copy2('bin/layer_option', vbin)
+        else:
+            shutil.copy2('bin/layer_option', '/usr/local/bin/')
+            # re-link the charm copy to the wrapper in case charms
+            # call bin/layer_option directly (as was the old pattern)
+            os.remove('bin/layer_option')
+            os.symlink('/usr/local/sbin/layer_option', 'bin/layer_option')
+        # flag us as having already bootstrapped so we don't do it again
+        open('wheelhouse/.bootstrapped', 'w').close()
+        if is_upgrade:
+            # flag us as having already upgraded so we don't do it again
+            open('wheelhouse/.upgraded', 'w').close()
+        # Ensure that the newly bootstrapped libs are available.
+        # Note: this only seems to be an issue with namespace packages.
+        # Non-namespace-package libs (e.g., charmhelpers) are available
+        # without having to reload the interpreter. :/
+        reload_interpreter(vpy if cfg.get('use_venv') else sys.argv[0])
+
+
+def _load_installed_versions(pip):
+    """Return {project_name: LooseVersion} for packages pinned with '=='
+    in the output of ``pip freeze``.
+
+    Propagates CalledProcessError if pip itself fails; the caller uses
+    that to detect a broken venv.
+    """
+    pip_freeze = check_output([pip, 'freeze']).decode('utf8')
+    versions = {}
+    for pkg_ver in pip_freeze.splitlines():
+        try:
+            req = Requirement.parse(pkg_ver)
+        except ValueError:
+            # ignore lines that aren't valid requirement specifiers
+            # (e.g. editable installs, comments)
+            continue
+        versions.update({
+            req.project_name: LooseVersion(ver)
+            for op, ver in req.specs if op == '=='
+        })
+    return versions
+
+
+def _load_wheelhouse_versions():
+    """Return {package_name: LooseVersion} parsed from the archive file
+    names in wheelhouse/ (e.g. "pkg-1.2.3.tar.gz").
+
+    Underscores in names are normalized to hyphens to match pip's
+    canonical naming.
+    """
+    versions = {}
+    for wheel in glob('wheelhouse/*'):
+        # archive name is "<pkg>-<version>.<ext>"; split on the LAST '-'
+        pkg, ver = os.path.basename(wheel).rsplit('-', 1)
+        # nb: LooseVersion ignores the file extension
+        versions[pkg.replace('_', '-')] = LooseVersion(ver)
+    return versions
+
+
+def _update_if_newer(pip, pkgs):
+    """Install each of pkgs from the wheelhouse, but only if it is not
+    installed yet or the wheelhouse copy is newer than the installed one.
+
+    NOTE(review): assumes each pkg has a matching wheelhouse archive;
+    a pkg missing from the wheelhouse would raise KeyError — confirm
+    against callers (currently only the pre-install package list).
+    """
+    installed = _load_installed_versions(pip)
+    wheelhouse = _load_wheelhouse_versions()
+    for pkg in pkgs:
+        if pkg not in installed or wheelhouse[pkg] > installed[pkg]:
+            check_call([pip, 'install', '-U', '--no-index', '-f', 'wheelhouse',
+                        pkg], env=_get_subprocess_env())
+
+
+def install_or_update_charm_env():
+    """Copy bin/charm-env to /usr/local/sbin when the bundled copy is
+    newer than (or there is no) installed copy."""
+    # On Trusty python3-pkg-resources is not installed
+    try:
+        from pkg_resources import parse_version
+    except ImportError:
+        apt_install(['python3-pkg-resources'])
+        from pkg_resources import parse_version
+
+    # Treat a missing or broken binary as version 0.0.0 so the bundled
+    # copy always wins over it.
+    try:
+        installed_version = parse_version(
+            check_output(['/usr/local/sbin/charm-env',
+                          '--version']).decode('utf8'))
+    except (CalledProcessError, FileNotFoundError):
+        installed_version = parse_version('0.0.0')
+    try:
+        bundled_version = parse_version(
+            check_output(['bin/charm-env',
+                          '--version']).decode('utf8'))
+    except (CalledProcessError, FileNotFoundError):
+        bundled_version = parse_version('0.0.0')
+    if installed_version < bundled_version:
+        shutil.copy2('bin/charm-env', '/usr/local/sbin/')
+
+
+def activate_venv():
+    """
+    Activate the venv if enabled in ``layer.yaml``.
+
+    This is handled automatically for normal hooks, but actions might
+    need to invoke this manually, using something like:
+
+        # Load modules from $JUJU_CHARM_DIR/lib
+        import sys
+        sys.path.append('lib')
+
+        from charms.layer.basic import activate_venv
+        activate_venv()
+
+    This will ensure that modules installed in the charm's
+    virtual environment are available to the action.
+    """
+    from charms.layer import options
+    venv = os.path.abspath('../.venv')
+    vbin = os.path.join(venv, 'bin')
+    vpy = os.path.join(vbin, 'python')
+    use_venv = options.get('basic', 'use_venv')
+    if use_venv and '.venv' not in sys.executable:
+        # activate the venv
+        # nb: reload_interpreter re-execs this process under the venv
+        # python and does not return
+        os.environ['PATH'] = ':'.join([vbin, os.environ['PATH']])
+        reload_interpreter(vpy)
+    layer.patch_options_interface()
+    layer.import_layer_libs()
+
+
+def reload_interpreter(python):
+    """
+    Reload the python interpreter to ensure that all deps are available.
+
+    Newly installed modules in namespace packages sometimes seem to
+    not be picked up by Python 3.
+
+    Does not return: the current process image is replaced via execve
+    with the same argv and environment.
+    """
+    os.execve(python, [python] + list(sys.argv), os.environ)
+
+
+def apt_install(packages):
+    """
+    Install apt packages.
+
+    This ensures a consistent set of options that are often missed but
+    should really be set.
+
+    Retries up to three times, running ``apt-get update`` between
+    attempts, and re-raises the final CalledProcessError on failure.
+    """
+    # accept a single package name as well as a list
+    if isinstance(packages, (str, bytes)):
+        packages = [packages]
+
+    env = _get_subprocess_env()
+
+    if 'DEBIAN_FRONTEND' not in env:
+        env['DEBIAN_FRONTEND'] = 'noninteractive'
+
+    cmd = ['apt-get',
+           '--option=Dpkg::Options::=--force-confold',
+           '--assume-yes',
+           'install']
+    for attempt in range(3):
+        try:
+            check_call(cmd + packages, env=env)
+        except CalledProcessError:
+            if attempt == 2:  # third attempt
+                raise
+            try:
+                # sometimes apt-get update needs to be run
+                check_call(['apt-get', 'update'], env=env)
+            except CalledProcessError:
+                # sometimes it's a dpkg lock issue
+                pass
+            sleep(5)
+        else:
+            break
+
+
+def yum_install(packages):
+    """ Installs packages with yum.
+    This function largely mimics the apt_install function for consistency.
+
+    No-op for an empty package list.  Retries up to three times,
+    attempting a metadata/package refresh between attempts, then
+    re-raises the final CalledProcessError.
+    """
+    # guard clause replaces the old dead `else: pass` branch
+    if not packages:
+        return
+    # use the same UTF-8-safe environment as apt_install, for consistency
+    env = _get_subprocess_env()
+    cmd = ['yum', '-y', 'install']
+    for attempt in range(3):
+        try:
+            check_call(cmd + packages, env=env)
+        except CalledProcessError:
+            if attempt == 2:  # third attempt
+                raise
+            try:
+                # NOTE(review): `yum update` upgrades all installed
+                # packages; `yum makecache` would be the closer analog
+                # of `apt-get update`. Kept as-is (best-effort and
+                # non-fatal) to preserve existing behavior.
+                check_call(['yum', 'update'], env=env)
+            except CalledProcessError:
+                pass
+            sleep(5)
+        else:
+            break
+
+
+def init_config_states():
+    """Set reactive config.* flags from the current charm config.
+
+    For each option declared in config.yaml: sets config.changed and
+    config.changed.<opt> when the option changed this dispatch, and
+    toggles config.set.<opt> (truthy value) and config.default.<opt>
+    (value equals the declared default).
+    """
+    import yaml
+    from charmhelpers.core import hookenv
+    from charms.reactive import set_state
+    from charms.reactive import toggle_state
+    config = hookenv.config()
+    config_defaults = {}
+    config_defs = {}
+    config_yaml = os.path.join(hookenv.charm_dir(), 'config.yaml')
+    if os.path.exists(config_yaml):
+        with open(config_yaml) as fp:
+            config_defs = yaml.safe_load(fp).get('options', {})
+            config_defaults = {key: value.get('default')
+                               for key, value in config_defs.items()}
+    for opt in config_defs.keys():
+        if config.changed(opt):
+            set_state('config.changed')
+            set_state('config.changed.{}'.format(opt))
+        toggle_state('config.set.{}'.format(opt), config.get(opt))
+        toggle_state('config.default.{}'.format(opt),
+                     config.get(opt) == config_defaults[opt])
+
+
+def clear_config_states():
+    """Remove all config.* reactive flags set by init_config_states,
+    then flush the unit key-value store."""
+    from charmhelpers.core import hookenv, unitdata
+    from charms.reactive import remove_state
+    config = hookenv.config()
+    remove_state('config.changed')
+    for opt in config.keys():
+        remove_state('config.changed.{}'.format(opt))
+        remove_state('config.set.{}'.format(opt))
+        remove_state('config.default.{}'.format(opt))
+    unitdata.kv().flush()
diff --git a/keepalived/lib/charms/layer/execd.py b/keepalived/lib/charms/layer/execd.py
new file mode 100644
index 0000000..438d9a1
--- /dev/null
+++ b/keepalived/lib/charms/layer/execd.py
@@ -0,0 +1,114 @@
+# Copyright 2014-2016 Canonical Limited.
+#
+# This file is part of layer-basic, the reactive base layer for Juju.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see .
+
+# This module may only import from the Python standard library.
+import os
+import sys
+import subprocess
+import time
+
+'''
+execd/preinstall
+
+Read the layer-basic docs for more info on how to use this feature.
+https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#exec-d-support
+'''
+
+
+def default_execd_dir():
+    """Return the default exec.d directory: $JUJU_CHARM_DIR/exec.d."""
+    return os.path.join(os.environ['JUJU_CHARM_DIR'], 'exec.d')
+
+
+def execd_module_paths(execd_dir=None):
+    """Generate a list of full paths to modules within execd_dir.
+
+    Only directories are considered modules; yields nothing when
+    execd_dir does not exist.
+    """
+    if not execd_dir:
+        execd_dir = default_execd_dir()
+
+    if not os.path.exists(execd_dir):
+        return
+
+    for subpath in os.listdir(execd_dir):
+        module = os.path.join(execd_dir, subpath)
+        if os.path.isdir(module):
+            yield module
+
+
+def execd_submodule_paths(command, execd_dir=None):
+    """Generate a list of full paths to the specified command within exec_dir.
+
+    Yields only paths that are regular, executable files.
+    """
+    for module_path in execd_module_paths(execd_dir):
+        path = os.path.join(module_path, command)
+        if os.access(path, os.X_OK) and os.path.isfile(path):
+            yield path
+
+
+def execd_sentinel_path(submodule_path):
+    """Return the run-once sentinel path for a submodule:
+    <execd_dir>/.<module>_<submodule>.done
+    """
+    module_path = os.path.dirname(submodule_path)
+    execd_path = os.path.dirname(module_path)
+    module_name = os.path.basename(module_path)
+    submodule_name = os.path.basename(submodule_path)
+    return os.path.join(execd_path,
+                        '.{}_{}.done'.format(module_name, submodule_name))
+
+
+def execd_run(command, execd_dir=None, stop_on_error=True, stderr=None):
+ """Run command for each module within execd_dir which defines it."""
+ if stderr is None:
+ stderr = sys.stdout
+ for submodule_path in execd_submodule_paths(command, execd_dir):
+ # Only run each execd once. We cannot simply run them in the
+ # install hook, as potentially storage hooks are run before that.
+ # We cannot rely on them being idempotent.
+ sentinel = execd_sentinel_path(submodule_path)
+ if os.path.exists(sentinel):
+ continue
+
+ try:
+ subprocess.check_call([submodule_path], stderr=stderr,
+ universal_newlines=True)
+ with open(sentinel, 'w') as f:
+ f.write('{} ran successfully {}\n'.format(submodule_path,
+ time.ctime()))
+ f.write('Removing this file will cause it to be run again\n')
+ except subprocess.CalledProcessError as e:
+ # Logs get the details. We can't use juju-log, as the
+ # output may be substantial and exceed command line
+ # length limits.
+ print("ERROR ({}) running {}".format(e.returncode, e.cmd),
+ file=stderr)
+ print("STDOUT<"
+- "Valentin Boucher "
+"description": |
+ keepalived is used for monitoring real servers within a Linux Virtual
+ Server (LVS) cluster.
+"tags":
+- "networking"
+"series":
+- "focal"
+- "bionic"
+- "xenial"
+"requires":
+ "juju-info":
+ "scope": "container"
+ "interface": "juju-info"
+ "lb-sink":
+ "interface": "http"
+"provides":
+ "loadbalancer":
+ "interface": "public-address"
+ "website":
+ "interface": "http"
+"subordinate": !!bool "true"
diff --git a/keepalived/pydocmd.yml b/keepalived/pydocmd.yml
new file mode 100644
index 0000000..ab3b2ef
--- /dev/null
+++ b/keepalived/pydocmd.yml
@@ -0,0 +1,16 @@
+site_name: 'Status Management Layer'
+
+generate:
+ - status.md:
+ - charms.layer.status.WorkloadState
+ - charms.layer.status.maintenance
+ - charms.layer.status.maint
+ - charms.layer.status.blocked
+ - charms.layer.status.waiting
+ - charms.layer.status.active
+ - charms.layer.status.status_set
+
+pages:
+ - Status Management Layer: status.md
+
+gens_dir: docs
diff --git a/keepalived/reactive/.___init__.py b/keepalived/reactive/.___init__.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/reactive/.___init__.py differ
diff --git a/keepalived/reactive/._keepalived.py b/keepalived/reactive/._keepalived.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/reactive/._keepalived.py differ
diff --git a/keepalived/reactive/._status.py b/keepalived/reactive/._status.py
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/reactive/._status.py differ
diff --git a/keepalived/reactive/__init__.py b/keepalived/reactive/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/keepalived/reactive/keepalived.py b/keepalived/reactive/keepalived.py
new file mode 100644
index 0000000..fcc6c64
--- /dev/null
+++ b/keepalived/reactive/keepalived.py
@@ -0,0 +1,124 @@
+import os
+import re
+from subprocess import check_output
+
+from charms.reactive import set_flag, when, when_not, hook
+from charms.reactive.flags import clear_flag
+
+
+from charmhelpers.core.templating import render
+from charmhelpers.fetch import apt_update, apt_install
+from charmhelpers.core.hookenv import config, is_leader
+from charmhelpers.core.host import service_restart
+from charmhelpers.core.host import service_pause, service_resume
+
+from charms.layer import status
+
+
+SYSCTL_FILE = os.path.join(os.sep, 'etc', 'sysctl.d', '50-keepalived.conf')
+KEEPALIVED_CONFIG_FILE = os.path.join(os.sep, 'etc', 'keepalived',
+ 'keepalived.conf')
+
+
+@when_not('keepalived.package.installed')
+def install_keepalived_package():
+    ''' Install keepalived package '''
+    status.maintenance('Installing keepalived')
+
+    # net-tools provides the `route` command used by
+    # default_route_interface()
+    apt_update(fatal=True)
+    apt_install(['keepalived', 'net-tools'], fatal=True)
+
+    set_flag('keepalived.package.installed')
+
+
+def default_route_interface():
+    ''' Returns the network interface of the system's default route
+
+    Parses the output of net-tools' `route`; the interface ("Iface")
+    is the last column of a default-route line.  Returns None when no
+    default route is present.
+    '''
+    default_interface = None
+    cmd = ['route']
+    output = check_output(cmd).decode('utf8')
+    for line in output.split('\n'):
+        # Match only lines whose Destination column is "default", rather
+        # than any line merely containing the word somewhere.
+        if line.startswith('default'):
+            # split() collapses runs of whitespace; the previous
+            # split(' ')[-1] returned '' on trailing spaces or broke on
+            # the space-padded columns that `route` emits.
+            default_interface = line.split()[-1]
+    return default_interface
+
+
+@when('keepalived.package.installed')
+@when_not('keepalived.started')
+@when_not('upgrade.series.in-progress')
+def configure_keepalived_service():
+    ''' Set up the keepalived service
+
+    Renders keepalived.conf and a sysctl drop-in from charm config,
+    restarts the services, and sets the keepalived.started flag.
+    Blocks until virtual_ip is configured.
+    '''
+
+    virtual_ip = config().get('virtual_ip')
+    if virtual_ip == "":
+        status.blocked('Please configure virtual ips')
+        return
+
+    # fall back to the default-route interface when none is configured
+    network_interface = config().get('network_interface')
+    if network_interface == "":
+        network_interface = default_route_interface()
+
+    context = {'is_leader': is_leader(),
+               'virtual_ip': virtual_ip,
+               'network_interface': network_interface,
+               'router_id': config().get('router_id'),
+               'service_port': config().get('port'),
+               'healthcheck_interval': config().get('healthcheck_interval'),
+               }
+    render(source='keepalived.conf',
+           target=KEEPALIVED_CONFIG_FILE,
+           context=context,
+           perms=0o644)
+    service_restart('keepalived')
+
+    # net.ipv4.ip_nonlocal_bind lets services bind the VIP even while it
+    # is not assigned to this unit
+    render(source='50-keepalived.conf',
+           target=SYSCTL_FILE,
+           context={'sysctl': {'net.ipv4.ip_nonlocal_bind': 1}},
+           perms=0o644)
+    # restarting procps re-applies the sysctl drop-ins
+    service_restart('procps')
+
+    status.active('VIP ready')
+    set_flag('keepalived.started')
+
+
+@when('config.changed')
+def reconfigure():
+    """Force configure_keepalived_service to run again after any
+    config change."""
+    clear_flag('keepalived.started')
+
+
+@when('website.available', 'keepalived.started')
+def website_available(website):
+    """Publish the VIP (or configured vip_hostname) and port on the
+    website (http) relation."""
+    # strip any CIDR suffix from the configured virtual_ip
+    ipaddr = re.split('/', config()['virtual_ip'])[0]
+    vip_hostname = config()['vip_hostname']
+    hostname = vip_hostname if vip_hostname else ipaddr
+    # a port to export over a relation
+    # TODO: this could be more tightly coupled with the actual
+    # service via a relation
+    port = config()['port']
+    website.configure(port=port, private_address=ipaddr, hostname=hostname)
+
+
+@when('loadbalancer.available', 'keepalived.started')
+def loadbalancer_available(loadbalancer):
+    ''' Send the virtual IP '''
+    # strip any CIDR suffix from the configured virtual_ip
+    ipaddr = re.split('/', config()['virtual_ip'])[0]
+    port = config()['port']
+    loadbalancer.set_address_port(ipaddr, port)
+
+
+@hook('upgrade-charm')
+def upgrade_charm():
+    """Re-render the keepalived config after a charm upgrade."""
+    clear_flag('keepalived.started')
+
+
+@hook('pre-series-upgrade')
+def pre_series_upgrade():
+    """Pause services and block status while the OS series upgrade
+    is in progress."""
+    service_pause('keepalived')
+    service_pause('procps')
+    status.blocked('Series upgrade in progress')
+
+
+@hook('post-series-upgrade')
+def post_series_upgrade():
+    """Resume services after the OS series upgrade and trigger a
+    reconfiguration."""
+    service_resume('keepalived')
+    service_resume('procps')
+    clear_flag('keepalived.started')
diff --git a/keepalived/reactive/status.py b/keepalived/reactive/status.py
new file mode 100644
index 0000000..2f33f3f
--- /dev/null
+++ b/keepalived/reactive/status.py
@@ -0,0 +1,4 @@
+from charms import layer
+
+
+# Initialize the status layer at handler-discovery time so that other
+# handlers can call layer.status.* during this dispatch.
+layer.status._initialize()
diff --git a/keepalived/requirements.txt b/keepalived/requirements.txt
new file mode 100644
index 0000000..55543d9
--- /dev/null
+++ b/keepalived/requirements.txt
@@ -0,0 +1,3 @@
+mock
+flake8
+pytest
diff --git a/keepalived/revision b/keepalived/revision
new file mode 100644
index 0000000..c227083
--- /dev/null
+++ b/keepalived/revision
@@ -0,0 +1 @@
+0
\ No newline at end of file
diff --git a/keepalived/templates/._50-keepalived.conf b/keepalived/templates/._50-keepalived.conf
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/templates/._50-keepalived.conf differ
diff --git a/keepalived/templates/._keepalived.conf b/keepalived/templates/._keepalived.conf
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/templates/._keepalived.conf differ
diff --git a/keepalived/templates/50-keepalived.conf b/keepalived/templates/50-keepalived.conf
new file mode 100644
index 0000000..6b023e1
--- /dev/null
+++ b/keepalived/templates/50-keepalived.conf
@@ -0,0 +1,3 @@
+{% for key in sysctl %}
+{{ key }}={{ sysctl[key] }}
+{% endfor %}
\ No newline at end of file
diff --git a/keepalived/templates/keepalived.conf b/keepalived/templates/keepalived.conf
new file mode 100644
index 0000000..db80787
--- /dev/null
+++ b/keepalived/templates/keepalived.conf
@@ -0,0 +1,22 @@
+vrrp_script chk_svc_port {
+ # returns 1 if connection is refused
+ script "/bin/bash -c '=18.1,<19.0
+# pin Jinja2, PyYAML and MarkupSafe to the last versions supporting python 3.5
+# for trusty
+Jinja2<=2.10.1
+PyYAML<=5.2
+MarkupSafe<2.0.0
+setuptools<42
+setuptools-scm<=1.17.0
+charmhelpers>=0.4.0,<1.0.0
+charms.reactive>=0.1.0,<2.0.0
+wheel<0.34
+# pin netaddr to avoid pulling importlib-resources
+netaddr<=0.7.19
+
diff --git a/keepalived/wheelhouse/._Jinja2-2.10.1.tar.gz b/keepalived/wheelhouse/._Jinja2-2.10.1.tar.gz
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._Jinja2-2.10.1.tar.gz differ
diff --git a/keepalived/wheelhouse/._MarkupSafe-1.1.1.tar.gz b/keepalived/wheelhouse/._MarkupSafe-1.1.1.tar.gz
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._MarkupSafe-1.1.1.tar.gz differ
diff --git a/keepalived/wheelhouse/._PyYAML-5.2.tar.gz b/keepalived/wheelhouse/._PyYAML-5.2.tar.gz
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._PyYAML-5.2.tar.gz differ
diff --git a/keepalived/wheelhouse/._Tempita-0.5.2.tar.gz b/keepalived/wheelhouse/._Tempita-0.5.2.tar.gz
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._Tempita-0.5.2.tar.gz differ
diff --git a/keepalived/wheelhouse/._charmhelpers-0.20.22.tar.gz b/keepalived/wheelhouse/._charmhelpers-0.20.22.tar.gz
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._charmhelpers-0.20.22.tar.gz differ
diff --git a/keepalived/wheelhouse/._charms.reactive-1.4.1.tar.gz b/keepalived/wheelhouse/._charms.reactive-1.4.1.tar.gz
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._charms.reactive-1.4.1.tar.gz differ
diff --git a/keepalived/wheelhouse/._netaddr-0.7.19.tar.gz b/keepalived/wheelhouse/._netaddr-0.7.19.tar.gz
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._netaddr-0.7.19.tar.gz differ
diff --git a/keepalived/wheelhouse/._pbr-5.6.0.tar.gz b/keepalived/wheelhouse/._pbr-5.6.0.tar.gz
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._pbr-5.6.0.tar.gz differ
diff --git a/keepalived/wheelhouse/._pip-18.1.tar.gz b/keepalived/wheelhouse/._pip-18.1.tar.gz
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._pip-18.1.tar.gz differ
diff --git a/keepalived/wheelhouse/._pyaml-20.4.0.tar.gz b/keepalived/wheelhouse/._pyaml-20.4.0.tar.gz
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._pyaml-20.4.0.tar.gz differ
diff --git a/keepalived/wheelhouse/._setuptools-41.6.0.zip b/keepalived/wheelhouse/._setuptools-41.6.0.zip
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._setuptools-41.6.0.zip differ
diff --git a/keepalived/wheelhouse/._setuptools_scm-1.17.0.tar.gz b/keepalived/wheelhouse/._setuptools_scm-1.17.0.tar.gz
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._setuptools_scm-1.17.0.tar.gz differ
diff --git a/keepalived/wheelhouse/._six-1.16.0.tar.gz b/keepalived/wheelhouse/._six-1.16.0.tar.gz
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._six-1.16.0.tar.gz differ
diff --git a/keepalived/wheelhouse/._wheel-0.33.6.tar.gz b/keepalived/wheelhouse/._wheel-0.33.6.tar.gz
new file mode 100644
index 0000000..279bb65
Binary files /dev/null and b/keepalived/wheelhouse/._wheel-0.33.6.tar.gz differ
diff --git a/keepalived/wheelhouse/Jinja2-2.10.1.tar.gz b/keepalived/wheelhouse/Jinja2-2.10.1.tar.gz
new file mode 100644
index 0000000..ffd1054
Binary files /dev/null and b/keepalived/wheelhouse/Jinja2-2.10.1.tar.gz differ
diff --git a/keepalived/wheelhouse/MarkupSafe-1.1.1.tar.gz b/keepalived/wheelhouse/MarkupSafe-1.1.1.tar.gz
new file mode 100644
index 0000000..a6dad8e
Binary files /dev/null and b/keepalived/wheelhouse/MarkupSafe-1.1.1.tar.gz differ
diff --git a/keepalived/wheelhouse/PyYAML-5.2.tar.gz b/keepalived/wheelhouse/PyYAML-5.2.tar.gz
new file mode 100644
index 0000000..666d12a
Binary files /dev/null and b/keepalived/wheelhouse/PyYAML-5.2.tar.gz differ
diff --git a/keepalived/wheelhouse/Tempita-0.5.2.tar.gz b/keepalived/wheelhouse/Tempita-0.5.2.tar.gz
new file mode 100644
index 0000000..755befc
Binary files /dev/null and b/keepalived/wheelhouse/Tempita-0.5.2.tar.gz differ
diff --git a/keepalived/wheelhouse/charmhelpers-0.20.22.tar.gz b/keepalived/wheelhouse/charmhelpers-0.20.22.tar.gz
new file mode 100644
index 0000000..bd5d222
Binary files /dev/null and b/keepalived/wheelhouse/charmhelpers-0.20.22.tar.gz differ
diff --git a/keepalived/wheelhouse/charms.reactive-1.4.1.tar.gz b/keepalived/wheelhouse/charms.reactive-1.4.1.tar.gz
new file mode 100644
index 0000000..03bc1fe
Binary files /dev/null and b/keepalived/wheelhouse/charms.reactive-1.4.1.tar.gz differ
diff --git a/keepalived/wheelhouse/netaddr-0.7.19.tar.gz b/keepalived/wheelhouse/netaddr-0.7.19.tar.gz
new file mode 100644
index 0000000..cc31d9d
Binary files /dev/null and b/keepalived/wheelhouse/netaddr-0.7.19.tar.gz differ
diff --git a/keepalived/wheelhouse/pbr-5.6.0.tar.gz b/keepalived/wheelhouse/pbr-5.6.0.tar.gz
new file mode 100644
index 0000000..0d5c965
Binary files /dev/null and b/keepalived/wheelhouse/pbr-5.6.0.tar.gz differ
diff --git a/keepalived/wheelhouse/pip-18.1.tar.gz b/keepalived/wheelhouse/pip-18.1.tar.gz
new file mode 100644
index 0000000..a18192d
Binary files /dev/null and b/keepalived/wheelhouse/pip-18.1.tar.gz differ
diff --git a/keepalived/wheelhouse/pyaml-20.4.0.tar.gz b/keepalived/wheelhouse/pyaml-20.4.0.tar.gz
new file mode 100644
index 0000000..0d5fd76
Binary files /dev/null and b/keepalived/wheelhouse/pyaml-20.4.0.tar.gz differ
diff --git a/keepalived/wheelhouse/setuptools-41.6.0.zip b/keepalived/wheelhouse/setuptools-41.6.0.zip
new file mode 100644
index 0000000..3345759
Binary files /dev/null and b/keepalived/wheelhouse/setuptools-41.6.0.zip differ
diff --git a/keepalived/wheelhouse/setuptools_scm-1.17.0.tar.gz b/keepalived/wheelhouse/setuptools_scm-1.17.0.tar.gz
new file mode 100644
index 0000000..43b16c7
Binary files /dev/null and b/keepalived/wheelhouse/setuptools_scm-1.17.0.tar.gz differ
diff --git a/keepalived/wheelhouse/six-1.16.0.tar.gz b/keepalived/wheelhouse/six-1.16.0.tar.gz
new file mode 100644
index 0000000..5bf3a27
Binary files /dev/null and b/keepalived/wheelhouse/six-1.16.0.tar.gz differ
diff --git a/keepalived/wheelhouse/wheel-0.33.6.tar.gz b/keepalived/wheelhouse/wheel-0.33.6.tar.gz
new file mode 100644
index 0000000..c922c4e
Binary files /dev/null and b/keepalived/wheelhouse/wheel-0.33.6.tar.gz differ
diff --git a/kubeapi-load-balancer/lib/charms/layer/kubernetes_common.py b/kubeapi-load-balancer/lib/charms/layer/kubernetes_common.py
index 0ac309f..cf19813 100644
--- a/kubeapi-load-balancer/lib/charms/layer/kubernetes_common.py
+++ b/kubeapi-load-balancer/lib/charms/layer/kubernetes_common.py
@@ -159,7 +159,17 @@ def get_ingress_address(endpoint_name):
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
- addresses = network_info['ingress-addresses']
+ excluded_ips = []
+ excluded_interfaces = ["vxlan", "kube", "wg", "docker", "cali", "virbr", "cni", "flannel"]
+ for addr in network_info["bind-addresses"]:
+ for prefix in excluded_interfaces:
+ if addr["interface-name"].startswith(prefix):
+ for ip in addr["addresses"]:
+ excluded_ips.append(ip["value"])
+
+ ingress_addresses = network_info["ingress-addresses"]
+ network_info["ingress-addresses"] = [ip for ip in ingress_addresses if ip not in excluded_ips]
+ addresses = network_info["ingress-addresses"]
# Need to prefer non-fan IP addresses due to various issues, e.g.
# https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997
diff --git a/kubernetes-master/lib/charms/layer/kubernetes_common.py b/kubernetes-master/lib/charms/layer/kubernetes_common.py
index 0ac309f..cf19813 100644
--- a/kubernetes-master/lib/charms/layer/kubernetes_common.py
+++ b/kubernetes-master/lib/charms/layer/kubernetes_common.py
@@ -159,7 +159,17 @@ def get_ingress_address(endpoint_name):
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
- addresses = network_info['ingress-addresses']
+ excluded_ips = []
+ excluded_interfaces = ["vxlan", "kube", "wg", "docker", "cali", "virbr", "cni", "flannel"]
+ for addr in network_info["bind-addresses"]:
+ for prefix in excluded_interfaces:
+ if addr["interface-name"].startswith(prefix):
+ for ip in addr["addresses"]:
+ excluded_ips.append(ip["value"])
+
+ ingress_addresses = network_info["ingress-addresses"]
+ network_info["ingress-addresses"] = [ip for ip in ingress_addresses if ip not in excluded_ips]
+ addresses = network_info["ingress-addresses"]
# Need to prefer non-fan IP addresses due to various issues, e.g.
# https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997
diff --git a/kubernetes-worker/lib/charms/layer/kubernetes_common.py b/kubernetes-worker/lib/charms/layer/kubernetes_common.py
index 0ac309f..a357e54 100644
--- a/kubernetes-worker/lib/charms/layer/kubernetes_common.py
+++ b/kubernetes-worker/lib/charms/layer/kubernetes_common.py
@@ -151,6 +151,8 @@ def calculate_and_store_resource_checksums(checksum_prefix, snap_resources):
def get_ingress_address(endpoint_name):
try:
network_info = hookenv.network_get(endpoint_name)
+ with open('/tmp/ares-netinfo.txt', 'w') as f:
+ print(network_info, file=f)
except NotImplementedError:
network_info = {}
@@ -159,7 +161,17 @@ def get_ingress_address(endpoint_name):
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
- addresses = network_info['ingress-addresses']
+ excluded_ips = []
+ excluded_interfaces = ["vxlan", "kube", "wg", "docker", "cali", "virbr", "cni", "flannel"]
+ for addr in network_info["bind-addresses"]:
+ for prefix in excluded_interfaces:
+ if addr["interface-name"].startswith(prefix):
+ for ip in addr["addresses"]:
+ excluded_ips.append(ip["value"])
+
+ ingress_addresses = network_info["ingress-addresses"]
+ network_info["ingress-addresses"] = [ip for ip in ingress_addresses if ip not in excluded_ips]
+ addresses = network_info["ingress-addresses"]
# Need to prefer non-fan IP addresses due to various issues, e.g.
# https://bugs.launchpad.net/charm-gcp-integrator/+bug/1822997
diff --git a/resources/calico/calico-arm64.gz b/resources/calico/calico-arm64.gz
new file mode 100644
index 0000000..545b6ea
Binary files /dev/null and b/resources/calico/calico-arm64.gz differ
diff --git a/resources/calico/calico-node-image.gz b/resources/calico/calico-node-image.gz
new file mode 100644
index 0000000..e69de29
diff --git a/resources/calico/calico-upgrade-arm64.gz b/resources/calico/calico-upgrade-arm64.gz
new file mode 100644
index 0000000..34f3ccd
Binary files /dev/null and b/resources/calico/calico-upgrade-arm64.gz differ
diff --git a/resources/calico/calico-upgrade.gz b/resources/calico/calico-upgrade.gz
new file mode 100644
index 0000000..d647ee0
Binary files /dev/null and b/resources/calico/calico-upgrade.gz differ
diff --git a/resources/calico/calico.gz b/resources/calico/calico.gz
new file mode 100644
index 0000000..42112bc
Binary files /dev/null and b/resources/calico/calico.gz differ
diff --git a/resources/core/core.assert b/resources/core/core.assert
new file mode 100644
index 0000000..b043266
--- /dev/null
+++ b/resources/core/core.assert
@@ -0,0 +1,82 @@
+type: account-key
+authority-id: canonical
+revision: 2
+public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+account-id: canonical
+name: store
+since: 2016-04-01T00:00:00.0Z
+body-length: 717
+sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswHNiEB9Lxk
+
+AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9ji
+qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482R
+vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJi
+UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuKL
+Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQGA
+o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl9
+VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9F
+2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7ant
+Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIcG
+vUvV7RjVzv17ut0AEQEAAQ==
+
+AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsMV
+WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/bP
+nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiLg
+3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kLe
+eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrYm
+inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ19
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+k
+rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWEY
+aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQI
+6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nOu
+haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpFo
+yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O96
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi7
+skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PKW
+CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjdeu
+ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OFq
+qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqRy
+IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3tr
+oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k
+
+type: snap-declaration
+authority-id: canonical
+series: 16
+snap-id: 99T7MUlRhtI3U0QFgl5mXXESAiSwt776
+publisher-id: canonical
+snap-name: core
+timestamp: 2016-09-28T17:33:53.740774Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCV+v/AQAA2tsQAFHkr0inUpkgz3HpLoRfZ47iiRg76DniS/LQhxufoOeALzmB
+TaOU91po50zhIvU8gYAUaFOvW948xGm6I+7HllGXpb5HswNMPdAhjMOuKJON0De3jvwuYoFcDncY
+AHM9EVfl2QdWSlVMD09mBEoVVI9jRCSoHBtqLjDwk7woJ+VEGjiL6X1XRuelV8IYKAVR6RywoyTd
+hlPGMk/qmIjzgvMBw3Bh/bS+15P4Iv0ylCllLbizlsVd7x/W1S5v4BMo5zrWClg96aZ0HrWnbxqC
+yctpWh8XsWXViqhF5hvlzIszaXfqtSLRVXrGXGxEu3SmJYcae+CLan0jZDU7G7Qn+z5JhqxEuAph
+K1TW8hDHjjDNhNO47dpMEp4WfNvhC0gu0l1jhpo/ylp4vIW6Wb2SOFxkz+rCjh9OiV+1zuIzrv/X
+BROWHSDEQb9XwzkoLeroUUVsLqBuB0sVdreqp9VPdic6z07uMLVTQzJs/Od4zdnBlIxF1hVsumxZ
+VJeJ63toKOJ7rhzB7tz1dwSLJ+iiX2o3evbc/5LP5hMZfyH5QIh9X7EtEIYc8PT5+nOLyUMmUJSY
+m9c8a7+qltq3FjT+OiTsONpKedKr/rnV1Nppf9g+VVGy6kL+Z0CiI8FFOsGm1G84CLPQ9sLbuBnr
+pl/dxHMTsng80JoGcppES6cz0gLI
+
+type: snap-revision
+authority-id: canonical
+snap-sha3-384: t8HFCORrRN15XYnlpmHc5ga52a1P0ktB6L5V9Ad2uXlo3MJ-7IbpaYBqFrH2bmJL
+developer-id: canonical
+provenance: global-upload
+snap-id: 99T7MUlRhtI3U0QFgl5mXXESAiSwt776
+snap-revision: 14946
+snap-size: 122433536
+timestamp: 2023-02-22T16:54:34.647289Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCY/ZIygAAww0QAGTeNR6qNAR/1FQR0C/FLj9Pj8k0cCHYeZMnjPTH9Z1EDZ0g
+POuwvOz1iTsvJWHWpFx4EBvKFsXuXdpRe51EtXEYKB/j2eLV3smFg3ATKdHfy3KgTzwP8Pid9gdw
+uTvbhi5IKQqDk5jnsrymBdKzgzKH8eL7fT6T0H00At56z6GCQ6TswbtRSkGNpv74UAJg+gJxF1ah
+vbFMVX2HYifzzxLyVQG1XRV2ZqUx9CXOKc5sCa1ePpO6e3YyxnPINoRnDm7nejWGPDQ8qROd+qaF
+vPEQ+pdwETlV3kLXVISpt9JceK0tsU7+a84lj+neZEBSkj4bpq7uQKlCQI6ojpJrYwGBtGNc91Yc
+ninfLUzeTUks9annxxCDSuZlcec8l9wCEA4wEBExLJDmZJu45f/4YcaBfTnRuLMdnWWy6YCY6lgH
+MFUaoUrEtLjEN0sk6L/0ai/gG2/35/G9+C7eEYsIaB9zPGPppsw9ymcC5DxSRr8gxFJKxZwtCYku
+/jbiK01dcfMxk1E2VbuGZfIn8C51rDcCl+Yx23alMcl1kMZaF6Wo/Wx6HRpbNfA0uxlt6BTdVcYv
+hoohvZafeCSX7mY9U8lcjd1uC5iI/1Z0ajUDJ81fAJvY8YgziXU+YYx+vnijMOhZrFu60sMHzdDA
+zsxS4IaRTLyl1p+G5mP9h7nIOH9N
diff --git a/resources/core/core.snap b/resources/core/core.snap
new file mode 100644
index 0000000..3603310
Binary files /dev/null and b/resources/core/core.snap differ
diff --git a/resources/easyrsa/easyrsa.tgz b/resources/easyrsa/easyrsa.tgz
new file mode 100644
index 0000000..65b2ebb
Binary files /dev/null and b/resources/easyrsa/easyrsa.tgz differ
diff --git a/resources/etcd/etcd.snap b/resources/etcd/etcd.snap
new file mode 100644
index 0000000..6a9e108
Binary files /dev/null and b/resources/etcd/etcd.snap differ
diff --git a/resources/etcd/snapshot.gz b/resources/etcd/snapshot.gz
new file mode 100644
index 0000000..9d7d2ef
Binary files /dev/null and b/resources/etcd/snapshot.gz differ
diff --git a/resources/kubernetes-master/cdk-addons.assert b/resources/kubernetes-master/cdk-addons.assert
new file mode 100644
index 0000000..00a3ad6
--- /dev/null
+++ b/resources/kubernetes-master/cdk-addons.assert
@@ -0,0 +1,81 @@
+type: account-key
+authority-id: canonical
+revision: 2
+public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+account-id: canonical
+name: store
+since: 2016-04-01T00:00:00.0Z
+body-length: 717
+sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswHNiEB9Lxk
+
+AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9ji
+qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482R
+vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJi
+UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuKL
+Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQGA
+o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl9
+VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9F
+2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7ant
+Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIcG
+vUvV7RjVzv17ut0AEQEAAQ==
+
+AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsMV
+WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/bP
+nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiLg
+3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kLe
+eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrYm
+inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ19
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+k
+rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWEY
+aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQI
+6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nOu
+haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpFo
+yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O96
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi7
+skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PKW
+CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjdeu
+ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OFq
+qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqRy
+IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3tr
+oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k
+
+type: snap-declaration
+authority-id: canonical
+series: 16
+snap-id: eeEICnDiB3tj94pKQM1BwtEcvVgHe95n
+publisher-id: canonical
+snap-name: cdk-addons
+timestamp: 2017-03-16T20:47:12.297604Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCWMr50AAAjksQAMHHWjb4gHOWT6613zWxvCRx7jMxQjGzlMlRKQ/PdxIEWnC9
+//cJE2UKpN13WUx6jbmHVWk+nLbHuc+YjsuDUDbZiZb5M+NIlWmjnkQCUk4JfkAdxUPaG+2+HLRw
+BZ1Q3+BXTfSQM+fdY4uEB6nmigUPP1WY2a6rsJSPkB7bFa3Sz5cE/+fnIV5YC0j/x2jdylX+CmmB
+Uz6zfisVgCcOmTKGHKnrj/w8T1e3ogYYv8LuaNwMFlqX+ERjObaNKyyyGVzzKprjbla12DrIJf1d
+3bY3x1WmnWSJYPVBIqhw22yo2xF7z0dNDdlcOr6rxyRxn8/DKo3pb2GvDRW+EkoSZ1bQJWz5487v
+GcqoraIwROsHy2kpqzFLyinY8jmPq9ziiBURA/YTAy7E262Byp6yRtaymMgwvMiiB+qVZt9cARtu
+UXtn88e85fSOPQJ9enG2HNV1bH3MTx9+Au2ZaWc+0bt7woODab+R85cgP+p+P2PUsm+tRlpTc3fD
+onVQ7Iw+Bs3ja9ZSReVQtUOH2zg3om2xfKQsoz+uHOWiM4Ty9JxNGP4a8XVpk85Shak+/ZOxWEGg
+25Zh8LS6nadAqh6onO2lL+4tJ5ULYFTi8Ao+bsw6owW1vDTT/J1X6QHgp585rbbAavdJGtM2fhd9
+DXhKCB+T2gvZE0bu52FmpMBrF1rW
+
+type: snap-revision
+authority-id: canonical
+snap-sha3-384: Eav7FLvCvOaFA_iZ_1fH85rHg425CUWuR1eyPBGXWyW8pcq6HTcI9CQXxW8zhL-V
+developer-id: canonical
+snap-id: eeEICnDiB3tj94pKQM1BwtEcvVgHe95n
+snap-revision: 8352
+snap-size: 10821632
+timestamp: 2021-10-21T10:51:23.297559Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCYXFGKwAAKOkQALrkkx/kqR+WTYw9sVon7zdqvtKVFGy2GhoU5ihXE76E1u8o
+xCkt8VBP0tX0r+21oTEMoQYAy9ufWCrROeo+7DHh1cONIhbck5shvrKcLRKYNBTfUSPpjItUqGNw
+DJF3NSQMnQjLXQ4xnjr9076roEEP2H0mPhoz9CtXvQY0X0vASlPCMHSfjcuCt/yOTG9HIz8py/1H
+MjCTMTsZr7xi6dOFfZO8ncNH5Q0ui6WRJib6AV6wpEEvt02IiJuyM9r5XqHDkFSR+I4VmN7PomHm
+KfLS9C9eLPKfCSMjnOTnH/33+LFMLg7SV+C1bHJ9fllR+mvqQ69zhufcPr2ygTV78sk9ThLT9Jdm
+EBCrxWPc7WNN+Y1pIS1ocNmss4nvP5Q+qEoRXLqaj7PFo0tHWYOuYWFELW6AC32kONxpgi+RgyM1
+Lx41IiXAydS589B365Rvfo8tsWyLOdkJq94cK76C+EKQ2/Re6GlxUMWIzOAqHqjBYEzUkYz08Stv
+qC5jc9/d/yWenKsBfmRNm8vHYxYMLPxWO1xKWR1RK3UOQ+exs1Hz8tG7kL3tF5FVsluajsXq4NZ1
+Zfvj6PVuayS4YXuPKCTYUckVG0t4w7dmymH/SkxfukdUcGtDgnpUj54u9sHCDWLqPiUmLVCYMkYQ
+g3Z3Gr9zVB7/ZaMCUsoB5qtYFbsF
diff --git a/resources/kubernetes-master/cdk-addons.snap b/resources/kubernetes-master/cdk-addons.snap
new file mode 100644
index 0000000..c4bd5e3
Binary files /dev/null and b/resources/kubernetes-master/cdk-addons.snap differ
diff --git a/resources/kubernetes-master/cni-amd64.tgz b/resources/kubernetes-master/cni-amd64.tgz
new file mode 100644
index 0000000..d144b41
Binary files /dev/null and b/resources/kubernetes-master/cni-amd64.tgz differ
diff --git a/resources/kubernetes-master/kube-apiserver.assert b/resources/kubernetes-master/kube-apiserver.assert
new file mode 100644
index 0000000..fae220f
--- /dev/null
+++ b/resources/kubernetes-master/kube-apiserver.assert
@@ -0,0 +1,81 @@
+type: account-key
+authority-id: canonical
+revision: 2
+public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+account-id: canonical
+name: store
+since: 2016-04-01T00:00:00.0Z
+body-length: 717
+sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswHNiEB9Lxk
+
+AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9ji
+qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482R
+vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJi
+UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuKL
+Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQGA
+o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl9
+VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9F
+2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7ant
+Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIcG
+vUvV7RjVzv17ut0AEQEAAQ==
+
+AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsMV
+WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/bP
+nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiLg
+3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kLe
+eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrYm
+inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ19
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+k
+rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWEY
+aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQI
+6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nOu
+haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpFo
+yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O96
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi7
+skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PKW
+CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjdeu
+ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OFq
+qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqRy
+IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3tr
+oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k
+
+type: snap-declaration
+authority-id: canonical
+series: 16
+snap-id: KMZLusdClmUyLXAjjcI4sVnpjk1kM653
+publisher-id: canonical
+snap-name: kube-apiserver
+timestamp: 2017-03-16T20:43:03.407408Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCWMr41wAAHK0QAHG68PnU7yLieNMoPqiNSK13us0NTmYCZYTJmoSYvoMvphUB
+x+5kXiMCD+cYu3dYRV/wagPK+qltr4Vv9i74YIwFkmZjDHzVIakvrp/B3e0z3kq+BVAgmS4G3S+2
+SGa6RpxfpXdoCqNP7vZOHwAOTvnhC+kGhX7M3O7k+HPAbce8kGDNIkjbKh/bKLg5KqFfn9SDxA3e
+Bp408V4rZtunpcLw+nyMt6p7n/WeZqoajveqXo71/xBvCo+jNSTEnqh57nkqcmkg3USLNBC+Tk+r
+rfOMD31Vz+vrdCCiG9daRmV0+XDK3Kc2eRflEfKTEzjPFFvLzv9dsKIOVK7IQozyYKJZvWJwJhgG
+xpdi5F8irQlPgtCB/PMnNw3Bhqx6YI44V6u/Qg6ZqCYE3I/EHXZx07iGvigOIjnGuuUAPmbjW1zM
+RU0YmfDXGpFEt84aNtdwNEKpdQIPPbiJuOI/p5tZtw84zN20nM8LG2JYVL+DDn+J4wOxETUZPz07
+YTE/symbKU2qYg38zu+jW25RXLtJAT9ZLuT/APxcFEemP1QAai9qmkPHb0PxKLPBk8Kcox06SwkR
+lveA+G+lE2PSYJF7eiUP7oiVKkvVquioXnQacC8vakRJlf6dvGP6aDxlPSBVspYPEJRcHQRtqIX0
+khfzffQAZliVJOFppKTzPWBKpB6H
+
+type: snap-revision
+authority-id: canonical
+snap-sha3-384: kI_OD4cC5YdTDjejItlAFsYvef9osMgyUEV-Mq5442eVqewsjlOltcEyR-ELt-Dm
+developer-id: canonical
+snap-id: KMZLusdClmUyLXAjjcI4sVnpjk1kM653
+snap-revision: 2795
+snap-size: 22396928
+timestamp: 2022-06-17T22:01:25.407704Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCYqz5tQAAcHIQAKciSKXOCfX93SO0bAIMJhXm3QfmFmGOamryYQdYYqJ0IsEw
+IKH3FZzPK/pldgSyURwz+pMhPZjCHxzWLV0DIiabgOoWgGO1Gjh5FipX36B2N7HQYQfFzTBTkosg
+pjHLLc44VWxupNGgicfTZkeqvTFSA3a7bn6YUGux9M9nj38Ypr7zPkbXWLWYLzQvax2Vgzo0zGSR
+OTK2sZymsZC8AbRCX5GLqTpK6QN57kQJoq5v2txuKMOvUWHM7h0IRFCKyK9pSnX4rKvs2jnPG4WL
+SkAhgLOBYAnucn6Mra8TwHjObxvXgqGYxsMC2ZpCUXomAzd7YZka0qXnkspKj/a64BbJtzdkDWb4
+jr13Mavqt/tScWArf/YT7DpiWrcW6Zp6TLa4XAVlite2DuORcLQVycOCDlnpIuDt4b5cDZqmqjjC
+j3Qz3WA00MBZEhn6nzCWaDjsXGUkXVMyG2SKSgPImhv3rOcl3EojctlRhnkMkSGDQKklNRdZyzfc
+0Hngc7jP0AOr06WaIq94Nir+hFJmstk1G16i1H5n9CGaWOxwn4Dcj5rbpuDwEZlXVfa8TR/DvHmv
+5OdRWS38zggY8sdn7Rr+5iNe9WqmxZCg4uZ9dTwTldfgzTD2/lp8iMzELsO/7tbY+CsqxxUA/Tgg
+BRAXog6eEip+u+xDYUKJSpgCU0Q3
diff --git a/resources/kubernetes-master/kube-apiserver.snap b/resources/kubernetes-master/kube-apiserver.snap
new file mode 100644
index 0000000..4700a87
Binary files /dev/null and b/resources/kubernetes-master/kube-apiserver.snap differ
diff --git a/resources/kubernetes-master/kube-controller-manager.assert b/resources/kubernetes-master/kube-controller-manager.assert
new file mode 100644
index 0000000..22c677c
--- /dev/null
+++ b/resources/kubernetes-master/kube-controller-manager.assert
@@ -0,0 +1,81 @@
+type: account-key
+authority-id: canonical
+revision: 2
+public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+account-id: canonical
+name: store
+since: 2016-04-01T00:00:00.0Z
+body-length: 717
+sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswHNiEB9Lxk
+
+AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9ji
+qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482R
+vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJi
+UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuKL
+Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQGA
+o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl9
+VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9F
+2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7ant
+Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIcG
+vUvV7RjVzv17ut0AEQEAAQ==
+
+AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsMV
+WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/bP
+nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiLg
+3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kLe
+eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrYm
+inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ19
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+k
+rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWEY
+aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQI
+6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nOu
+haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpFo
+yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O96
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi7
+skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PKW
+CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjdeu
+ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OFq
+qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqRy
+IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3tr
+oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k
+
+type: snap-declaration
+authority-id: canonical
+series: 16
+snap-id: Bsjue8Sm6L3zWS05BvjopQeEfTKlLHEF
+publisher-id: canonical
+snap-name: kube-controller-manager
+timestamp: 2017-03-16T20:44:30.611095Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCWMr5LgAARm8QAHv5OnmPVA6GGOzlhAkTcozyACSVPAO9Efc614k0qA0uuhyr
+NV29aiv/PnpICM6l9NYo4//I1QrLXnvFN/4uNA98/uKqXxkdDUw7869Uq/qj24iRJN3dNwuxPJ1m
+cj/rCvfZZqukm5qUQ/8IpPFSVaxzb4OlLsgag38ZzIF3zoOx65R/xz9k1pmecPA5VlV4L48qRbE2
+PryvMTn7QaVeha5tskP95rUHtjNW2raDSqy3jJt2smPbebUoLHGIpTpTBUCKXYCMaMC6KuMYfVYU
+68C5yXvt5OFoWFkJ+3F17ZwZoDQRp6X9qwu11tMRVlkC4Gajh1yAfTEckzWe1Egk9Y4RvvkWvoFL
+/XCjwoe14EHo2Ttyon9EdDDAIkg6AJkSsYCa0muZO6x189a7f2Su8tOZ3vKmjcfp3+er+KiFyrLF
+Q7d9JEXtS5/81LisE3rC5YCZ9OUm8MxzwODjMfrlY3mxLbvgWnBIbjueob10aVTEFYrGqTmyAcr5
+gxv0axVCYOArRj3ApVZ17vBagv47Q/5mYNL51ZUMt4iYGqziC9fHJ7/yDdu5T57GIKC9l2xaSaQI
+84P8PlPIx6NpksBf7iV13+g0fuRlRKemnZYJyBk4IvnBXPnAXBn1dZYFZMItwqWLEVuseITlMoKK
+/8/nbUYh/dcxbQrj+LKpCZrbd1iK
+
+type: snap-revision
+authority-id: canonical
+snap-sha3-384: jKz1gn4kzUcUYvZJrDTsMO6oIdcLuxQ7uWHL09Z3IB8izIqmSm-fxjz1uuGGYDMk
+developer-id: canonical
+snap-id: Bsjue8Sm6L3zWS05BvjopQeEfTKlLHEF
+snap-revision: 2588
+snap-size: 66985984
+timestamp: 2022-06-17T22:10:30.230859Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCYqz71gAAmsYQALyIXAF4CPKsAsL2fIwbnVtwJnn9YQI+ujrBqCyKKjuryY01
+u8K1e5r8uCid/lLSUBChCJWsyqB/QY9ff6wu+RmL6sKuUHbcZWsKaAUuLoKy3suu+Qmd20BZy4Fy
+u5kbkpbpV7evQ+OZtKvMq4W+QUssHm8E5+Koot3oFIF9coMDKZeibZaDS+bnKiT4furunpMd4qHI
+PZIdjUNeks+I2KsSflv6tYoILZKeKu1CZ3gaeuxvYQauS+UJyB4ujGbf/tXcGlkVtI6ShVRhBW3t
+YXg8wO2MtzBS+fJQZjV6o51wOFTb7C/siGMu9P6pOXZ5FRmm5I7VuS570ahUbKgICy3kZy1RuJIA
+R2ED9vPzRV0V2aszyIFkYckKukioKMJd6cLCGlV7BWapLPJTLEV6WyTGN4exuw6pxyKGViy3wn/l
+WerR33RF6hyoTj5VzainGvNqjoMgDipUXOn5rsVq/PzEYfWLf0dnznoXU8ngZJ8rrI0htRz0MC3W
+j1grmcaAQJXeM/dCDzkUsp5e/gh3y7tYfOX14+zitIxJTwNuLed7uTN+8knjPi3/C491NDO30e1Y
+xXU+LPV5tk3KNwZtTJYWJgDlyeO4KfMdk9czED3uWy6gZ8C2m1hwcSnZN2Y3hJA6yYJzrwk/Lqws
+5F9mZy2xe8O5b3YmP0fjoFdSNABP
diff --git a/resources/kubernetes-master/kube-controller-manager.snap b/resources/kubernetes-master/kube-controller-manager.snap
new file mode 100644
index 0000000..292297e
Binary files /dev/null and b/resources/kubernetes-master/kube-controller-manager.snap differ
diff --git a/resources/kubernetes-master/kube-proxy.assert b/resources/kubernetes-master/kube-proxy.assert
new file mode 100644
index 0000000..314ca64
--- /dev/null
+++ b/resources/kubernetes-master/kube-proxy.assert
@@ -0,0 +1,81 @@
+type: account-key
+authority-id: canonical
+revision: 2
+public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+account-id: canonical
+name: store
+since: 2016-04-01T00:00:00.0Z
+body-length: 717
+sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswHNiEB9Lxk
+
+AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9ji
+qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482R
+vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJi
+UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuKL
+Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQGA
+o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl9
+VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9F
+2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7ant
+Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIcG
+vUvV7RjVzv17ut0AEQEAAQ==
+
+AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsMV
+WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/bP
+nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiLg
+3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kLe
+eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrYm
+inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ19
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+k
+rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWEY
+aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQI
+6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nOu
+haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpFo
+yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O96
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi7
+skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PKW
+CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjdeu
+ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OFq
+qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqRy
+IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3tr
+oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k
+
+type: snap-declaration
+authority-id: canonical
+series: 16
+snap-id: 0euyIM60JIN6PlZgyL5WB4OaDncn7cBm
+publisher-id: canonical
+snap-name: kube-proxy
+timestamp: 2017-03-16T20:46:43.058596Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCWMr5swAA0+oQAGpXrtCU4YohVrughPYYf0bEkiQZOq/hSADhjT6yUnARuYtf
+GTI4mICS+MAds+ZLEMxrvthnAffbgTKunZXPUT2rXrxPh6eKy3Gf15654iza/9cs/qtON2H2jcpz
+5O0dGD2AoHAv617HBMrmeAJZjt7kL0+RmtiZQzwX/aT1IHh2AADMtLCpJhAONqu91iAemDDD+4BS
+SL9GiiljvpToeyRpeO3rk6sJxe5TsjFfuylfkUwoZwEoAS+0LLK8hGhIMA72ypUfDFRswJGaKFVu
+Ezrrs7AXK8I1YfDUZsDdkxb9cLyZsJ0Kf3pdLZrBrUWo8norHyeo79m9Llch/+tNbFy1N/s06JhE
+6Woq7Y44YPEGsmiJYHM3uTuVlr+sIRYsgcalZ0/VaVQiVCUF5P1c9IoQ3aVdWQh0xzge6g/Qdvm9
+5bq9rW3wIdriW5fA6vOo4uRZNMHPKHVZ2MhIeMYmGN5l4fnD+SMIdGe+UFu2H9Uiw6CyBnBgGLlP
+aIeWyufMrx6P6wq9H0vVDDLGErzMsLdT1WS9UQYfD8nhVSzW7/qrdoh0XEV458pm9ikHYX8NRZR4
+yzXSbeOM+KTvQCdv3YWdxpqRwkrlFRT1qYgPheOOnw9/7S12JUEerueai05FaC2qEVQuZ4AZssR8
+Juy13TkBttSmEISdBLDwsp6KQSxG
+
+type: snap-revision
+authority-id: canonical
+snap-sha3-384: uratLlJbO9KNPMXlxaqTCQ7PRMoW50oteyqI0t8IsBkjEmc61qEzqWMxzH2EkBKx
+developer-id: canonical
+snap-id: 0euyIM60JIN6PlZgyL5WB4OaDncn7cBm
+snap-revision: 2538
+snap-size: 9596928
+timestamp: 2022-06-17T22:14:17.254760Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCYqz8uQAAHTwQACpwU+Tr6hSjU2h2CBrmAROJD/AV14NtdptKp3f3pfoD8E16
+PXduAh7OI7y2sulsAOZ/4eAzEUghMl/wBrdYZiEtYB9DgUG1wsyR8ILmQmGRiIkgFXzHnfsazm6W
+qTIHeRZgPysx1Qmpx+RPSmhyZ93z7zVYbgxf/WxDQXmcl/r9tVTXeX3o8+vZ38xi6DJ4OiAqRFyL
+OdgwCCkHN9Vbqw3yM8MQOnVopRQzuw98pl96b59jcZq31eSzpIfvppQrhmnv0ZsnMt2UpjAQnZVy
+YKLanodoXoeaD0YIZLX6zi5Vt4eFmymcUlqYEirRbUO5mAG3m40zVq6odYnn8EpvjEDWNNxrrQmK
+0uSgerYsCr6to3pLc5Ij2FUIJ44G2o9bwCEChqczAr+Gubv1zauw1rMKnwUl8OxAIWA9RXk2xfuR
+FxMPO3ABB8KpgzJSuOxD2rUrTJFdE3ReKLT9usDkWsLtEax/0R58HvBjcyxblMEorl6Aw6ftJ5P+
+eSeYgWC/VzJNanBo+m/TKQpVtuupEWg/QL1SczYYtzne+abV0SPg8ysfF/hFRR7A1ILe+kJ7cXgU
+siFGTAY31mXdKFrOyg9uk+DuHbVmxS1W4VAG4GMl+/jC7fS+vJW8/IhL41zwos/lgwCNdF6ONOT0
+wMuQskKr+VIrykMrpHU4rN2OtDqR
diff --git a/resources/kubernetes-master/kube-proxy.snap b/resources/kubernetes-master/kube-proxy.snap
new file mode 100644
index 0000000..2eff05e
Binary files /dev/null and b/resources/kubernetes-master/kube-proxy.snap differ
diff --git a/resources/kubernetes-master/kube-scheduler.assert b/resources/kubernetes-master/kube-scheduler.assert
new file mode 100644
index 0000000..bb963ee
--- /dev/null
+++ b/resources/kubernetes-master/kube-scheduler.assert
@@ -0,0 +1,81 @@
+type: account-key
+authority-id: canonical
+revision: 2
+public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+account-id: canonical
+name: store
+since: 2016-04-01T00:00:00.0Z
+body-length: 717
+sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswHNiEB9Lxk
+
+AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9ji
+qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482R
+vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJi
+UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuKL
+Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQGA
+o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl9
+VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9F
+2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7ant
+Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIcG
+vUvV7RjVzv17ut0AEQEAAQ==
+
+AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsMV
+WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/bP
+nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiLg
+3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kLe
+eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrYm
+inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ19
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+k
+rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWEY
+aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQI
+6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nOu
+haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpFo
+yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O96
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi7
+skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PKW
+CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjdeu
+ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OFq
+qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqRy
+IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3tr
+oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k
+
+type: snap-declaration
+authority-id: canonical
+series: 16
+snap-id: 9OWeQaa6pKxtFCKrBWmF6OaZknlHFCL8
+publisher-id: canonical
+snap-name: kube-scheduler
+timestamp: 2017-03-16T20:45:31.788525Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCWMr5awAANNUQAB2wkevkLcmxF11EIf5GqWtcOyR2hkX7kyqGh/IBqB3qqtMl
+cjH6HCH2fZx2qoX7STl6Ks2RBag6utYFj/U1YbYfGM8AQ0lyyHK9yB60Qxr9RnvyGSvH8rAq7Jh/
+GdZNxYeUsz9HFMGARCTS6cBbpAvQDQaWFRE9mPya1XzwIy+nq8GZMr5VIW5KdmfoYhtCnkNOAI78
+2DndYKT9lO32PaAoEAlMU9HDmKZ+MaN/0fEgBO3z0xLXp+RqB5pZ4J/eAlc78N4yYRtg9cx6Zt3A
+XZBpXopERZPOy28fB4xKmk2N0nepXuvC9X6U1rVRxTLptGe1yz7luM/hiZsQb41rNWIxJsiiOnXd
+O2FFhysRIiEeA8dm1cRYfWXfsHqjgOjQWrxQFEimC9GdB/767S0Cpi0a9iEUot2YZtn7DcCwLh/f
+N/vSVUgetdrxXw5luayPaEOZ8ry9ZRfT6labgiVHQUsiqZWAutNnVYMFx56wgjtihF3RZRW+oPNO
+DVug04IzCWANHj+aRwTWgP9rHyBmj+bNyXmt6k7blCGKUNzUm+ET0qWaZEtabtADIDXguiV7dAZb
+ZLgpFqKrfA/fv2TOxM218WVmNymkibOaeMLsbfVoYDNQdrTBLp9wbwPikq8OGkpU7Ucpucp9Gr59
+b/Z+eBbpgtaHnkAcrC1O9//MZOd9
+
+type: snap-revision
+authority-id: canonical
+snap-sha3-384: f1G3MaDfCRkZu6XXXL_LmktnlHj16UDu6ccNVhjX6voQrP_zPyJdryYWcfcmKyx2
+developer-id: canonical
+snap-id: 9OWeQaa6pKxtFCKrBWmF6OaZknlHFCL8
+snap-revision: 2507
+snap-size: 10448896
+timestamp: 2022-06-17T22:20:39.928170Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCYqz+NwAAjWgQAJg3CbfQlFd3KvsYkNnEIE1t5QW8/5a/gm0LDO2SEwnxL7Hr
+Y6c+/3Cya3QBhv7/37LtbHmMLo7WxRL69RJI7hcR0/IW+eFE12sNJSDQOOFb0NvTUtKa1c7/NCOG
+goNl8ok3P1o2Bq+3QXhP51NTCD0t7m57iCZI5sEytmPEVVJ/3J5jjz5DxqweJm4gB27JQnALuI3+
++s6P/TUy07d+jIDekSOmbBmoFUrawgQeLwlr46p81Pqi9/Tlb5SOYAcFUttLyhkPXkMiD1btZ7W4
+dy3uX8ecue/W8nW4y3E/YaNf6+aSLTwwCUPEhBkQOMfW+HeXCtumhsfeKguvfloIGgxfxvVgably
+AkbTyQXQ57gtlV4CKZCgUgSKPBZ2VwCkEqyQg9gxCcRKnTBdg0/oO6i1K5HxHGSt66p0uLBWpbeV
+HjjZcRxwzwkkp9+J0FL68dU58Cr5tD88fArfTMvsqDS7IrXwLJi2cQPvHk5JLVnhvwlpIPzDrYos
+8tgJmrV6wK3Z6MK1jRLqMZgeJAqW7FG6w6c2mlZAcQ+XHViK4+ZQtk+Vprok9qqLcz3mb4BPE2sr
+cUznb4GLCkGAfsq9JS0+sTIXMP4338dkrJ0oNP+i0xc7FV5J1VI4k0apOxfvKgKUSug05iaAipV1
+1dH62HS9GcTxYV3UFuJ2uiqvSNPz
diff --git a/resources/kubernetes-master/kube-scheduler.snap b/resources/kubernetes-master/kube-scheduler.snap
new file mode 100644
index 0000000..d684943
Binary files /dev/null and b/resources/kubernetes-master/kube-scheduler.snap differ
diff --git a/resources/kubernetes-master/kubectl.assert b/resources/kubernetes-master/kubectl.assert
new file mode 100644
index 0000000..a55fdbd
--- /dev/null
+++ b/resources/kubernetes-master/kubectl.assert
@@ -0,0 +1,81 @@
+type: account-key
+authority-id: canonical
+revision: 2
+public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+account-id: canonical
+name: store
+since: 2016-04-01T00:00:00.0Z
+body-length: 717
+sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswHNiEB9Lxk
+
+AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9ji
+qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482R
+vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJi
+UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuKL
+Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQGA
+o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl9
+VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9F
+2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7ant
+Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIcG
+vUvV7RjVzv17ut0AEQEAAQ==
+
+AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsMV
+WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/bP
+nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiLg
+3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kLe
+eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrYm
+inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ19
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+k
+rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWEY
+aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQI
+6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nOu
+haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpFo
+yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O96
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi7
+skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PKW
+CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjdeu
+ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OFq
+qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqRy
+IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3tr
+oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k
+
+type: snap-declaration
+authority-id: canonical
+series: 16
+snap-id: ZgG2URycDgvxSVskfoZxn44uaRMw0iwe
+publisher-id: canonical
+snap-name: kubectl
+timestamp: 2017-03-16T20:34:10.224081Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCWMr2wgAACBYQAIeP5oNBIYNJfa00iFAlwV7DA89rFIZ+eyOA82o+vREA6b81
+XBTGkBs8GgLQ2W2VyzHeAewIfRggVzWeW99VQfh/5hGDkN4YZCFbVJvHPKU2D9lNs8LorBsiC7k0
+6GXC402V1HV6rIRvNxCTrMpUtuzM+XEeXZXN9QuCbOGbf+vCUTRpt9czlcXSS5Ull3ulsqSxwCv3
+uwyi2EB+ZhPXLQv4i3NnIKXRZd/Up+H2rA15K65l9YBLsnnFuf0dUwtVm6Ziq7c3AY8P4r8PR/4x
+eKw2JD5QNpWA603YMOGrtqwdJS6S0g44+QayogaZy9JddyOZ5ksNa6YCjYe5l7yUUP7RSqjAQr3w
+uBvoSXiaBLDpeluXbTcKzl2h/S9Wrsx8zP+5Hz+iCxH0ODZ/yNcELNFahSpxfeF1t+fv6JpGWVTT
+rCVhWsJsZn+F2NTxlTtVrEGIbyZOyeO2f3WSxpxhA7L37SmiI0944gMoVFw9r1NGIeibSVvObHXw
+/LFJrO2M3rr7dVTQ7LZ+iGwf0K2of1Wg8fME+Zpz7vgB523dFDZu3FAbpD5seEHUbnsqUdacUgCJ
+TiAJVQk5AOvucKzcE3P5ZB3UsxxjrBRQrineIHFmtxdkzv/OpDLQEaNwxA4cNMMJ7vDNZHP46CgJ
+TIvp3RCWXI7RoGZKNKYtOVdJUqV8
+
+type: snap-revision
+authority-id: canonical
+snap-sha3-384: ZRMaQxovy-4gmRytMJBreDGUZzENUqE_ZfJ4Q6DEM0l8OYtpXG6RBbqvCf8zRM5N
+developer-id: canonical
+snap-id: ZgG2URycDgvxSVskfoZxn44uaRMw0iwe
+snap-revision: 2461
+snap-size: 10805248
+timestamp: 2022-06-17T22:27:23.754903Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCYqz/ywAAFo0QALDAaU4L251R3Ucl+GhpbwLoMR/4dkLCFrQEG6oBgGMMwV0f
+IT2jzVzlGr+VNDbqoHXuw33tmEc46ohMvsrZclWvwcg4b0ukLZjqxlXEeh4Izf7HA7m9TNk/UsVB
+ovdskcK/czBxU60oOS1Oic00iy/9cca1NnONN+BRl1B/0jRSDk4RzO00OO7SHzwdxPmgExIkg8A7
+2O64ozgLPdtLZlReaaMZ+4aFnxYbiFv85AAE+m+rAfzZq6JUUUe7aQ2sBOifF4CHsCY7pfXHK0O2
+ceF6+rPAgreVIFJyoCjpdJuGyyXeZd6mkqhGIBncEpbZRN+DURiT4/0U3HN8K4UeSYin9i+Ke3fb
+/ol/Pp/ETwdXPEr8A0iQytJ3i+S5vkDpNlCF3yURqAm4elSK5KTV46cjPFPjwbG+pznthDjjXD4n
+QtLqTCREg2lE4Dn42S5p9EZxYq8uaPsxYfpkje2Z8TonDtUwsSNZcveRqADtT94dflr8/qSvnuJW
+8iUbRlWk5ixlOo9xjv2luyoWPq/n9CVvrp9K/R7VAq4CbqNmW0QhGPOi+C0vqWVgWaAHXFx4YEiW
+cidKRjFag+FaWJiCU3eKJRREwwfotZJaMDWzAxnLAGn9CaIjjipd9f8qc8kXx6rwOmHIAiJ9KxKC
+RwitihaavnDz/2VehK78t1XWmKUr
diff --git a/resources/kubernetes-master/kubectl.snap b/resources/kubernetes-master/kubectl.snap
new file mode 100644
index 0000000..6236a26
Binary files /dev/null and b/resources/kubernetes-master/kubectl.snap differ
diff --git a/resources/kubernetes-worker/cni-amd64.tgz b/resources/kubernetes-worker/cni-amd64.tgz
new file mode 100644
index 0000000..4dac67d
Binary files /dev/null and b/resources/kubernetes-worker/cni-amd64.tgz differ
diff --git a/resources/kubernetes-worker/kube-proxy.assert b/resources/kubernetes-worker/kube-proxy.assert
new file mode 100644
index 0000000..314ca64
--- /dev/null
+++ b/resources/kubernetes-worker/kube-proxy.assert
@@ -0,0 +1,81 @@
+type: account-key
+authority-id: canonical
+revision: 2
+public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+account-id: canonical
+name: store
+since: 2016-04-01T00:00:00.0Z
+body-length: 717
+sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswHNiEB9Lxk
+
+AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9ji
+qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482R
+vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJi
+UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuKL
+Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQGA
+o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl9
+VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9F
+2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7ant
+Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIcG
+vUvV7RjVzv17ut0AEQEAAQ==
+
+AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsMV
+WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/bP
+nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiLg
+3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kLe
+eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrYm
+inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ19
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+k
+rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWEY
+aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQI
+6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nOu
+haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpFo
+yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O96
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi7
+skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PKW
+CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjdeu
+ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OFq
+qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqRy
+IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3tr
+oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k
+
+type: snap-declaration
+authority-id: canonical
+series: 16
+snap-id: 0euyIM60JIN6PlZgyL5WB4OaDncn7cBm
+publisher-id: canonical
+snap-name: kube-proxy
+timestamp: 2017-03-16T20:46:43.058596Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCWMr5swAA0+oQAGpXrtCU4YohVrughPYYf0bEkiQZOq/hSADhjT6yUnARuYtf
+GTI4mICS+MAds+ZLEMxrvthnAffbgTKunZXPUT2rXrxPh6eKy3Gf15654iza/9cs/qtON2H2jcpz
+5O0dGD2AoHAv617HBMrmeAJZjt7kL0+RmtiZQzwX/aT1IHh2AADMtLCpJhAONqu91iAemDDD+4BS
+SL9GiiljvpToeyRpeO3rk6sJxe5TsjFfuylfkUwoZwEoAS+0LLK8hGhIMA72ypUfDFRswJGaKFVu
+Ezrrs7AXK8I1YfDUZsDdkxb9cLyZsJ0Kf3pdLZrBrUWo8norHyeo79m9Llch/+tNbFy1N/s06JhE
+6Woq7Y44YPEGsmiJYHM3uTuVlr+sIRYsgcalZ0/VaVQiVCUF5P1c9IoQ3aVdWQh0xzge6g/Qdvm9
+5bq9rW3wIdriW5fA6vOo4uRZNMHPKHVZ2MhIeMYmGN5l4fnD+SMIdGe+UFu2H9Uiw6CyBnBgGLlP
+aIeWyufMrx6P6wq9H0vVDDLGErzMsLdT1WS9UQYfD8nhVSzW7/qrdoh0XEV458pm9ikHYX8NRZR4
+yzXSbeOM+KTvQCdv3YWdxpqRwkrlFRT1qYgPheOOnw9/7S12JUEerueai05FaC2qEVQuZ4AZssR8
+Juy13TkBttSmEISdBLDwsp6KQSxG
+
+type: snap-revision
+authority-id: canonical
+snap-sha3-384: uratLlJbO9KNPMXlxaqTCQ7PRMoW50oteyqI0t8IsBkjEmc61qEzqWMxzH2EkBKx
+developer-id: canonical
+snap-id: 0euyIM60JIN6PlZgyL5WB4OaDncn7cBm
+snap-revision: 2538
+snap-size: 9596928
+timestamp: 2022-06-17T22:14:17.254760Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCYqz8uQAAHTwQACpwU+Tr6hSjU2h2CBrmAROJD/AV14NtdptKp3f3pfoD8E16
+PXduAh7OI7y2sulsAOZ/4eAzEUghMl/wBrdYZiEtYB9DgUG1wsyR8ILmQmGRiIkgFXzHnfsazm6W
+qTIHeRZgPysx1Qmpx+RPSmhyZ93z7zVYbgxf/WxDQXmcl/r9tVTXeX3o8+vZ38xi6DJ4OiAqRFyL
+OdgwCCkHN9Vbqw3yM8MQOnVopRQzuw98pl96b59jcZq31eSzpIfvppQrhmnv0ZsnMt2UpjAQnZVy
+YKLanodoXoeaD0YIZLX6zi5Vt4eFmymcUlqYEirRbUO5mAG3m40zVq6odYnn8EpvjEDWNNxrrQmK
+0uSgerYsCr6to3pLc5Ij2FUIJ44G2o9bwCEChqczAr+Gubv1zauw1rMKnwUl8OxAIWA9RXk2xfuR
+FxMPO3ABB8KpgzJSuOxD2rUrTJFdE3ReKLT9usDkWsLtEax/0R58HvBjcyxblMEorl6Aw6ftJ5P+
+eSeYgWC/VzJNanBo+m/TKQpVtuupEWg/QL1SczYYtzne+abV0SPg8ysfF/hFRR7A1ILe+kJ7cXgU
+siFGTAY31mXdKFrOyg9uk+DuHbVmxS1W4VAG4GMl+/jC7fS+vJW8/IhL41zwos/lgwCNdF6ONOT0
+wMuQskKr+VIrykMrpHU4rN2OtDqR
diff --git a/resources/kubernetes-worker/kube-proxy.snap b/resources/kubernetes-worker/kube-proxy.snap
new file mode 100644
index 0000000..2eff05e
Binary files /dev/null and b/resources/kubernetes-worker/kube-proxy.snap differ
diff --git a/resources/kubernetes-worker/kubectl.assert b/resources/kubernetes-worker/kubectl.assert
new file mode 100644
index 0000000..a55fdbd
--- /dev/null
+++ b/resources/kubernetes-worker/kubectl.assert
@@ -0,0 +1,81 @@
+type: account-key
+authority-id: canonical
+revision: 2
+public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+account-id: canonical
+name: store
+since: 2016-04-01T00:00:00.0Z
+body-length: 717
+sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswHNiEB9Lxk
+
+AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9ji
+qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482R
+vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJi
+UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuKL
+Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQGA
+o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl9
+VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9F
+2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7ant
+Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIcG
+vUvV7RjVzv17ut0AEQEAAQ==
+
+AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsMV
+WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/bP
+nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiLg
+3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kLe
+eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrYm
+inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ19
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+k
+rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWEY
+aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQI
+6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nOu
+haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpFo
+yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O96
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi7
+skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PKW
+CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjdeu
+ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OFq
+qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqRy
+IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3tr
+oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k
+
+type: snap-declaration
+authority-id: canonical
+series: 16
+snap-id: ZgG2URycDgvxSVskfoZxn44uaRMw0iwe
+publisher-id: canonical
+snap-name: kubectl
+timestamp: 2017-03-16T20:34:10.224081Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCWMr2wgAACBYQAIeP5oNBIYNJfa00iFAlwV7DA89rFIZ+eyOA82o+vREA6b81
+XBTGkBs8GgLQ2W2VyzHeAewIfRggVzWeW99VQfh/5hGDkN4YZCFbVJvHPKU2D9lNs8LorBsiC7k0
+6GXC402V1HV6rIRvNxCTrMpUtuzM+XEeXZXN9QuCbOGbf+vCUTRpt9czlcXSS5Ull3ulsqSxwCv3
+uwyi2EB+ZhPXLQv4i3NnIKXRZd/Up+H2rA15K65l9YBLsnnFuf0dUwtVm6Ziq7c3AY8P4r8PR/4x
+eKw2JD5QNpWA603YMOGrtqwdJS6S0g44+QayogaZy9JddyOZ5ksNa6YCjYe5l7yUUP7RSqjAQr3w
+uBvoSXiaBLDpeluXbTcKzl2h/S9Wrsx8zP+5Hz+iCxH0ODZ/yNcELNFahSpxfeF1t+fv6JpGWVTT
+rCVhWsJsZn+F2NTxlTtVrEGIbyZOyeO2f3WSxpxhA7L37SmiI0944gMoVFw9r1NGIeibSVvObHXw
+/LFJrO2M3rr7dVTQ7LZ+iGwf0K2of1Wg8fME+Zpz7vgB523dFDZu3FAbpD5seEHUbnsqUdacUgCJ
+TiAJVQk5AOvucKzcE3P5ZB3UsxxjrBRQrineIHFmtxdkzv/OpDLQEaNwxA4cNMMJ7vDNZHP46CgJ
+TIvp3RCWXI7RoGZKNKYtOVdJUqV8
+
+type: snap-revision
+authority-id: canonical
+snap-sha3-384: ZRMaQxovy-4gmRytMJBreDGUZzENUqE_ZfJ4Q6DEM0l8OYtpXG6RBbqvCf8zRM5N
+developer-id: canonical
+snap-id: ZgG2URycDgvxSVskfoZxn44uaRMw0iwe
+snap-revision: 2461
+snap-size: 10805248
+timestamp: 2022-06-17T22:27:23.754903Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCYqz/ywAAFo0QALDAaU4L251R3Ucl+GhpbwLoMR/4dkLCFrQEG6oBgGMMwV0f
+IT2jzVzlGr+VNDbqoHXuw33tmEc46ohMvsrZclWvwcg4b0ukLZjqxlXEeh4Izf7HA7m9TNk/UsVB
+ovdskcK/czBxU60oOS1Oic00iy/9cca1NnONN+BRl1B/0jRSDk4RzO00OO7SHzwdxPmgExIkg8A7
+2O64ozgLPdtLZlReaaMZ+4aFnxYbiFv85AAE+m+rAfzZq6JUUUe7aQ2sBOifF4CHsCY7pfXHK0O2
+ceF6+rPAgreVIFJyoCjpdJuGyyXeZd6mkqhGIBncEpbZRN+DURiT4/0U3HN8K4UeSYin9i+Ke3fb
+/ol/Pp/ETwdXPEr8A0iQytJ3i+S5vkDpNlCF3yURqAm4elSK5KTV46cjPFPjwbG+pznthDjjXD4n
+QtLqTCREg2lE4Dn42S5p9EZxYq8uaPsxYfpkje2Z8TonDtUwsSNZcveRqADtT94dflr8/qSvnuJW
+8iUbRlWk5ixlOo9xjv2luyoWPq/n9CVvrp9K/R7VAq4CbqNmW0QhGPOi+C0vqWVgWaAHXFx4YEiW
+cidKRjFag+FaWJiCU3eKJRREwwfotZJaMDWzAxnLAGn9CaIjjipd9f8qc8kXx6rwOmHIAiJ9KxKC
+RwitihaavnDz/2VehK78t1XWmKUr
diff --git a/resources/kubernetes-worker/kubectl.snap b/resources/kubernetes-worker/kubectl.snap
new file mode 100644
index 0000000..6236a26
Binary files /dev/null and b/resources/kubernetes-worker/kubectl.snap differ
diff --git a/resources/kubernetes-worker/kubelet.assert b/resources/kubernetes-worker/kubelet.assert
new file mode 100644
index 0000000..946c9e5
--- /dev/null
+++ b/resources/kubernetes-worker/kubelet.assert
@@ -0,0 +1,81 @@
+type: account-key
+authority-id: canonical
+revision: 2
+public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+account-id: canonical
+name: store
+since: 2016-04-01T00:00:00.0Z
+body-length: 717
+sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswHNiEB9Lxk
+
+AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9ji
+qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482R
+vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJi
+UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuKL
+Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQGA
+o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl9
+VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9F
+2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7ant
+Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIcG
+vUvV7RjVzv17ut0AEQEAAQ==
+
+AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsMV
+WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/bP
+nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiLg
+3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kLe
+eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrYm
+inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ19
+rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+k
+rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWEY
+aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQI
+6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nOu
+haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpFo
+yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O96
+HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi7
+skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PKW
+CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjdeu
+ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OFq
+qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqRy
+IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3tr
+oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k
+
+type: snap-declaration
+authority-id: canonical
+series: 16
+snap-id: Jgg3PL8MXHm3QlpzkzAfJsrAfsej3U1z
+publisher-id: canonical
+snap-name: kubelet
+timestamp: 2017-03-16T20:46:14.995528Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCWMr5lwAAzNIQAJ50adY2FdG184c2D3loU17vKNeq8NaZYTQyIsJU+pFc10Zo
+BKj1H67hZFTcF2nNu2n3WuAKrPqY/rdKK9zBqhiTPwBcUMXdDyjQGTnZUKUOHFsy2JVmiklHaNVf
+CaFpoPGrUx+ASVUJnzwLmNtrcpsYVj3uzEmTZ1B+wmgRojtsPQTe1SKuLw9Y+rIYb6YcbGg9lLkW
+pKdlHLocPay7NvBKRYgcunqMj3pAu6ZOehLLKK2vWLsYQXLOka6si62sTV/8EffdhE+qmb6MHe0w
+Hwt9h6CNDAbyjjVT3CTLrARDYV6jjn0zH5ZwzMIzRXpG8CBZqSKdtUzpsVsHHjcnVSwRMf82qvTj
+icjJ+4rB5gelL/s0H9V4RZ83Wygj1WqZgHUHIln+BqtQ08IG0dZ8RAx3+J9ECdK7ddEMsCdfsQQT
+HlimQELOSxkpWKLwHkDXUJar2itQPFCoboagUUZkpwG8IrzmHcMRV4yKMTuuEnMMwXAGiu0VWsCy
+KLG+HZdtI2n+rJHAGJ1+0WNlcnzlWs9bMUXGBezjmcOPjyXI1QE4vhgI+Ye0KK9jU67LeswEXNnu
+C7kflpcOLWO1fN98sP5RFy1mERXK1N/uAe5xH1GNQUPplm9ZQgynQpm6iwCmTcEQHLp9Z6+vXpSf
+kqcKSRUyvsjLQJA3BtT4VQeaBhPM
+
+type: snap-revision
+authority-id: canonical
+snap-sha3-384: L_VoDjTxLAnDxvvEMXB8d-N4YwGtsnR4XtvrSeEdwVQ2nZNfmKlU-zpzypiadQNW
+developer-id: canonical
+snap-id: Jgg3PL8MXHm3QlpzkzAfJsrAfsej3U1z
+snap-revision: 2435
+snap-size: 22306816
+timestamp: 2022-06-17T22:42:21.825228Z
+sign-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0hqUel3m8ul
+
+AcLBUgQAAQoABgUCYq0DTQAA3rQQAItN6PhxBdCWkyJXzn9t5RbGrB9UMsFIQO5DpcNvN16Yi8Dx
+I42yY07hl13PBlvwMmypLrWkYATXpHUV+ig81po1efvH6mPDGO45iUOsqslAoA0F/+CggZsHnC00
+CiH+HNTBhp92aM4fEct/Q4X3G7vMxBXTShZY6iMK6fmz02h2oR7ngxXTnxMqNBzKyhZITl2YfNVZ
+6od7DfsFjrwEAel1mWdbnuVwXfVxf6Yd0PuUoiWFHAyf5QC7cw8wu4ia0MzNurj9ukZCMUmva4AN
+jg/srwYBHIcXHzqBzgRzjejzqb/WLXAsqShYMAbFi65J8uBNCesRR+9puhXy4joAqr5XL/dAI3mq
+wVFI78U3KRfOPxKrPlA6x62qt3azIjp6z7g9CZpwyYXBXzAsKb8KsDhgfueGqDmA66OHFw83YNql
+MCdeLKSwFnE9YAVuY82Kj1mkdi0a3jrU+FkFuw/jP44u0O7W3TTrB45JJMb7u8RFdRCFNCnZvWfD
+rLXKzYfkfRwLu2/CtxS80gwUM+qM1NuhfaSzpUTMirsMgmoWKaA3HgYuFTWUiswiFevuNnTMgS00
+1/kT6Euerfo3ielr6589GBnziUe7ePAeo87n8u1BW1oa3JEZzE9Oy8PsEmJtVY9oWRPmHIOcTHMZ
+eFTZN8FOJtriovMS+R7INwRwepeH
diff --git a/resources/kubernetes-worker/kubelet.snap b/resources/kubernetes-worker/kubelet.snap
new file mode 100644
index 0000000..0d9fa68
Binary files /dev/null and b/resources/kubernetes-worker/kubelet.snap differ