Compare commits
28 Commits
Guide
...
bugfix/1.2
| Author | SHA1 | Date |
|---|---|---|
|
|
cd08f548d1 | |
|
|
373a6359d2 | |
|
|
b0d2e038af | |
|
|
220a26a390 | |
|
|
af3c70b1a2 | |
|
|
d3df2fdc21 | |
|
|
61c1062b68 | |
|
|
14039d4163 | |
|
|
405fdbb952 | |
|
|
c0d54a5e31 | |
|
|
aac9176819 | |
|
|
8fe4848510 | |
|
|
ca454b86fd | |
|
|
38d4d046a6 | |
|
|
626033bfd5 | |
|
|
599bf254a1 | |
|
|
e0afe564ce | |
|
|
4d6b4ffd08 | |
|
|
92ee0340db | |
|
|
9390a8d80f | |
|
|
cde295d64c | |
|
|
bb2340904f | |
|
|
c573b3781f | |
|
|
ee0ec69f2d | |
|
|
103d9be2d8 | |
|
|
0d30c6af69 | |
|
|
1a778714ea | |
|
|
8a8a10790d |
|
|
@ -1,50 +1,5 @@
|
|||
# These are some examples of commonly ignored file patterns.
|
||||
# You should customize this list as applicable to your project.
|
||||
# Learn more about .gitignore:
|
||||
# https://www.atlassian.com/git/tutorials/saving-changes/gitignore
|
||||
|
||||
# Node artifact files
|
||||
node_modules/
|
||||
dist/
|
||||
|
||||
# Compiled Java class files
|
||||
*.class
|
||||
|
||||
# Compiled Python bytecode
|
||||
*.py[cod]
|
||||
|
||||
# Log files
|
||||
*.log
|
||||
|
||||
# Package files
|
||||
*.jar
|
||||
|
||||
# Maven
|
||||
target/
|
||||
dist/
|
||||
|
||||
# JetBrains IDE
|
||||
.idea/
|
||||
|
||||
# Unit test reports
|
||||
TEST*.xml
|
||||
|
||||
# Generated by MacOS
|
||||
.DS_Store
|
||||
|
||||
# Generated by Windows
|
||||
Thumbs.db
|
||||
|
||||
# Applications
|
||||
*.app
|
||||
*.exe
|
||||
*.war
|
||||
|
||||
# Large media files
|
||||
*.mp4
|
||||
*.tiff
|
||||
*.avi
|
||||
*.flv
|
||||
*.mov
|
||||
*.wmv
|
||||
|
||||
|
|
|
|||
110
ReadME.MD
110
ReadME.MD
|
|
@ -1,120 +1,14 @@
|
|||
## 安装 Charm 2.x
|
||||
|
||||
```bash
|
||||
sudo snap install charm --channel=2.x --classic
|
||||
```
|
||||
|
||||
## 基础安装包
|
||||
|
||||
```bash
|
||||
charm list-resources cs:~containers/etcd-633
|
||||
wget https://api.jujucharms.com/charmstore/v5/~containers/etcd-633/resource/etcd/3 -O etcd.tar.gz
|
||||
|
||||
charm list-resources cs:~containers/easyrsa-419
|
||||
wget https://api.jujucharms.com/charmstore/v5/~containers/easyrsa-419/resource/easyrsa/5 -O easyrsa.tar.gz
|
||||
|
||||
charm list-resources cs:~containers/kubernetes-master-1077
|
||||
wget https://api.jujucharms.com/charmstore/v5/~containers/kubernetes-master-1077/resource/cni-amd64/12 -O cni-amd64.tar.gz
|
||||
wget https://api.jujucharms.com/charmstore/v5/~containers/kubernetes-master-1077/resource/cni-arm64/12 -O cni-arm64.tar.gz
|
||||
wget https://api.jujucharms.com/charmstore/v5/~containers/kubernetes-master-1077/resource/cni-s390x/12 -O cni-s390x.tar.gz
|
||||
|
||||
charm list-resources cs:~containers/kubernetes-worker-815
|
||||
wget https://api.jujucharms.com/charmstore/v5/~containers/kubernetes-worker-815/resource/cni-amd64/983 -O cni-amd64.tgz
|
||||
wget https://api.jujucharms.com/charmstore/v5/~containers/kubernetes-worker-815/resource/cni-arm64/974 -O cni-arm64.tgz
|
||||
wget https://api.jujucharms.com/charmstore/v5/~containers/kubernetes-worker-815/resource/cni-s390x/986 -O cni-s390x.tgz
|
||||
|
||||
charm list-resources cs:~containers/calico-838
|
||||
wget https://api.jujucharms.com/charmstore/v5/~containers/calico-838/resource/calico/977 -O calico.tgz
|
||||
wget https://api.jujucharms.com/charmstore/v5/~containers/calico-838/resource/calico-arm64/976 -O calico-arm64.tgz
|
||||
wget https://api.jujucharms.com/charmstore/v5/~containers/calico-838/resource/calico-node-image/659 -O calico-node-image.tgz
|
||||
wget https://api.jujucharms.com/charmstore/v5/~containers/calico-838/resource/calico-upgrade/804 -O calico-upgrade.tgz
|
||||
wget https://api.jujucharms.com/charmstore/v5/~containers/calico-838/resource/calico-upgrade-arm64/804 -O calico-upgrade-arm64.tgz
|
||||
```
|
||||
|
||||
## 向 controller 上传资源
|
||||
|
||||
```bash
|
||||
juju attach-resource <charm-name> resource-name=<filepath>
|
||||
juju attach-resource calico calico=/home/sa/charm/calico/calico-amd64.tar.gz
|
||||
juju attach-resource calico calico-upgrade=/home/sa/charm/calico/calico-upgrade-amd64.tar.gz
|
||||
```
|
||||
|
||||
## Snap Download
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
|
||||
set -eux
|
||||
|
||||
SNAP_CHANNEL="1.22/stable" # Need juju 2.9
|
||||
|
||||
ALL_SNAPS="kube-apiserver kube-scheduler kube-controller-manager kube-proxy kubectl kubelet cdk-addons"
|
||||
MASTER_SNAPS="kube-apiserver kube-scheduler kube-controller-manager kube-proxy kubectl cdk-addons"
|
||||
WORKER_SNAPS="kube-proxy kubelet kubectl"
|
||||
|
||||
# Download Juju 2.9
|
||||
snap download --channel=2.9 juju
|
||||
|
||||
# Download latest snaps from designated channel
|
||||
for snap in $ALL_SNAPS
|
||||
do
|
||||
snap download --channel=$SNAP_CHANNEL $snap
|
||||
done
|
||||
|
||||
# Attach new snaps to master units
|
||||
for snap in $MASTER_SNAPS
|
||||
do
|
||||
juju attach kubernetes-master $snap=`ls ${snap}_*.snap`
|
||||
done
|
||||
|
||||
# Attach new snaps to worker units
|
||||
for snap in $WORKER_SNAPS
|
||||
do
|
||||
juju attach kubernetes-worker $snap=`ls ${snap}_*.snap`
|
||||
done
|
||||
|
||||
# Upgrade to new snaps on masters, one at a time
|
||||
for unit in `juju status --format json | jq -r '.applications|.["kubernetes-master"].units | keys[]'`
|
||||
do
|
||||
juju run-action $unit upgrade --wait
|
||||
done
|
||||
|
||||
# Upgrade to new snaps on workers, one at a time
|
||||
for unit in `juju status --format json | jq -r '.applications|.["kubernetes-worker"].units | keys[]'`
|
||||
do
|
||||
juju run-action $unit upgrade --wait
|
||||
done
|
||||
```
|
||||
|
||||
## Final Release
|
||||
|
||||
需要手动做 Fix
|
||||
# Kubernetes 1.22
|
||||
cs:~containers/charmed-kubernetes-814
|
||||
|
||||
```Bash
|
||||
<<<<<<< HEAD
|
||||
https://raw.githubusercontent.com/charmed-kubernetes/bundle/main/releases/1.22/bundle.yaml
|
||||
charm pull cs:~containers/containerd-178
|
||||
charm pull cs:~containers/easyrsa-420
|
||||
charm pull cs:~containers/etcd-634
|
||||
charm pull cs:~containers/kubernetes-master-1078
|
||||
charm pull cs:~containers/kubernetes-worker-816
|
||||
charm pull cs:~containers/calico-838
|
||||
charm pull cs:~containers/kubeapi-load-balancer-844
|
||||
```
|
||||
=======
|
||||
charm pull cs:~containers/etcd-633
|
||||
charm pull cs:~containers/easyrsa-419
|
||||
charm pull cs:~containers/kubernetes-master-1077
|
||||
charm pull cs:~containers/kubernetes-worker-815
|
||||
charm pull cs:~containers/calico-838
|
||||
charm pull cs:~containers/containerd-177
|
||||
charm pull cs:~containers/kata-138
|
||||
# Extend
|
||||
charm pull cs:~containers/kubeapi-load-balancer-843
|
||||
charm pull cs:~containers/keepalived-110
|
||||
charm pull cs:~containers/coredns-20
|
||||
# Other
|
||||
charm pull cs:~containers/ubuntu-20
|
||||
charm pull cs:~containers/nrpe-75
|
||||
```
|
||||
>>>>>>> 0ba2a75b267453d9584def41868de085d111994f
|
||||
|
|
|
|||
|
|
@ -1,92 +0,0 @@
|
|||
name: Test Suite for CoreDNS
|
||||
|
||||
on:
|
||||
- pull_request
|
||||
|
||||
jobs:
|
||||
lint-and-unit-tests:
|
||||
name: Lint & Unit tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.8
|
||||
- name: Install Tox
|
||||
run: pip install tox
|
||||
- name: Run lint & unit tests
|
||||
run: tox
|
||||
|
||||
func-test:
|
||||
name: Functional test with MicroK8s
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.8
|
||||
- name: Fix global gitconfig for confined snap
|
||||
run: |
|
||||
# GH automatically includes the git-lfs plugin and configures it in
|
||||
# /etc/gitconfig. However, the confinement of the charmcraft snap
|
||||
# means that it can see that this file exists but cannot read it, even
|
||||
# if the file permissions should allow it; this breaks git usage within
|
||||
# the snap. To get around this, we move it from the global gitconfig to
|
||||
# the user's .gitconfig file.
|
||||
cat /etc/gitconfig >> $HOME/.gitconfig
|
||||
sudo rm /etc/gitconfig
|
||||
- name: Install MicroK8s
|
||||
uses: balchua/microk8s-actions@v0.1.3
|
||||
with:
|
||||
rbac: 'true'
|
||||
storage: 'true'
|
||||
dns: 'true' # required for juju, will adjust later
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
pip install tox
|
||||
sudo snap install juju --classic
|
||||
sudo snap install juju-wait --classic
|
||||
sudo usermod -aG microk8s $USER
|
||||
sudo snap install charmcraft --beta
|
||||
sudo snap install yq
|
||||
- name: Build charm
|
||||
run: |
|
||||
if ! charmcraft build; then
|
||||
echo Build failed, full log:
|
||||
cat "$(ls -1t "$HOME"/snap/charmcraft/common/charmcraft-log-* | head -n1)"
|
||||
exit 1
|
||||
fi
|
||||
- name: Bootstrap MicroK8s with Juju
|
||||
run: sg microk8s 'juju bootstrap microk8s microk8s'
|
||||
- name: Add model
|
||||
run: juju add-model coredns microk8s
|
||||
- name: Deploy CoreDNS
|
||||
run: |
|
||||
upstream_image=$(yq eval '.resources.coredns-image.upstream-source' metadata.yaml)
|
||||
juju deploy ./coredns.charm --resource coredns-image=$upstream_image --config forward=8.8.8.8
|
||||
- name: Wait for stable environment
|
||||
run: juju wait -wv
|
||||
- name: Tell MicroK8s to use CoreDNS charm
|
||||
run: |
|
||||
cluster_ip=$(sudo microk8s.kubectl get svc -n coredns coredns -o jsonpath='{..spec.clusterIP}')
|
||||
sudo sed -i -e "s/--cluster-dns=.*/--cluster-dns=$cluster_ip/" /var/snap/microk8s/current/args/kubelet
|
||||
sudo systemctl restart snap.microk8s.daemon-kubelet
|
||||
- name: Run functional test
|
||||
run: tox -e func
|
||||
- name: Juju Status
|
||||
if: failure()
|
||||
run: sudo juju status
|
||||
- name: Juju Log
|
||||
if: failure()
|
||||
run: sudo juju debug-log --replay --no-tail -i coredns
|
||||
- name: Microk8s Status
|
||||
if: failure()
|
||||
run: sudo microk8s.kubectl get all -A
|
||||
- name: Microk8s Pod Log
|
||||
if: failure()
|
||||
run: sudo microk8s.kubectl logs -n coredns -l juju-app=coredns
|
||||
|
|
@ -1,6 +0,0 @@
|
|||
.tox/
|
||||
__pycache__/
|
||||
*.pyc
|
||||
placeholders/
|
||||
*.charm
|
||||
build/
|
||||
|
|
@ -1,34 +0,0 @@
|
|||
# Contributor Guide
|
||||
|
||||
This Juju charm is open source ([Apache License 2.0](./LICENSE)) and we actively seek any community contributions
|
||||
for code, suggestions and documentation.
|
||||
This page details a few notes, workflows and suggestions for how to make contributions most effective and help us
|
||||
all build a better charm - please give them a read before working on any contributions.
|
||||
|
||||
## Licensing
|
||||
|
||||
This charm has been created under the [Apache License 2.0](./LICENSE), which will cover any contributions you may
|
||||
make to this project. Please familiarise yourself with the terms of the license.
|
||||
|
||||
Additionally, this charm uses the Harmony CLA agreement. It’s the easiest way for you to give us permission to
|
||||
use your contributions.
|
||||
In effect, you’re giving us a license, but you still own the copyright — so you retain the right to modify your
|
||||
code and use it in other projects. Please [sign the CLA here](https://ubuntu.com/legal/contributors/agreement) before
|
||||
making any contributions.
|
||||
|
||||
## Code of conduct
|
||||
|
||||
We have adopted the Ubuntu Code of Conduct. You can read this in full [here](https://ubuntu.com/community/code-of-conduct).
|
||||
|
||||
## Contributing code
|
||||
|
||||
To contribute code to this project, please use the following workflow:
|
||||
|
||||
1. [Submit a bug](https://bugs.launchpad.net/charm-coredns/+filebug) to explain the need for and track the change.
|
||||
2. Create a branch on your fork of the repo with your changes, including a unit test covering the new or modified code.
|
||||
3. Submit a PR. The PR description should include a link to the bug on Launchpad.
|
||||
4. Update the Launchpad bug to include a link to the PR and the `review-needed` tag.
|
||||
5. Once reviewed and merged, the change will become available on the edge channel and assigned to an appropriate milestone
|
||||
for further release according to priority.
|
||||
|
||||
|
||||
202
coredns/LICENSE
202
coredns/LICENSE
|
|
@ -1,202 +0,0 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
@ -1,16 +0,0 @@
|
|||
[[source]]
|
||||
name = "pypi"
|
||||
url = "https://pypi.org/simple"
|
||||
verify_ssl = true
|
||||
|
||||
[dev-packages]
|
||||
pytest = "*"
|
||||
flake8 = "*"
|
||||
ipdb = "*"
|
||||
|
||||
[packages]
|
||||
ops = "*"
|
||||
oci-image = {git = "https://github.com/juju-solutions/resource-oci-image/"}
|
||||
|
||||
[requires]
|
||||
python_version = "3.8"
|
||||
|
|
@ -1,246 +0,0 @@
|
|||
{
|
||||
"_meta": {
|
||||
"hash": {
|
||||
"sha256": "3a93ef1bf6ad71dacc9efebae3e194bb569d6bf8728161b19e95dbd7c407aa22"
|
||||
},
|
||||
"pipfile-spec": 6,
|
||||
"requires": {
|
||||
"python_version": "3.8"
|
||||
},
|
||||
"sources": [
|
||||
{
|
||||
"name": "pypi",
|
||||
"url": "https://pypi.org/simple",
|
||||
"verify_ssl": true
|
||||
}
|
||||
]
|
||||
},
|
||||
"default": {
|
||||
"oci-image": {
|
||||
"git": "https://github.com/juju-solutions/resource-oci-image/",
|
||||
"ref": "c5778285d332edf3d9a538f9d0c06154b7ec1b0b"
|
||||
},
|
||||
"ops": {
|
||||
"hashes": [
|
||||
"sha256:23556db47b2c97a1bb72845b7c8ec88aa7a3e27717402903b5fea7b659616ab8",
|
||||
"sha256:d102359496584617a00f6f42525a01d1b60269a3d41788cf025738cbe3348c99"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.10.0"
|
||||
},
|
||||
"pyyaml": {
|
||||
"hashes": [
|
||||
"sha256:02c78d77281d8f8d07a255e57abdbf43b02257f59f50cc6b636937d68efa5dd0",
|
||||
"sha256:0dc9f2eb2e3c97640928dec63fd8dc1dd91e6b6ed236bd5ac00332b99b5c2ff9",
|
||||
"sha256:124fd7c7bc1e95b1eafc60825f2daf67c73ce7b33f1194731240d24b0d1bf628",
|
||||
"sha256:26fcb33776857f4072601502d93e1a619f166c9c00befb52826e7b774efaa9db",
|
||||
"sha256:31ba07c54ef4a897758563e3a0fcc60077698df10180abe4b8165d9895c00ebf",
|
||||
"sha256:3c49e39ac034fd64fd576d63bb4db53cda89b362768a67f07749d55f128ac18a",
|
||||
"sha256:52bf0930903818e600ae6c2901f748bc4869c0c406056f679ab9614e5d21a166",
|
||||
"sha256:5a3f345acff76cad4aa9cb171ee76c590f37394186325d53d1aa25318b0d4a09",
|
||||
"sha256:5e7ac4e0e79a53451dc2814f6876c2fa6f71452de1498bbe29c0b54b69a986f4",
|
||||
"sha256:7242790ab6c20316b8e7bb545be48d7ed36e26bbe279fd56f2c4a12510e60b4b",
|
||||
"sha256:737bd70e454a284d456aa1fa71a0b429dd527bcbf52c5c33f7c8eee81ac16b89",
|
||||
"sha256:8635d53223b1f561b081ff4adecb828fd484b8efffe542edcfdff471997f7c39",
|
||||
"sha256:8b818b6c5a920cbe4203b5a6b14256f0e5244338244560da89b7b0f1313ea4b6",
|
||||
"sha256:8bf38641b4713d77da19e91f8b5296b832e4db87338d6aeffe422d42f1ca896d",
|
||||
"sha256:a36a48a51e5471513a5aea920cdad84cbd56d70a5057cca3499a637496ea379c",
|
||||
"sha256:b2243dd033fd02c01212ad5c601dafb44fbb293065f430b0d3dbf03f3254d615",
|
||||
"sha256:cc547d3ead3754712223abb7b403f0a184e4c3eae18c9bb7fd15adef1597cc4b",
|
||||
"sha256:cc552b6434b90d9dbed6a4f13339625dc466fd82597119897e9489c953acbc22",
|
||||
"sha256:f3790156c606299ff499ec44db422f66f05a7363b39eb9d5b064f17bd7d7c47b",
|
||||
"sha256:f7a21e3d99aa3095ef0553e7ceba36fb693998fbb1226f1392ce33681047465f",
|
||||
"sha256:fdc6b2cb4b19e431994f25a9160695cc59a4e861710cc6fc97161c5e845fc579"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==5.4"
|
||||
}
|
||||
},
|
||||
"develop": {
|
||||
"attrs": {
|
||||
"hashes": [
|
||||
"sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6",
|
||||
"sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700"
|
||||
],
|
||||
"version": "==20.3.0"
|
||||
},
|
||||
"backcall": {
|
||||
"hashes": [
|
||||
"sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e",
|
||||
"sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"
|
||||
],
|
||||
"version": "==0.2.0"
|
||||
},
|
||||
"decorator": {
|
||||
"hashes": [
|
||||
"sha256:41fa54c2a0cc4ba648be4fd43cff00aedf5b9465c9bf18d64325bc225f08f760",
|
||||
"sha256:e3a62f0520172440ca0dcc823749319382e377f37f140a0b99ef45fecb84bfe7"
|
||||
],
|
||||
"version": "==4.4.2"
|
||||
},
|
||||
"flake8": {
|
||||
"hashes": [
|
||||
"sha256:15e351d19611c887e482fb960eae4d44845013cc142d42896e9862f775d8cf5c",
|
||||
"sha256:f04b9fcbac03b0a3e58c0ab3a0ecc462e023a9faf046d57794184028123aa208"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.8.3"
|
||||
},
|
||||
"iniconfig": {
|
||||
"hashes": [
|
||||
"sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3",
|
||||
"sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"
|
||||
],
|
||||
"version": "==1.1.1"
|
||||
},
|
||||
"ipdb": {
|
||||
"hashes": [
|
||||
"sha256:d6f46d261c45a65e65a2f7ec69288a1c511e16206edb2875e7ec6b2f66997e78"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.13.3"
|
||||
},
|
||||
"ipython": {
|
||||
"hashes": [
|
||||
"sha256:04323f72d5b85b606330b6d7e2dc8d2683ad46c3905e955aa96ecc7a99388e70",
|
||||
"sha256:34207ffb2f653bced2bc8e3756c1db86e7d93e44ed049daae9814fed66d408ec"
|
||||
],
|
||||
"version": "==7.21.0"
|
||||
},
|
||||
"ipython-genutils": {
|
||||
"hashes": [
|
||||
"sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8",
|
||||
"sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8"
|
||||
],
|
||||
"version": "==0.2.0"
|
||||
},
|
||||
"jedi": {
|
||||
"hashes": [
|
||||
"sha256:18456d83f65f400ab0c2d3319e48520420ef43b23a086fdc05dff34132f0fb93",
|
||||
"sha256:92550a404bad8afed881a137ec9a461fed49eca661414be45059329614ed0707"
|
||||
],
|
||||
"version": "==0.18.0"
|
||||
},
|
||||
"mccabe": {
|
||||
"hashes": [
|
||||
"sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42",
|
||||
"sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"
|
||||
],
|
||||
"version": "==0.6.1"
|
||||
},
|
||||
"packaging": {
|
||||
"hashes": [
|
||||
"sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5",
|
||||
"sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"
|
||||
],
|
||||
"version": "==20.9"
|
||||
},
|
||||
"parso": {
|
||||
"hashes": [
|
||||
"sha256:15b00182f472319383252c18d5913b69269590616c947747bc50bf4ac768f410",
|
||||
"sha256:8519430ad07087d4c997fda3a7918f7cfa27cb58972a8c89c2a0295a1c940e9e"
|
||||
],
|
||||
"version": "==0.8.1"
|
||||
},
|
||||
"pexpect": {
|
||||
"hashes": [
|
||||
"sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937",
|
||||
"sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"
|
||||
],
|
||||
"markers": "sys_platform != 'win32'",
|
||||
"version": "==4.8.0"
|
||||
},
|
||||
"pickleshare": {
|
||||
"hashes": [
|
||||
"sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca",
|
||||
"sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"
|
||||
],
|
||||
"version": "==0.7.5"
|
||||
},
|
||||
"pluggy": {
|
||||
"hashes": [
|
||||
"sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0",
|
||||
"sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"
|
||||
],
|
||||
"version": "==0.13.1"
|
||||
},
|
||||
"prompt-toolkit": {
|
||||
"hashes": [
|
||||
"sha256:bf00f22079f5fadc949f42ae8ff7f05702826a97059ffcc6281036ad40ac6f04",
|
||||
"sha256:e1b4f11b9336a28fa11810bc623c357420f69dfdb6d2dac41ca2c21a55c033bc"
|
||||
],
|
||||
"version": "==3.0.18"
|
||||
},
|
||||
"ptyprocess": {
|
||||
"hashes": [
|
||||
"sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35",
|
||||
"sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"
|
||||
],
|
||||
"version": "==0.7.0"
|
||||
},
|
||||
"py": {
|
||||
"hashes": [
|
||||
"sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3",
|
||||
"sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a"
|
||||
],
|
||||
"version": "==1.10.0"
|
||||
},
|
||||
"pycodestyle": {
|
||||
"hashes": [
|
||||
"sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367",
|
||||
"sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e"
|
||||
],
|
||||
"version": "==2.6.0"
|
||||
},
|
||||
"pyflakes": {
|
||||
"hashes": [
|
||||
"sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92",
|
||||
"sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"
|
||||
],
|
||||
"version": "==2.2.0"
|
||||
},
|
||||
"pygments": {
|
||||
"hashes": [
|
||||
"sha256:2656e1a6edcdabf4275f9a3640db59fd5de107d88e8663c5d4e9a0fa62f77f94",
|
||||
"sha256:534ef71d539ae97d4c3a4cf7d6f110f214b0e687e92f9cb9d2a3b0d3101289c8"
|
||||
],
|
||||
"version": "==2.8.1"
|
||||
},
|
||||
"pyparsing": {
|
||||
"hashes": [
|
||||
"sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
|
||||
"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
|
||||
],
|
||||
"version": "==2.4.7"
|
||||
},
|
||||
"pytest": {
|
||||
"hashes": [
|
||||
"sha256:1cd09785c0a50f9af72220dd12aa78cfa49cbffc356c61eab009ca189e018a33",
|
||||
"sha256:d010e24666435b39a4cf48740b039885642b6c273a3f77be3e7e03554d2806b7"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==6.1.0"
|
||||
},
|
||||
"toml": {
|
||||
"hashes": [
|
||||
"sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b",
|
||||
"sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"
|
||||
],
|
||||
"version": "==0.10.2"
|
||||
},
|
||||
"traitlets": {
|
||||
"hashes": [
|
||||
"sha256:178f4ce988f69189f7e523337a3e11d91c786ded9360174a3d9ca83e79bc5396",
|
||||
"sha256:69ff3f9d5351f31a7ad80443c2674b7099df13cc41fc5fa6e2f6d3b0330b0426"
|
||||
],
|
||||
"version": "==5.0.5"
|
||||
},
|
||||
"wcwidth": {
|
||||
"hashes": [
|
||||
"sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784",
|
||||
"sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"
|
||||
],
|
||||
"version": "==0.2.5"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,15 +0,0 @@
|
|||
# CoreDNS Operator
|
||||
|
||||
[CoreDNS][] is a flexible, plugin-based DNS server, and is the recommended
|
||||
solution for providing DNS to Kubernetes services within the cluster.
|
||||
This operator enables integration with [Charmed Kubernetes][] via a
|
||||
cross-model relation and allows for more customization than provided by the
|
||||
deployment of CoreDNS provided by default by Charmed Kubernetes.
|
||||
|
||||
More information on using this operator with Charmed Kubernetes can be found
|
||||
[here](https://ubuntu.com/kubernetes/docs/cdk-addons#coredns), and bugs should
|
||||
be filed [here](https://bugs.launchpad.net/charmed-kubernetes).
|
||||
|
||||
|
||||
[CoreDNS]: https://coredns.io/
|
||||
[Charmed Kubernetes]: https://ubuntu.com/kubernetes/docs
|
||||
|
|
@ -1,6 +0,0 @@
|
|||
type: charm
|
||||
parts:
|
||||
charm:
|
||||
build-packages: [git]
|
||||
prime:
|
||||
- ./files/*
|
||||
|
|
@ -1,38 +0,0 @@
|
|||
options:
|
||||
domain:
|
||||
description: The local domain for cluster DNS.
|
||||
type: string
|
||||
default: cluster.local
|
||||
forward:
|
||||
description: Where to forward non-cluster addresses.
|
||||
type: string
|
||||
default: /etc/resolv.conf
|
||||
extra_servers:
|
||||
description: Any additional servers to add to the Corefile.
|
||||
type: string
|
||||
default: ''
|
||||
corefile:
|
||||
description: >-
|
||||
Configuration file to use for CoreDNS. This is interpreted as a Python
`string.Template`, which will be given the `domain` and `forward` configs as
its context.
|
||||
type: string
|
||||
default: |
|
||||
.:53 {
|
||||
errors
|
||||
health {
|
||||
lameduck 5s
|
||||
}
|
||||
ready
|
||||
kubernetes ${domain} in-addr.arpa ip6.arpa {
|
||||
fallthrough in-addr.arpa ip6.arpa
|
||||
pods insecure
|
||||
}
|
||||
prometheus :9153
|
||||
forward . ${forward}
|
||||
cache 30
|
||||
loop
|
||||
reload
|
||||
loadbalance
|
||||
}
|
||||
${extra_servers}
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py
|
||||
File diff suppressed because one or more lines are too long
|
Before Width: | Height: | Size: 7.7 KiB |
|
|
@ -1,21 +0,0 @@
|
|||
name: coredns
|
||||
summary: CoreDNS
|
||||
maintainers:
|
||||
- Cory Johns <cory.johns@canonical.com>
|
||||
description: |
|
||||
CoreDNS provides DNS resolution for Kubernetes.
|
||||
tags:
|
||||
- networking
|
||||
series:
|
||||
- kubernetes
|
||||
provides:
|
||||
dns-provider:
|
||||
interface: kube-dns
|
||||
requires: {}
|
||||
peers: {}
|
||||
resources:
|
||||
coredns-image:
|
||||
type: oci-image
|
||||
description: 'CoreDNS image'
|
||||
upstream-source: coredns/coredns:1.6.7
|
||||
min-juju-version: 2.8.2
|
||||
|
|
@ -1,4 +0,0 @@
|
|||
-i https://pypi.org/simple
|
||||
git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
|
||||
ops==0.10.0
|
||||
pyyaml==5.3.1
|
||||
|
|
@ -1 +0,0 @@
|
|||
0
|
||||
|
|
@ -1,204 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import logging
|
||||
from string import Template
|
||||
|
||||
from ops.charm import CharmBase
|
||||
from ops.main import main
|
||||
from ops.model import ActiveStatus, MaintenanceStatus, WaitingStatus
|
||||
|
||||
from oci_image import OCIImageResource, OCIImageResourceError
|
||||
|
||||
|
||||
class CoreDNSCharm(CharmBase):
    """Charm operator that deploys CoreDNS on Kubernetes via a Juju pod spec.

    On the leader unit it renders the configured Corefile template and sets
    the pod spec; on non-leader units it does nothing but report a waiting
    status.
    """

    def __init__(self, *args):
        """Wire up event observers (leader only)."""
        super().__init__(*args)
        if not self.unit.is_leader():
            # We can't do anything useful when not the leader, so do nothing.
            self.model.unit.status = WaitingStatus('Waiting for leadership')
            return
        self.log = logging.getLogger(__name__)
        # Wrapper around the 'coredns-image' OCI resource; fetch() in main().
        self.image = OCIImageResource(self, 'coredns-image')
        # All of these lifecycle events (re)build the pod spec the same way.
        for event in [self.on.install,
                      self.on.leader_elected,
                      self.on.upgrade_charm,
                      self.on.config_changed]:
            self.framework.observe(event, self.main)
        self.framework.observe(self.on.dns_provider_relation_joined, self.provide_dns)

    def main(self, event):
        """Render the Corefile and set the Kubernetes pod spec.

        Sets BlockedStatus (via the resource error's status) if the image
        resource is missing/invalid, MaintenanceStatus while applying the
        spec, and ActiveStatus on success.
        """
        try:
            image_details = self.image.fetch()
        except OCIImageResourceError as e:
            # The error carries a ready-made status describing the problem.
            self.model.unit.status = e.status
            return

        self.model.unit.status = MaintenanceStatus('Setting pod spec')

        # The corefile config is a string.Template; safe_substitute leaves
        # unknown $placeholders intact instead of raising.
        corefile = Template(self.model.config['corefile'])
        corefile = corefile.safe_substitute(self.model.config)

        # Adapted from coredns.yaml.sed in https://github.com/coredns/ at 75a1cad
        self.model.pod.set_spec({
            'version': 3,
            'service': {
                'updateStrategy': {
                    'type': 'RollingUpdate',
                    'rollingUpdate': {'maxUnavailable': 1},
                },
                'annotations': {
                    'prometheus.io/port': "9153",
                    'prometheus.io/scrape': "true",
                },
            },
            # Dropped by a regression; see:
            # https://bugs.launchpad.net/juju/+bug/1895886
            # 'priorityClassName': 'system-cluster-critical',
            'containers': [{
                'name': 'coredns',
                'imageDetails': image_details,
                'imagePullPolicy': 'IfNotPresent',
                'args': ['-conf', '/etc/coredns/Corefile'],
                'volumeConfig': [{
                    'name': 'config-volume',
                    'mountPath': '/etc/coredns',
                    # Not supported
                    # 'readOnly': True,
                    'files': [{
                        'path': 'Corefile',
                        # World-readable, read-only config file.
                        'mode': 0o444,
                        'content': corefile,
                    }],
                }],
                'ports': [
                    {
                        'name': 'dns',
                        'containerPort': 53,
                        'protocol': 'UDP',
                    },
                    {
                        'name': 'dns-tcp',
                        'containerPort': 53,
                        'protocol': 'TCP',
                    },
                    {
                        'name': 'metrics',
                        'containerPort': 9153,
                        'protocol': 'TCP',
                    },
                ],
                # Can't be specified by the charm yet; see:
                # https://bugs.launchpad.net/juju/+bug/1893123
                # 'resources': {
                #     'limits': {'memory': '170Mi'},
                #     'requests': {'cpu': '100m', 'memory': '70Mi'},
                # },
                'kubernetes': {
                    'securityContext': {
                        'allowPrivilegeEscalation': False,
                        'capabilities': {
                            'add': ['NET_BIND_SERVICE'],
                            'drop': ['all'],
                        },
                        'readOnlyRootFilesystem': True,
                    },
                    'livenessProbe': {
                        'httpGet': {
                            'path': '/health',
                            'port': 8080,
                            'scheme': 'HTTP',
                        },
                        'initialDelaySeconds': 60,
                        'timeoutSeconds': 5,
                        'successThreshold': 1,
                        'failureThreshold': 5,
                    },
                    'readinessProbe': {
                        'httpGet': {
                            'path': '/ready',
                            'port': 8181,
                            'scheme': 'HTTP',
                        },
                    },
                },
            }],
            'serviceAccount': {
                'roles': [{
                    'global': True,
                    'rules': [
                        {
                            'apigroups': ['discovery.k8s.io'],
                            'resources': [
                                'endpointslices',
                            ],
                            'verbs': ['list', 'watch'],
                        },
                        {
                            'apigroups': [''],
                            'resources': [
                                'endpoints',
                                'services',
                                'pods',
                                'namespaces',
                            ],
                            'verbs': ['list', 'watch'],
                        },
                        {
                            'apigroups': [''],
                            'resources': ['nodes'],
                            'verbs': ['get'],
                        },
                    ],
                }],
            },
            'kubernetesResources': {
                'pod': {
                    'dnsPolicy': 'Default',
                    # Not yet supported by Juju; see:
                    # https://bugs.launchpad.net/juju/+bug/1895887
                    # 'tolerations': [{
                    #     'key': 'CriticalAddonsOnly',
                    #     'operator': 'Exists',
                    # }],
                    # 'affinity': {
                    #     'podAntiAffinity': {
                    #         'preferredDuringScheduling' +
                    #         'IgnoredDuringExecution': [{
                    #             'weight': 100,
                    #             'podAffinityTerm': {
                    #                 'labelSelector': {
                    #                     'matchExpressions': [{
                    #                         'key': 'k8s-app',
                    #                         'operator': 'In',
                    #                         'values': ["kube-dns"],
                    #                     }],
                    #                 },
                    #                 'topologyKey': 'kubernetes.io/hostname',
                    #             },
                    #         }],
                    #     },
                    # },
                    # Can be done by the operator via placement (--to), but can't
                    # be specified by the charm yet, per same bug as above.
                    # 'nodeSelector': {
                    #     'kubernetes.io/os': 'linux',
                    # },
                }
            }
        })
        self.model.unit.status = ActiveStatus()

    def provide_dns(self, event):
        """Publish DNS details (domain, IP, port) on the dns-provider relation.

        Defers until our ingress address is available in the relation data.
        """
        # NOTE(review): this reads 'ingress-address' from this unit's own
        # databag — presumably Juju populates it automatically; confirm.
        provided_data = event.relation.data[self.unit]
        if not provided_data.get('ingress-address'):
            event.defer()
            return
        provided_data.update({
            'domain': self.model.config['domain'],
            'sdn-ip': str(provided_data['ingress-address']),
            'port': "53",
        })
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Entry point: hand control to the operator framework's dispatcher.
    main(CoreDNSCharm)
|
||||
|
|
@ -1,51 +0,0 @@
|
|||
import subprocess
|
||||
from pathlib import Path
|
||||
from time import sleep
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
# Root of the charm checkout (two directory levels above this test module).
CHARM_DIR = Path(__file__).parent.parent.parent.resolve()
# Pod spec for a throwaway busybox pod used to exercise DNS resolution.
SPEC_FILE = Path(__file__).parent / 'validate-dns-spec.yaml'
|
||||
|
||||
|
||||
def test_charm():
    """Functional smoke test: the CoreDNS pod is ready and resolves names.

    Spins up a validate-dns pod, checks resolution of an external and an
    in-cluster name, and always tears the pod down afterwards.
    """
    # The active Juju model name doubles as the Kubernetes namespace.
    namespace = run('juju', 'switch').split('/')[-1]
    ready_flag = run(
        'kubectl', 'get', 'pod', '-n', namespace, '-l', 'juju-app=coredns',
        '-o', 'jsonpath={..status.containerStatuses[0].ready}')
    assert ready_flag == 'true'
    run('kubectl', 'apply', '-f', SPEC_FILE)
    try:
        wait_for_output('kubectl', 'get', 'pod/validate-dns',
                        expected='Running')
        # Cover both external and cluster-internal resolution.
        hostnames = ("www.ubuntu.com", "kubernetes.default.svc.cluster.local")
        for hostname in hostnames:
            run('kubectl', 'exec', 'validate-dns', '--', 'nslookup', hostname)
    finally:
        run('kubectl', 'delete', '-f', SPEC_FILE)
|
||||
|
||||
|
||||
def run(*args):
    """Execute a command and return its stripped stdout.

    Arguments are stringified before execution (so Path objects and ints
    are fine). A non-zero exit status fails the current test with the
    captured stdout/stderr.
    """
    cmd = [str(arg) for arg in args]
    try:
        proc = subprocess.run(cmd,
                              check=True,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
    except subprocess.CalledProcessError as err:
        pytest.fail(f'Command {cmd} failed ({err.returncode}):\n'
                    f'stdout:\n{err.stdout.decode("utf8")}\n'
                    f'stderr:\n{err.stderr.decode("utf8")}\n')
    return proc.stdout.decode('utf8').strip()
|
||||
|
||||
|
||||
def wait_for_output(*args, expected='', timeout=3 * 60):
    """Poll a command every 5 seconds until its output contains *expected*.

    Fails the current test if *expected* has not appeared within roughly
    *timeout* seconds (command runtime is not counted).
    """
    cmd = [str(arg) for arg in args]
    output = None
    attempts = int(timeout / 5)
    for _ in range(attempts):
        output = run(*cmd)
        if expected in output:
            break
        sleep(5)
    else:
        # Loop exhausted without a break: never saw the expected text.
        pytest.fail(f'Timed out waiting for "{expected}" from {cmd}:\n{output}')
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: validate-dns
|
||||
spec:
|
||||
containers:
|
||||
- name: busybox
|
||||
image: busybox
|
||||
imagePullPolicy: IfNotPresent
|
||||
args: ['sleep', '3600']
|
||||
restartPolicy: Always
|
||||
|
|
@ -1,42 +0,0 @@
|
|||
import pytest
|
||||
|
||||
from ops.model import ActiveStatus, BlockedStatus, WaitingStatus
|
||||
from ops.testing import Harness
|
||||
import yaml
|
||||
|
||||
from charm import CoreDNSCharm
|
||||
|
||||
|
||||
# Prefer the C-accelerated (libyaml) dumper when PyYAML was built with it;
# fall back to the pure-Python implementation otherwise.
if yaml.__with_libyaml__:
    _DefaultDumper = yaml.CSafeDumper
else:
    _DefaultDumper = yaml.SafeDumper
|
||||
|
||||
|
||||
@pytest.fixture
def harness():
    """Return a fresh, not-yet-begun test Harness wrapping CoreDNSCharm."""
    return Harness(CoreDNSCharm)
|
||||
|
||||
|
||||
def test_not_leader(harness):
    """A non-leader unit only reports that it is waiting for leadership."""
    harness.begin()
    assert isinstance(harness.charm.model.unit.status, WaitingStatus)
|
||||
|
||||
|
||||
def test_missing_image(harness):
    """Without the coredns-image resource attached, the charm blocks."""
    harness.set_leader(True)
    harness.begin_with_initial_hooks()
    assert isinstance(harness.charm.model.unit.status, BlockedStatus)
|
||||
|
||||
|
||||
def test_main(harness):
    """Happy path: a leader with the image resource goes active, and the
    resulting pod spec serializes cleanly to YAML."""
    harness.set_leader(True)
    harness.add_oci_resource('coredns-image', {
        'registrypath': 'coredns/coredns:1.6.7',
        'username': '',
        'password': '',
    })
    harness.begin_with_initial_hooks()
    assert isinstance(harness.charm.model.unit.status, ActiveStatus)
    # confirm that we can serialize the pod spec
    yaml.dump(harness.get_pod_spec(), Dumper=_DefaultDumper)
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
[flake8]
|
||||
max-line-length = 88
|
||||
|
||||
[tox]
|
||||
skipsdist = True
|
||||
envlist = lint,unit
|
||||
|
||||
[testenv]
|
||||
basepython = python3
|
||||
setenv =
|
||||
PYTHONPATH={toxinidir}/src
|
||||
PYTHONBREAKPOINT=ipdb.set_trace
|
||||
passenv = HOME
|
||||
deps = pipenv
|
||||
commands =
|
||||
pipenv install --dev --ignore-pipfile
|
||||
pipenv run pytest --tb native -s {posargs:tests/unit}
|
||||
|
||||
[testenv:lint]
|
||||
commands =
|
||||
pipenv install --dev --ignore-pipfile
|
||||
pipenv run flake8 {toxinidir}/src {toxinidir}/tests
|
||||
|
||||
[testenv:func]
|
||||
commands =
|
||||
pipenv install --dev --ignore-pipfile
|
||||
pipenv run pytest --tb native -s {posargs:tests/func}
|
||||
|
|
@ -1 +0,0 @@
|
|||
pip
|
||||
|
|
@ -1,20 +0,0 @@
|
|||
Copyright (c) 2017-2020 Ingy döt Net
|
||||
Copyright (c) 2006-2016 Kirill Simonov
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
|
@ -1,41 +0,0 @@
|
|||
Metadata-Version: 2.1
|
||||
Name: PyYAML
|
||||
Version: 5.3.1
|
||||
Summary: YAML parser and emitter for Python
|
||||
Home-page: https://github.com/yaml/pyyaml
|
||||
Author: Kirill Simonov
|
||||
Author-email: xi@resolvent.net
|
||||
License: MIT
|
||||
Download-URL: https://pypi.org/project/PyYAML/
|
||||
Platform: Any
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: License :: OSI Approved :: MIT License
|
||||
Classifier: Operating System :: OS Independent
|
||||
Classifier: Programming Language :: Cython
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 2
|
||||
Classifier: Programming Language :: Python :: 2.7
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.5
|
||||
Classifier: Programming Language :: Python :: 3.6
|
||||
Classifier: Programming Language :: Python :: 3.7
|
||||
Classifier: Programming Language :: Python :: 3.8
|
||||
Classifier: Programming Language :: Python :: Implementation :: CPython
|
||||
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
||||
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
||||
Classifier: Topic :: Text Processing :: Markup
|
||||
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
|
||||
|
||||
YAML is a data serialization format designed for human readability
|
||||
and interaction with scripting languages. PyYAML is a YAML parser
|
||||
and emitter for Python.
|
||||
|
||||
PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
|
||||
support, capable extension API, and sensible error messages. PyYAML
|
||||
supports standard YAML tags and provides Python-specific tags that
|
||||
allow to represent an arbitrary Python object.
|
||||
|
||||
PyYAML is applicable for a broad range of tasks from complex
|
||||
configuration files to object serialization and persistence.
|
||||
|
||||
|
|
@ -1,41 +0,0 @@
|
|||
PyYAML-5.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
PyYAML-5.3.1.dist-info/LICENSE,sha256=xAESRJ8lS5dTBFklJIMT6ScO-jbSJrItgtTMbEPFfyk,1101
|
||||
PyYAML-5.3.1.dist-info/METADATA,sha256=xTsZFjd8T4M-5rC2M3BHgx_KTTpEPy5vFDIXrbzRXPQ,1758
|
||||
PyYAML-5.3.1.dist-info/RECORD,,
|
||||
PyYAML-5.3.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
PyYAML-5.3.1.dist-info/WHEEL,sha256=hzx2-39jWfx-No5BPGm7YN661ryRYBuLP8gZdbxDo8I,103
|
||||
PyYAML-5.3.1.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11
|
||||
yaml/__init__.py,sha256=XFUNbKTg4afAd0BETjGQ1mKQ97_g5jbE1C0WoKc74dc,13170
|
||||
yaml/__pycache__/__init__.cpython-38.pyc,,
|
||||
yaml/__pycache__/composer.cpython-38.pyc,,
|
||||
yaml/__pycache__/constructor.cpython-38.pyc,,
|
||||
yaml/__pycache__/cyaml.cpython-38.pyc,,
|
||||
yaml/__pycache__/dumper.cpython-38.pyc,,
|
||||
yaml/__pycache__/emitter.cpython-38.pyc,,
|
||||
yaml/__pycache__/error.cpython-38.pyc,,
|
||||
yaml/__pycache__/events.cpython-38.pyc,,
|
||||
yaml/__pycache__/loader.cpython-38.pyc,,
|
||||
yaml/__pycache__/nodes.cpython-38.pyc,,
|
||||
yaml/__pycache__/parser.cpython-38.pyc,,
|
||||
yaml/__pycache__/reader.cpython-38.pyc,,
|
||||
yaml/__pycache__/representer.cpython-38.pyc,,
|
||||
yaml/__pycache__/resolver.cpython-38.pyc,,
|
||||
yaml/__pycache__/scanner.cpython-38.pyc,,
|
||||
yaml/__pycache__/serializer.cpython-38.pyc,,
|
||||
yaml/__pycache__/tokens.cpython-38.pyc,,
|
||||
yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883
|
||||
yaml/constructor.py,sha256=O3Uaf0_J_5GQBoeI9ZNhpJAhtdagr_X2HzDgGbZOMnw,28627
|
||||
yaml/cyaml.py,sha256=LiMkvchNonfoy1F6ec9L2BiUz3r0bwF4hympASJX1Ic,3846
|
||||
yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837
|
||||
yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006
|
||||
yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533
|
||||
yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445
|
||||
yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061
|
||||
yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440
|
||||
yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495
|
||||
yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794
|
||||
yaml/representer.py,sha256=82UM3ZxUQKqsKAF4ltWOxCS6jGPIFtXpGs7mvqyv4Xs,14184
|
||||
yaml/resolver.py,sha256=DJCjpQr8YQCEYYjKEYqTl0GrsZil2H4aFOI9b0Oe-U4,8970
|
||||
yaml/scanner.py,sha256=KeQIKGNlSyPE8QDwionHxy9CgbqE5teJEz05FR9-nAg,51277
|
||||
yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165
|
||||
yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573
|
||||
|
|
@ -1,5 +0,0 @@
|
|||
Wheel-Version: 1.0
|
||||
Generator: bdist_wheel (0.36.2)
|
||||
Root-Is-Purelib: false
|
||||
Tag: cp38-cp38-linux_x86_64
|
||||
|
||||
|
|
@ -1,2 +0,0 @@
|
|||
_yaml
|
||||
yaml
|
||||
|
|
@ -1 +0,0 @@
|
|||
pip
|
||||
|
|
@ -1,63 +0,0 @@
|
|||
Metadata-Version: 2.1
|
||||
Name: oci-image
|
||||
Version: 1.0.0
|
||||
Summary: Helper for dealing with OCI Image resources in the charm operator framework
|
||||
Home-page: https://github.com/juju-solutions/resource-oci-image
|
||||
Author: Cory Johns
|
||||
Author-email: johnsca@gmail.com
|
||||
License: Apache License 2.0
|
||||
Platform: UNKNOWN
|
||||
|
||||
# OCI Image Resource helper
|
||||
|
||||
This is a helper for working with OCI image resources in the charm operator
|
||||
framework.
|
||||
|
||||
## Installation
|
||||
|
||||
Add it to your `requirements.txt`. Since it's not in PyPI, you'll need to use
|
||||
the GitHub archive URL (or `git+` URL, if you want to pin to a specific commit):
|
||||
|
||||
```
|
||||
https://github.com/juju-solutions/resource-oci-image/archive/master.zip
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
The `OCIImageResource` class will wrap the framework resource for the given
|
||||
resource name, and calling `fetch` on it will either return the image info
|
||||
or raise an `OCIImageResourceError` if it can't fetch or parse the image
|
||||
info. The exception will have a `status` attribute you can use directly,
|
||||
or a `status_message` attribute if you just want that.
|
||||
|
||||
Example usage:
|
||||
|
||||
```python
|
||||
from ops.charm import CharmBase
|
||||
from ops.main import main
|
||||
from oci_image import OCIImageResource, OCIImageResourceError
|
||||
|
||||
class MyCharm(CharmBase):
|
||||
def __init__(self, *args):
|
||||
super().__init__(*args)
|
||||
self.image = OCIImageResource(self, 'resource-name')
|
||||
self.framework.observe(self.on.start, self.on_start)
|
||||
|
||||
def on_start(self, event):
|
||||
try:
|
||||
image_info = self.image.fetch()
|
||||
except OCIImageResourceError as e:
|
||||
self.model.unit.status = e.status
|
||||
event.defer()
|
||||
return
|
||||
|
||||
self.model.pod.set_spec({'containers': [{
|
||||
'name': 'my-charm',
|
||||
'imageDetails': image_info,
|
||||
}]})
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(MyCharm)
|
||||
```
|
||||
|
||||
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
__pycache__/oci_image.cpython-38.pyc,,
|
||||
oci_image-1.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
oci_image-1.0.0.dist-info/METADATA,sha256=QIpPa4JcSPa_Ci0n-DaCNp4PkKovZudFW8FnpnauJnQ,1808
|
||||
oci_image-1.0.0.dist-info/RECORD,,
|
||||
oci_image-1.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
oci_image-1.0.0.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92
|
||||
oci_image-1.0.0.dist-info/direct_url.json,sha256=sUsaIeKXs7oqCE-NdmqTsNJ8rmr97YMi0wuRNVObj0Y,215
|
||||
oci_image-1.0.0.dist-info/top_level.txt,sha256=M4dLaObLx7irI4EO-A4_VJP_b-A6dDD7hB5QyVKdHOY,10
|
||||
oci_image.py,sha256=c75VR2vSmOp9pPTP2cnsxo23CqhhFbRtnIOtMjzDyXY,1794
|
||||
|
|
@ -1,5 +0,0 @@
|
|||
Wheel-Version: 1.0
|
||||
Generator: bdist_wheel (0.36.2)
|
||||
Root-Is-Purelib: true
|
||||
Tag: py3-none-any
|
||||
|
||||
|
|
@ -1 +0,0 @@
|
|||
{"url": "https://github.com/juju-solutions/resource-oci-image/", "vcs_info": {"commit_id": "c5778285d332edf3d9a538f9d0c06154b7ec1b0b", "requested_revision": "c5778285d332edf3d9a538f9d0c06154b7ec1b0b", "vcs": "git"}}
|
||||
|
|
@ -1 +0,0 @@
|
|||
oci_image
|
||||
|
|
@ -1,53 +0,0 @@
|
|||
from pathlib import Path
|
||||
|
||||
import yaml
|
||||
from ops.framework import Object
|
||||
from ops.model import BlockedStatus, ModelError
|
||||
|
||||
|
||||
class OCIImageResource(Object):
    """Wrapper for a charm's OCI image resource.

    Fetches the resource's YAML metadata and translates it into the form
    expected by the Juju Kubernetes pod spec.
    """

    def __init__(self, charm, resource_name):
        super().__init__(charm, resource_name)
        # Name of the oci-image resource declared in metadata.yaml.
        self.resource_name = resource_name

    def fetch(self):
        """Return image details as an ``imageDetails`` pod-spec dict.

        Raises MissingResourceError if the resource is unavailable or
        empty, and InvalidResourceError if it is not valid YAML.
        """
        name = self.resource_name
        try:
            resource_path = self.model.resources.fetch(name)
        except ModelError as err:
            raise MissingResourceError(name) from err
        if not resource_path.exists():
            raise MissingResourceError(name)
        resource_text = Path(resource_path).read_text()
        if not resource_text:
            raise MissingResourceError(name)
        try:
            resource_data = yaml.safe_load(resource_text)
        except yaml.YAMLError as err:
            raise InvalidResourceError(name) from err
        # Translate the data from the format used by the charm store to the
        # format used by the Juju K8s pod spec, since that is how this is
        # typically used.
        return {
            'imagePath': resource_data['registrypath'],
            'username': resource_data['username'],
            'password': resource_data['password'],
        }
|
||||
|
||||
|
||||
class OCIImageResourceError(ModelError):
    """Base error for failures fetching or parsing an OCI image resource.

    Carries a ready-made ``status`` object that charm code can assign
    directly to ``model.unit.status``.
    """
    # Subclasses may override these to customize the resulting status.
    status_type = BlockedStatus
    status_message = 'Resource error'

    def __init__(self, resource_name):
        super().__init__(resource_name)
        self.status = self.status_type(
            f'{self.status_message}: {resource_name}')
|
||||
|
||||
|
||||
class MissingResourceError(OCIImageResourceError):
    """Raised when the resource is absent, unreadable, or empty."""
    status_message = 'Missing resource'
|
||||
|
||||
|
||||
class InvalidResourceError(OCIImageResourceError):
    """Raised when the resource exists but is not valid YAML."""
    status_message = 'Invalid resource'
|
||||
|
|
@ -1 +0,0 @@
|
|||
pip
|
||||
|
|
@ -1,202 +0,0 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
@ -1,167 +0,0 @@
|
|||
Metadata-Version: 2.1
|
||||
Name: ops
|
||||
Version: 0.10.0
|
||||
Summary: The Python library behind great charms
|
||||
Home-page: https://github.com/canonical/operator
|
||||
Author: The Charmcraft team at Canonical Ltd.
|
||||
Author-email: charmcraft@lists.launchpad.net
|
||||
License: Apache-2.0
|
||||
Platform: UNKNOWN
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: License :: OSI Approved :: Apache Software License
|
||||
Classifier: Development Status :: 4 - Beta
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: Intended Audience :: System Administrators
|
||||
Classifier: Operating System :: MacOS :: MacOS X
|
||||
Classifier: Operating System :: POSIX :: Linux
|
||||
Requires-Python: >=3.5
|
||||
Description-Content-Type: text/markdown
|
||||
Requires-Dist: PyYAML
|
||||
|
||||
# The Operator Framework
|
||||
|
||||
The Operator Framework provides a simple, lightweight, and powerful way of
|
||||
writing Juju charms, the best way to encapsulate operational experience in code.
|
||||
|
||||
The framework will help you to:
|
||||
|
||||
* model the integration of your services
|
||||
* manage the lifecycle of your application
|
||||
* create reusable and scalable components
|
||||
* keep your code simple and readable
|
||||
|
||||
## Getting Started
|
||||
|
||||
Charms written using the operator framework are just Python code. The intention
|
||||
is for it to feel very natural for somebody used to coding in Python, and
|
||||
reasonably easy to pick up for somebody who might be a domain expert but not
|
||||
necessarily a pythonista themselves.
|
||||
|
||||
The dependencies of the operator framework are kept as minimal as possible;
|
||||
currently that's Python 3.5 or greater, and `PyYAML` (both are included by
|
||||
default in Ubuntu's cloud images from 16.04 on).
|
||||
|
||||
<!--
|
||||
If you're new to the world of Juju and charms, you should probably dive into our
|
||||
[tutorial](/TBD).
|
||||
|
||||
If you know about Juju, and have written charms that didn't use the operator
|
||||
framework (be it with reactive or without), we have an [introduction to the
|
||||
operator framework](/TBD) just for you.
|
||||
|
||||
If you've gone through the above already and just want a refresher, or are
|
||||
really impatient and need to dive in, feel free to carry on down.
|
||||
-->
|
||||
## A Quick Introduction
|
||||
|
||||
Operator framework charms are just Python code. The entry point to your charm is
|
||||
a particular Python file. It could be anything that makes sense to your project,
|
||||
but let's assume this is `src/charm.py`. This file must be executable (and it
|
||||
must have the appropriate shebang line).
|
||||
|
||||
You need the usual `metadata.yaml` and (probably) `config.yaml` files, and a
|
||||
`requirements.txt` for any Python dependencies. In other words, your project
|
||||
might look like this:
|
||||
|
||||
```
|
||||
my-charm
|
||||
├── config.yaml
|
||||
├── metadata.yaml
|
||||
├── requirements.txt
|
||||
└── src/
|
||||
└── charm.py
|
||||
```
|
||||
|
||||
`src/charm.py` here is the entry point to your charm code. At a minimum, it
|
||||
needs to define a subclass of `CharmBase` and pass that into the framework's
|
||||
`main` function:
|
||||
|
||||
```python
|
||||
from ops.charm import CharmBase
|
||||
from ops.main import main
|
||||
|
||||
class MyCharm(CharmBase):
|
||||
def __init__(self, *args):
|
||||
super().__init__(*args)
|
||||
self.framework.observe(self.on.start, self.on_start)
|
||||
|
||||
def on_start(self, event):
|
||||
# Handle the start event here.
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(MyCharm)
|
||||
```
|
||||
|
||||
That should be enough for you to be able to run
|
||||
|
||||
```
|
||||
$ charmcraft build
|
||||
Done, charm left in 'my-charm.charm'
|
||||
$ juju deploy ./my-charm.charm
|
||||
```
|
||||
|
||||
> 🛈 More information on [`charmcraft`](https://pypi.org/project/charmcraft/) can
|
||||
> also be found on its [github page](https://github.com/canonical/charmcraft).
|
||||
|
||||
Happy charming!
|
||||
|
||||
## Testing your charms
|
||||
|
||||
The operator framework provides a testing harness, so that you can test that
|
||||
your charm does the right thing when presented with different scenarios, without
|
||||
having to have a full deployment to do so. `pydoc3 ops.testing` has the details
|
||||
for that, including this example:
|
||||
|
||||
```python
|
||||
harness = Harness(MyCharm)
|
||||
# Do initial setup here
|
||||
relation_id = harness.add_relation('db', 'postgresql')
|
||||
# Now instantiate the charm to see events as the model changes
|
||||
harness.begin()
|
||||
harness.add_relation_unit(relation_id, 'postgresql/0')
|
||||
harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
|
||||
# Check that charm has properly handled the relation_joined event for postgresql/0
|
||||
self.assertEqual(harness.charm. ...)
|
||||
```
|
||||
|
||||
## Talk to us
|
||||
|
||||
If you need help, have ideas, or would just like to chat with us, reach out on
|
||||
IRC: we're in [#smooth-operator] on freenode (or try the [webchat]).
|
||||
|
||||
We also pay attention to Juju's [discourse]; most discussion at this
|
||||
stage is on IRC, however.
|
||||
|
||||
You can also deep dive into the [API docs] if that's your thing.
|
||||
|
||||
[webchat]: https://webchat.freenode.net/#smooth-operator
|
||||
[#smooth-operator]: irc://chat.freenode.net/%23smooth-operator
|
||||
[discourse]: https://discourse.juju.is/c/charming
|
||||
[API docs]: https://ops.rtfd.io/
|
||||
|
||||
## Operator Framework development
|
||||
|
||||
If you want to work in the framework *itself* you will need Python >= 3.5 and
|
||||
the dependencies declared in `requirements-dev.txt` installed in your system.
|
||||
Or you can use a virtualenv:
|
||||
|
||||
virtualenv --python=python3 env
|
||||
source env/bin/activate
|
||||
pip install -r requirements-dev.txt
|
||||
|
||||
Then you can try `./run_tests`, it should all go green.
|
||||
|
||||
If you see the error `yaml does not have libyaml extensions, using slower pure
|
||||
Python yaml`, you need to reinstall pyyaml with the correct extensions:
|
||||
|
||||
apt-get install libyaml-dev
|
||||
pip install --force-reinstall --no-cache-dir pyyaml
|
||||
|
||||
If you want to build the documentation you'll need the requirements from
|
||||
`docs/requirements.txt`, or in your virtualenv
|
||||
|
||||
pip install -r docs/requirements.txt
|
||||
|
||||
and then you can run `./build_docs`.
|
||||
|
||||
|
||||
|
|
@ -1,29 +0,0 @@
|
|||
ops-0.10.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
ops-0.10.0.dist-info/LICENSE.txt,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
|
||||
ops-0.10.0.dist-info/METADATA,sha256=AI7mL-PWkkYQ4f_NCulM5VcIQrMskxPIYp108DZrOcA,5577
|
||||
ops-0.10.0.dist-info/RECORD,,
|
||||
ops-0.10.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
ops-0.10.0.dist-info/WHEEL,sha256=g4nMs7d-Xl9-xC9XovUrsDHGXt-FT0E17Yqo92DEfvY,92
|
||||
ops-0.10.0.dist-info/top_level.txt,sha256=enC05wWafSg8iDKIvj3gvtAtEP2kYCyN5Gmd689q-_I,4
|
||||
ops/__init__.py,sha256=WaHb0dfp1KEe6jFV8Pm_mcdJ3ModiWujnQ6xLjNzPNQ,819
|
||||
ops/__pycache__/__init__.cpython-38.pyc,,
|
||||
ops/__pycache__/charm.cpython-38.pyc,,
|
||||
ops/__pycache__/framework.cpython-38.pyc,,
|
||||
ops/__pycache__/jujuversion.cpython-38.pyc,,
|
||||
ops/__pycache__/log.cpython-38.pyc,,
|
||||
ops/__pycache__/main.cpython-38.pyc,,
|
||||
ops/__pycache__/model.cpython-38.pyc,,
|
||||
ops/__pycache__/storage.cpython-38.pyc,,
|
||||
ops/__pycache__/testing.cpython-38.pyc,,
|
||||
ops/__pycache__/version.cpython-38.pyc,,
|
||||
ops/charm.py,sha256=i1fcd-pMzRV6f9AfMy0S_Jr_rZso3s9Xi-5GZWEs3nc,22512
|
||||
ops/framework.py,sha256=T9PWR4FXBI6Yd3XGwwNO51rJlyMUeO5vPdd4GmEjdzY,38298
|
||||
ops/jujuversion.py,sha256=T5KafqBHbQiHJ1OVoVbseUnZz7og4gPUz7CayXcHddk,3845
|
||||
ops/lib/__init__.py,sha256=7i2EN1jCUkVZT5NCi_q_ilBBzpCkWaW9mnBc3vBYCns,9188
|
||||
ops/lib/__pycache__/__init__.cpython-38.pyc,,
|
||||
ops/log.py,sha256=7jNn71--WpFngrZIwnJoaTRiaVrNVkLHK2enVu_VRA8,1860
|
||||
ops/main.py,sha256=TcOAS3VE1nMt-jF9uUzoyDWGTNl-OoAkS7XqQraWH3c,15375
|
||||
ops/model.py,sha256=katD2gQc35VArVMfGdI2AjPobFegQjShmDqVCKeLXZc,46796
|
||||
ops/storage.py,sha256=dal0athxe35cnWE8ol9N7nEUQDMcphDgRrQrmyGQDoA,11859
|
||||
ops/testing.py,sha256=HRjgq2ikVijGRMjVN2g-HJr8oQJ0ul8QEUUZv9D2_go,34727
|
||||
ops/version.py,sha256=6wsm0bsNX30wL9YmCZai2X5ISKQZYBIFJAbgmBn2Ri4,47
|
||||
|
|
@ -1,5 +0,0 @@
|
|||
Wheel-Version: 1.0
|
||||
Generator: bdist_wheel (0.34.2)
|
||||
Root-Is-Purelib: true
|
||||
Tag: py3-none-any
|
||||
|
||||
|
|
@ -1 +0,0 @@
|
|||
ops
|
||||
|
|
@ -1,20 +0,0 @@
|
|||
# Copyright 2020 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""The Operator Framework."""
|
||||
|
||||
from .version import version as __version__ # noqa: F401 (imported but unused)
|
||||
|
||||
# Import here the bare minimum to break the circular import between modules
|
||||
from . import charm # noqa: F401 (imported but unused)
|
||||
|
|
@ -1,575 +0,0 @@
|
|||
# Copyright 2019-2020 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import enum
|
||||
import os
|
||||
import pathlib
|
||||
import typing
|
||||
|
||||
import yaml
|
||||
|
||||
from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents
|
||||
from ops import model
|
||||
|
||||
|
||||
def _loadYaml(source):
|
||||
if yaml.__with_libyaml__:
|
||||
return yaml.load(source, Loader=yaml.CSafeLoader)
|
||||
return yaml.load(source, Loader=yaml.SafeLoader)
|
||||
|
||||
|
||||
class HookEvent(EventBase):
|
||||
"""A base class for events that trigger because of a Juju hook firing."""
|
||||
|
||||
|
||||
class ActionEvent(EventBase):
|
||||
"""A base class for events that trigger when a user asks for an Action to be run.
|
||||
|
||||
To read the parameters for the action, see the instance variable `params`.
|
||||
To respond with the result of the action, call `set_results`. To add progress
|
||||
messages that are visible as the action is progressing use `log`.
|
||||
|
||||
:ivar params: The parameters passed to the action (read by action-get)
|
||||
"""
|
||||
|
||||
def defer(self):
|
||||
"""Action events are not deferable like other events.
|
||||
|
||||
This is because an action runs synchronously and the user is waiting for the result.
|
||||
"""
|
||||
raise RuntimeError('cannot defer action events')
|
||||
|
||||
def restore(self, snapshot: dict) -> None:
|
||||
"""Used by the operator framework to record the action.
|
||||
|
||||
Not meant to be called directly by Charm code.
|
||||
"""
|
||||
env_action_name = os.environ.get('JUJU_ACTION_NAME')
|
||||
event_action_name = self.handle.kind[:-len('_action')].replace('_', '-')
|
||||
if event_action_name != env_action_name:
|
||||
# This could only happen if the dev manually emits the action, or from a bug.
|
||||
raise RuntimeError('action event kind does not match current action')
|
||||
# Params are loaded at restore rather than __init__ because
|
||||
# the model is not available in __init__.
|
||||
self.params = self.framework.model._backend.action_get()
|
||||
|
||||
def set_results(self, results: typing.Mapping) -> None:
|
||||
"""Report the result of the action.
|
||||
|
||||
Args:
|
||||
results: The result of the action as a Dict
|
||||
"""
|
||||
self.framework.model._backend.action_set(results)
|
||||
|
||||
def log(self, message: str) -> None:
|
||||
"""Send a message that a user will see while the action is running.
|
||||
|
||||
Args:
|
||||
message: The message for the user.
|
||||
"""
|
||||
self.framework.model._backend.action_log(message)
|
||||
|
||||
def fail(self, message: str = '') -> None:
|
||||
"""Report that this action has failed.
|
||||
|
||||
Args:
|
||||
message: Optional message to record why it has failed.
|
||||
"""
|
||||
self.framework.model._backend.action_fail(message)
|
||||
|
||||
|
||||
class InstallEvent(HookEvent):
|
||||
"""Represents the `install` hook from Juju."""
|
||||
|
||||
|
||||
class StartEvent(HookEvent):
|
||||
"""Represents the `start` hook from Juju."""
|
||||
|
||||
|
||||
class StopEvent(HookEvent):
|
||||
"""Represents the `stop` hook from Juju."""
|
||||
|
||||
|
||||
class RemoveEvent(HookEvent):
|
||||
"""Represents the `remove` hook from Juju. """
|
||||
|
||||
|
||||
class ConfigChangedEvent(HookEvent):
|
||||
"""Represents the `config-changed` hook from Juju."""
|
||||
|
||||
|
||||
class UpdateStatusEvent(HookEvent):
|
||||
"""Represents the `update-status` hook from Juju."""
|
||||
|
||||
|
||||
class UpgradeCharmEvent(HookEvent):
|
||||
"""Represents the `upgrade-charm` hook from Juju.
|
||||
|
||||
This will be triggered when a user has run `juju upgrade-charm`. It is run after Juju
|
||||
has unpacked the upgraded charm code, and so this event will be handled with new code.
|
||||
"""
|
||||
|
||||
|
||||
class PreSeriesUpgradeEvent(HookEvent):
|
||||
"""Represents the `pre-series-upgrade` hook from Juju.
|
||||
|
||||
This happens when a user has run `juju upgrade-series MACHINE prepare` and
|
||||
will fire for each unit that is running on the machine, telling them that
|
||||
the user is preparing to upgrade the Machine's series (eg trusty->bionic).
|
||||
The charm should take actions to prepare for the upgrade (a database charm
|
||||
would want to write out a version-independent dump of the database, so that
|
||||
when a new version of the database is available in a new series, it can be
|
||||
used.)
|
||||
Once all units on a machine have run `pre-series-upgrade`, the user will
|
||||
initiate the steps to actually upgrade the machine (eg `do-release-upgrade`).
|
||||
When the upgrade has been completed, the :class:`PostSeriesUpgradeEvent` will fire.
|
||||
"""
|
||||
|
||||
|
||||
class PostSeriesUpgradeEvent(HookEvent):
|
||||
"""Represents the `post-series-upgrade` hook from Juju.
|
||||
|
||||
This is run after the user has done a distribution upgrade (or rolled back
|
||||
and kept the same series). It is called in response to
|
||||
`juju upgrade-series MACHINE complete`. Charms are expected to do whatever
|
||||
steps are necessary to reconfigure their applications for the new series.
|
||||
"""
|
||||
|
||||
|
||||
class LeaderElectedEvent(HookEvent):
|
||||
"""Represents the `leader-elected` hook from Juju.
|
||||
|
||||
Juju will trigger this when a new lead unit is chosen for a given application.
|
||||
This represents the leader of the charm information (not necessarily the primary
|
||||
of a running application). The main utility is that charm authors can know
|
||||
that only one unit will be a leader at any given time, so they can do
|
||||
configuration, etc, that would otherwise require coordination between units.
|
||||
(eg, selecting a password for a new relation)
|
||||
"""
|
||||
|
||||
|
||||
class LeaderSettingsChangedEvent(HookEvent):
|
||||
"""Represents the `leader-settings-changed` hook from Juju.
|
||||
|
||||
Deprecated. This represents when a lead unit would call `leader-set` to inform
|
||||
the other units of an application that they have new information to handle.
|
||||
This has been deprecated in favor of using a Peer relation, and having the
|
||||
leader set a value in the Application data bag for that peer relation.
|
||||
(see :class:`RelationChangedEvent`).
|
||||
"""
|
||||
|
||||
|
||||
class CollectMetricsEvent(HookEvent):
|
||||
"""Represents the `collect-metrics` hook from Juju.
|
||||
|
||||
Note that events firing during a CollectMetricsEvent are currently
|
||||
sandboxed in how they can interact with Juju. To report metrics
|
||||
use :meth:`.add_metrics`.
|
||||
"""
|
||||
|
||||
def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None:
|
||||
"""Record metrics that have been gathered by the charm for this unit.
|
||||
|
||||
Args:
|
||||
metrics: A collection of {key: float} pairs that contains the
|
||||
metrics that have been gathered
|
||||
labels: {key:value} strings that can be applied to the
|
||||
metrics that are being gathered
|
||||
"""
|
||||
self.framework.model._backend.add_metrics(metrics, labels)
|
||||
|
||||
|
||||
class RelationEvent(HookEvent):
|
||||
"""A base class representing the various relation lifecycle events.
|
||||
|
||||
Charmers should not be creating RelationEvents directly. The events will be
|
||||
generated by the framework from Juju related events. Users can observe them
|
||||
from the various `CharmBase.on[relation_name].relation_*` events.
|
||||
|
||||
Attributes:
|
||||
relation: The Relation involved in this event
|
||||
app: The remote application that has triggered this event
|
||||
unit: The remote unit that has triggered this event. This may be None
|
||||
if the relation event was triggered as an Application level event
|
||||
"""
|
||||
|
||||
def __init__(self, handle, relation, app=None, unit=None):
|
||||
super().__init__(handle)
|
||||
|
||||
if unit is not None and unit.app != app:
|
||||
raise RuntimeError(
|
||||
'cannot create RelationEvent with application {} and unit {}'.format(app, unit))
|
||||
|
||||
self.relation = relation
|
||||
self.app = app
|
||||
self.unit = unit
|
||||
|
||||
def snapshot(self) -> dict:
|
||||
"""Used by the framework to serialize the event to disk.
|
||||
|
||||
Not meant to be called by Charm code.
|
||||
"""
|
||||
snapshot = {
|
||||
'relation_name': self.relation.name,
|
||||
'relation_id': self.relation.id,
|
||||
}
|
||||
if self.app:
|
||||
snapshot['app_name'] = self.app.name
|
||||
if self.unit:
|
||||
snapshot['unit_name'] = self.unit.name
|
||||
return snapshot
|
||||
|
||||
def restore(self, snapshot: dict) -> None:
|
||||
"""Used by the framework to deserialize the event from disk.
|
||||
|
||||
Not meant to be called by Charm code.
|
||||
"""
|
||||
self.relation = self.framework.model.get_relation(
|
||||
snapshot['relation_name'], snapshot['relation_id'])
|
||||
|
||||
app_name = snapshot.get('app_name')
|
||||
if app_name:
|
||||
self.app = self.framework.model.get_app(app_name)
|
||||
else:
|
||||
self.app = None
|
||||
|
||||
unit_name = snapshot.get('unit_name')
|
||||
if unit_name:
|
||||
self.unit = self.framework.model.get_unit(unit_name)
|
||||
else:
|
||||
self.unit = None
|
||||
|
||||
|
||||
class RelationCreatedEvent(RelationEvent):
|
||||
"""Represents the `relation-created` hook from Juju.
|
||||
|
||||
This is triggered when a new relation to another app is added in Juju. This
|
||||
can occur before units for those applications have started. All existing
|
||||
relations should be established before start.
|
||||
"""
|
||||
|
||||
|
||||
class RelationJoinedEvent(RelationEvent):
|
||||
"""Represents the `relation-joined` hook from Juju.
|
||||
|
||||
This is triggered whenever a new unit of a related application joins the relation.
|
||||
(eg, a unit was added to an existing related app, or a new relation was established
|
||||
with an application that already had units.)
|
||||
"""
|
||||
|
||||
|
||||
class RelationChangedEvent(RelationEvent):
|
||||
"""Represents the `relation-changed` hook from Juju.
|
||||
|
||||
This is triggered whenever there is a change to the data bucket for a related
|
||||
application or unit. Look at `event.relation.data[event.unit/app]` to see the
|
||||
new information.
|
||||
"""
|
||||
|
||||
|
||||
class RelationDepartedEvent(RelationEvent):
|
||||
"""Represents the `relation-departed` hook from Juju.
|
||||
|
||||
This is the inverse of the RelationJoinedEvent, representing when a unit
|
||||
is leaving the relation (the unit is being removed, the app is being removed,
|
||||
the relation is being removed). It is fired once for each unit that is
|
||||
going away.
|
||||
"""
|
||||
|
||||
|
||||
class RelationBrokenEvent(RelationEvent):
|
||||
"""Represents the `relation-broken` hook from Juju.
|
||||
|
||||
If a relation is being removed (`juju remove-relation` or `juju remove-application`),
|
||||
once all the units have been removed, RelationBrokenEvent will fire to signal
|
||||
that the relationship has been fully terminated.
|
||||
"""
|
||||
|
||||
|
||||
class StorageEvent(HookEvent):
|
||||
"""Base class representing Storage related events."""
|
||||
|
||||
|
||||
class StorageAttachedEvent(StorageEvent):
|
||||
"""Represents the `storage-attached` hook from Juju.
|
||||
|
||||
Called when new storage is available for the charm to use.
|
||||
"""
|
||||
|
||||
|
||||
class StorageDetachingEvent(StorageEvent):
|
||||
"""Represents the `storage-detaching` hook from Juju.
|
||||
|
||||
Called when storage a charm has been using is going away.
|
||||
"""
|
||||
|
||||
|
||||
class CharmEvents(ObjectEvents):
|
||||
"""The events that are generated by Juju in response to the lifecycle of an application."""
|
||||
|
||||
install = EventSource(InstallEvent)
|
||||
start = EventSource(StartEvent)
|
||||
stop = EventSource(StopEvent)
|
||||
remove = EventSource(RemoveEvent)
|
||||
update_status = EventSource(UpdateStatusEvent)
|
||||
config_changed = EventSource(ConfigChangedEvent)
|
||||
upgrade_charm = EventSource(UpgradeCharmEvent)
|
||||
pre_series_upgrade = EventSource(PreSeriesUpgradeEvent)
|
||||
post_series_upgrade = EventSource(PostSeriesUpgradeEvent)
|
||||
leader_elected = EventSource(LeaderElectedEvent)
|
||||
leader_settings_changed = EventSource(LeaderSettingsChangedEvent)
|
||||
collect_metrics = EventSource(CollectMetricsEvent)
|
||||
|
||||
|
||||
class CharmBase(Object):
|
||||
"""Base class that represents the Charm overall.
|
||||
|
||||
Usually this initialization is done by ops.main.main() rather than Charm authors
|
||||
directly instantiating a Charm.
|
||||
|
||||
Args:
|
||||
framework: The framework responsible for managing the Model and events for this
|
||||
Charm.
|
||||
key: Ignored; will remove after deprecation period of the signature change.
|
||||
"""
|
||||
|
||||
on = CharmEvents()
|
||||
|
||||
def __init__(self, framework: Framework, key: typing.Optional = None):
|
||||
super().__init__(framework, None)
|
||||
|
||||
for relation_name in self.framework.meta.relations:
|
||||
relation_name = relation_name.replace('-', '_')
|
||||
self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent)
|
||||
self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent)
|
||||
self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent)
|
||||
self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent)
|
||||
self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent)
|
||||
|
||||
for storage_name in self.framework.meta.storages:
|
||||
storage_name = storage_name.replace('-', '_')
|
||||
self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent)
|
||||
self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent)
|
||||
|
||||
for action_name in self.framework.meta.actions:
|
||||
action_name = action_name.replace('-', '_')
|
||||
self.on.define_event(action_name + '_action', ActionEvent)
|
||||
|
||||
@property
|
||||
def app(self) -> model.Application:
|
||||
"""Application that this unit is part of."""
|
||||
return self.framework.model.app
|
||||
|
||||
@property
|
||||
def unit(self) -> model.Unit:
|
||||
"""Unit that this execution is responsible for."""
|
||||
return self.framework.model.unit
|
||||
|
||||
@property
|
||||
def meta(self) -> 'CharmMeta':
|
||||
"""CharmMeta of this charm.
|
||||
"""
|
||||
return self.framework.meta
|
||||
|
||||
@property
|
||||
def charm_dir(self) -> pathlib.Path:
|
||||
"""Root directory of the Charm as it is running.
|
||||
"""
|
||||
return self.framework.charm_dir
|
||||
|
||||
|
||||
class CharmMeta:
|
||||
"""Object containing the metadata for the charm.
|
||||
|
||||
This is read from metadata.yaml and/or actions.yaml. Generally charms will
|
||||
define this information, rather than reading it at runtime. This class is
|
||||
mostly for the framework to understand what the charm has defined.
|
||||
|
||||
The maintainers, tags, terms, series, and extra_bindings attributes are all
|
||||
lists of strings. The requires, provides, peers, relations, storage,
|
||||
resources, and payloads attributes are all mappings of names to instances
|
||||
of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta.
|
||||
|
||||
The relations attribute is a convenience accessor which includes all of the
|
||||
requires, provides, and peers RelationMeta items. If needed, the role of
|
||||
the relation definition can be obtained from its role attribute.
|
||||
|
||||
Attributes:
|
||||
name: The name of this charm
|
||||
summary: Short description of what this charm does
|
||||
description: Long description for this charm
|
||||
maintainers: A list of strings of the email addresses of the maintainers
|
||||
of this charm.
|
||||
tags: Charm store tag metadata for categories associated with this charm.
|
||||
terms: Charm store terms that should be agreed to before this charm can
|
||||
be deployed. (Used for things like licensing issues.)
|
||||
series: The list of supported OS series that this charm can support.
|
||||
The first entry in the list is the default series that will be
|
||||
used by deploy if no other series is requested by the user.
|
||||
subordinate: True/False whether this charm is intended to be used as a
|
||||
subordinate charm.
|
||||
min_juju_version: If supplied, indicates this charm needs features that
|
||||
are not available in older versions of Juju.
|
||||
requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation.
|
||||
provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation.
|
||||
peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation.
|
||||
relations: A dict containing all :class:`RelationMeta` attributes (merged from other
|
||||
sections)
|
||||
storages: A dict of {name: :class:`StorageMeta`} for each defined storage.
|
||||
resources: A dict of {name: :class:`ResourceMeta`} for each defined resource.
|
||||
payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload.
|
||||
extra_bindings: A dict of additional named bindings that a charm can use
|
||||
for network configuration.
|
||||
actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined.
|
||||
Args:
|
||||
raw: a mapping containing the contents of metadata.yaml
|
||||
actions_raw: a mapping containing the contents of actions.yaml
|
||||
"""
|
||||
|
||||
def __init__(self, raw: typing.Optional[dict] = None,
             actions_raw: typing.Optional[dict] = None):
    """Initialize charm metadata from parsed YAML mappings.

    Args:
        raw: a mapping containing the contents of metadata.yaml
        actions_raw: a mapping containing the contents of actions.yaml
    """
    # Guard against the shared-mutable-default pitfall: a literal {} default
    # is a single dict object shared by every call of this constructor.
    raw = {} if raw is None else raw
    actions_raw = {} if actions_raw is None else actions_raw

    self.name = raw.get('name', '')
    self.summary = raw.get('summary', '')
    self.description = raw.get('description', '')
    self.maintainers = []
    # metadata.yaml may carry a single 'maintainer' and/or a 'maintainers' list.
    if 'maintainer' in raw:
        self.maintainers.append(raw['maintainer'])
    if 'maintainers' in raw:
        self.maintainers.extend(raw['maintainers'])
    self.tags = raw.get('tags', [])
    self.terms = raw.get('terms', [])
    self.series = raw.get('series', [])
    self.subordinate = raw.get('subordinate', False)
    self.min_juju_version = raw.get('min-juju-version')
    self.requires = {name: RelationMeta(RelationRole.requires, name, rel)
                     for name, rel in raw.get('requires', {}).items()}
    self.provides = {name: RelationMeta(RelationRole.provides, name, rel)
                     for name, rel in raw.get('provides', {}).items()}
    self.peers = {name: RelationMeta(RelationRole.peer, name, rel)
                  for name, rel in raw.get('peers', {}).items()}
    # 'relations' merges the three role-specific maps for generic lookup.
    self.relations = {}
    self.relations.update(self.requires)
    self.relations.update(self.provides)
    self.relations.update(self.peers)
    self.storages = {name: StorageMeta(name, storage)
                     for name, storage in raw.get('storage', {}).items()}
    self.resources = {name: ResourceMeta(name, res)
                      for name, res in raw.get('resources', {}).items()}
    self.payloads = {name: PayloadMeta(name, payload)
                     for name, payload in raw.get('payloads', {}).items()}
    self.extra_bindings = raw.get('extra-bindings', {})
    self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()}
|
||||
|
||||
@classmethod
def from_yaml(
        cls, metadata: typing.Union[str, typing.TextIO],
        actions: typing.Optional[typing.Union[str, typing.TextIO]] = None):
    """Instantiate a CharmMeta from a YAML description of metadata.yaml.

    Args:
        metadata: A YAML description of charm metadata (name, relations, etc.)
            This can be a simple string, or a file-like object
            (passed to `yaml.safe_load`).
        actions: YAML description of Actions for this charm (eg actions.yaml)
    """
    parsed_actions = {} if actions is None else _loadYaml(actions)
    return cls(_loadYaml(metadata), parsed_actions)
|
||||
|
||||
|
||||
class RelationRole(enum.Enum):
    """The role a relation endpoint can play: peer, requires, or provides."""

    peer = 'peer'
    requires = 'requires'
    provides = 'provides'

    def is_peer(self) -> bool:
        """Report whether this role is the peer role.

        A convenience to avoid having to import charm.
        """
        return self == RelationRole.peer
|
||||
|
||||
|
||||
class RelationMeta:
    """Object containing metadata about a relation definition.

    Should not be constructed directly by Charm code. Is gotten from one of
    :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`,
    or :attr:`CharmMeta.relations`.

    Attributes:
        role: This is one of peer/requires/provides
        relation_name: Name of this relation from metadata.yaml
        interface_name: Optional definition of the interface protocol.
        scope: "global" or "container" scope based on how the relation should be used.
    """

    def __init__(self, role: RelationRole, relation_name: str, raw: dict):
        if not isinstance(role, RelationRole):
            # Name the actual expected type: the previous message said
            # "Role", a type that does not exist in this module.
            raise TypeError("role should be a RelationRole, not {!r}".format(role))
        self.role = role
        self.relation_name = relation_name
        # 'interface' is mandatory in metadata.yaml; 'scope' is optional.
        self.interface_name = raw['interface']
        self.scope = raw.get('scope')
|
||||
|
||||
|
||||
class StorageMeta:
    """Object containing metadata about a storage definition.

    Attributes:
        storage_name: the name of the storage as given in metadata.yaml
        type: the storage type ('type' key; required)
        description/shared/read_only/minimum_size/location: optional fields
        multiple_range: (min, max) parsed from the 'multiple' stanza; max is
            None for an open-ended range such as "2-"; the attribute is None
            when no 'multiple' stanza is present.
    """

    def __init__(self, name, raw):
        self.storage_name = name
        self.type = raw['type']
        self.description = raw.get('description', '')
        self.shared = raw.get('shared', False)
        self.read_only = raw.get('read-only', False)
        self.minimum_size = raw.get('minimum-size')
        self.location = raw.get('location')
        self.multiple_range = None
        if 'multiple' in raw:
            # The range is either a single count ("3") or a span ("1-3", "2-").
            # Use a dedicated name instead of shadowing the builtin ``range``.
            range_spec = raw['multiple']['range']
            if '-' not in range_spec:
                self.multiple_range = (int(range_spec), int(range_spec))
            else:
                parts = range_spec.split('-')
                self.multiple_range = (int(parts[0]), int(parts[1]) if parts[1] else None)
|
||||
|
||||
|
||||
class ResourceMeta:
    """Object containing metadata about a resource definition."""

    def __init__(self, name, raw):
        # 'type' is required; 'filename' and 'description' are optional.
        self.resource_name = name
        self.type = raw['type']
        self.filename = raw.get('filename')
        self.description = raw.get('description', '')
|
||||
|
||||
|
||||
class PayloadMeta:
    """Object containing metadata about a payload definition."""

    def __init__(self, name, raw):
        # A payload carries only its name and a required 'type'.
        self.payload_name, self.type = name, raw['type']
|
||||
|
||||
|
||||
class ActionMeta:
    """Object containing metadata about an action's definition."""

    def __init__(self, name, raw=None):
        data = raw or {}
        self.name = name
        self.title = data.get('title', '')
        self.description = data.get('description', '')
        # {<parameter name>: <JSON Schema definition>}
        self.parameters = data.get('params', {})
        # [<parameter name>, ...]
        self.required = data.get('required', [])
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,106 +0,0 @@
|
|||
# Copyright 2020 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import re
|
||||
from functools import total_ordering
|
||||
|
||||
|
||||
@total_ordering
class JujuVersion:
    """Parse, render and compare Juju version strings.

    Accepts versions such as "2.8.1", "2.8-rc1" and "2.8-rc1.2"; supports
    ``==`` and ordering against other instances or plain version strings.
    """

    PATTERN = r'''^
    (?P<major>\d{1,9})\.(?P<minor>\d{1,9}) # <major> and <minor> numbers are always there
    ((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))? # sometimes with .<patch> or -<tag><patch>
    (\.(?P<build>\d{1,9}))?$ # and sometimes with a <build> number.
    '''

    def __init__(self, version):
        """Parse *version*; raise RuntimeError if it is not a valid version string."""
        m = re.match(self.PATTERN, version, re.VERBOSE)
        if not m:
            raise RuntimeError('"{}" is not a valid Juju version string'.format(version))

        d = m.groupdict()
        self.major = int(m.group('major'))
        self.minor = int(m.group('minor'))
        # Missing groups normalize to '' (tag) or 0 (patch/build).
        self.tag = d['tag'] or ''
        self.patch = int(d['patch'] or 0)
        self.build = int(d['build'] or 0)

    def __repr__(self):
        if self.tag:
            s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch)
        else:
            s = '{}.{}.{}'.format(self.major, self.minor, self.patch)
        if self.build > 0:
            s += '.{}'.format(self.build)
        return s

    def __eq__(self, other):
        if self is other:
            return True
        if isinstance(other, str):
            other = type(self)(other)
        elif not isinstance(other, JujuVersion):
            raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))
        return (
            self.major == other.major
            and self.minor == other.minor
            and self.tag == other.tag
            and self.build == other.build
            and self.patch == other.patch)

    def __hash__(self):
        # Defining __eq__ alone implicitly sets __hash__ to None, making
        # instances unusable in sets/dict keys.  Hash over exactly the
        # fields __eq__ compares so equal versions hash equally.
        return hash((self.major, self.minor, self.tag, self.patch, self.build))

    def __lt__(self, other):
        if self is other:
            return False
        if isinstance(other, str):
            other = type(self)(other)
        elif not isinstance(other, JujuVersion):
            raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))

        if self.major != other.major:
            return self.major < other.major
        elif self.minor != other.minor:
            return self.minor < other.minor
        elif self.tag != other.tag:
            # A tagged (pre-release) version sorts before the untagged release.
            if not self.tag:
                return False
            elif not other.tag:
                return True
            return self.tag < other.tag
        elif self.patch != other.patch:
            return self.patch < other.patch
        elif self.build != other.build:
            return self.build < other.build
        return False

    @classmethod
    def from_environ(cls) -> 'JujuVersion':
        """Build a JujuVersion from JUJU_VERSION."""
        v = os.environ.get('JUJU_VERSION')
        if v is None:
            # No JUJU_VERSION in the environment: fall back to a null version.
            v = '0.0.0'
        return cls(v)

    def has_app_data(self) -> bool:
        """Determine whether this juju version knows about app data."""
        return (self.major, self.minor, self.patch) >= (2, 7, 0)

    def is_dispatch_aware(self) -> bool:
        """Determine whether this juju version knows about dispatch."""
        return (self.major, self.minor, self.patch) >= (2, 8, 0)

    def has_controller_storage(self) -> bool:
        """Determine whether this juju version supports controller-side storage."""
        return (self.major, self.minor, self.patch) >= (2, 8, 0)
|
||||
|
|
@ -1,262 +0,0 @@
|
|||
# Copyright 2020 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
from ast import literal_eval
|
||||
from importlib.util import module_from_spec
|
||||
from importlib.machinery import ModuleSpec
|
||||
from pkgutil import get_importer
|
||||
from types import ModuleType
|
||||
from typing import List
|
||||
|
||||
__all__ = ('use', 'autoimport')

logger = logging.getLogger(__name__)

# Cache of discovered libraries, {(name, author): [_Lib, ...]} once
# autoimport() has run; None means discovery has not happened yet.
_libraries = None

# Matches "LIB<KEY> = <int or quoted string>" metadata lines in a lib module.
_libline_re = re.compile(r'''^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])''')
# Library names: lowercase alphanumeric, starting with a letter, length >= 2.
_libname_re = re.compile(r'''^[a-z][a-z0-9]+$''')

# Not perfect, but should do for now.
_libauthor_re = re.compile(r'''^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$''')
|
||||
|
||||
|
||||
def use(name: str, api: int, author: str) -> ModuleType:
    """Use a library from the ops libraries.

    Args:
        name: the name of the library requested.
        api: the API version of the library.
        author: the author of the library. If not given, requests the
            one in the standard library.

    Raises:
        ImportError: if the library cannot be found.
        TypeError: if the name, api, or author are the wrong type.
        ValueError: if the name, api, or author are invalid.
    """
    # Type checks first, then value checks, in the same order as before so
    # callers see the same exception for the same bad input.
    if not isinstance(name, str):
        raise TypeError("invalid library name: {!r} (must be a str)".format(name))
    if not isinstance(author, str):
        raise TypeError("invalid library author: {!r} (must be a str)".format(author))
    if not isinstance(api, int):
        raise TypeError("invalid library API: {!r} (must be an int)".format(api))
    if api < 0:
        raise ValueError('invalid library api: {} (must be ≥0)'.format(api))
    if not _libname_re.match(name):
        raise ValueError("invalid library name: {!r} (chars and digits only)".format(name))
    if not _libauthor_re.match(author):
        raise ValueError("invalid library author email: {!r}".format(author))

    if _libraries is None:
        # Lazily discover libraries on first use.
        autoimport()

    candidates = _libraries.get((name, author), ())
    wanted = next((lib for lib in candidates if lib.api == api), None)
    if wanted is not None:
        return wanted.import_module()

    available = ', '.join(str(lib.api) for lib in candidates)
    if available:
        msg = 'cannot find "{}" from "{}" with API version {} (have {})'.format(
            name, author, api, available)
    else:
        msg = 'cannot find library "{}" from "{}"'.format(name, author)

    raise ImportError(msg, name=name)
|
||||
|
||||
|
||||
def autoimport():
    """Find all libs in the path and enable use of them.

    You only need to call this if you've installed a package or
    otherwise changed sys.path in the current run, and need to see the
    changes. Otherwise libraries are found on first call of `use`.
    """
    global _libraries
    _libraries = {}
    for spec in _find_all_specs(sys.path):
        parsed = _parse_lib(spec)
        if parsed is None:
            continue

        # Group by (name, author), keeping each group sorted newest-first.
        bucket = _libraries.setdefault((parsed.name, parsed.author), [])
        bucket.append(parsed)
        bucket.sort(reverse=True)
|
||||
|
||||
|
||||
def _find_all_specs(path):
    """Yield a module spec for every candidate opslib package under *path*.

    Scans each entry of *path* (typically sys.path) for a
    ``<top_dir>/opslib/<lib_dir>`` layout and yields an importable spec for
    each; unreadable entries and namespace packages are skipped with a
    debug log.
    """
    for sys_dir in path:
        # "" on sys.path means the current working directory.
        if sys_dir == "":
            sys_dir = "."
        try:
            top_dirs = os.listdir(sys_dir)
        except (FileNotFoundError, NotADirectoryError):
            continue
        except OSError as e:
            logger.debug("Tried to look for ops.lib packages under '%s': %s", sys_dir, e)
            continue
        logger.debug("Looking for ops.lib packages under '%s'", sys_dir)
        for top_dir in top_dirs:
            opslib = os.path.join(sys_dir, top_dir, 'opslib')
            try:
                lib_dirs = os.listdir(opslib)
            except (FileNotFoundError, NotADirectoryError):
                continue
            except OSError as e:
                logger.debug(" Tried '%s': %s", opslib, e)  # *lots* of things checked here
                continue
            else:
                logger.debug(" Trying '%s'", opslib)
            finder = get_importer(opslib)
            if finder is None:
                logger.debug(" Finder for '%s' is None", opslib)
                continue
            if not hasattr(finder, 'find_spec'):
                logger.debug(" Finder for '%s' has no find_spec", opslib)
                continue
            for lib_dir in lib_dirs:
                spec_name = "{}.opslib.{}".format(top_dir, lib_dir)
                spec = finder.find_spec(spec_name)
                if spec is None:
                    logger.debug(" No spec for %r", spec_name)
                    continue
                if spec.loader is None:
                    # a namespace package; not supported
                    logger.debug(" No loader for %r (probably a namespace package)", spec_name)
                    continue

                logger.debug(" Found %r", spec_name)
                yield spec
|
||||
|
||||
|
||||
# only the first this many lines of a file are looked at for the LIB* constants
_MAX_LIB_LINES = 99
# these keys, with these types, are needed to have an opslib
# (they are parsed from LIBNAME/LIBAUTHOR/LIBAPI/LIBPATCH assignments)
_NEEDED_KEYS = {'NAME': str, 'AUTHOR': str, 'API': int, 'PATCH': int}
|
||||
|
||||
|
||||
def _join_and(keys: List[str]) -> str:
|
||||
if len(keys) == 0:
|
||||
return ""
|
||||
if len(keys) == 1:
|
||||
return keys[0]
|
||||
return ", ".join(keys[:-1]) + ", and " + keys[-1]
|
||||
|
||||
|
||||
class _Missing:
    """A silly little helper to only work out the difference between
    what was found and what was needed when logging"""

    def __init__(self, found):
        self._found = found

    def __str__(self):
        needed = set(_NEEDED_KEYS)
        have = set(self._found)
        if not have:
            return "missing {}".format(_join_and(sorted(needed)))
        return "got {}, but missing {}".format(
            _join_and(sorted(have)),
            _join_and(sorted(needed - have)))
|
||||
|
||||
|
||||
def _parse_lib(spec):
    """Parse the module at *spec* for opslib metadata; return a _Lib or None.

    Scans at most _MAX_LIB_LINES lines for the LIBNAME/LIBAUTHOR/LIBAPI/
    LIBPATCH constants and returns None (with a debug log) on missing or
    mistyped metadata, or on any read/parse failure.
    """
    if spec.origin is None:
        # "can't happen"
        logger.warning("No origin for %r (no idea why; please report)", spec.name)
        return None

    logger.debug(" Parsing %r", spec.name)

    try:
        with open(spec.origin, 'rt', encoding='utf-8') as f:
            libinfo = {}
            for n, line in enumerate(f):
                if len(libinfo) == len(_NEEDED_KEYS):
                    # All required metadata found: stop scanning early.
                    break
                if n > _MAX_LIB_LINES:
                    logger.debug(
                        " Missing opslib metadata after reading to line %d: %s",
                        _MAX_LIB_LINES, _Missing(libinfo))
                    return None
                m = _libline_re.match(line)
                if m is None:
                    continue
                key, value = m.groups()
                if key in _NEEDED_KEYS:
                    # literal_eval is safe here: _libline_re only admits
                    # integers and simple quoted strings as the value.
                    value = literal_eval(value)
                    if not isinstance(value, _NEEDED_KEYS[key]):
                        logger.debug(
                            " Bad type for %s: expected %s, got %s",
                            key, _NEEDED_KEYS[key].__name__, type(value).__name__)
                        return None
                    libinfo[key] = value
            else:
                # for/else: the file ended before all LIB* constants were seen.
                if len(libinfo) != len(_NEEDED_KEYS):
                    logger.debug(
                        " Missing opslib metadata after reading to end of file: %s",
                        _Missing(libinfo))
                    return None
    except Exception as e:
        logger.debug(" Failed: %s", e)
        return None

    lib = _Lib(spec, libinfo['NAME'], libinfo['AUTHOR'], libinfo['API'], libinfo['PATCH'])
    logger.debug(" Success: found library %s", lib)

    return lib
|
||||
|
||||
|
||||
class _Lib:
|
||||
|
||||
def __init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: int):
|
||||
self.spec = spec
|
||||
self.name = name
|
||||
self.author = author
|
||||
self.api = api
|
||||
self.patch = patch
|
||||
|
||||
self._module = None
|
||||
|
||||
def __repr__(self):
|
||||
return "<_Lib {}>".format(self)
|
||||
|
||||
def __str__(self):
|
||||
return "{0.name} by {0.author}, API {0.api}, patch {0.patch}".format(self)
|
||||
|
||||
def import_module(self) -> ModuleType:
|
||||
if self._module is None:
|
||||
module = module_from_spec(self.spec)
|
||||
self.spec.loader.exec_module(module)
|
||||
self._module = module
|
||||
return self._module
|
||||
|
||||
def __eq__(self, other):
|
||||
if not isinstance(other, _Lib):
|
||||
return NotImplemented
|
||||
a = (self.name, self.author, self.api, self.patch)
|
||||
b = (other.name, other.author, other.api, other.patch)
|
||||
return a == b
|
||||
|
||||
def __lt__(self, other):
|
||||
if not isinstance(other, _Lib):
|
||||
return NotImplemented
|
||||
a = (self.name, self.author, self.api, self.patch)
|
||||
b = (other.name, other.author, other.api, other.patch)
|
||||
return a < b
|
||||
|
|
@ -1,51 +0,0 @@
|
|||
# Copyright 2020 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import sys
|
||||
import logging
|
||||
|
||||
|
||||
class JujuLogHandler(logging.Handler):
    """A handler for sending logs to Juju via juju-log."""

    def __init__(self, model_backend, level=logging.DEBUG):
        super().__init__(level)
        self.model_backend = model_backend

    def emit(self, record):
        # Forward the formatted message, tagged with its level name,
        # to the backend's juju-log.
        self.model_backend.juju_log(record.levelname, self.format(record))
|
||||
|
||||
|
||||
def setup_root_logging(model_backend, debug=False):
    """Setup python logging to forward messages to juju-log.

    By default, logging is set to DEBUG level, and messages will be filtered by Juju.
    Charmers can also set their own default log level with::

        logging.getLogger().setLevel(logging.INFO)

    model_backend -- a ModelBackend to use for juju-log
    debug -- if True, write logs to stderr as well as to juju-log.
    """
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(JujuLogHandler(model_backend))

    if debug:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)-8s %(message)s'))
        root.addHandler(stream_handler)

    # Route any uncaught exception into the log as well.
    sys.excepthook = lambda etype, value, tb: root.error(
        "Uncaught exception while in charm code:", exc_info=(etype, value, tb))
|
||||
|
|
@ -1,404 +0,0 @@
|
|||
# Copyright 2019-2020 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import inspect
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import typing
|
||||
import warnings
|
||||
from pathlib import Path
|
||||
|
||||
import yaml
|
||||
|
||||
import ops.charm
|
||||
import ops.framework
|
||||
import ops.model
|
||||
import ops.storage
|
||||
|
||||
from ops.log import setup_root_logging
|
||||
from ops.jujuversion import JujuVersion
|
||||
|
||||
CHARM_STATE_FILE = '.unit-state.db'
|
||||
|
||||
|
||||
logger = logging.getLogger()
|
||||
|
||||
|
||||
def _exe_path(path: Path) -> typing.Optional[Path]:
|
||||
"""Find and return the full path to the given binary.
|
||||
|
||||
Here path is the absolute path to a binary, but might be missing an extension.
|
||||
"""
|
||||
p = shutil.which(path.name, mode=os.F_OK, path=str(path.parent))
|
||||
if p is None:
|
||||
return None
|
||||
return Path(p)
|
||||
|
||||
|
||||
def _get_charm_dir():
|
||||
charm_dir = os.environ.get("JUJU_CHARM_DIR")
|
||||
if charm_dir is None:
|
||||
# Assume $JUJU_CHARM_DIR/lib/op/main.py structure.
|
||||
charm_dir = Path('{}/../../..'.format(__file__)).resolve()
|
||||
else:
|
||||
charm_dir = Path(charm_dir).resolve()
|
||||
return charm_dir
|
||||
|
||||
|
||||
def _create_event_link(charm, bound_event, link_to):
    """Create a symlink for a particular event.

    charm -- A charm object.
    bound_event -- An event for which to create a symlink.
    link_to -- What the event link should point to
    """
    # Hook events live under hooks/, action events under actions/; both use
    # dash-separated file names while event kinds use underscores.
    if issubclass(bound_event.event_type, ops.charm.HookEvent):
        event_dir = charm.framework.charm_dir / 'hooks'
        event_path = event_dir / bound_event.event_kind.replace('_', '-')
    elif issubclass(bound_event.event_type, ops.charm.ActionEvent):
        if not bound_event.event_kind.endswith("_action"):
            raise RuntimeError(
                'action event name {} needs _action suffix'.format(bound_event.event_kind))
        event_dir = charm.framework.charm_dir / 'actions'
        # The event_kind is suffixed with "_action" while the executable is not.
        event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-')
    else:
        raise RuntimeError(
            'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type))

    event_dir.mkdir(exist_ok=True)
    if not event_path.exists():
        # Use a relative target so the link survives the charm dir moving.
        target_path = os.path.relpath(link_to, str(event_dir))

        # Ignore the non-symlink files or directories
        # assuming the charm author knows what they are doing.
        logger.debug(
            'Creating a new relative symlink at %s pointing to %s',
            event_path, target_path)
        event_path.symlink_to(target_path)
|
||||
|
||||
|
||||
def _setup_event_links(charm_dir, charm):
    """Set up links for supported events that originate from Juju.

    Whether a charm can handle an event or not can be determined by
    introspecting which events are defined on it.

    Hooks or actions are created as symlinks to the charm code file
    which is determined by inspecting symlinks provided by the charm
    author at hooks/install or hooks/start.

    charm_dir -- A root directory of the charm.
    charm -- An instance of the Charm class.

    """
    # XXX: on windows this function does not accomplish what it wants to:
    #      it creates symlinks with no extension pointing to a .py
    #      and juju only knows how to handle .exe, .bat, .cmd, and .ps1
    #      so it does its job, but does not accomplish anything as the
    #      hooks aren't 'callable'.
    link_to = os.path.realpath(os.environ.get("JUJU_DISPATCH_PATH", sys.argv[0]))
    juju_event_types = (ops.charm.HookEvent, ops.charm.ActionEvent)
    for bound_event in charm.on.events().values():
        # Only events that originate from Juju need symlinks.
        if issubclass(bound_event.event_type, juju_event_types):
            _create_event_link(charm, bound_event, link_to)
|
||||
|
||||
|
||||
def _emit_charm_event(charm, event_name):
    """Emits a charm event based on a Juju event name.

    charm -- A charm instance to emit an event from.
    event_name -- A Juju event name to emit on a charm.
    """
    try:
        event_to_emit = getattr(charm.on, event_name)
    except AttributeError:
        # If the event is not supported by the charm implementation, do
        # not error out or try to emit it. This is to support rollbacks.
        logger.debug("Event %s not defined for %s.", event_name, charm)
        return

    if event_to_emit is not None:
        args, kwargs = _get_event_args(charm, event_to_emit)
        logger.debug('Emitting Juju event %s.', event_name)
        event_to_emit.emit(*args, **kwargs)
|
||||
|
||||
|
||||
def _get_event_args(charm, bound_event):
    """Build the (args, kwargs) pair for emitting *bound_event*.

    Relation, app and unit arguments are recovered from the Juju-provided
    environment variables.
    """
    model = charm.framework.model

    relation = None
    if issubclass(bound_event.event_type, ops.charm.RelationEvent):
        relation_name = os.environ['JUJU_RELATION']
        # JUJU_RELATION_ID looks like "<name>:<id>"; keep the numeric part.
        relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1])
        relation = model.get_relation(relation_name, relation_id)

    remote_app_name = os.environ.get('JUJU_REMOTE_APP', '')
    remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '')
    if remote_app_name or remote_unit_name:
        if not remote_app_name:
            # Derive the app name from the "<app>/<n>" unit name.
            if '/' not in remote_unit_name:
                raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name))
            remote_app_name = remote_unit_name.split('/')[0]
        args = [relation, model.get_app(remote_app_name)]
        if remote_unit_name:
            args.append(model.get_unit(remote_unit_name))
        return args, {}
    if relation:
        return [relation], {}
    return [], {}
|
||||
|
||||
|
||||
class _Dispatcher:
    """Encapsulate how to figure out what event Juju wants us to run.

    Also knows how to run “legacy” hooks when Juju called us via a top-level
    ``dispatch`` binary.

    Args:
        charm_dir: the toplevel directory of the charm

    Attributes:
        event_name: the name of the event to run
        is_dispatch_aware: are we running under a Juju that knows about the
            dispatch binary, and is that binary present?

    """

    def __init__(self, charm_dir: Path):
        self._charm_dir = charm_dir
        self._exec_path = Path(os.environ.get('JUJU_DISPATCH_PATH', sys.argv[0]))

        # Use the dispatch mechanism only when Juju understands it AND the
        # charm actually ships a dispatch executable.
        dispatch = charm_dir / 'dispatch'
        if JujuVersion.from_environ().is_dispatch_aware() and _exe_path(dispatch) is not None:
            self._init_dispatch()
        else:
            self._init_legacy()

    def ensure_event_links(self, charm):
        """Make sure necessary symlinks are present on disk"""

        if self.is_dispatch_aware:
            # links aren't needed
            return

        # When a charm is force-upgraded and a unit is in an error state Juju
        # does not run upgrade-charm and instead runs the failed hook followed
        # by config-changed. Given the nature of force-upgrading the hook setup
        # code is not triggered on config-changed.
        #
        # 'start' event is included as Juju does not fire the install event for
        # K8s charms (see LP: #1854635).
        if (self.event_name in ('install', 'start', 'upgrade_charm')
                or self.event_name.endswith('_storage_attached')):
            _setup_event_links(self._charm_dir, charm)

    def run_any_legacy_hook(self):
        """Run any extant legacy hook.

        If there is both a dispatch file and a legacy hook for the
        current event, run the wanted legacy hook.
        """

        if not self.is_dispatch_aware:
            # we *are* the legacy hook
            return

        dispatch_path = _exe_path(self._charm_dir / self._dispatch_path)
        if dispatch_path is None:
            logger.debug("Legacy %s does not exist.", self._dispatch_path)
            return

        # super strange that there isn't an is_executable
        if not os.access(str(dispatch_path), os.X_OK):
            logger.warning("Legacy %s exists but is not executable.", self._dispatch_path)
            return

        # Don't re-run ourselves if the legacy hook is a symlink back to us.
        if dispatch_path.resolve() == Path(sys.argv[0]).resolve():
            logger.debug("Legacy %s is just a link to ourselves.", self._dispatch_path)
            return

        argv = sys.argv.copy()
        argv[0] = str(dispatch_path)
        logger.info("Running legacy %s.", self._dispatch_path)
        try:
            subprocess.run(argv, check=True)
        except subprocess.CalledProcessError as e:
            # Propagate the legacy hook's failure status as our own.
            logger.warning("Legacy %s exited with status %d.", self._dispatch_path, e.returncode)
            sys.exit(e.returncode)
        except OSError as e:
            logger.warning("Unable to run legacy %s: %s", self._dispatch_path, e)
            sys.exit(1)
        else:
            logger.debug("Legacy %s exited with status 0.", self._dispatch_path)

    def _set_name_from_path(self, path: Path):
        """Sets the name attribute to that which can be inferred from the given path."""
        # Hook files use dashes; event names use underscores.
        name = path.name.replace('-', '_')
        if path.parent.name == 'actions':
            name = '{}_action'.format(name)
        self.event_name = name

    def _init_legacy(self):
        """Set up the 'legacy' dispatcher.

        The current Juju doesn't know about 'dispatch' and calls hooks
        explicitly.
        """
        self.is_dispatch_aware = False
        self._set_name_from_path(self._exec_path)

    def _init_dispatch(self):
        """Set up the new 'dispatch' dispatcher.

        The current Juju will run 'dispatch' if it exists, and otherwise fall
        back to the old behaviour.

        JUJU_DISPATCH_PATH will be set to the wanted hook, e.g. hooks/install,
        in both cases.
        """
        self._dispatch_path = Path(os.environ['JUJU_DISPATCH_PATH'])

        # Guard against re-entry: if dispatch ends up executing ourselves
        # again, exit instead of recursing.
        if 'OPERATOR_DISPATCH' in os.environ:
            logger.debug("Charm called itself via %s.", self._dispatch_path)
            sys.exit(0)
        os.environ['OPERATOR_DISPATCH'] = '1'

        self.is_dispatch_aware = True
        self._set_name_from_path(self._dispatch_path)

    def is_restricted_context(self):
        """Return True if we are running in a restricted Juju context.

        When in a restricted context, most commands (relation-get, config-get,
        state-get) are not available. As such, we change how we interact with
        Juju.
        """
        return self.event_name in ('collect_metrics',)
|
||||
|
||||
|
||||
def _should_use_controller_storage(db_path: Path, meta: ops.charm.CharmMeta) -> bool:
    """Figure out whether we want to use controller storage or not."""
    # A pre-existing local database wins: keep using what was used before.
    if db_path.exists():
        logger.debug("Using local storage: %s already exists", db_path)
        return False

    # Non-kubernetes charms have a disk, so local storage is fine for them.
    if 'kubernetes' not in meta.series:
        logger.debug("Using local storage: not a kubernetes charm")
        return False

    # Kubernetes charm with no prior local state: controller storage, if the
    # running Juju is new enough to provide it.
    juju = JujuVersion.from_environ()
    if juju.has_controller_storage():
        logger.debug("Using controller storage: JUJU_VERSION=%s", juju)
        return True
    logger.debug("Using local storage: JUJU_VERSION=%s", juju)
    return False
|
||||
|
||||
|
||||
def main(charm_class: ops.charm.CharmBase, use_juju_for_storage: bool = None):
    """Setup the charm and dispatch the observed event.

    The event name is based on the way this executable was called (argv[0]).

    Args:
        charm_class: your charm class.
        use_juju_for_storage: whether to use controller-side storage. If not specified
            then kubernetes charms that haven't previously used local storage and that
            are running on a new enough Juju default to controller-side storage,
            otherwise local storage is used.
    """
    # NOTE(review): the annotation says CharmBase, but a charm *class* is
    # expected — it is instantiated below. Left as-is to avoid changing code.
    charm_dir = _get_charm_dir()

    # Wire logging through the Juju model backend as early as possible so
    # everything below is captured in the Juju debug log.
    model_backend = ops.model._ModelBackend()
    debug = ('JUJU_DEBUG' in os.environ)
    setup_root_logging(model_backend, debug=debug)
    logger.debug("Operator Framework %s up and running.", ops.__version__)

    dispatcher = _Dispatcher(charm_dir)
    dispatcher.run_any_legacy_hook()

    metadata = (charm_dir / 'metadata.yaml').read_text()
    # actions.yaml is optional; charms without actions simply don't ship it.
    actions_meta = charm_dir / 'actions.yaml'
    if actions_meta.exists():
        actions_metadata = actions_meta.read_text()
    else:
        actions_metadata = None

    if not yaml.__with_libyaml__:
        logger.debug('yaml does not have libyaml extensions, using slower pure Python yaml loader')
    meta = ops.charm.CharmMeta.from_yaml(metadata, actions_metadata)
    model = ops.model.Model(meta, model_backend)

    charm_state_path = charm_dir / CHARM_STATE_FILE

    if use_juju_for_storage and not ops.storage.juju_backend_available():
        # raise an exception; the charm is broken and needs fixing.
        msg = 'charm set use_juju_for_storage=True, but Juju version {} does not support it'
        raise RuntimeError(msg.format(JujuVersion.from_environ()))

    # When the charm author expressed no preference, decide automatically.
    if use_juju_for_storage is None:
        use_juju_for_storage = _should_use_controller_storage(charm_state_path, meta)

    if use_juju_for_storage:
        if dispatcher.is_restricted_context():
            # TODO: jam 2020-06-30 This unconditionally avoids running a collect metrics event
            #  Though we eventually expect that juju will run collect-metrics in a
            #  non-restricted context. Once we can determine that we are running collect-metrics
            #  in a non-restricted context, we should fire the event as normal.
            logger.debug('"%s" is not supported when using Juju for storage\n'
                         'see: https://github.com/canonical/operator/issues/348',
                         dispatcher.event_name)
            # Note that we don't exit nonzero, because that would cause Juju to rerun the hook
            return
        store = ops.storage.JujuStorage()
    else:
        store = ops.storage.SQLiteStorage(charm_state_path)
    framework = ops.framework.Framework(store, charm_dir, meta, model)
    try:
        # Older charm classes took a second 'key' argument; detect which
        # signature this class has and warn on the deprecated form.
        sig = inspect.signature(charm_class)
        try:
            sig.bind(framework)
        except TypeError:
            msg = (
                "the second argument, 'key', has been deprecated and will be "
                "removed after the 0.7 release")
            warnings.warn(msg, DeprecationWarning)
            charm = charm_class(framework, None)
        else:
            charm = charm_class(framework)
        dispatcher.ensure_event_links(charm)

        # TODO: Remove the collect_metrics check below as soon as the relevant
        #       Juju changes are made.
        #
        # Skip reemission of deferred events for collect-metrics events because
        # they do not have the full access to all hook tools.
        if not dispatcher.is_restricted_context():
            framework.reemit()

        _emit_charm_event(charm, dispatcher.event_name)

        framework.commit()
    finally:
        # Always release the storage lock, even if event handling raised.
        framework.close()
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,318 +0,0 @@
|
|||
# Copyright 2019-2020 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from datetime import timedelta
|
||||
import pickle
|
||||
import shutil
|
||||
import subprocess
|
||||
import sqlite3
|
||||
import typing
|
||||
|
||||
import yaml
|
||||
|
||||
|
||||
def _run(args, **kw):
|
||||
cmd = shutil.which(args[0])
|
||||
if cmd is None:
|
||||
raise FileNotFoundError(args[0])
|
||||
return subprocess.run([cmd, *args[1:]], **kw)
|
||||
|
||||
|
||||
class SQLiteStorage:
    """Part of the Storage API: persist framework state in a local SQLite file.

    Two tables are kept: 'snapshot' maps a handle path to pickled snapshot
    data, and 'notice' is an ordered log of
    (event_path, observer_path, method_name) rows.
    """

    # How long to wait for the exclusive database lock before giving up.
    DB_LOCK_TIMEOUT = timedelta(hours=1)

    def __init__(self, filename):
        # The isolation_level argument is set to None such that the implicit
        # transaction management behavior of the sqlite3 module is disabled.
        self._db = sqlite3.connect(str(filename),
                                   isolation_level=None,
                                   timeout=self.DB_LOCK_TIMEOUT.total_seconds())
        self._setup()

    def _setup(self):
        """Create the schema on first use; a no-op if it already exists."""
        # Make sure that the database is locked until the connection is closed,
        # not until the transaction ends.
        self._db.execute("PRAGMA locking_mode=EXCLUSIVE")
        c = self._db.execute("BEGIN")
        c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'")
        if c.fetchone()[0] == 0:
            # Keep in mind what might happen if the process dies somewhere below.
            # The system must not be rendered permanently broken by that.
            self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)")
            self._db.execute('''
                CREATE TABLE notice (
                  sequence INTEGER PRIMARY KEY AUTOINCREMENT,
                  event_path TEXT,
                  observer_path TEXT,
                  method_name TEXT)
                ''')
            self._db.commit()

    def close(self):
        """Part of the Storage API, close the underlying database connection."""
        self._db.close()

    def commit(self):
        """Part of the Storage API, commit any pending changes to disk."""
        self._db.commit()

    # There's commit but no rollback. For abort to be supported, we'll need logic that
    # can rollback decisions made by third-party code in terms of the internal state
    # of objects that have been snapshotted, and hooks to let them know about it and
    # take the needed actions to undo their logic until the last snapshot.
    # This is doable but will increase significantly the chances for mistakes.

    def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None:
        """Part of the Storage API, persist a snapshot data under the given handle.

        Args:
            handle_path: The string identifying the snapshot.
            snapshot_data: The data to be persisted. (as returned by Object.snapshot()). This
                might be a dict/tuple/int, but must only contain 'simple' python types.
        """
        # Use pickle for serialization, so the value remains portable.
        # NOTE(review): pickle is Python-specific; "portable" here presumably
        # means stable across charm invocations, not across languages.
        raw_data = pickle.dumps(snapshot_data)
        self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, raw_data))

    def load_snapshot(self, handle_path: str) -> typing.Any:
        """Part of the Storage API, retrieve a snapshot that was previously saved.

        Args:
            handle_path: The string identifying the snapshot.
        Raises:
            NoSnapshotError: if there is no snapshot for the given handle_path.
        """
        c = self._db.cursor()
        c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,))
        row = c.fetchone()
        if row:
            return pickle.loads(row[0])
        raise NoSnapshotError(handle_path)

    def drop_snapshot(self, handle_path: str):
        """Part of the Storage API, remove a snapshot that was previously saved.

        Dropping a snapshot that doesn't exist is treated as a no-op.
        """
        self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,))

    def list_snapshots(self) -> typing.Generator[str, None, None]:
        """Return the name of all snapshots that are currently saved."""
        c = self._db.cursor()
        c.execute("SELECT handle FROM snapshot")
        # Stream results in batches rather than loading the whole table.
        while True:
            rows = c.fetchmany()
            if not rows:
                break
            for row in rows:
                yield row[0]

    def save_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
        """Part of the Storage API, record a notice (event and observer)."""
        # NULL lets the AUTOINCREMENT sequence column assign the next value.
        self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)',
                         (event_path, observer_path, method_name))

    def drop_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
        """Part of the Storage API, remove a notice that was previously recorded."""
        self._db.execute('''
            DELETE FROM notice
             WHERE event_path=?
               AND observer_path=?
               AND method_name=?
            ''', (event_path, observer_path, method_name))

    def notices(self, event_path: typing.Optional[str]) ->\
            typing.Generator[typing.Tuple[str, str, str], None, None]:
        """Part of the Storage API, return all notices that begin with event_path.

        Args:
            event_path: If supplied, will only yield events that match event_path. If not
                supplied (or None/'') will return all events.
        Returns:
            Iterable of (event_path, observer_path, method_name) tuples
        """
        if event_path:
            c = self._db.execute('''
                SELECT event_path, observer_path, method_name
                  FROM notice
                 WHERE event_path=?
                 ORDER BY sequence
                ''', (event_path,))
        else:
            c = self._db.execute('''
                SELECT event_path, observer_path, method_name
                  FROM notice
                 ORDER BY sequence
                ''')
        while True:
            rows = c.fetchmany()
            if not rows:
                break
            for row in rows:
                yield tuple(row)
|
||||
|
||||
|
||||
class JujuStorage:
    """Storing the content tracked by the Framework in Juju.

    This uses :class:`_JujuStorageBackend` to interact with state-get/state-set
    as the way to store state for the framework and for components.
    """
    # NOTE: the original docstring opened with four quotes (""""Storing...),
    # which left a stray leading '"' in the rendered documentation.

    # Key under which the full list of notices is stored in Juju state.
    NOTICE_KEY = "#notices#"

    def __init__(self, backend: '_JujuStorageBackend' = None):
        # The backend is injectable for testing; default to the real
        # hook-tool backed implementation.
        self._backend = backend
        if backend is None:
            self._backend = _JujuStorageBackend()

    def close(self):
        """Part of the Storage API; nothing to release for Juju-held state."""
        return

    def commit(self):
        """Part of the Storage API; each write goes straight to Juju, so no-op."""
        return

    def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None:
        """Part of the Storage API, persist snapshot data under the given handle."""
        self._backend.set(handle_path, snapshot_data)

    def load_snapshot(self, handle_path):
        """Part of the Storage API, retrieve a previously saved snapshot.

        Raises:
            NoSnapshotError: if there is no snapshot for the given handle_path.
        """
        try:
            content = self._backend.get(handle_path)
        except KeyError:
            raise NoSnapshotError(handle_path)
        return content

    def drop_snapshot(self, handle_path):
        """Part of the Storage API, remove a previously saved snapshot."""
        self._backend.delete(handle_path)

    def save_notice(self, event_path: str, observer_path: str, method_name: str):
        """Part of the Storage API, record a notice (event and observer)."""
        notice_list = self._load_notice_list()
        notice_list.append([event_path, observer_path, method_name])
        self._save_notice_list(notice_list)

    def drop_notice(self, event_path: str, observer_path: str, method_name: str):
        """Part of the Storage API, remove a notice that was previously recorded."""
        notice_list = self._load_notice_list()
        notice_list.remove([event_path, observer_path, method_name])
        self._save_notice_list(notice_list)

    def notices(self, event_path: str):
        """Part of the Storage API, yield (event_path, observer_path,
        method_name) tuples whose event_path matches the argument."""
        notice_list = self._load_notice_list()
        for row in notice_list:
            if row[0] != event_path:
                continue
            yield tuple(row)

    def _load_notice_list(self) -> typing.List[typing.Tuple[str]]:
        """Return the stored notice list; an absent or empty key means []."""
        try:
            notice_list = self._backend.get(self.NOTICE_KEY)
        except KeyError:
            return []
        if notice_list is None:
            return []
        return notice_list

    def _save_notice_list(self, notices: typing.List[typing.Tuple[str]]) -> None:
        """Overwrite the whole notice list as one value in Juju state."""
        self._backend.set(self.NOTICE_KEY, notices)
|
||||
|
||||
|
||||
class _SimpleLoader(getattr(yaml, 'CSafeLoader', yaml.SafeLoader)):
    """Handle a couple basic python types.

    yaml.SafeLoader can handle all the basic int/float/dict/set/etc that we want. The only one
    that it *doesn't* handle is tuples. We don't want to support arbitrary types, so we just
    subclass SafeLoader and add tuples back in.
    """
    # Taken from the example at:
    # https://stackoverflow.com/questions/9169025/how-can-i-add-a-python-tuple-to-a-yaml-file-using-pyyaml

    # Borrow the (unsafe) Loader's tuple constructor; registering it below
    # only for the tuple tag keeps everything else SafeLoader-restricted.
    construct_python_tuple = yaml.Loader.construct_python_tuple


# Register the tuple constructor on the class (PyYAML registration is
# class-level, so this must run at import time, once).
_SimpleLoader.add_constructor(
    u'tag:yaml.org,2002:python/tuple',
    _SimpleLoader.construct_python_tuple)
|
||||
|
||||
|
||||
class _SimpleDumper(getattr(yaml, 'CSafeDumper', yaml.SafeDumper)):
    """Add types supported by 'marshal'.

    YAML can support arbitrary types, but that is generally considered unsafe (like pickle). So
    we want to only support dumping out types that are safe to load.
    """


# Teach the safe dumper to emit tuples — the single extra type we allow,
# mirroring the tuple constructor added to _SimpleLoader above.
_SimpleDumper.represent_tuple = yaml.Dumper.represent_tuple
_SimpleDumper.add_representer(tuple, _SimpleDumper.represent_tuple)
|
||||
|
||||
|
||||
def juju_backend_available() -> bool:
    """Check if Juju state storage is available."""
    # The state-get hook tool is only on PATH when the running Juju
    # supports controller-side storage.
    return shutil.which('state-get') is not None
|
||||
|
||||
|
||||
class _JujuStorageBackend:
    """Implements the interface from the Operator framework to Juju's state-get/set/etc."""

    def set(self, key: str, value: typing.Any) -> None:
        """Set a key to a given value.

        Args:
            key: The string key that will be used to find the value later
            value: Arbitrary content that will be returned by get().
        Raises:
            CalledProcessError: if 'state-set' returns an error code.
        """
        # default_flow_style=None means that it can use Block for
        # complex types (types that have nested types) but use flow
        # for simple types (like an array). Not all versions of PyYAML
        # have the same default style.
        encoded_value = yaml.dump(value, Dumper=_SimpleDumper, default_flow_style=None)
        # Double encoding: the outer document maps key -> the YAML text of
        # the value, written in literal ('|') style, then fed to state-set
        # via stdin.
        content = yaml.dump(
            {key: encoded_value}, encoding='utf8', default_style='|',
            default_flow_style=False,
            Dumper=_SimpleDumper)
        _run(["state-set", "--file", "-"], input=content, check=True)

    def get(self, key: str) -> typing.Any:
        """Get the bytes value associated with a given key.

        Args:
            key: The string key that will be used to find the value
        Raises:
            KeyError: if the key has no stored value.
            CalledProcessError: if 'state-get' returns an error code.
        """
        # We don't capture stderr here so it can end up in debug logs.
        p = _run(["state-get", key], stdout=subprocess.PIPE, check=True, universal_newlines=True)
        if p.stdout == '' or p.stdout == '\n':
            # state-get reports a missing key with empty output rather than
            # a nonzero exit code.
            raise KeyError(key)
        return yaml.load(p.stdout, Loader=_SimpleLoader)

    def delete(self, key: str) -> None:
        """Remove a key from being tracked.

        Args:
            key: The key to stop storing
        Raises:
            CalledProcessError: if 'state-delete' returns an error code.
        """
        _run(["state-delete", key], check=True)
|
||||
|
||||
|
||||
class NoSnapshotError(Exception):
    """Raised when no snapshot data exists for the requested handle path."""

    def __init__(self, handle_path):
        # Keep the handle around so callers can report which object was missing.
        self.handle_path = handle_path

    def __str__(self):
        template = 'no snapshot data found for {} object'
        return template.format(self.handle_path)
|
||||
|
|
@ -1,818 +0,0 @@
|
|||
# Copyright 2020 Canonical Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import inspect
|
||||
import pathlib
|
||||
import random
|
||||
import tempfile
|
||||
import typing
|
||||
import yaml
|
||||
from contextlib import contextmanager
|
||||
from textwrap import dedent
|
||||
|
||||
from ops import (
|
||||
charm,
|
||||
framework,
|
||||
model,
|
||||
storage,
|
||||
)
|
||||
|
||||
|
||||
# OptionalYAML is something like metadata.yaml or actions.yaml. You can
|
||||
# pass in a file-like object or the string directly.
|
||||
OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]]
|
||||
|
||||
|
||||
# noinspection PyProtectedMember
|
||||
class Harness:
|
||||
"""This class represents a way to build up the model that will drive a test suite.
|
||||
|
||||
The model that is created is from the viewpoint of the charm that you are testing.
|
||||
|
||||
Example::
|
||||
|
||||
harness = Harness(MyCharm)
|
||||
# Do initial setup here
|
||||
relation_id = harness.add_relation('db', 'postgresql')
|
||||
# Now instantiate the charm to see events as the model changes
|
||||
harness.begin()
|
||||
harness.add_relation_unit(relation_id, 'postgresql/0')
|
||||
harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
|
||||
# Check that charm has properly handled the relation_joined event for postgresql/0
|
||||
self.assertEqual(harness.charm. ...)
|
||||
|
||||
Args:
|
||||
charm_cls: The Charm class that you'll be testing.
|
||||
meta: A string or file-like object containing the contents of
|
||||
metadata.yaml. If not supplied, we will look for a 'metadata.yaml' file in the
|
||||
parent directory of the Charm, and if not found fall back to a trivial
|
||||
'name: test-charm' metadata.
|
||||
actions: A string or file-like object containing the contents of
|
||||
actions.yaml. If not supplied, we will look for a 'actions.yaml' file in the
|
||||
parent directory of the Charm.
|
||||
config: A string or file-like object containing the contents of
|
||||
config.yaml. If not supplied, we will look for a 'config.yaml' file in the
|
||||
parent directory of the Charm.
|
||||
"""
|
||||
|
||||
def __init__(
        self,
        charm_cls: typing.Type[charm.CharmBase],
        *,
        meta: OptionalYAML = None,
        actions: OptionalYAML = None,
        config: OptionalYAML = None):
    self._charm_cls = charm_cls
    # No charm instance exists until begin() is called.
    self._charm = None
    self._charm_dir = 'no-disk-path'  # this may be updated by _create_meta
    self._meta = self._create_meta(meta, actions)
    self._unit_name = self._meta.name + '/0'
    self._framework = None
    self._hooks_enabled = True
    self._relation_id_counter = 0
    self._backend = _TestingModelBackend(self._unit_name, self._meta)
    self._model = model.Model(self._meta, self._backend)
    # Tests always run against an in-memory database, never on disk.
    self._storage = storage.SQLiteStorage(':memory:')
    self._oci_resources = {}
    self._framework = framework.Framework(
        self._storage, self._charm_dir, self._meta, self._model)
    # Seed the config with the defaults declared in config.yaml (if any).
    self._update_config(key_values=self._load_config_defaults(config))
|
||||
|
||||
@property
def charm(self) -> charm.CharmBase:
    """Return the instance of the charm class that was passed to __init__.

    Note that the Charm is not instantiated until you have called
    :meth:`.begin()`; before that, this is None.
    """
    return self._charm
|
||||
|
||||
@property
def model(self) -> model.Model:
    """Return the :class:`~ops.model.Model` that is being driven by this Harness."""
    return self._model
|
||||
|
||||
@property
def framework(self) -> framework.Framework:
    """Return the Framework that is being driven by this Harness."""
    return self._framework
|
||||
|
||||
def begin(self) -> None:
    """Instantiate the Charm and start handling events.

    Before calling :meth:`.begin`(), there is no Charm instance, so changes to the Model won't
    emit events. You must call :meth:`.begin` before :attr:`.charm` is valid.

    Raises:
        RuntimeError: if called more than once on the same harness.
    """
    if self._charm is not None:
        raise RuntimeError('cannot call the begin method on the harness more than once')

    # The Framework adds attributes to class objects for events, etc. As such, we can't re-use
    # the original class against multiple Frameworks. So create a locally defined class
    # and register it.
    # TODO: jam 2020-03-16 We are looking to changes this to Instance attributes instead of
    #       Class attributes which should clean up this ugliness. The API can stay the same
    class TestEvents(self._charm_cls.on.__class__):
        pass

    TestEvents.__name__ = self._charm_cls.on.__class__.__name__

    class TestCharm(self._charm_cls):
        on = TestEvents()

    # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo,
    # rather than TestCharm has no attribute foo.
    TestCharm.__name__ = self._charm_cls.__name__
    self._charm = TestCharm(self._framework)
|
||||
|
||||
def begin_with_initial_hooks(self) -> None:
    """Called when you want the Harness to fire the same hooks that Juju would fire at startup.

    This triggers install, relation-created, config-changed, start, and any relation-joined
    hooks. Based on what relations have been defined before you called begin().
    Note that all of these are fired before returning control to the test suite, so if you
    want to introspect what happens at each step, you need to fire them directly
    (eg Charm.on.install.emit()).

    To use this with all the normal hooks, you should instantiate the harness, setup any
    relations that you want active when the charm starts, and then call this method.

    Example::

        harness = Harness(MyCharm)
        # Do initial setup here
        relation_id = harness.add_relation('db', 'postgresql')
        harness.add_relation_unit(relation_id, 'postgresql/0')
        harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
        harness.set_leader(True)
        harness.update_config({'initial': 'config'})
        harness.begin_with_initial_hooks()
        # This will cause
        # install, db-relation-created('postgresql'), leader-elected, config-changed, start
        # db-relation-joined('postgresql/0'), db-relation-changed('postgresql/0')
        # To be fired.
    """
    self.begin()
    # TODO: jam 2020-08-03 This should also handle storage-attached hooks once we have support
    #       for dealing with storage.
    self._charm.on.install.emit()
    # Juju itself iterates what relation to fire based on a map[int]relation, so it doesn't
    # guarantee a stable ordering between relation events. It *does* give a stable ordering
    # of joined units for a given relation. The shuffles below reproduce
    # that nondeterminism so tests don't come to rely on an accidental order.
    items = list(self._meta.relations.items())
    random.shuffle(items)
    this_app_name = self._meta.name
    for relname, rel_meta in items:
        if rel_meta.role == charm.RelationRole.peer:
            # If the user has directly added a relation, leave it be, but otherwise ensure
            # that peer relations are always established at before leader-elected.
            rel_ids = self._backend._relation_ids_map.get(relname)
            if rel_ids is None:
                self.add_relation(relname, self._meta.name)
            else:
                random.shuffle(rel_ids)
                for rel_id in rel_ids:
                    self._emit_relation_created(relname, rel_id, this_app_name)
        else:
            rel_ids = self._backend._relation_ids_map.get(relname, [])
            random.shuffle(rel_ids)
            for rel_id in rel_ids:
                app_name = self._backend._relation_app_and_units[rel_id]["app"]
                self._emit_relation_created(relname, rel_id, app_name)
    if self._backend._is_leader:
        self._charm.on.leader_elected.emit()
    else:
        self._charm.on.leader_settings_changed.emit()
    self._charm.on.config_changed.emit()
    self._charm.on.start.emit()
    all_ids = list(self._backend._relation_names.items())
    random.shuffle(all_ids)
    for rel_id, rel_name in all_ids:
        rel_app_and_units = self._backend._relation_app_and_units[rel_id]
        app_name = rel_app_and_units["app"]
        # Note: Juju *does* fire relation events for a given relation in the sorted order of
        # the unit names. It also always fires relation-changed immediately after
        # relation-joined for the same unit.
        # Juju only fires relation-changed (app) if there is data for the related application
        relation = self._model.get_relation(rel_name, rel_id)
        if self._backend._relation_data[rel_id].get(app_name):
            app = self._model.get_app(app_name)
            self._charm.on[rel_name].relation_changed.emit(
                relation, app, None)
        for unit_name in sorted(rel_app_and_units["units"]):
            remote_unit = self._model.get_unit(unit_name)
            self._charm.on[rel_name].relation_joined.emit(
                relation, remote_unit.app, remote_unit)
            self._charm.on[rel_name].relation_changed.emit(
                relation, remote_unit.app, remote_unit)
|
||||
|
||||
def cleanup(self) -> None:
    """Called by your test infrastructure to cleanup any temporary directories/files/etc.

    Currently this only needs to be called if you test with resources. But it is reasonable
    to always include a `testcase.addCleanup(harness.cleanup)` just in case.
    """
    # Delegate to the testing backend, which owns any temp files it created.
    backend = self._backend
    backend._cleanup()
|
||||
|
||||
def _create_meta(self, charm_metadata, action_metadata):
    """Create a CharmMeta object.

    Handle the cases where a user doesn't supply explicit metadata snippets.

    Args:
        charm_metadata: metadata.yaml contents (string/file-like), or None to
            load the file next to the charm class, falling back to a trivial
            'name: test-charm'.
        action_metadata: actions.yaml contents (string/file-like), or None to
            load the file next to the charm class if present.
    """
    filename = inspect.getfile(self._charm_cls)
    charm_dir = pathlib.Path(filename).parents[1]

    if charm_metadata is None:
        metadata_path = charm_dir / 'metadata.yaml'
        if metadata_path.is_file():
            charm_metadata = metadata_path.read_text()
            self._charm_dir = charm_dir
        else:
            # The simplest of metadata that the framework can support
            charm_metadata = 'name: test-charm'
    elif isinstance(charm_metadata, str):
        # dedent so tests can pass indented triple-quoted YAML literals.
        charm_metadata = dedent(charm_metadata)

    if action_metadata is None:
        actions_path = charm_dir / 'actions.yaml'
        if actions_path.is_file():
            action_metadata = actions_path.read_text()
            self._charm_dir = charm_dir
    elif isinstance(action_metadata, str):
        action_metadata = dedent(action_metadata)

    return charm.CharmMeta.from_yaml(charm_metadata, action_metadata)
|
||||
|
||||
def _load_config_defaults(self, charm_config):
    """Load default values from config.yaml.

    Handle the case where a user doesn't supply explicit config snippets.

    Returns:
        A dict mapping option name to its declared default, for every
        option under 'options' that defines a 'default' key.
    """
    filename = inspect.getfile(self._charm_cls)
    charm_dir = pathlib.Path(filename).parents[1]

    if charm_config is None:
        config_path = charm_dir / 'config.yaml'
        if config_path.is_file():
            charm_config = config_path.read_text()
            self._charm_dir = charm_dir
        else:
            # The simplest of config that the framework can support
            charm_config = '{}'
    elif isinstance(charm_config, str):
        # dedent so tests can pass indented triple-quoted YAML literals.
        charm_config = dedent(charm_config)
    charm_config = yaml.load(charm_config, Loader=yaml.SafeLoader)
    charm_config = charm_config.get('options', {})
    return {key: value['default'] for key, value in charm_config.items()
            if 'default' in value}
|
||||
|
||||
def add_oci_resource(self, resource_name: str,
                     contents: typing.Mapping[str, str] = None) -> None:
    """Add oci resources to the backend.

    This will register an oci resource and create a temporary file for processing metadata
    about the resource. A default set of values will be used for all the file contents
    unless a specific contents dict is provided.

    Args:
        resource_name: Name of the resource to add custom contents to.
        contents: Optional custom dict to write for the named resource.

    Raises:
        RuntimeError: if the resource is not declared in metadata, or is
            not of type 'oci-image'.
    """
    if not contents:
        # Placeholder registry credentials, sufficient for most tests.
        contents = {'registrypath': 'registrypath',
                    'username': 'username',
                    'password': 'password',
                    }
    if resource_name not in self._meta.resources.keys():
        raise RuntimeError('Resource {} is not a defined resources'.format(resource_name))
    if self._meta.resources[resource_name].type != "oci-image":
        raise RuntimeError('Resource {} is not an OCI Image'.format(resource_name))

    as_yaml = yaml.dump(contents, Dumper=yaml.SafeDumper)
    self._backend._resources_map[resource_name] = ('contents.yaml', as_yaml)
|
||||
|
||||
def add_resource(self, resource_name: str, content: typing.AnyStr) -> None:
    """Add content for a resource to the backend.

    This will register the content, so that a call to `Model.resources.fetch(resource_name)`
    will return a path to a file containing that content.

    Args:
        resource_name: The name of the resource being added
        content: Either string or bytes content, which will be the content of the filename
            returned by resource-get. If content is a string, it will be encoded in utf-8

    Raises:
        RuntimeError: if the resource is not declared in metadata, or is
            not of type 'file'.
    """
    if resource_name not in self._meta.resources.keys():
        raise RuntimeError('Resource {} is not a defined resources'.format(resource_name))
    record = self._meta.resources[resource_name]
    if record.type != "file":
        raise RuntimeError(
            'Resource {} is not a file, but actually {}'.format(resource_name, record.type))
    # Fall back to the resource name when metadata declares no filename.
    filename = record.filename
    if filename is None:
        filename = resource_name

    self._backend._resources_map[resource_name] = (filename, content)
|
||||
|
||||
def populate_oci_resources(self) -> None:
    """Populate all OCI resources."""
    # Register default contents for every resource declared as an oci-image.
    oci_names = (name for name, data in self._meta.resources.items()
                 if data.type == "oci-image")
    for name in oci_names:
        self.add_oci_resource(name)
|
||||
|
||||
def disable_hooks(self) -> None:
    """Stop emitting hook events when the model changes.

    This can be used by developers to stop changes to the model from emitting events that
    the charm will react to. Call :meth:`.enable_hooks`
    to re-enable them.
    """
    # The testing backend consults this flag before emitting any event.
    self._hooks_enabled = False
|
||||
|
||||
def enable_hooks(self) -> None:
    """Re-enable hook events from charm.on when the model is changed.

    Hook events are on by default after :meth:`.begin`; this undoes a prior
    :meth:`.disable_hooks`.
    """
    self._hooks_enabled = True
|
||||
|
||||
@contextmanager
def hooks_disabled(self):
    """A context manager that suppresses hook events for its duration.

    Example::

        with harness.hooks_disabled():
            # things in here don't fire events
            harness.set_leader(True)
            harness.update_config(unset=['foo', 'bar'])
        # things here will again fire events
    """
    self.disable_hooks()
    try:
        yield
    finally:
        self.enable_hooks()
|
||||
|
||||
def _next_relation_id(self):
|
||||
rel_id = self._relation_id_counter
|
||||
self._relation_id_counter += 1
|
||||
return rel_id
|
||||
|
||||
def add_relation(self, relation_name: str, remote_app: str) -> int:
    """Declare that there is a new relation between this app and `remote_app`.

    Args:
        relation_name: The relation on Charm that is being related to
        remote_app: The name of the application that is being related to

    Return:
        The relation_id created by this add_relation.
    """
    rel_id = self._next_relation_id()
    backend = self._backend
    backend._relation_ids_map.setdefault(relation_name, []).append(rel_id)
    backend._relation_names[rel_id] = relation_name
    backend._relation_list_map[rel_id] = []
    # Empty data buckets for the remote app plus our own unit and app.
    backend._relation_data[rel_id] = {
        name: {} for name in (remote_app, backend.unit_name, backend.app_name)
    }
    backend._relation_app_and_units[rel_id] = {
        "app": remote_app,
        "units": [],
    }
    # Reload the relation_ids list
    if self._model is not None:
        self._model.relations._invalidate(relation_name)
    self._emit_relation_created(relation_name, rel_id, remote_app)
    return rel_id
|
||||
|
||||
def _emit_relation_created(self, relation_name: str, relation_id: int,
|
||||
remote_app: str) -> None:
|
||||
"""Trigger relation-created for a given relation with a given remote application."""
|
||||
if self._charm is None or not self._hooks_enabled:
|
||||
return
|
||||
if self._charm is None or not self._hooks_enabled:
|
||||
return
|
||||
relation = self._model.get_relation(relation_name, relation_id)
|
||||
app = self._model.get_app(remote_app)
|
||||
self._charm.on[relation_name].relation_created.emit(
|
||||
relation, app)
|
||||
|
||||
def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None:
    """Add a new unit to a relation.

    Example::

        rel_id = harness.add_relation('db', 'postgresql')
        harness.add_relation_unit(rel_id, 'postgresql/0')

    This will trigger a `relation_joined` event. This would naturally be
    followed by a `relation_changed` event, which you can trigger with
    :meth:`.update_relation_data`. This separation is artificial in the
    sense that Juju will always fire the two, but is intended to make
    testing relations and their data bags slightly more natural.

    Args:
        relation_id: The integer relation identifier (as returned by add_relation).
        remote_unit_name: A string representing the remote unit that is being added.
    Return:
        None
    """
    # Record the unit in the membership list and give it an empty data bag.
    # NOTE(review): the lookups below are unguarded — presumably a KeyError is
    # raised for a relation_id not created by add_relation; confirm intended.
    self._backend._relation_list_map[relation_id].append(remote_unit_name)
    self._backend._relation_data[relation_id][remote_unit_name] = {}
    # TODO: jam 2020-08-03 This is where we could assert that the unit name matches the
    # application name (eg you don't have a relation to 'foo' but add units of 'bar/0'
    self._backend._relation_app_and_units[relation_id]["units"].append(remote_unit_name)
    relation_name = self._backend._relation_names[relation_id]
    # Make sure that the Model reloads the relation_list for this relation_id, as well as
    # reloading the relation data for this unit.
    if self._model is not None:
        remote_unit = self._model.get_unit(remote_unit_name)
        relation = self._model.get_relation(relation_name, relation_id)
        # Drop any cached data bag for the new unit so it is re-read on access.
        unit_cache = relation.data.get(remote_unit, None)
        if unit_cache is not None:
            unit_cache._invalidate()
        self._model.relations._invalidate(relation_name)
    # Only emit relation_joined once a charm exists and hooks are enabled.
    if self._charm is None or not self._hooks_enabled:
        return
    self._charm.on[relation_name].relation_joined.emit(
        relation, remote_unit.app, remote_unit)
|
||||
|
||||
def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping:
    """Get the relation data bucket for a single app or unit in a given relation.

    This ignores all of the safety checks of who can and can't see data in relations (eg,
    non-leaders can't read their own application's relation data because there are no events
    that keep that data up-to-date for the unit).

    Args:
        relation_id: The relation whose content we want to look at.
        app_or_unit: The name of the application or unit whose data we want to read
    Return:
        a dict containing the relation data for `app_or_unit` or None.
    Raises:
        KeyError: if relation_id doesn't exist
    """
    buckets = self._backend._relation_data[relation_id]
    return buckets.get(app_or_unit)
|
||||
|
||||
def get_pod_spec(self) -> (typing.Mapping, typing.Mapping):
    """Return the content of the pod spec as last set by the charm.

    This returns both the pod spec and any k8s_resources that were supplied.
    See the signature of Model.pod.set_spec
    """
    return self._backend._pod_spec
|
||||
|
||||
def get_workload_version(self) -> str:
    """Read the workload version that was set by the unit."""
    return self._backend._workload_version
|
||||
|
||||
def set_model_name(self, name: str) -> None:
    """Set the name of the Model that this is representing.

    This lets you set the value that will be returned by Model.name.

    Raises:
        RuntimeError: if begin() has already been called.
    """
    if self._charm is not None:
        raise RuntimeError('cannot set the Model name after begin()')
    self._backend.model_name = name
|
||||
|
||||
def update_relation_data(
        self,
        relation_id: int,
        app_or_unit: str,
        key_values: typing.Mapping,
) -> None:
    """Update the relation data for a given unit or application in a given relation.

    This also triggers the `relation_changed` event for this relation_id.

    Args:
        relation_id: The integer relation_id representing this relation.
        app_or_unit: The unit or application name that is being updated.
            This can be the local or remote application.
        key_values: Each key/value will be updated in the relation data.
            An empty-string value removes that key (mirroring Juju semantics).
    """
    relation_name = self._backend._relation_names[relation_id]
    relation = self._model.get_relation(relation_name, relation_id)
    # A '/' distinguishes unit names ('app/0') from application names.
    if '/' in app_or_unit:
        entity = self._model.get_unit(app_or_unit)
    else:
        entity = self._model.get_app(app_or_unit)
    rel_data = relation.data.get(entity, None)
    if rel_data is not None:
        # rel_data may have cached now-stale data, so _invalidate() it.
        # Note, this won't cause the data to be loaded if it wasn't already.
        rel_data._invalidate()

    # Apply the updates to a copy, then swap it in atomically.
    new_values = self._backend._relation_data[relation_id][app_or_unit].copy()
    for k, v in key_values.items():
        if v == '':
            new_values.pop(k, None)
        else:
            new_values[k] = v
    self._backend._relation_data[relation_id][app_or_unit] = new_values

    if app_or_unit == self._model.unit.name:
        # No events for our own unit
        return
    if app_or_unit == self._model.app.name:
        # updating our own app only generates an event if it is a peer relation and we
        # aren't the leader
        is_peer = self._meta.relations[relation_name].role.is_peer()
        if not is_peer:
            return
        if self._model.unit.is_leader():
            return
    self._emit_relation_changed(relation_id, app_or_unit)
|
||||
|
||||
def _emit_relation_changed(self, relation_id, app_or_unit):
    """Fire <relation>.relation_changed on the charm for app_or_unit.

    No-op when no charm has been started or hooks are disabled.
    """
    if self._charm is None or not self._hooks_enabled:
        return
    rel_name = self._backend._relation_names[relation_id]
    relation = self.model.get_relation(rel_name, relation_id)
    if '/' in app_or_unit:
        # A unit name like 'app/0': emit with both the owning app and the unit.
        app_name = app_or_unit.split('/')[0]
        unit_name = app_or_unit
        app = self.model.get_app(app_name)
        unit = self.model.get_unit(unit_name)
        args = (relation, app, unit)
    else:
        # An application name: emit with just the app (no unit argument).
        app_name = app_or_unit
        app = self.model.get_app(app_name)
        args = (relation, app)
    self._charm.on[rel_name].relation_changed.emit(*args)
|
||||
|
||||
def _update_config(
|
||||
self,
|
||||
key_values: typing.Mapping[str, str] = None,
|
||||
unset: typing.Iterable[str] = (),
|
||||
) -> None:
|
||||
"""Update the config as seen by the charm.
|
||||
|
||||
This will *not* trigger a `config_changed` event, and is intended for internal use.
|
||||
|
||||
Note that the `key_values` mapping will only add or update configuration items.
|
||||
To remove existing ones, see the `unset` parameter.
|
||||
|
||||
Args:
|
||||
key_values: A Mapping of key:value pairs to update in config.
|
||||
unset: An iterable of keys to remove from Config. (Note that this does
|
||||
not currently reset the config values to the default defined in config.yaml.)
|
||||
"""
|
||||
# NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config
|
||||
# is a LazyMapping, but its _load returns a dict and this method mutates
|
||||
# the dict that Config is caching. Arguably we should be doing some sort
|
||||
# of charm.framework.model.config._invalidate()
|
||||
config = self._backend._config
|
||||
if key_values is not None:
|
||||
for key, value in key_values.items():
|
||||
config[key] = value
|
||||
for key in unset:
|
||||
config.pop(key, None)
|
||||
|
||||
def update_config(
        self,
        key_values: typing.Mapping[str, str] = None,
        unset: typing.Iterable[str] = (),
) -> None:
    """Update the config as seen by the charm and trigger a `config_changed` event.

    `key_values` only adds or updates items; keys listed in `unset` are removed
    (they are not reset to the defaults defined in config.yaml).

    Args:
        key_values: A Mapping of key:value pairs to update in config.
        unset: An iterable of keys to remove from Config.
    """
    self._update_config(key_values, unset)
    if self._charm is not None and self._hooks_enabled:
        self._charm.on.config_changed.emit()
|
||||
|
||||
def set_leader(self, is_leader: bool = True) -> None:
    """Set whether this unit is the leader or not.

    If this charm becomes a leader then `leader_elected` will be triggered.

    Args:
        is_leader: True/False as to whether this unit is the leader.
    """
    was_leader = self._backend._is_leader
    self._backend._is_leader = is_leader
    # Note: is_leader is cached at the ModelBackend level, not in the Model
    # objects, so the change is picked up automatically.
    newly_elected = is_leader and not was_leader
    if newly_elected and self._charm is not None and self._hooks_enabled:
        self._charm.on.leader_elected.emit()
|
||||
|
||||
def _get_backend_calls(self, reset: bool = True) -> list:
|
||||
"""Return the calls that we have made to the TestingModelBackend.
|
||||
|
||||
This is useful mostly for testing the framework itself, so that we can assert that we
|
||||
do/don't trigger extra calls.
|
||||
|
||||
Args:
|
||||
reset: If True, reset the calls list back to empty, if false, the call list is
|
||||
preserved.
|
||||
Return:
|
||||
``[(call1, args...), (call2, args...)]``
|
||||
"""
|
||||
calls = self._backend._calls.copy()
|
||||
if reset:
|
||||
self._backend._calls.clear()
|
||||
return calls
|
||||
|
||||
|
||||
def _record_calls(cls):
|
||||
"""Replace methods on cls with methods that record that they have been called.
|
||||
|
||||
Iterate all attributes of cls, and for public methods, replace them with a wrapped method
|
||||
that records the method called along with the arguments and keyword arguments.
|
||||
"""
|
||||
for meth_name, orig_method in cls.__dict__.items():
|
||||
if meth_name.startswith('_'):
|
||||
continue
|
||||
|
||||
def decorator(orig_method):
|
||||
def wrapped(self, *args, **kwargs):
|
||||
full_args = (orig_method.__name__,) + args
|
||||
if kwargs:
|
||||
full_args = full_args + (kwargs,)
|
||||
self._calls.append(full_args)
|
||||
return orig_method(self, *args, **kwargs)
|
||||
return wrapped
|
||||
|
||||
setattr(cls, meth_name, decorator(orig_method))
|
||||
return cls
|
||||
|
||||
|
||||
class _ResourceEntry:
|
||||
"""Tracks the contents of a Resource."""
|
||||
|
||||
def __init__(self, resource_name):
|
||||
self.name = resource_name
|
||||
|
||||
|
||||
@_record_calls
class _TestingModelBackend:
    """This conforms to the interface for ModelBackend but provides canned data.

    DO NOT use this class directly, it is used by `Harness`_ to drive the model.
    `Harness`_ is responsible for maintaining the internal consistency of the values here,
    as the only public methods of this type are for implementing ModelBackend.
    """

    def __init__(self, unit_name, meta):
        # unit_name is e.g. 'myapp/0'; the application name is the part before '/'.
        self.unit_name = unit_name
        self.app_name = self.unit_name.split('/')[0]
        self.model_name = None
        # Audit log of public-method invocations, populated by @_record_calls.
        self._calls = []
        self._meta = meta
        # NOTE(review): this first assignment is redundant — it is overwritten
        # with False a few lines below.
        self._is_leader = None
        self._relation_ids_map = {}  # relation name to [relation_ids,...]
        self._relation_names = {}  # reverse map from relation_id to relation_name
        self._relation_list_map = {}  # relation_id: [unit_name,...]
        self._relation_data = {}  # {relation_id: {name: data}}
        # {relation_id: {"app": app_name, "units": ["app/0",...]}
        self._relation_app_and_units = {}
        self._config = {}
        self._is_leader = False
        self._resources_map = {}  # {resource_name: resource_content}
        self._pod_spec = None
        self._app_status = {'status': 'unknown', 'message': ''}
        self._unit_status = {'status': 'maintenance', 'message': ''}
        self._workload_version = None
        # Lazily-created TemporaryDirectory backing resource_get downloads.
        self._resource_dir = None

    def _cleanup(self):
        # Remove the temporary resource directory, if one was ever created.
        if self._resource_dir is not None:
            self._resource_dir.cleanup()
            self._resource_dir = None

    def _get_resource_dir(self) -> pathlib.Path:
        # Create the backing temp directory on first use.
        if self._resource_dir is None:
            # In actual Juju, the resource path for a charm's resource is
            # $AGENT_DIR/resources/$RESOURCE_NAME/$RESOURCE_FILENAME
            # However, charms shouldn't depend on this.
            self._resource_dir = tempfile.TemporaryDirectory(prefix='tmp-ops-test-resource-')
        return pathlib.Path(self._resource_dir.name)

    def relation_ids(self, relation_name):
        # An unknown relation name is a ModelError; a known one with no
        # established relations returns an empty list.
        try:
            return self._relation_ids_map[relation_name]
        except KeyError as e:
            if relation_name not in self._meta.relations:
                raise model.ModelError('{} is not a known relation'.format(relation_name)) from e
            return []

    def relation_list(self, relation_id):
        # Unknown relation ids surface as RelationNotFoundError, as in Juju.
        try:
            return self._relation_list_map[relation_id]
        except KeyError as e:
            raise model.RelationNotFoundError from e

    def relation_get(self, relation_id, member_name, is_app):
        # App-level data is keyed by application name, so strip the unit suffix.
        if is_app and '/' in member_name:
            member_name = member_name.split('/')[0]
        if relation_id not in self._relation_data:
            raise model.RelationNotFoundError()
        # Return a copy so callers can't mutate the canned data in place.
        return self._relation_data[relation_id][member_name].copy()

    def relation_set(self, relation_id, key, value, is_app):
        relation = self._relation_data[relation_id]
        # Writes target our own app or unit bucket depending on is_app.
        if is_app:
            bucket_key = self.app_name
        else:
            bucket_key = self.unit_name
        if bucket_key not in relation:
            relation[bucket_key] = {}
        bucket = relation[bucket_key]
        # Empty string removes the key, mirroring Juju's relation-set semantics.
        if value == '':
            bucket.pop(key, None)
        else:
            bucket[key] = value

    def config_get(self):
        return self._config

    def is_leader(self):
        return self._is_leader

    def application_version_set(self, version):
        self._workload_version = version

    def resource_get(self, resource_name):
        # Mimic Juju's error text for a resource that was never added.
        if resource_name not in self._resources_map:
            raise model.ModelError(
                "ERROR could not download resource: HTTP request failed: "
                "Get https://.../units/unit-{}/resources/{}: resource#{}/{} not found".format(
                    self.unit_name.replace('/', '-'), resource_name, self.app_name, resource_name
                ))
        filename, contents = self._resources_map[resource_name]
        # Materialise the content on disk lazily, once per resource.
        resource_dir = self._get_resource_dir()
        resource_filename = resource_dir / resource_name / filename
        if not resource_filename.exists():
            # Choose binary vs text mode based on the registered content type.
            if isinstance(contents, bytes):
                mode = 'wb'
            else:
                mode = 'wt'
            resource_filename.parent.mkdir(exist_ok=True)
            with resource_filename.open(mode=mode) as resource_file:
                resource_file.write(contents)
        return resource_filename

    def pod_spec_set(self, spec, k8s_resources):
        # Stored as a (spec, k8s_resources) tuple; read back via Harness.get_pod_spec.
        self._pod_spec = (spec, k8s_resources)

    def status_get(self, *, is_app=False):
        if is_app:
            return self._app_status
        else:
            return self._unit_status

    def status_set(self, status, message='', *, is_app=False):
        if is_app:
            self._app_status = {'status': status, 'message': message}
        else:
            self._unit_status = {'status': status, 'message': message}

    # The remaining ModelBackend operations are not yet simulated by the
    # testing backend and deliberately raise NotImplementedError.

    def storage_list(self, name):
        raise NotImplementedError(self.storage_list)

    def storage_get(self, storage_name_id, attribute):
        raise NotImplementedError(self.storage_get)

    def storage_add(self, name, count=1):
        raise NotImplementedError(self.storage_add)

    def action_get(self):
        raise NotImplementedError(self.action_get)

    def action_set(self, results):
        raise NotImplementedError(self.action_set)

    def action_log(self, message):
        raise NotImplementedError(self.action_log)

    def action_fail(self, message=''):
        raise NotImplementedError(self.action_fail)

    def network_get(self, endpoint_name, relation_id=None):
        raise NotImplementedError(self.network_get)
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
# this is a generated file
|
||||
|
||||
version = '0.10.0'
|
||||
|
|
@ -1,427 +0,0 @@
|
|||
|
||||
from .error import *
|
||||
|
||||
from .tokens import *
|
||||
from .events import *
|
||||
from .nodes import *
|
||||
|
||||
from .loader import *
|
||||
from .dumper import *
|
||||
|
||||
__version__ = '5.3.1'
|
||||
try:
|
||||
from .cyaml import *
|
||||
__with_libyaml__ = True
|
||||
except ImportError:
|
||||
__with_libyaml__ = False
|
||||
|
||||
import io
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Warnings control
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# 'Global' warnings state:
# Maps warning-category name -> enabled flag; consulted by load_warning()
# and exposed/updated through the warnings() helper below.
_warnings_enabled = {
    'YAMLLoadWarning': True,
}
|
||||
|
||||
# Get or set global warnings' state
|
||||
def warnings(settings=None):
    """Get (no argument) or update (dict argument) the global warnings state.

    Only keys already present in the state can be changed; unknown keys in
    *settings* are silently ignored.
    """
    if settings is None:
        return _warnings_enabled

    if type(settings) is dict:
        for key, value in settings.items():
            if key in _warnings_enabled:
                _warnings_enabled[key] = value
|
||||
|
||||
# Warn when load() is called without Loader=...
|
||||
class YAMLLoadWarning(RuntimeWarning):
    """Warning issued when yaml.load()/load_all() is called without Loader=..."""
    pass
|
||||
|
||||
def load_warning(method):
    """Emit a YAMLLoadWarning for calling yaml.<method>() without Loader=...,
    unless the warning has been globally disabled via warnings()."""
    if _warnings_enabled['YAMLLoadWarning'] is False:
        return

    import warnings

    warnings.warn(
        "calling yaml.%s() without Loader=... is deprecated, as the "
        "default Loader is unsafe. Please read "
        "https://msg.pyyaml.org/load for full details." % method,
        YAMLLoadWarning,
        stacklevel=3,
    )
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
def scan(stream, Loader=Loader):
    """Scan a YAML stream, yielding scanning tokens one at a time."""
    scanner = Loader(stream)
    try:
        while scanner.check_token():
            yield scanner.get_token()
    finally:
        scanner.dispose()
|
||||
|
||||
def parse(stream, Loader=Loader):
    """Parse a YAML stream, yielding parsing events one at a time."""
    parser = Loader(stream)
    try:
        while parser.check_event():
            yield parser.get_event()
    finally:
        parser.dispose()
|
||||
|
||||
def compose(stream, Loader=Loader):
    """Parse the first YAML document in a stream and return the
    corresponding representation tree."""
    composer = Loader(stream)
    try:
        return composer.get_single_node()
    finally:
        composer.dispose()
|
||||
|
||||
def compose_all(stream, Loader=Loader):
    """Parse all YAML documents in a stream, yielding one representation
    tree per document."""
    composer = Loader(stream)
    try:
        while composer.check_node():
            yield composer.get_node()
    finally:
        composer.dispose()
|
||||
|
||||
def load(stream, Loader=None):
    """Parse the first YAML document in a stream and return the
    corresponding Python object.

    Omitting Loader is deprecated (a YAMLLoadWarning is issued) and
    falls back to FullLoader.
    """
    if Loader is None:
        load_warning('load')
        Loader = FullLoader

    parser = Loader(stream)
    try:
        return parser.get_single_data()
    finally:
        parser.dispose()
|
||||
|
||||
def load_all(stream, Loader=None):
    """Parse all YAML documents in a stream, yielding one Python object
    per document.

    Omitting Loader is deprecated (a YAMLLoadWarning is issued) and
    falls back to FullLoader.
    """
    if Loader is None:
        load_warning('load_all')
        Loader = FullLoader

    parser = Loader(stream)
    try:
        while parser.check_data():
            yield parser.get_data()
    finally:
        parser.dispose()
|
||||
|
||||
def full_load(stream):
    """Parse the first YAML document in a stream into a Python object,
    resolving all tags except those known to be unsafe on untrusted input."""
    return load(stream, FullLoader)
|
||||
|
||||
def full_load_all(stream):
    """Parse all YAML documents in a stream into Python objects,
    resolving all tags except those known to be unsafe on untrusted input."""
    return load_all(stream, FullLoader)
|
||||
|
||||
def safe_load(stream):
    """Parse the first YAML document in a stream into a Python object,
    resolving only basic YAML tags (safe for untrusted input)."""
    return load(stream, SafeLoader)
|
||||
|
||||
def safe_load_all(stream):
    """Parse all YAML documents in a stream into Python objects,
    resolving only basic YAML tags (safe for untrusted input)."""
    return load_all(stream, SafeLoader)
|
||||
|
||||
def unsafe_load(stream):
    """Parse the first YAML document in a stream into a Python object,
    resolving ALL tags — including ones unsafe on untrusted input."""
    return load(stream, UnsafeLoader)
|
||||
|
||||
def unsafe_load_all(stream):
    """Parse all YAML documents in a stream into Python objects,
    resolving ALL tags — including ones unsafe on untrusted input."""
    return load_all(stream, UnsafeLoader)
|
||||
|
||||
def emit(events, stream=None, Dumper=Dumper,
         canonical=None, indent=None, width=None,
         allow_unicode=None, line_break=None):
    """Emit YAML parsing events into a stream.

    If stream is None the output is accumulated in memory and returned
    as a string.
    """
    buffer = None
    if stream is None:
        buffer = io.StringIO()
        stream = buffer
    emitter = Dumper(stream, canonical=canonical, indent=indent, width=width,
                     allow_unicode=allow_unicode, line_break=line_break)
    try:
        for event in events:
            emitter.emit(event)
    finally:
        emitter.dispose()
    if buffer is not None:
        return buffer.getvalue()
|
||||
|
||||
def serialize_all(nodes, stream=None, Dumper=Dumper,
                  canonical=None, indent=None, width=None,
                  allow_unicode=None, line_break=None,
                  encoding=None, explicit_start=None, explicit_end=None,
                  version=None, tags=None):
    """Serialize a sequence of representation trees into a YAML stream.

    If stream is None the output is accumulated in memory (text or bytes,
    depending on *encoding*) and returned.
    """
    buffer = None
    if stream is None:
        buffer = io.StringIO() if encoding is None else io.BytesIO()
        stream = buffer
    serializer = Dumper(stream, canonical=canonical, indent=indent, width=width,
                        allow_unicode=allow_unicode, line_break=line_break,
                        encoding=encoding, version=version, tags=tags,
                        explicit_start=explicit_start, explicit_end=explicit_end)
    try:
        serializer.open()
        for node in nodes:
            serializer.serialize(node)
        serializer.close()
    finally:
        serializer.dispose()
    if buffer is not None:
        return buffer.getvalue()
|
||||
|
||||
def serialize(node, stream=None, Dumper=Dumper, **kwds):
    """Serialize a single representation tree into a YAML stream.

    If stream is None, the produced string is returned.
    """
    return serialize_all([node], stream, Dumper=Dumper, **kwds)
|
||||
|
||||
def dump_all(documents, stream=None, Dumper=Dumper,
             default_style=None, default_flow_style=False,
             canonical=None, indent=None, width=None,
             allow_unicode=None, line_break=None,
             encoding=None, explicit_start=None, explicit_end=None,
             version=None, tags=None, sort_keys=True):
    """Serialize a sequence of Python objects into a YAML stream.

    If stream is None the output is accumulated in memory (text or bytes,
    depending on *encoding*) and returned.
    """
    buffer = None
    if stream is None:
        buffer = io.StringIO() if encoding is None else io.BytesIO()
        stream = buffer
    representer = Dumper(stream, default_style=default_style,
                         default_flow_style=default_flow_style,
                         canonical=canonical, indent=indent, width=width,
                         allow_unicode=allow_unicode, line_break=line_break,
                         encoding=encoding, version=version, tags=tags,
                         explicit_start=explicit_start, explicit_end=explicit_end,
                         sort_keys=sort_keys)
    try:
        representer.open()
        for data in documents:
            representer.represent(data)
        representer.close()
    finally:
        representer.dispose()
    if buffer is not None:
        return buffer.getvalue()
|
||||
|
||||
def dump(data, stream=None, Dumper=Dumper, **kwds):
    """Serialize a single Python object into a YAML stream.

    If stream is None, the produced string is returned.
    """
    return dump_all([data], stream, Dumper=Dumper, **kwds)
|
||||
|
||||
def safe_dump_all(documents, stream=None, **kwds):
    """Serialize a sequence of Python objects into a YAML stream using only
    basic YAML tags.

    If stream is None, the produced string is returned.
    """
    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
|
||||
|
||||
def safe_dump(data, stream=None, **kwds):
    """Serialize a single Python object into a YAML stream using only
    basic YAML tags.

    If stream is None, the produced string is returned.
    """
    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
|
||||
|
||||
def add_implicit_resolver(tag, regexp, first=None,
                          Loader=None, Dumper=Dumper):
    """Add an implicit scalar detector.

    If an implicit scalar value matches *regexp*, *tag* is assigned to the
    scalar.  *first* is a sequence of possible initial characters or None.
    With no explicit Loader, the resolver is registered on all default
    loader classes; the Dumper is always updated.
    """
    if Loader is None:
        for loader_cls in (loader.Loader, loader.FullLoader, loader.UnsafeLoader):
            loader_cls.add_implicit_resolver(tag, regexp, first)
    else:
        Loader.add_implicit_resolver(tag, regexp, first)
    Dumper.add_implicit_resolver(tag, regexp, first)
|
||||
|
||||
def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper):
    """Add a path based resolver for the given tag.

    A path is a list of keys forming a path to a node in the representation
    tree; keys can be string values, integers, or None.  With no explicit
    Loader, the resolver is registered on all default loader classes; the
    Dumper is always updated.
    """
    if Loader is None:
        for loader_cls in (loader.Loader, loader.FullLoader, loader.UnsafeLoader):
            loader_cls.add_path_resolver(tag, path, kind)
    else:
        Loader.add_path_resolver(tag, path, kind)
    Dumper.add_path_resolver(tag, path, kind)
|
||||
|
||||
def add_constructor(tag, constructor, Loader=None):
    """Add a constructor for the given tag.

    *constructor* is a function accepting a Loader instance and a node and
    producing the corresponding Python object.  With no explicit Loader, it
    is registered on all default loader classes.
    """
    if Loader is None:
        for loader_cls in (loader.Loader, loader.FullLoader, loader.UnsafeLoader):
            loader_cls.add_constructor(tag, constructor)
    else:
        Loader.add_constructor(tag, constructor)
|
||||
|
||||
def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
    """Add a multi-constructor for the given tag prefix.

    The multi-constructor is called for any node whose tag starts with
    *tag_prefix*; it accepts a Loader instance, a tag suffix, and a node,
    and produces the corresponding Python object.  With no explicit Loader,
    it is registered on all default loader classes.
    """
    if Loader is None:
        for loader_cls in (loader.Loader, loader.FullLoader, loader.UnsafeLoader):
            loader_cls.add_multi_constructor(tag_prefix, multi_constructor)
    else:
        Loader.add_multi_constructor(tag_prefix, multi_constructor)
|
||||
|
||||
def add_representer(data_type, representer, Dumper=Dumper):
    """Add a representer for the given type.

    *representer* is a function accepting a Dumper instance and an instance
    of *data_type*, producing the corresponding representation node.
    """
    Dumper.add_representer(data_type, representer)
|
||||
|
||||
def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
    """Add a multi-representer for the given type.

    *multi_representer* is a function accepting a Dumper instance and an
    instance of *data_type* or any subtype, producing the corresponding
    representation node.
    """
    Dumper.add_multi_representer(data_type, multi_representer)
|
||||
|
||||
class YAMLObjectMetaclass(type):
    """
    The metaclass for YAMLObject.

    Auto-registers each tagged subclass's from_yaml constructor with its
    loader(s) and its to_yaml representer with its dumper.
    """
    def __init__(cls, name, bases, kwds):
        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
        # Only subclasses that declare a non-None yaml_tag participate.
        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
            if isinstance(cls.yaml_loader, list):
                for loader in cls.yaml_loader:
                    loader.add_constructor(cls.yaml_tag, cls.from_yaml)
            else:
                # yaml_loader may also be a single Loader class.
                cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)

            cls.yaml_dumper.add_representer(cls, cls.to_yaml)
|
||||
|
||||
class YAMLObject(metaclass=YAMLObjectMetaclass):
    """
    An object that can dump itself to a YAML stream
    and load itself from a YAML stream.

    Subclasses set ``yaml_tag`` to a tag string; the metaclass then
    registers ``from_yaml`` on each class in ``yaml_loader`` and
    ``to_yaml`` on ``yaml_dumper`` (see YAMLObjectMetaclass).
    """

    __slots__ = () # no direct instantiation, so allow immutable subclasses

    # Loader classes the constructor is registered on, and the dumper
    # class the representer is registered on.
    yaml_loader = [Loader, FullLoader, UnsafeLoader]
    yaml_dumper = Dumper

    # Tag and default flow style used when representing instances;
    # subclasses must override yaml_tag to opt in to registration.
    yaml_tag = None
    yaml_flow_style = None

    @classmethod
    def from_yaml(cls, loader, node):
        """
        Convert a representation node to a Python object.
        """
        return loader.construct_yaml_object(node, cls)

    @classmethod
    def to_yaml(cls, dumper, data):
        """
        Convert a Python object to a representation node.
        """
        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
                flow_style=cls.yaml_flow_style)
|
||||
|
||||
|
|
@ -1,139 +0,0 @@
|
|||
|
||||
__all__ = ['Composer', 'ComposerError']
|
||||
|
||||
from .error import MarkedYAMLError
|
||||
from .events import *
|
||||
from .nodes import *
|
||||
|
||||
class ComposerError(MarkedYAMLError):
    """Raised for structural errors while composing events into nodes,
    e.g. undefined aliases, duplicate anchors, or extra documents."""
    pass
|
||||
|
||||
class Composer:
    """
    Build a tree of nodes from a stream of parser events.

    Intended as a mix-in: the concrete loader class must also provide
    the event interface (check_event/peek_event/get_event) and the
    resolver interface (descend_resolver/ascend_resolver/resolve).
    """

    def __init__(self):
        # Maps anchor name -> already-composed node, used to resolve
        # aliases; reset after every document.
        self.anchors = {}

    def check_node(self):
        """Return True if another document (root node) is available."""
        # Drop the STREAM-START event.
        if self.check_event(StreamStartEvent):
            self.get_event()

        # If there are more documents available?
        return not self.check_event(StreamEndEvent)

    def get_node(self):
        """Compose and return the root node of the next document,
        or None at end of stream."""
        # Get the root node of the next document.
        if not self.check_event(StreamEndEvent):
            return self.compose_document()

    def get_single_node(self):
        """Compose the single document of the stream; raise
        ComposerError if more than one document is present."""
        # Drop the STREAM-START event.
        self.get_event()

        # Compose a document if the stream is not empty.
        document = None
        if not self.check_event(StreamEndEvent):
            document = self.compose_document()

        # Ensure that the stream contains no more documents.
        if not self.check_event(StreamEndEvent):
            event = self.get_event()
            raise ComposerError("expected a single document in the stream",
                    document.start_mark, "but found another document",
                    event.start_mark)

        # Drop the STREAM-END event.
        self.get_event()

        return document

    def compose_document(self):
        """Compose one document and return its root node."""
        # Drop the DOCUMENT-START event.
        self.get_event()

        # Compose the root node.
        node = self.compose_node(None, None)

        # Drop the DOCUMENT-END event.
        self.get_event()

        # Anchors are document-scoped; clear them for the next document.
        self.anchors = {}
        return node

    def compose_node(self, parent, index):
        """Compose the next node; *parent* and *index* feed the resolver
        for path-based tag resolution."""
        if self.check_event(AliasEvent):
            # Aliases reuse the node already stored for the anchor.
            event = self.get_event()
            anchor = event.anchor
            if anchor not in self.anchors:
                raise ComposerError(None, None, "found undefined alias %r"
                        % anchor, event.start_mark)
            return self.anchors[anchor]
        event = self.peek_event()
        anchor = event.anchor
        if anchor is not None:
            if anchor in self.anchors:
                raise ComposerError("found duplicate anchor %r; first occurrence"
                        % anchor, self.anchors[anchor].start_mark,
                        "second occurrence", event.start_mark)
        self.descend_resolver(parent, index)
        if self.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.ascend_resolver()
        return node

    def compose_scalar_node(self, anchor):
        """Compose a ScalarNode from the next scalar event."""
        event = self.get_event()
        tag = event.tag
        if tag is None or tag == '!':
            # No explicit tag: resolve from the value and implicit flags.
            tag = self.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(tag, event.value,
                event.start_mark, event.end_mark, style=event.style)
        if anchor is not None:
            self.anchors[anchor] = node
        return node

    def compose_sequence_node(self, anchor):
        """Compose a SequenceNode and, recursively, its items."""
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        if anchor is not None:
            # Register the anchor before composing children so the
            # sequence can contain aliases to itself.
            self.anchors[anchor] = node
        index = 0
        while not self.check_event(SequenceEndEvent):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node

    def compose_mapping_node(self, anchor):
        """Compose a MappingNode; values are stored as a list of
        (key_node, value_node) pairs, not a dict."""
        start_event = self.get_event()
        tag = start_event.tag
        if tag is None or tag == '!':
            tag = self.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [],
                start_event.start_mark, None,
                flow_style=start_event.flow_style)
        if anchor is not None:
            # Registered before children so the mapping may alias itself.
            self.anchors[anchor] = node
        while not self.check_event(MappingEndEvent):
            #key_event = self.peek_event()
            item_key = self.compose_node(node, None)
            #if item_key in node.value:
            #    raise ComposerError("while composing a mapping", start_event.start_mark,
            #            "found duplicate key", key_event.start_mark)
            item_value = self.compose_node(node, item_key)
            #node.value[item_key] = item_value
            node.value.append((item_key, item_value))
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
|
||||
|
||||
|
|
@ -1,748 +0,0 @@
|
|||
|
||||
__all__ = [
|
||||
'BaseConstructor',
|
||||
'SafeConstructor',
|
||||
'FullConstructor',
|
||||
'UnsafeConstructor',
|
||||
'Constructor',
|
||||
'ConstructorError'
|
||||
]
|
||||
|
||||
from .error import *
|
||||
from .nodes import *
|
||||
|
||||
import collections.abc, datetime, base64, binascii, re, sys, types
|
||||
|
||||
class ConstructorError(MarkedYAMLError):
    """Raised when a node cannot be converted to a Python object."""
    pass
|
||||
|
||||
class BaseConstructor:
    """
    Convert a node tree into native Python objects.

    Tag-specific constructors live in two class-level registries:
    ``yaml_constructors`` (exact tag match) and
    ``yaml_multi_constructors`` (tag-prefix match).  Subclasses get a
    copy-on-write view of the registries via add_constructor /
    add_multi_constructor.  Intended as a mix-in: the concrete loader
    must also provide check_node/get_node/get_single_node (composer).
    """

    yaml_constructors = {}
    yaml_multi_constructors = {}

    def __init__(self):
        # node -> finished object, for shared (anchored) nodes.
        self.constructed_objects = {}
        # Nodes currently being constructed, to detect bad recursion.
        self.recursive_objects = {}
        # Two-phase (generator-based) constructors waiting to finish.
        self.state_generators = []
        # When True, construct_object drains generators immediately.
        self.deep_construct = False

    def check_data(self):
        """Return True if another document can be constructed."""
        # If there are more documents available?
        return self.check_node()

    def check_state_key(self, key):
        """Block special attributes/methods from being set in a newly created
        object, to prevent user-controlled methods from being called during
        deserialization"""
        if self.get_state_keys_blacklist_regexp().match(key):
            raise ConstructorError(None, None,
                "blacklisted key '%s' in instance state found" % (key,), None)

    def get_data(self):
        """Construct and return the next document, or None if exhausted."""
        # Construct and return the next document.
        if self.check_node():
            return self.construct_document(self.get_node())

    def get_single_data(self):
        """Construct the single document of the stream (or None)."""
        # Ensure that the stream contains a single document and construct it.
        node = self.get_single_node()
        if node is not None:
            return self.construct_document(node)
        return None

    def construct_document(self, node):
        """Construct a whole document, finishing all deferred
        (generator-based) constructors, then reset per-document state."""
        data = self.construct_object(node)
        while self.state_generators:
            state_generators = self.state_generators
            self.state_generators = []
            for generator in state_generators:
                # Exhausting the generator completes the object's state.
                for dummy in generator:
                    pass
        self.constructed_objects = {}
        self.recursive_objects = {}
        self.deep_construct = False
        return data

    def construct_object(self, node, deep=False):
        """Construct the object for *node*, dispatching on its tag.

        With deep=True, two-phase constructors are drained immediately
        so the returned object is fully populated.
        """
        if node in self.constructed_objects:
            return self.constructed_objects[node]
        if deep:
            old_deep = self.deep_construct
            self.deep_construct = True
        if node in self.recursive_objects:
            raise ConstructorError(None, None,
                    "found unconstructable recursive node", node.start_mark)
        self.recursive_objects[node] = None
        # Dispatch order: exact tag, then longest... first matching tag
        # prefix, then the None fallbacks, then by node kind.
        constructor = None
        tag_suffix = None
        if node.tag in self.yaml_constructors:
            constructor = self.yaml_constructors[node.tag]
        else:
            for tag_prefix in self.yaml_multi_constructors:
                if tag_prefix is not None and node.tag.startswith(tag_prefix):
                    tag_suffix = node.tag[len(tag_prefix):]
                    constructor = self.yaml_multi_constructors[tag_prefix]
                    break
            else:
                if None in self.yaml_multi_constructors:
                    tag_suffix = node.tag
                    constructor = self.yaml_multi_constructors[None]
                elif None in self.yaml_constructors:
                    constructor = self.yaml_constructors[None]
                elif isinstance(node, ScalarNode):
                    constructor = self.__class__.construct_scalar
                elif isinstance(node, SequenceNode):
                    constructor = self.__class__.construct_sequence
                elif isinstance(node, MappingNode):
                    constructor = self.__class__.construct_mapping
        if tag_suffix is None:
            data = constructor(self, node)
        else:
            data = constructor(self, tag_suffix, node)
        if isinstance(data, types.GeneratorType):
            # Two-phase constructor: first yield is the (empty) object,
            # exhausting the generator fills in its state.
            generator = data
            data = next(generator)
            if self.deep_construct:
                for dummy in generator:
                    pass
            else:
                self.state_generators.append(generator)
        self.constructed_objects[node] = data
        del self.recursive_objects[node]
        if deep:
            self.deep_construct = old_deep
        return data

    def construct_scalar(self, node):
        """Return the string value of a scalar node."""
        if not isinstance(node, ScalarNode):
            raise ConstructorError(None, None,
                    "expected a scalar node, but found %s" % node.id,
                    node.start_mark)
        return node.value

    def construct_sequence(self, node, deep=False):
        """Construct a list from a sequence node."""
        if not isinstance(node, SequenceNode):
            raise ConstructorError(None, None,
                    "expected a sequence node, but found %s" % node.id,
                    node.start_mark)
        return [self.construct_object(child, deep=deep)
                for child in node.value]

    def construct_mapping(self, node, deep=False):
        """Construct a dict from a mapping node; keys must be hashable."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        mapping = {}
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            if not isinstance(key, collections.abc.Hashable):
                raise ConstructorError("while constructing a mapping", node.start_mark,
                        "found unhashable key", key_node.start_mark)
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping

    def construct_pairs(self, node, deep=False):
        """Construct a list of (key, value) tuples from a mapping node,
        preserving order and allowing unhashable/duplicate keys."""
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        pairs = []
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            value = self.construct_object(value_node, deep=deep)
            pairs.append((key, value))
        return pairs

    @classmethod
    def add_constructor(cls, tag, constructor):
        """Register *constructor* for nodes whose tag equals *tag*."""
        # Copy-on-write: give this class its own registry so parent
        # classes are unaffected.
        if not 'yaml_constructors' in cls.__dict__:
            cls.yaml_constructors = cls.yaml_constructors.copy()
        cls.yaml_constructors[tag] = constructor

    @classmethod
    def add_multi_constructor(cls, tag_prefix, multi_constructor):
        """Register *multi_constructor* for tags starting with *tag_prefix*."""
        if not 'yaml_multi_constructors' in cls.__dict__:
            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
|
||||
|
||||
class SafeConstructor(BaseConstructor):
    """
    Constructor that handles only the standard YAML tags.

    Produces plain Python values (None, bool, int, float, str, bytes,
    dates/datetimes, list, dict, set, pairs) and never invokes
    arbitrary Python code, so it is suitable for untrusted input.
    """

    def construct_scalar(self, node):
        """Return the scalar value; for a mapping carrying a '='
        (tag:yaml.org,2002:value) key, use that key's value."""
        if isinstance(node, MappingNode):
            for key_node, value_node in node.value:
                if key_node.tag == 'tag:yaml.org,2002:value':
                    return self.construct_scalar(value_node)
        return super().construct_scalar(node)

    def flatten_mapping(self, node):
        """Expand '<<' merge keys in *node* in place.

        Merged entries are prepended to node.value, so keys written
        explicitly in the mapping override merged ones.
        """
        merge = []
        index = 0
        while index < len(node.value):
            key_node, value_node = node.value[index]
            if key_node.tag == 'tag:yaml.org,2002:merge':
                del node.value[index]
                if isinstance(value_node, MappingNode):
                    self.flatten_mapping(value_node)
                    merge.extend(value_node.value)
                elif isinstance(value_node, SequenceNode):
                    submerge = []
                    for subnode in value_node.value:
                        if not isinstance(subnode, MappingNode):
                            raise ConstructorError("while constructing a mapping",
                                    node.start_mark,
                                    "expected a mapping for merging, but found %s"
                                    % subnode.id, subnode.start_mark)
                        self.flatten_mapping(subnode)
                        submerge.append(subnode.value)
                    # Earlier mappings in the sequence take precedence,
                    # so extend in reverse order.
                    submerge.reverse()
                    for value in submerge:
                        merge.extend(value)
                else:
                    raise ConstructorError("while constructing a mapping", node.start_mark,
                            "expected a mapping or list of mappings for merging, but found %s"
                            % value_node.id, value_node.start_mark)
            elif key_node.tag == 'tag:yaml.org,2002:value':
                # '=' key is treated as the literal string key "=".
                key_node.tag = 'tag:yaml.org,2002:str'
                index += 1
            else:
                index += 1
        if merge:
            node.value = merge + node.value

    def construct_mapping(self, node, deep=False):
        """Construct a dict, resolving merge keys first."""
        if isinstance(node, MappingNode):
            self.flatten_mapping(node)
        return super().construct_mapping(node, deep=deep)

    def construct_yaml_null(self, node):
        """Construct None; the scalar is read only for validation."""
        self.construct_scalar(node)
        return None

    # Boolean spellings recognized by YAML 1.1; lookup is lowercased.
    bool_values = {
        'yes':      True,
        'no':       False,
        'true':     True,
        'false':    False,
        'on':       True,
        'off':      False,
    }

    def construct_yaml_bool(self, node):
        """Construct a bool from one of the recognized spellings."""
        value = self.construct_scalar(node)
        return self.bool_values[value.lower()]

    def construct_yaml_int(self, node):
        """Construct an int; supports sign, '_' separators, 0b/0x/0
        (binary/hex/octal) prefixes and sexagesimal '1:30' notation."""
        value = self.construct_scalar(node)
        value = value.replace('_', '')
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '0':
            return 0
        elif value.startswith('0b'):
            return sign*int(value[2:], 2)
        elif value.startswith('0x'):
            return sign*int(value[2:], 16)
        elif value[0] == '0':
            # YAML 1.1: a bare leading zero means octal.
            return sign*int(value, 8)
        elif ':' in value:
            # Sexagesimal (base 60), least-significant part last.
            digits = [int(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*int(value)

    # Compute +inf portably by repeated squaring, then derive a quiet
    # NaN from it.
    inf_value = 1e300
    while inf_value != inf_value*inf_value:
        inf_value *= inf_value
    nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).

    def construct_yaml_float(self, node):
        """Construct a float; supports sign, '_' separators, .inf/.nan
        and sexagesimal '1:30.5' notation."""
        value = self.construct_scalar(node)
        value = value.replace('_', '').lower()
        sign = +1
        if value[0] == '-':
            sign = -1
        if value[0] in '+-':
            value = value[1:]
        if value == '.inf':
            return sign*self.inf_value
        elif value == '.nan':
            return self.nan_value
        elif ':' in value:
            digits = [float(part) for part in value.split(':')]
            digits.reverse()
            base = 1
            value = 0.0
            for digit in digits:
                value += digit*base
                base *= 60
            return sign*value
        else:
            return sign*float(value)

    def construct_yaml_binary(self, node):
        """Decode a !!binary scalar (base64) into bytes."""
        try:
            value = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(None, None,
                    "failed to convert base64 data into ascii: %s" % exc,
                    node.start_mark)
        try:
            if hasattr(base64, 'decodebytes'):
                return base64.decodebytes(value)
            else:
                # Fallback for very old Pythons without decodebytes.
                return base64.decodestring(value)
        except binascii.Error as exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)

    # YAML 1.1 timestamp: date, optional time, optional fraction and
    # timezone offset.
    timestamp_regexp = re.compile(
            r'''^(?P<year>[0-9][0-9][0-9][0-9])
                -(?P<month>[0-9][0-9]?)
                -(?P<day>[0-9][0-9]?)
                (?:(?:[Tt]|[ \t]+)
                (?P<hour>[0-9][0-9]?)
                :(?P<minute>[0-9][0-9])
                :(?P<second>[0-9][0-9])
                (?:\.(?P<fraction>[0-9]*))?
                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)

    def construct_yaml_timestamp(self, node):
        """Construct a datetime.date (date only) or a
        datetime.datetime (aware if a timezone is given)."""
        value = self.construct_scalar(node)
        # Fix: match against the constructed scalar value instead of the
        # raw node.value, so the construct_scalar result (which handles
        # the '=' value-key indirection) is actually used.
        match = self.timestamp_regexp.match(value)
        values = match.groupdict()
        year = int(values['year'])
        month = int(values['month'])
        day = int(values['day'])
        if not values['hour']:
            return datetime.date(year, month, day)
        hour = int(values['hour'])
        minute = int(values['minute'])
        second = int(values['second'])
        fraction = 0
        tzinfo = None
        if values['fraction']:
            # Truncate/zero-pad the fraction to microseconds.
            fraction = values['fraction'][:6]
            while len(fraction) < 6:
                fraction += '0'
            fraction = int(fraction)
        if values['tz_sign']:
            tz_hour = int(values['tz_hour'])
            tz_minute = int(values['tz_minute'] or 0)
            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
            if values['tz_sign'] == '-':
                delta = -delta
            tzinfo = datetime.timezone(delta)
        elif values['tz']:
            tzinfo = datetime.timezone.utc
        return datetime.datetime(year, month, day, hour, minute, second, fraction,
                                 tzinfo=tzinfo)

    def construct_yaml_omap(self, node):
        """Construct an ordered map as a list of (key, value) tuples.

        Two-phase: yields the empty list first so anchored references
        resolve, then fills it in.
        """
        # Note: we do not check for duplicate keys, because it's too
        # CPU-expensive.
        omap = []
        yield omap
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing an ordered map", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing an ordered map", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            omap.append((key, value))

    def construct_yaml_pairs(self, node):
        """Construct !!pairs as a list of (key, value) tuples."""
        # Note: the same code as `construct_yaml_omap`.
        pairs = []
        yield pairs
        if not isinstance(node, SequenceNode):
            raise ConstructorError("while constructing pairs", node.start_mark,
                    "expected a sequence, but found %s" % node.id, node.start_mark)
        for subnode in node.value:
            if not isinstance(subnode, MappingNode):
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a mapping of length 1, but found %s" % subnode.id,
                        subnode.start_mark)
            if len(subnode.value) != 1:
                raise ConstructorError("while constructing pairs", node.start_mark,
                        "expected a single mapping item, but found %d items" % len(subnode.value),
                        subnode.start_mark)
            key_node, value_node = subnode.value[0]
            key = self.construct_object(key_node)
            value = self.construct_object(value_node)
            pairs.append((key, value))

    def construct_yaml_set(self, node):
        """Construct a set from a mapping node (two-phase)."""
        data = set()
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_str(self, node):
        """Construct a str."""
        return self.construct_scalar(node)

    def construct_yaml_seq(self, node):
        """Construct a list (two-phase, to support self-references)."""
        data = []
        yield data
        data.extend(self.construct_sequence(node))

    def construct_yaml_map(self, node):
        """Construct a dict (two-phase, to support self-references)."""
        data = {}
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_yaml_object(self, node, cls):
        """Construct an instance of *cls* without calling __init__;
        state comes from __setstate__ or __dict__ update (two-phase)."""
        data = cls.__new__(cls)
        yield data
        if hasattr(data, '__setstate__'):
            state = self.construct_mapping(node, deep=True)
            data.__setstate__(state)
        else:
            state = self.construct_mapping(node)
            data.__dict__.update(state)

    def construct_undefined(self, node):
        """Fallback for tags with no registered constructor."""
        raise ConstructorError(None, None,
                "could not determine a constructor for the tag %r" % node.tag,
                node.start_mark)
|
||||
|
||||
# Register the constructors for the standard YAML 1.1 tags on
# SafeConstructor.  The final ``None`` key is the fallback applied to
# any tag without a dedicated constructor; it raises ConstructorError.
_safe_constructors_by_tag = {
    'tag:yaml.org,2002:null': SafeConstructor.construct_yaml_null,
    'tag:yaml.org,2002:bool': SafeConstructor.construct_yaml_bool,
    'tag:yaml.org,2002:int': SafeConstructor.construct_yaml_int,
    'tag:yaml.org,2002:float': SafeConstructor.construct_yaml_float,
    'tag:yaml.org,2002:binary': SafeConstructor.construct_yaml_binary,
    'tag:yaml.org,2002:timestamp': SafeConstructor.construct_yaml_timestamp,
    'tag:yaml.org,2002:omap': SafeConstructor.construct_yaml_omap,
    'tag:yaml.org,2002:pairs': SafeConstructor.construct_yaml_pairs,
    'tag:yaml.org,2002:set': SafeConstructor.construct_yaml_set,
    'tag:yaml.org,2002:str': SafeConstructor.construct_yaml_str,
    'tag:yaml.org,2002:seq': SafeConstructor.construct_yaml_seq,
    'tag:yaml.org,2002:map': SafeConstructor.construct_yaml_map,
    None: SafeConstructor.construct_undefined,
}
for _tag, _constructor in _safe_constructors_by_tag.items():
    SafeConstructor.add_constructor(_tag, _constructor)
del _safe_constructors_by_tag, _tag, _constructor
|
||||
|
||||
class FullConstructor(SafeConstructor):
    """
    Constructor for the python/* tags on top of the safe tags.

    Can build Python objects from already-imported modules, but (unlike
    UnsafeConstructor) does not import modules itself, rejects
    non-class callables, and blacklists dangerous instance-state keys.
    """

    # 'extend' is blacklisted because it is used by
    # construct_python_object_apply to add `listitems` to a newly generate
    # python instance
    def get_state_keys_blacklist(self):
        """Regexes (as strings) of state keys that must never be set."""
        return ['^extend$', '^__.*__$']

    def get_state_keys_blacklist_regexp(self):
        """Return (and cache) the compiled blacklist alternation."""
        if not hasattr(self, 'state_keys_blacklist_regexp'):
            self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')')
        return self.state_keys_blacklist_regexp

    def construct_python_str(self, node):
        """Construct a str from a !!python/str scalar."""
        return self.construct_scalar(node)

    def construct_python_unicode(self, node):
        """Construct a str from a legacy !!python/unicode scalar."""
        return self.construct_scalar(node)

    def construct_python_bytes(self, node):
        """Decode a !!python/bytes scalar (base64) into bytes."""
        try:
            value = self.construct_scalar(node).encode('ascii')
        except UnicodeEncodeError as exc:
            raise ConstructorError(None, None,
                    "failed to convert base64 data into ascii: %s" % exc,
                    node.start_mark)
        try:
            if hasattr(base64, 'decodebytes'):
                return base64.decodebytes(value)
            else:
                return base64.decodestring(value)
        except binascii.Error as exc:
            raise ConstructorError(None, None,
                    "failed to decode base64 data: %s" % exc, node.start_mark)

    def construct_python_long(self, node):
        """Construct an int from a legacy !!python/long scalar."""
        return self.construct_yaml_int(node)

    def construct_python_complex(self, node):
        """Construct a complex from its string form."""
        return complex(self.construct_scalar(node))

    def construct_python_tuple(self, node):
        """Construct a tuple from a sequence node."""
        return tuple(self.construct_sequence(node))

    def find_python_module(self, name, mark, unsafe=False):
        """Return the module *name*; import it only when unsafe=True,
        otherwise require it to be already imported."""
        if not name:
            raise ConstructorError("while constructing a Python module", mark,
                    "expected non-empty name appended to the tag", mark)
        if unsafe:
            try:
                __import__(name)
            except ImportError as exc:
                raise ConstructorError("while constructing a Python module", mark,
                        "cannot find module %r (%s)" % (name, exc), mark)
        if name not in sys.modules:
            raise ConstructorError("while constructing a Python module", mark,
                    "module %r is not imported" % name, mark)
        return sys.modules[name]

    def find_python_name(self, name, mark, unsafe=False):
        """Resolve a dotted 'module.attr' name (bare names resolve in
        builtins); import the module only when unsafe=True."""
        if not name:
            raise ConstructorError("while constructing a Python object", mark,
                    "expected non-empty name appended to the tag", mark)
        if '.' in name:
            module_name, object_name = name.rsplit('.', 1)
        else:
            module_name = 'builtins'
            object_name = name
        if unsafe:
            try:
                __import__(module_name)
            except ImportError as exc:
                raise ConstructorError("while constructing a Python object", mark,
                        "cannot find module %r (%s)" % (module_name, exc), mark)
        if module_name not in sys.modules:
            raise ConstructorError("while constructing a Python object", mark,
                    "module %r is not imported" % module_name, mark)
        module = sys.modules[module_name]
        if not hasattr(module, object_name):
            raise ConstructorError("while constructing a Python object", mark,
                    "cannot find %r in the module %r"
                    % (object_name, module.__name__), mark)
        return getattr(module, object_name)

    def construct_python_name(self, suffix, node):
        """!!python/name:mod.attr -- the scalar value must be empty."""
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError("while constructing a Python name", node.start_mark,
                    "expected the empty value, but found %r" % value, node.start_mark)
        return self.find_python_name(suffix, node.start_mark)

    def construct_python_module(self, suffix, node):
        """!!python/module:name -- the scalar value must be empty."""
        value = self.construct_scalar(node)
        if value:
            raise ConstructorError("while constructing a Python module", node.start_mark,
                    "expected the empty value, but found %r" % value, node.start_mark)
        return self.find_python_module(suffix, node.start_mark)

    def make_python_instance(self, suffix, node,
            args=None, kwds=None, newobj=False, unsafe=False):
        """Create an instance of the class named by *suffix*.

        newobj=True uses cls.__new__ (pickle 'new' semantics) instead of
        calling the class.  Unless unsafe=True, *suffix* must resolve to
        an actual class.
        """
        if not args:
            args = []
        if not kwds:
            kwds = {}
        cls = self.find_python_name(suffix, node.start_mark)
        if not (unsafe or isinstance(cls, type)):
            raise ConstructorError("while constructing a Python instance", node.start_mark,
                    "expected a class, but found %r" % type(cls),
                    node.start_mark)
        if newobj and isinstance(cls, type):
            return cls.__new__(cls, *args, **kwds)
        else:
            return cls(*args, **kwds)

    def set_python_instance_state(self, instance, state, unsafe=False):
        """Apply pickled-style *state* to *instance*.

        Uses __setstate__ when available; otherwise updates __dict__
        and/or sets slot attributes.  Unless unsafe=True, every key is
        checked against the state-key blacklist.
        """
        if hasattr(instance, '__setstate__'):
            instance.__setstate__(state)
        else:
            slotstate = {}
            if isinstance(state, tuple) and len(state) == 2:
                # (dict_state, slot_state) pair, as produced by pickle.
                state, slotstate = state
            if hasattr(instance, '__dict__'):
                if not unsafe and state:
                    for key in state.keys():
                        self.check_state_key(key)
                instance.__dict__.update(state)
            elif state:
                slotstate.update(state)
            for key, value in slotstate.items():
                if not unsafe:
                    self.check_state_key(key)
                setattr(instance, key, value)

    def construct_python_object(self, suffix, node):
        """Two-phase construction of !!python/object:module.name."""
        # Format:
        #   !!python/object:module.name { ... state ... }
        instance = self.make_python_instance(suffix, node, newobj=True)
        yield instance
        deep = hasattr(instance, '__setstate__')
        state = self.construct_mapping(node, deep=deep)
        self.set_python_instance_state(instance, state)

    def construct_python_object_apply(self, suffix, node, newobj=False):
        """Construct via callable application (pickle reduce protocol)."""
        # Format:
        #   !!python/object/apply       # (or !!python/object/new)
        #   args: [ ... arguments ... ]
        #   kwds: { ... keywords ... }
        #   state: ... state ...
        #   listitems: [ ... listitems ... ]
        #   dictitems: { ... dictitems ... }
        # or short format:
        #   !!python/object/apply [ ... arguments ... ]
        # The difference between !!python/object/apply and !!python/object/new
        # is how an object is created, check make_python_instance for details.
        if isinstance(node, SequenceNode):
            args = self.construct_sequence(node, deep=True)
            kwds = {}
            state = {}
            listitems = []
            dictitems = {}
        else:
            value = self.construct_mapping(node, deep=True)
            args = value.get('args', [])
            kwds = value.get('kwds', {})
            state = value.get('state', {})
            listitems = value.get('listitems', [])
            dictitems = value.get('dictitems', {})
        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
        if state:
            self.set_python_instance_state(instance, state)
        if listitems:
            instance.extend(listitems)
        if dictitems:
            for key in dictitems:
                instance[key] = dictitems[key]
        return instance

    def construct_python_object_new(self, suffix, node):
        """!!python/object/new: -- apply with __new__ semantics."""
        return self.construct_python_object_apply(suffix, node, newobj=True)
|
||||
|
||||
# Register FullConstructor's handlers for the python/* tags.  Scalar and
# collection tags use plain constructors; the tags ending in ':' carry a
# dotted-name suffix and therefore need multi-constructors.
_full_tag_constructors = {
    'tag:yaml.org,2002:python/none': FullConstructor.construct_yaml_null,
    'tag:yaml.org,2002:python/bool': FullConstructor.construct_yaml_bool,
    'tag:yaml.org,2002:python/str': FullConstructor.construct_python_str,
    'tag:yaml.org,2002:python/unicode': FullConstructor.construct_python_unicode,
    'tag:yaml.org,2002:python/bytes': FullConstructor.construct_python_bytes,
    'tag:yaml.org,2002:python/int': FullConstructor.construct_yaml_int,
    'tag:yaml.org,2002:python/long': FullConstructor.construct_python_long,
    'tag:yaml.org,2002:python/float': FullConstructor.construct_yaml_float,
    'tag:yaml.org,2002:python/complex': FullConstructor.construct_python_complex,
    'tag:yaml.org,2002:python/list': FullConstructor.construct_yaml_seq,
    'tag:yaml.org,2002:python/tuple': FullConstructor.construct_python_tuple,
    'tag:yaml.org,2002:python/dict': FullConstructor.construct_yaml_map,
}
for _tag, _constructor in _full_tag_constructors.items():
    FullConstructor.add_constructor(_tag, _constructor)

_full_multi_constructors = {
    'tag:yaml.org,2002:python/name:': FullConstructor.construct_python_name,
    'tag:yaml.org,2002:python/module:': FullConstructor.construct_python_module,
    'tag:yaml.org,2002:python/object:': FullConstructor.construct_python_object,
    'tag:yaml.org,2002:python/object/new:': FullConstructor.construct_python_object_new,
}
for _tag_prefix, _multi_constructor in _full_multi_constructors.items():
    FullConstructor.add_multi_constructor(_tag_prefix, _multi_constructor)

del _full_tag_constructors, _full_multi_constructors
del _tag, _constructor, _tag_prefix, _multi_constructor
|
||||
|
||||
class UnsafeConstructor(FullConstructor):
    """FullConstructor with the unsafe code paths switched on.

    Each override simply re-invokes the FullConstructor implementation
    with unsafe=True, enabling behavior that FullConstructor refuses by
    default.
    """

    def find_python_module(self, name, mark):
        return super().find_python_module(name, mark, unsafe=True)

    def find_python_name(self, name, mark):
        return super().find_python_name(name, mark, unsafe=True)

    def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
        return super().make_python_instance(
            suffix, node, args, kwds, newobj, unsafe=True)

    def set_python_instance_state(self, instance, state):
        return super().set_python_instance_state(instance, state, unsafe=True)
|
||||
|
||||
# python/object/apply: is registered only on UnsafeConstructor, not on
# FullConstructor above.
UnsafeConstructor.add_multi_constructor(
    'tag:yaml.org,2002:python/object/apply:',
    UnsafeConstructor.construct_python_object_apply)
|
||||
|
||||
# Constructor is same as UnsafeConstructor. Need to leave this in place in case
# people have extended it directly.
class Constructor(UnsafeConstructor):
    """Backwards-compatibility alias kept for code that subclasses it."""
    pass
|
||||
|
|
@ -1,101 +0,0 @@
|
|||
|
||||
__all__ = [
|
||||
'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader',
|
||||
'CBaseDumper', 'CSafeDumper', 'CDumper'
|
||||
]
|
||||
|
||||
from _yaml import CParser, CEmitter
|
||||
|
||||
from .constructor import *
|
||||
|
||||
from .serializer import *
|
||||
from .representer import *
|
||||
|
||||
from .resolver import *
|
||||
|
||||
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
    """Loader built on libyaml's CParser with the base constructor/resolver."""

    def __init__(self, stream):
        # Each mixin base is initialized explicitly, in pipeline order.
        CParser.__init__(self, stream)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)
|
||||
|
||||
class CSafeLoader(CParser, SafeConstructor, Resolver):
    """Loader built on libyaml's CParser with the safe constructor."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)
|
||||
|
||||
class CFullLoader(CParser, FullConstructor, Resolver):
    """Loader built on libyaml's CParser with FullConstructor."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        FullConstructor.__init__(self)
        Resolver.__init__(self)
|
||||
|
||||
class CUnsafeLoader(CParser, UnsafeConstructor, Resolver):
    """Loader built on libyaml's CParser with UnsafeConstructor.

    Not safe on untrusted input: UnsafeConstructor enables the unsafe
    python/* construction paths.
    """

    def __init__(self, stream):
        CParser.__init__(self, stream)
        UnsafeConstructor.__init__(self)
        Resolver.__init__(self)
|
||||
|
||||
class CLoader(CParser, Constructor, Resolver):
    """Loader built on libyaml's CParser with Constructor (same as
    UnsafeConstructor — see constructor.py); not safe on untrusted input."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        Constructor.__init__(self)
        Resolver.__init__(self)
|
||||
|
||||
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
    """Dumper built on libyaml's CEmitter with the base representer/resolver.

    Parameters mirror CEmitter/BaseRepresenter options and are forwarded
    unchanged to the corresponding base initializers.
    """

    def __init__(self, stream,
            default_style=None, default_flow_style=False,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None, sort_keys=True):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        # Consistency fix: initialize the bases this class actually declares.
        # The original called Representer.__init__/Resolver.__init__ here,
        # which presumably resolved to the same inherited base __init__s —
        # but this class inherits BaseRepresenter/BaseResolver, so call
        # those directly (matches the pattern used by the other dumpers).
        BaseRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style, sort_keys=sort_keys)
        BaseResolver.__init__(self)
|
||||
|
||||
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
    """Dumper built on libyaml's CEmitter with the safe representer.

    Parameters mirror CEmitter/SafeRepresenter options and are forwarded
    unchanged to the corresponding base initializers.
    """

    def __init__(self, stream,
            default_style=None, default_flow_style=False,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None, sort_keys=True):
        # CEmitter owns both the emitting and serializing options here.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style, sort_keys=sort_keys)
        Resolver.__init__(self)
|
||||
|
||||
class CDumper(CEmitter, Serializer, Representer, Resolver):
    """Dumper built on libyaml's CEmitter with the full representer.

    Parameters mirror CEmitter/Representer options and are forwarded
    unchanged to the corresponding base initializers.  Note Serializer is
    listed as a base but not explicitly initialized here — CEmitter takes
    the serializer options (encoding, explicit_start/end, version, tags).
    """

    def __init__(self, stream,
            default_style=None, default_flow_style=False,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None, sort_keys=True):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style, sort_keys=sort_keys)
        Resolver.__init__(self)
|
||||
|
||||
|
|
@ -1,62 +0,0 @@
|
|||
|
||||
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
|
||||
|
||||
from .emitter import *
|
||||
from .serializer import *
|
||||
from .representer import *
|
||||
from .resolver import *
|
||||
|
||||
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
    """Pure-Python dumper with the base representer and resolver.

    Parameters are forwarded unchanged to Emitter, Serializer and
    BaseRepresenter.
    """

    def __init__(self, stream,
            default_style=None, default_flow_style=False,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None, sort_keys=True):
        Emitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break)
        Serializer.__init__(self, encoding=encoding,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        # Consistency fix: initialize the bases this class actually declares.
        # The original called Representer.__init__/Resolver.__init__ here,
        # which presumably resolved to the same inherited base __init__s —
        # but this class inherits BaseRepresenter/BaseResolver, so call
        # those directly (matches SafeDumper/Dumper, which init their own
        # declared representer/resolver).
        BaseRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style, sort_keys=sort_keys)
        BaseResolver.__init__(self)
|
||||
|
||||
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
    """Pure-Python dumper with the safe representer.

    Parameters are forwarded unchanged to Emitter, Serializer and
    SafeRepresenter.
    """

    def __init__(self, stream,
            default_style=None, default_flow_style=False,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None, sort_keys=True):
        Emitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break)
        Serializer.__init__(self, encoding=encoding,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style, sort_keys=sort_keys)
        Resolver.__init__(self)
|
||||
|
||||
class Dumper(Emitter, Serializer, Representer, Resolver):
    """Pure-Python dumper with the full representer.

    Parameters are forwarded unchanged to Emitter, Serializer and
    Representer.
    """

    def __init__(self, stream,
            default_style=None, default_flow_style=False,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None, sort_keys=True):
        Emitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break)
        Serializer.__init__(self, encoding=encoding,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style, sort_keys=sort_keys)
        Resolver.__init__(self)
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,75 +0,0 @@
|
|||
|
||||
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
|
||||
|
||||
class Mark:
    """A position in a YAML input stream, used for error reporting."""

    def __init__(self, name, index, line, column, buffer, pointer):
        self.name = name
        self.index = index
        self.line = line
        self.column = column
        self.buffer = buffer
        self.pointer = pointer

    def get_snippet(self, indent=4, max_length=75):
        """Return an indented excerpt of the current line with a caret under
        the marked column, or None when no buffer is available."""
        if self.buffer is None:
            return None
        breaks = '\0\r\n\x85\u2028\u2029'
        limit = max_length/2-1
        head = ''
        start = self.pointer
        # Walk left to the start of the line, eliding with ' ... ' when the
        # excerpt would exceed half the allowed width.
        while start > 0 and self.buffer[start-1] not in breaks:
            start -= 1
            if self.pointer-start > limit:
                head = ' ... '
                start += 5
                break
        tail = ''
        end = self.pointer
        # Walk right to the end of the line, with the same elision rule.
        while end < len(self.buffer) and self.buffer[end] not in breaks:
            end += 1
            if end-self.pointer > limit:
                tail = ' ... '
                end -= 5
                break
        snippet = self.buffer[start:end]
        caret_column = indent + self.pointer - start + len(head)
        return '%s%s%s%s\n%s^' % (
            ' '*indent, head, snippet, tail, ' '*caret_column)

    def __str__(self):
        where = ' in "%s", line %d, column %d' \
                % (self.name, self.line+1, self.column+1)
        snippet = self.get_snippet()
        if snippet is not None:
            where += ':\n' + snippet
        return where
|
||||
|
||||
class YAMLError(Exception):
    """Base class for all errors raised by this package."""
    pass
|
||||
|
||||
class MarkedYAMLError(YAMLError):
    """YAMLError carrying context/problem messages with their Marks.

    context/problem/note are message strings (any may be None);
    context_mark/problem_mark are Mark instances locating them.
    """

    def __init__(self, context=None, context_mark=None,
            problem=None, problem_mark=None, note=None):
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note

    def __str__(self):
        lines = []
        if self.context is not None:
            lines.append(self.context)
        # Show the context mark only when it adds information: there is no
        # problem/problem_mark, or the two marks point at different places.
        if self.context_mark is not None \
            and (self.problem is None or self.problem_mark is None
                or self.context_mark.name != self.problem_mark.name
                or self.context_mark.line != self.problem_mark.line
                or self.context_mark.column != self.problem_mark.column):
            lines.append(str(self.context_mark))
        if self.problem is not None:
            lines.append(self.problem)
        if self.problem_mark is not None:
            lines.append(str(self.problem_mark))
        if self.note is not None:
            lines.append(self.note)
        return '\n'.join(lines)
|
||||
|
||||
|
|
@ -1,86 +0,0 @@
|
|||
|
||||
# Abstract classes.
|
||||
|
||||
class Event(object):
    """Base class for all parser/emitter events; carries stream positions."""

    def __init__(self, start_mark=None, end_mark=None):
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        # Only show the attributes a subclass actually defines, in a fixed,
        # readable order.
        shown = ', '.join(
            '%s=%r' % (name, getattr(self, name))
            for name in ('anchor', 'tag', 'implicit', 'value')
            if hasattr(self, name))
        return '%s(%s)' % (self.__class__.__name__, shown)
|
||||
|
||||
class NodeEvent(Event):
    """Event for a node that may carry an anchor."""

    def __init__(self, anchor, start_mark=None, end_mark=None):
        super().__init__(start_mark, end_mark)
        self.anchor = anchor
|
||||
|
||||
class CollectionStartEvent(NodeEvent):
    """Common start event for sequences and mappings."""

    def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
            flow_style=None):
        super().__init__(anchor, start_mark, end_mark)
        self.tag = tag
        self.implicit = implicit
        self.flow_style = flow_style
|
||||
|
||||
class CollectionEndEvent(Event):
    """Common end event for sequences and mappings."""
    pass
|
||||
|
||||
# Implementations.
|
||||
|
||||
class StreamStartEvent(Event):
    """First event of every stream; records the detected encoding."""

    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        super().__init__(start_mark, end_mark)
        self.encoding = encoding
|
||||
|
||||
class StreamEndEvent(Event):
    """Last event of every stream."""
    pass
|
||||
|
||||
class DocumentStartEvent(Event):
    """Start of a document; records whether '---' was explicit, plus any
    %YAML version and %TAG handles."""

    def __init__(self, start_mark=None, end_mark=None,
            explicit=None, version=None, tags=None):
        super().__init__(start_mark, end_mark)
        self.explicit = explicit
        self.version = version
        self.tags = tags
|
||||
|
||||
class DocumentEndEvent(Event):
    """End of a document; records whether '...' was explicit."""

    def __init__(self, start_mark=None, end_mark=None,
            explicit=None):
        super().__init__(start_mark, end_mark)
        self.explicit = explicit
|
||||
|
||||
class AliasEvent(NodeEvent):
    """Reference to a previously anchored node (the anchor attribute holds
    the alias name)."""
    pass
|
||||
|
||||
class ScalarEvent(NodeEvent):
    """A scalar value, with its tag, implicit-resolution flags and style."""

    def __init__(self, anchor, tag, implicit, value,
            start_mark=None, end_mark=None, style=None):
        super().__init__(anchor, start_mark, end_mark)
        self.tag = tag
        self.implicit = implicit
        self.value = value
        self.style = style
|
||||
|
||||
class SequenceStartEvent(CollectionStartEvent):
    """Start of a sequence node."""
    pass
|
||||
|
||||
class SequenceEndEvent(CollectionEndEvent):
    """End of a sequence node."""
    pass
|
||||
|
||||
class MappingStartEvent(CollectionStartEvent):
    """Start of a mapping node."""
    pass
|
||||
|
||||
class MappingEndEvent(CollectionEndEvent):
    """End of a mapping node."""
    pass
|
||||
|
||||
|
|
@ -1,63 +0,0 @@
|
|||
|
||||
__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader']
|
||||
|
||||
from .reader import *
|
||||
from .scanner import *
|
||||
from .parser import *
|
||||
from .composer import *
|
||||
from .constructor import *
|
||||
from .resolver import *
|
||||
|
||||
class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
    """Pure-Python loader pipeline with the base constructor/resolver."""

    def __init__(self, stream):
        # Each stage of the pipeline is initialized explicitly, in order.
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)
|
||||
|
||||
class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver):
    """Pure-Python loader using FullConstructor (see the UnsafeLoader note
    below: FullLoader is the recommended loader for almost all YAML)."""

    def __init__(self, stream):
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        FullConstructor.__init__(self)
        Resolver.__init__(self)
|
||||
|
||||
class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
    """Pure-Python loader using SafeConstructor."""

    def __init__(self, stream):
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)
|
||||
|
||||
class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
    """Pure-Python loader using Constructor; per the note below, unsafe on
    untrusted input."""

    def __init__(self, stream):
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        Constructor.__init__(self)
        Resolver.__init__(self)
|
||||
|
||||
# UnsafeLoader is the same as Loader (which is and was always unsafe on
# untrusted input). Use of either Loader or UnsafeLoader should be rare, since
# FullLoad should be able to load almost all YAML safely. Loader is left intact
# to ensure backwards compatibility.
class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
    """Explicitly named unsafe loader; identical pipeline to Loader."""

    def __init__(self, stream):
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        Constructor.__init__(self)
        Resolver.__init__(self)
|
||||
|
|
@ -1,49 +0,0 @@
|
|||
|
||||
class Node(object):
    """Base class for composed representation-graph nodes."""

    def __init__(self, tag, value, start_mark, end_mark):
        self.tag = tag
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        # Full value repr; marks are omitted to keep the output readable.
        return '%s(tag=%r, value=%r)' % (
            self.__class__.__name__, self.tag, self.value)
|
||||
|
||||
class ScalarNode(Node):
    """A node holding a scalar value; style records the scalar style."""

    id = 'scalar'

    def __init__(self, tag, value,
            start_mark=None, end_mark=None, style=None):
        super().__init__(tag, value, start_mark, end_mark)
        self.style = style
|
||||
|
||||
class CollectionNode(Node):
    """Common base for sequence and mapping nodes; flow_style records
    whether the collection was written in flow style."""

    def __init__(self, tag, value,
            start_mark=None, end_mark=None, flow_style=None):
        super().__init__(tag, value, start_mark, end_mark)
        self.flow_style = flow_style
|
||||
|
||||
class SequenceNode(CollectionNode):
    """A node whose value is a list of nodes."""
    id = 'sequence'
|
||||
|
||||
class MappingNode(CollectionNode):
    """A node whose value is a list of (key node, value node) pairs."""
    id = 'mapping'
|
||||
|
||||
|
|
@ -1,589 +0,0 @@
|
|||
|
||||
# The following YAML grammar is LL(1) and is parsed by a recursive descent
|
||||
# parser.
|
||||
#
|
||||
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
|
||||
# implicit_document ::= block_node DOCUMENT-END*
|
||||
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
|
||||
# block_node_or_indentless_sequence ::=
|
||||
# ALIAS
|
||||
# | properties (block_content | indentless_block_sequence)?
|
||||
# | block_content
|
||||
# | indentless_block_sequence
|
||||
# block_node ::= ALIAS
|
||||
# | properties block_content?
|
||||
# | block_content
|
||||
# flow_node ::= ALIAS
|
||||
# | properties flow_content?
|
||||
# | flow_content
|
||||
# properties ::= TAG ANCHOR? | ANCHOR TAG?
|
||||
# block_content ::= block_collection | flow_collection | SCALAR
|
||||
# flow_content ::= flow_collection | SCALAR
|
||||
# block_collection ::= block_sequence | block_mapping
|
||||
# flow_collection ::= flow_sequence | flow_mapping
|
||||
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
|
||||
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
|
||||
# block_mapping ::= BLOCK-MAPPING_START
|
||||
# ((KEY block_node_or_indentless_sequence?)?
|
||||
# (VALUE block_node_or_indentless_sequence?)?)*
|
||||
# BLOCK-END
|
||||
# flow_sequence ::= FLOW-SEQUENCE-START
|
||||
# (flow_sequence_entry FLOW-ENTRY)*
|
||||
# flow_sequence_entry?
|
||||
# FLOW-SEQUENCE-END
|
||||
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||||
# flow_mapping ::= FLOW-MAPPING-START
|
||||
# (flow_mapping_entry FLOW-ENTRY)*
|
||||
# flow_mapping_entry?
|
||||
# FLOW-MAPPING-END
|
||||
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||||
#
|
||||
# FIRST sets:
|
||||
#
|
||||
# stream: { STREAM-START }
|
||||
# explicit_document: { DIRECTIVE DOCUMENT-START }
|
||||
# implicit_document: FIRST(block_node)
|
||||
# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
|
||||
# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
|
||||
# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
|
||||
# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
|
||||
# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
|
||||
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
|
||||
# block_sequence: { BLOCK-SEQUENCE-START }
|
||||
# block_mapping: { BLOCK-MAPPING-START }
|
||||
# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
|
||||
# indentless_sequence: { ENTRY }
|
||||
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
|
||||
# flow_sequence: { FLOW-SEQUENCE-START }
|
||||
# flow_mapping: { FLOW-MAPPING-START }
|
||||
# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
|
||||
# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
|
||||
|
||||
__all__ = ['Parser', 'ParserError']
|
||||
|
||||
from .error import MarkedYAMLError
|
||||
from .tokens import *
|
||||
from .events import *
|
||||
from .scanner import *
|
||||
|
||||
class ParserError(MarkedYAMLError):
    """Raised when the event stream cannot be produced from the tokens."""
    pass
|
||||
|
||||
class Parser:
    # Since writing a recursive-descendant parser is a straightforward task, we
    # do not give many comments here.

    # Tag handles available when the document declares no %TAG directives.
    DEFAULT_TAGS = {
        '!': '!',
        '!!': 'tag:yaml.org,2002:',
    }

    def __init__(self):
        # One-event lookahead buffer used by check/peek/get_event.
        self.current_event = None
        self.yaml_version = None
        self.tag_handles = {}
        # Stack of parser-state methods to return to, and stack of start
        # marks for open collections (used in error messages).
        self.states = []
        self.marks = []
        # The parser is a state machine; 'state' is the next production.
        self.state = self.parse_stream_start
|
||||
|
||||
    def dispose(self):
        """Drop state-method references so the parser can be collected."""
        # Reset the state attributes (to clear self-references)
        self.states = []
        self.state = None
|
||||
|
||||
def check_event(self, *choices):
|
||||
# Check the type of the next event.
|
||||
if self.current_event is None:
|
||||
if self.state:
|
||||
self.current_event = self.state()
|
||||
if self.current_event is not None:
|
||||
if not choices:
|
||||
return True
|
||||
for choice in choices:
|
||||
if isinstance(self.current_event, choice):
|
||||
return True
|
||||
return False
|
||||
|
||||
def peek_event(self):
|
||||
# Get the next event.
|
||||
if self.current_event is None:
|
||||
if self.state:
|
||||
self.current_event = self.state()
|
||||
return self.current_event
|
||||
|
||||
def get_event(self):
|
||||
# Get the next event and proceed further.
|
||||
if self.current_event is None:
|
||||
if self.state:
|
||||
self.current_event = self.state()
|
||||
value = self.current_event
|
||||
self.current_event = None
|
||||
return value
|
||||
|
||||
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
|
||||
# implicit_document ::= block_node DOCUMENT-END*
|
||||
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
|
||||
|
||||
    def parse_stream_start(self):
        """Produce the StreamStartEvent that begins every event stream."""

        # Parse the stream start.
        token = self.get_token()
        event = StreamStartEvent(token.start_mark, token.end_mark,
                encoding=token.encoding)

        # Prepare the next state.
        self.state = self.parse_implicit_document_start

        return event
|
||||
|
||||
    def parse_implicit_document_start(self):
        """Start a document that has no directives or '---' marker; fall back
        to parse_document_start otherwise."""

        # Parse an implicit document.
        if not self.check_token(DirectiveToken, DocumentStartToken,
                StreamEndToken):
            self.tag_handles = self.DEFAULT_TAGS
            token = self.peek_token()
            # Zero-width event: nothing is consumed for an implicit start.
            start_mark = end_mark = token.start_mark
            event = DocumentStartEvent(start_mark, end_mark,
                    explicit=False)

            # Prepare the next state.
            self.states.append(self.parse_document_end)
            self.state = self.parse_block_node

            return event

        else:
            return self.parse_document_start()
|
||||
|
||||
    def parse_document_start(self):
        """Parse an explicit document start ('---' with optional directives),
        or produce StreamEndEvent at the end of the stream."""

        # Parse any extra document end indicators.
        while self.check_token(DocumentEndToken):
            self.get_token()

        # Parse an explicit document.
        if not self.check_token(StreamEndToken):
            token = self.peek_token()
            start_mark = token.start_mark
            version, tags = self.process_directives()
            if not self.check_token(DocumentStartToken):
                raise ParserError(None, None,
                        "expected '<document start>', but found %r"
                        % self.peek_token().id,
                        self.peek_token().start_mark)
            token = self.get_token()
            end_mark = token.end_mark
            event = DocumentStartEvent(start_mark, end_mark,
                    explicit=True, version=version, tags=tags)
            self.states.append(self.parse_document_end)
            self.state = self.parse_document_content
        else:
            # Parse the end of the stream.
            token = self.get_token()
            event = StreamEndEvent(token.start_mark, token.end_mark)
            # Both stacks must be empty when the stream ends.
            assert not self.states
            assert not self.marks
            self.state = None
        return event
|
||||
|
||||
    def parse_document_end(self):
        """Produce DocumentEndEvent, consuming an explicit '...' if present."""

        # Parse the document end.
        token = self.peek_token()
        start_mark = end_mark = token.start_mark
        explicit = False
        if self.check_token(DocumentEndToken):
            token = self.get_token()
            end_mark = token.end_mark
            explicit = True
        event = DocumentEndEvent(start_mark, end_mark,
                explicit=explicit)

        # Prepare the next state.
        self.state = self.parse_document_start

        return event
|
||||
|
||||
    def parse_document_content(self):
        """Parse a document body; an empty document yields an empty scalar."""
        if self.check_token(DirectiveToken,
                DocumentStartToken, DocumentEndToken, StreamEndToken):
            event = self.process_empty_scalar(self.peek_token().start_mark)
            self.state = self.states.pop()
            return event
        else:
            return self.parse_block_node()
|
||||
|
||||
    def process_directives(self):
        """Consume %YAML/%TAG directive tokens preceding a document.

        Returns (yaml_version, tag_handles_copy) where tag_handles_copy is
        None when the document declared no %TAG directives; afterwards the
        parser's tag_handles are completed with DEFAULT_TAGS.
        """
        self.yaml_version = None
        self.tag_handles = {}
        while self.check_token(DirectiveToken):
            token = self.get_token()
            if token.name == 'YAML':
                if self.yaml_version is not None:
                    raise ParserError(None, None,
                            "found duplicate YAML directive", token.start_mark)
                major, minor = token.value
                if major != 1:
                    raise ParserError(None, None,
                            "found incompatible YAML document (version 1.* is required)",
                            token.start_mark)
                self.yaml_version = token.value
            elif token.name == 'TAG':
                handle, prefix = token.value
                if handle in self.tag_handles:
                    raise ParserError(None, None,
                            "duplicate tag handle %r" % handle,
                            token.start_mark)
                self.tag_handles[handle] = prefix
        # Copy before the defaults are merged in, so the event reports only
        # the handles this document explicitly declared.
        if self.tag_handles:
            value = self.yaml_version, self.tag_handles.copy()
        else:
            value = self.yaml_version, None
        for key in self.DEFAULT_TAGS:
            if key not in self.tag_handles:
                self.tag_handles[key] = self.DEFAULT_TAGS[key]
        return value
|
||||
|
||||
# block_node_or_indentless_sequence ::= ALIAS
|
||||
# | properties (block_content | indentless_block_sequence)?
|
||||
# | block_content
|
||||
# | indentless_block_sequence
|
||||
# block_node ::= ALIAS
|
||||
# | properties block_content?
|
||||
# | block_content
|
||||
# flow_node ::= ALIAS
|
||||
# | properties flow_content?
|
||||
# | flow_content
|
||||
# properties ::= TAG ANCHOR? | ANCHOR TAG?
|
||||
# block_content ::= block_collection | flow_collection | SCALAR
|
||||
# flow_content ::= flow_collection | SCALAR
|
||||
# block_collection ::= block_sequence | block_mapping
|
||||
# flow_collection ::= flow_sequence | flow_mapping
|
||||
|
||||
    def parse_block_node(self):
        """Parse a node in block context."""
        return self.parse_node(block=True)

    def parse_flow_node(self):
        """Parse a node in flow context."""
        return self.parse_node()

    def parse_block_node_or_indentless_sequence(self):
        """Parse a block node, also allowing an indentless '-' sequence."""
        return self.parse_node(block=True, indentless_sequence=True)
|
||||
|
||||
    def parse_node(self, block=False, indentless_sequence=False):
        """Parse a single node: an alias, or optional anchor/tag properties
        followed by scalar/sequence/mapping content.

        block enables block collections; indentless_sequence additionally
        allows a '-' sequence with no extra indentation.  Returns the event
        for the node (collection contents are parsed by the states this
        method schedules).
        """
        if self.check_token(AliasToken):
            token = self.get_token()
            event = AliasEvent(token.value, token.start_mark, token.end_mark)
            self.state = self.states.pop()
        else:
            anchor = None
            tag = None
            start_mark = end_mark = tag_mark = None
            # Properties may appear in either order: anchor-then-tag or
            # tag-then-anchor.
            if self.check_token(AnchorToken):
                token = self.get_token()
                start_mark = token.start_mark
                end_mark = token.end_mark
                anchor = token.value
                if self.check_token(TagToken):
                    token = self.get_token()
                    tag_mark = token.start_mark
                    end_mark = token.end_mark
                    tag = token.value
            elif self.check_token(TagToken):
                token = self.get_token()
                start_mark = tag_mark = token.start_mark
                end_mark = token.end_mark
                tag = token.value
                if self.check_token(AnchorToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    anchor = token.value
            # A tag token is a (handle, suffix) pair; resolve the handle
            # against the handles declared by %TAG directives.
            if tag is not None:
                handle, suffix = tag
                if handle is not None:
                    if handle not in self.tag_handles:
                        raise ParserError("while parsing a node", start_mark,
                                "found undefined tag handle %r" % handle,
                                tag_mark)
                    tag = self.tag_handles[handle]+suffix
                else:
                    tag = suffix
            #if tag == '!':
            #    raise ParserError("while parsing a node", start_mark,
            #            "found non-specific tag '!'", tag_mark,
            #            "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
            if start_mark is None:
                start_mark = end_mark = self.peek_token().start_mark
            event = None
            implicit = (tag is None or tag == '!')
            if indentless_sequence and self.check_token(BlockEntryToken):
                end_mark = self.peek_token().end_mark
                event = SequenceStartEvent(anchor, tag, implicit,
                        start_mark, end_mark)
                self.state = self.parse_indentless_sequence_entry
            else:
                if self.check_token(ScalarToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    # implicit is a pair: (resolvable as plain, resolvable
                    # as quoted).
                    if (token.plain and tag is None) or tag == '!':
                        implicit = (True, False)
                    elif tag is None:
                        implicit = (False, True)
                    else:
                        implicit = (False, False)
                    event = ScalarEvent(anchor, tag, implicit, token.value,
                            start_mark, end_mark, style=token.style)
                    self.state = self.states.pop()
                elif self.check_token(FlowSequenceStartToken):
                    end_mark = self.peek_token().end_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    self.state = self.parse_flow_sequence_first_entry
                elif self.check_token(FlowMappingStartToken):
                    end_mark = self.peek_token().end_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    self.state = self.parse_flow_mapping_first_key
                elif block and self.check_token(BlockSequenceStartToken):
                    end_mark = self.peek_token().start_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    self.state = self.parse_block_sequence_first_entry
                elif block and self.check_token(BlockMappingStartToken):
                    end_mark = self.peek_token().start_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    self.state = self.parse_block_mapping_first_key
                elif anchor is not None or tag is not None:
                    # Empty scalars are allowed even if a tag or an anchor is
                    # specified.
                    event = ScalarEvent(anchor, tag, (implicit, False), '',
                            start_mark, end_mark)
                    self.state = self.states.pop()
                else:
                    if block:
                        node = 'block'
                    else:
                        node = 'flow'
                    token = self.peek_token()
                    raise ParserError("while parsing a %s node" % node, start_mark,
                            "expected the node content, but found %r" % token.id,
                            token.start_mark)
        return event
|
||||
|
||||
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
|
||||
|
||||
    def parse_block_sequence_first_entry(self):
        """Consume BLOCK-SEQUENCE-START, remember its mark for errors, and
        parse the first entry."""
        token = self.get_token()
        self.marks.append(token.start_mark)
        return self.parse_block_sequence_entry()
|
||||
|
||||
    def parse_block_sequence_entry(self):
        """Parse one '-' entry of a block sequence, or its BLOCK-END."""
        if self.check_token(BlockEntryToken):
            token = self.get_token()
            if not self.check_token(BlockEntryToken, BlockEndToken):
                self.states.append(self.parse_block_sequence_entry)
                return self.parse_block_node()
            else:
                # '-' with no node: entry is an empty scalar.
                self.state = self.parse_block_sequence_entry
                return self.process_empty_scalar(token.end_mark)
        if not self.check_token(BlockEndToken):
            token = self.peek_token()
            raise ParserError("while parsing a block collection", self.marks[-1],
                    "expected <block end>, but found %r" % token.id, token.start_mark)
        token = self.get_token()
        event = SequenceEndEvent(token.start_mark, token.end_mark)
        self.state = self.states.pop()
        self.marks.pop()
        return event
|
||||
|
||||
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
|
||||
|
||||
def parse_indentless_sequence_entry(self):
    """Parse one '-' entry of an indentless sequence, or emit its end."""
    if self.check_token(BlockEntryToken):
        entry = self.get_token()
        if self.check_token(BlockEntryToken,
                KeyToken, ValueToken, BlockEndToken):
            # '-' followed by another structural token: empty entry.
            self.state = self.parse_indentless_sequence_entry
            return self.process_empty_scalar(entry.end_mark)
        self.states.append(self.parse_indentless_sequence_entry)
        return self.parse_block_node()
    # An indentless sequence has no closing token, so the end event is
    # zero-width, anchored at the upcoming token.
    upcoming = self.peek_token()
    event = SequenceEndEvent(upcoming.start_mark, upcoming.start_mark)
    self.state = self.states.pop()
    return event
|
||||
|
||||
# block_mapping ::= BLOCK-MAPPING_START
|
||||
# ((KEY block_node_or_indentless_sequence?)?
|
||||
# (VALUE block_node_or_indentless_sequence?)?)*
|
||||
# BLOCK-END
|
||||
|
||||
def parse_block_mapping_first_key(self):
    """Consume BLOCK-MAPPING-START and parse the mapping's first key."""
    # Record the opening position for error messages about this mapping.
    start_token = self.get_token()
    self.marks.append(start_token.start_mark)
    return self.parse_block_mapping_key()
|
||||
|
||||
def parse_block_mapping_key(self):
    """Parse one key of a block mapping, or emit the mapping's end event."""
    if self.check_token(KeyToken):
        key_token = self.get_token()
        if self.check_token(KeyToken, ValueToken, BlockEndToken):
            # '?' with no content: the key is an empty scalar.
            self.state = self.parse_block_mapping_value
            return self.process_empty_scalar(key_token.end_mark)
        self.states.append(self.parse_block_mapping_value)
        return self.parse_block_node_or_indentless_sequence()
    if not self.check_token(BlockEndToken):
        bad = self.peek_token()
        raise ParserError("while parsing a block mapping", self.marks[-1],
                "expected <block end>, but found %r" % bad.id, bad.start_mark)
    end_token = self.get_token()
    event = MappingEndEvent(end_token.start_mark, end_token.end_mark)
    self.state = self.states.pop()
    self.marks.pop()
    return event
|
||||
|
||||
def parse_block_mapping_value(self):
    """Parse the value half of a block mapping entry (may be empty)."""
    if not self.check_token(ValueToken):
        # The key has no ':' -- the value is an empty scalar at the
        # position of the next token.
        self.state = self.parse_block_mapping_key
        return self.process_empty_scalar(self.peek_token().start_mark)
    value_token = self.get_token()
    if self.check_token(KeyToken, ValueToken, BlockEndToken):
        # ':' followed directly by another structural token: empty value.
        self.state = self.parse_block_mapping_key
        return self.process_empty_scalar(value_token.end_mark)
    self.states.append(self.parse_block_mapping_key)
    return self.parse_block_node_or_indentless_sequence()
|
||||
|
||||
# flow_sequence ::= FLOW-SEQUENCE-START
|
||||
# (flow_sequence_entry FLOW-ENTRY)*
|
||||
# flow_sequence_entry?
|
||||
# FLOW-SEQUENCE-END
|
||||
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||||
#
|
||||
# Note that while production rules for both flow_sequence_entry and
|
||||
# flow_mapping_entry are equal, their interpretations are different.
|
||||
# For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
|
||||
# generate an inline mapping (set syntax).
|
||||
|
||||
def parse_flow_sequence_first_entry(self):
    """Consume FLOW-SEQUENCE-START ('[') and parse the first entry."""
    # Record the opening position for flow-sequence error messages.
    start_token = self.get_token()
    self.marks.append(start_token.start_mark)
    return self.parse_flow_sequence_entry(first=True)
|
||||
|
||||
def parse_flow_sequence_entry(self, first=False):
    """Parse one entry of a flow sequence, or emit its end event.

    With ``first=False`` a separating ',' is required (and consumed)
    before the entry.  A KEY token inside a flow sequence starts an
    inline single-pair mapping; the MappingStartEvent is emitted here
    and the key itself is parsed by the mapping-key state.
    """
    if not self.check_token(FlowSequenceEndToken):
        if not first:
            if self.check_token(FlowEntryToken):
                # Consume the ',' separator.
                self.get_token()
            else:
                token = self.peek_token()
                raise ParserError("while parsing a flow sequence", self.marks[-1],
                        "expected ',' or ']', but got %r" % token.id, token.start_mark)

        if self.check_token(KeyToken):
            # '?' (or an implicit key) inside '[...]': an inline mapping.
            # Note: the KEY token is only peeked here; the mapping-key
            # state consumes it.
            token = self.peek_token()
            event = MappingStartEvent(None, None, True,
                    token.start_mark, token.end_mark,
                    flow_style=True)
            self.state = self.parse_flow_sequence_entry_mapping_key
            return event
        elif not self.check_token(FlowSequenceEndToken):
            self.states.append(self.parse_flow_sequence_entry)
            return self.parse_flow_node()
    # ']' reached: close the sequence.
    token = self.get_token()
    event = SequenceEndEvent(token.start_mark, token.end_mark)
    self.state = self.states.pop()
    self.marks.pop()
    return event
|
||||
|
||||
def parse_flow_sequence_entry_mapping_key(self):
    """Parse the key of the inline single-pair mapping in a flow sequence."""
    key_token = self.get_token()
    if self.check_token(ValueToken,
            FlowEntryToken, FlowSequenceEndToken):
        # The key is empty.
        self.state = self.parse_flow_sequence_entry_mapping_value
        return self.process_empty_scalar(key_token.end_mark)
    self.states.append(self.parse_flow_sequence_entry_mapping_value)
    return self.parse_flow_node()
|
||||
|
||||
def parse_flow_sequence_entry_mapping_value(self):
    """Parse the value of the inline single-pair mapping (may be empty)."""
    if not self.check_token(ValueToken):
        # No ':' -- the value is an empty scalar at the next token.
        self.state = self.parse_flow_sequence_entry_mapping_end
        return self.process_empty_scalar(self.peek_token().start_mark)
    value_token = self.get_token()
    if self.check_token(FlowEntryToken, FlowSequenceEndToken):
        # ':' directly followed by ',' or ']': empty value.
        self.state = self.parse_flow_sequence_entry_mapping_end
        return self.process_empty_scalar(value_token.end_mark)
    self.states.append(self.parse_flow_sequence_entry_mapping_end)
    return self.parse_flow_node()
|
||||
|
||||
def parse_flow_sequence_entry_mapping_end(self):
    """Close the inline single-pair mapping and resume the flow sequence."""
    upcoming = self.peek_token()
    self.state = self.parse_flow_sequence_entry
    # The mapping end is zero-width: both marks point at the next token.
    return MappingEndEvent(upcoming.start_mark, upcoming.start_mark)
|
||||
|
||||
# flow_mapping ::= FLOW-MAPPING-START
|
||||
# (flow_mapping_entry FLOW-ENTRY)*
|
||||
# flow_mapping_entry?
|
||||
# FLOW-MAPPING-END
|
||||
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||||
|
||||
def parse_flow_mapping_first_key(self):
    """Consume FLOW-MAPPING-START ('{') and parse the first key."""
    # Record the opening position for flow-mapping error messages.
    start_token = self.get_token()
    self.marks.append(start_token.start_mark)
    return self.parse_flow_mapping_key(first=True)
|
||||
|
||||
def parse_flow_mapping_key(self, first=False):
    """Parse one key of a flow mapping, or emit the mapping's end event.

    With ``first=False`` a separating ',' is required (and consumed)
    before the entry.  An entry without a KEY token is treated as a
    key-only pair whose value is filled in by
    ``parse_flow_mapping_empty_value``.
    """
    if not self.check_token(FlowMappingEndToken):
        if not first:
            if self.check_token(FlowEntryToken):
                # Consume the ',' separator.
                self.get_token()
            else:
                token = self.peek_token()
                raise ParserError("while parsing a flow mapping", self.marks[-1],
                        "expected ',' or '}', but got %r" % token.id, token.start_mark)
        if self.check_token(KeyToken):
            token = self.get_token()
            if not self.check_token(ValueToken,
                    FlowEntryToken, FlowMappingEndToken):
                self.states.append(self.parse_flow_mapping_value)
                return self.parse_flow_node()
            else:
                # '?' with no content: the key is an empty scalar.
                self.state = self.parse_flow_mapping_value
                return self.process_empty_scalar(token.end_mark)
        elif not self.check_token(FlowMappingEndToken):
            # A bare node in key position: its value will be empty.
            self.states.append(self.parse_flow_mapping_empty_value)
            return self.parse_flow_node()
    # '}' reached: close the mapping.
    token = self.get_token()
    event = MappingEndEvent(token.start_mark, token.end_mark)
    self.state = self.states.pop()
    self.marks.pop()
    return event
|
||||
|
||||
def parse_flow_mapping_value(self):
    """Parse the value half of a flow mapping entry (may be empty)."""
    if not self.check_token(ValueToken):
        # The key has no ':' -- empty value at the next token's position.
        self.state = self.parse_flow_mapping_key
        return self.process_empty_scalar(self.peek_token().start_mark)
    value_token = self.get_token()
    if self.check_token(FlowEntryToken, FlowMappingEndToken):
        # ':' directly followed by ',' or '}': empty value.
        self.state = self.parse_flow_mapping_key
        return self.process_empty_scalar(value_token.end_mark)
    self.states.append(self.parse_flow_mapping_key)
    return self.parse_flow_node()
|
||||
|
||||
def parse_flow_mapping_empty_value(self):
    """Supply the empty value for a key-only flow mapping entry."""
    upcoming = self.peek_token()
    self.state = self.parse_flow_mapping_key
    return self.process_empty_scalar(upcoming.start_mark)
|
||||
|
||||
def process_empty_scalar(self, mark):
    """Build a zero-width plain scalar event anchored at *mark*."""
    # (True, False): the scalar is implicit in the plain style only.
    empty_event = ScalarEvent(None, None, (True, False), '', mark, mark)
    return empty_event
|
||||
|
||||
|
|
@ -1,185 +0,0 @@
|
|||
# This module contains abstractions for the input stream. You don't have to
# look further; the code is not pretty.
|
||||
#
|
||||
# We define two classes here.
|
||||
#
|
||||
# Mark(source, line, column)
|
||||
# It's just a record and its only use is producing nice error messages.
|
||||
# Parser does not use it for any other purposes.
|
||||
#
|
||||
# Reader(source, data)
|
||||
# Reader determines the encoding of `data` and converts it to unicode.
|
||||
# Reader provides the following methods and attributes:
|
||||
# reader.peek(length=1) - return the next `length` characters
|
||||
# reader.forward(length=1) - move the current position to `length` characters.
|
||||
# reader.index - the number of the current character.
|
||||
# reader.line, reader.column - the line and the column of the current character.
|
||||
|
||||
__all__ = ['Reader', 'ReaderError']
|
||||
|
||||
from .error import YAMLError, Mark
|
||||
|
||||
import codecs, re
|
||||
|
||||
class ReaderError(YAMLError):
    """Raised when the input cannot be decoded or contains a forbidden character.

    `character` is either the offending raw byte (for decode failures)
    or the code point of an unprintable character; `__str__` picks the
    message format accordingly.
    """

    def __init__(self, name, position, character, encoding, reason):
        self.name = name            # stream name, for the error message
        self.character = character  # bytes (decode error) or int (bad code point)
        self.position = position    # character/byte offset of the failure
        self.encoding = encoding
        self.reason = reason

    def __str__(self):
        if isinstance(self.character, bytes):
            # Decode failure: report the undecodable byte.
            return "'%s' codec can't decode byte #x%02x: %s\n" \
                    " in \"%s\", position %d" \
                    % (self.encoding, ord(self.character), self.reason,
                            self.name, self.position)
        else:
            # Decoded fine, but the character is not allowed in YAML.
            return "unacceptable character #x%04x: %s\n" \
                    " in \"%s\", position %d" \
                    % (self.character, self.reason,
                            self.name, self.position)
|
||||
|
||||
class Reader(object):
    """Unicode-aware wrapper over the input stream.

    The reader:
      - determines the data encoding and converts bytes to a string,
      - checks that characters are in the allowed printable range,
      - appends '\\0' to the end as a sentinel.

    Accepts:
      - a `bytes` object,
      - a `str` object,
      - a file-like object whose `read` method returns `str`,
      - a file-like object whose `read` method returns `bytes`.

    Yeah, it's ugly and slow.
    """

    def __init__(self, stream):
        self.name = None            # stream name used in error messages
        self.stream = None          # underlying file-like object, if any
        self.stream_pointer = 0     # bytes consumed from the stream so far
        self.eof = True
        self.buffer = ''            # decoded characters not yet consumed
        self.pointer = 0            # position of the current char in buffer
        self.raw_buffer = None      # undecoded bytes read from the stream
        self.raw_decode = None      # codec decode function, set by determine_encoding
        self.encoding = None
        self.index = 0              # absolute character index
        self.line = 0
        self.column = 0
        if isinstance(stream, str):
            # Already decoded: validate and terminate with the sentinel.
            self.name = "<unicode string>"
            self.check_printable(stream)
            self.buffer = stream+'\0'
        elif isinstance(stream, bytes):
            self.name = "<byte string>"
            self.raw_buffer = stream
            self.determine_encoding()
        else:
            # File-like object: read and decode lazily in chunks.
            self.stream = stream
            self.name = getattr(stream, 'name', "<file>")
            self.eof = False
            self.raw_buffer = None
            self.determine_encoding()

    def peek(self, index=0):
        """Return the character `index` positions ahead without consuming it."""
        try:
            return self.buffer[self.pointer+index]
        except IndexError:
            # Buffer exhausted: decode more input, then retry.
            self.update(index+1)
            return self.buffer[self.pointer+index]

    def prefix(self, length=1):
        """Return up to the next `length` characters without consuming them."""
        if self.pointer+length >= len(self.buffer):
            self.update(length)
        return self.buffer[self.pointer:self.pointer+length]

    def forward(self, length=1):
        """Consume `length` characters, maintaining index/line/column."""
        if self.pointer+length+1 >= len(self.buffer):
            self.update(length+1)
        while length:
            ch = self.buffer[self.pointer]
            self.pointer += 1
            self.index += 1
            # Any line break starts a new line, except a '\r' that is
            # directly followed by '\n' (counted once at the '\n').
            if ch in '\n\x85\u2028\u2029' \
                    or (ch == '\r' and self.buffer[self.pointer] != '\n'):
                self.line += 1
                self.column = 0
            elif ch != '\uFEFF':
                # A BOM character does not advance the column.
                self.column += 1
            length -= 1

    def get_mark(self):
        """Return a Mark for the current position.

        The buffer snippet is only attached for in-memory input; for a
        stream the snippet would be incomplete, so None is passed.
        """
        if self.stream is None:
            return Mark(self.name, self.index, self.line, self.column,
                    self.buffer, self.pointer)
        else:
            return Mark(self.name, self.index, self.line, self.column,
                    None, None)

    def determine_encoding(self):
        """Choose utf-16-le / utf-16-be / utf-8 by inspecting a leading BOM."""
        # Need at least two bytes to recognize a UTF-16 BOM.
        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
            self.update_raw()
        if isinstance(self.raw_buffer, bytes):
            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
                self.raw_decode = codecs.utf_16_le_decode
                self.encoding = 'utf-16-le'
            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
                self.raw_decode = codecs.utf_16_be_decode
                self.encoding = 'utf-16-be'
            else:
                # No BOM: default to UTF-8.
                self.raw_decode = codecs.utf_8_decode
                self.encoding = 'utf-8'
        self.update(1)

    # Characters outside this set are rejected by check_printable.
    NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]')

    def check_printable(self, data):
        """Raise ReaderError if `data` contains a non-printable character."""
        match = self.NON_PRINTABLE.search(data)
        if match:
            character = match.group()
            # Absolute position: chars consumed so far plus the offset of
            # the bad character within the not-yet-consumed data.
            position = self.index+(len(self.buffer)-self.pointer)+match.start()
            raise ReaderError(self.name, position, ord(character),
                    'unicode', "special characters are not allowed")

    def update(self, length):
        """Decode input until at least `length` characters are buffered."""
        if self.raw_buffer is None:
            # All input has been decoded already.
            return
        # Drop the consumed prefix so the buffer does not grow unboundedly.
        self.buffer = self.buffer[self.pointer:]
        self.pointer = 0
        while len(self.buffer) < length:
            if not self.eof:
                self.update_raw()
            if self.raw_decode is not None:
                try:
                    # final=self.eof: only flush incomplete sequences at EOF.
                    data, converted = self.raw_decode(self.raw_buffer,
                            'strict', self.eof)
                except UnicodeDecodeError as exc:
                    character = self.raw_buffer[exc.start]
                    if self.stream is not None:
                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
                    else:
                        position = exc.start
                    raise ReaderError(self.name, position, character,
                            exc.encoding, exc.reason)
            else:
                # Input was already a str: no decoding needed.
                data = self.raw_buffer
                converted = len(data)
            self.check_printable(data)
            self.buffer += data
            self.raw_buffer = self.raw_buffer[converted:]
            if self.eof:
                # Terminate with the '\0' sentinel the scanner relies on.
                self.buffer += '\0'
                self.raw_buffer = None
                break

    def update_raw(self, size=4096):
        """Read one chunk from the stream into `raw_buffer`; set eof on empty read."""
        data = self.stream.read(size)
        if self.raw_buffer is None:
            self.raw_buffer = data
        else:
            self.raw_buffer += data
        self.stream_pointer += len(data)
        if not data:
            self.eof = True
|
||||
|
|
@ -1,389 +0,0 @@
|
|||
|
||||
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
|
||||
'RepresenterError']
|
||||
|
||||
from .error import *
|
||||
from .nodes import *
|
||||
|
||||
import datetime, copyreg, types, base64, collections
|
||||
|
||||
class RepresenterError(YAMLError):
    """Raised when a Python object cannot be converted to a YAML node."""
    pass
|
||||
|
||||
class BaseRepresenter:
    """Converts native Python objects into YAML node trees.

    Dispatch is by type: `yaml_representers` matches the exact type,
    `yaml_multi_representers` matches by MRO (isinstance-like).  Both
    registries are class-level and copied on first mutation so that
    subclass registrations do not leak into the parent class.
    """

    yaml_representers = {}        # exact-type -> representer function
    yaml_multi_representers = {}  # base-type -> representer function

    def __init__(self, default_style=None, default_flow_style=False, sort_keys=True):
        self.default_style = default_style
        self.sort_keys = sort_keys
        self.default_flow_style = default_flow_style
        # id(obj) -> node, used to emit aliases for shared objects.
        self.represented_objects = {}
        # Keeps represented objects alive so their ids are not reused.
        self.object_keeper = []
        self.alias_key = None

    def represent(self, data):
        """Represent `data` as a node tree, serialize it, and reset state."""
        node = self.represent_data(data)
        self.serialize(node)
        self.represented_objects = {}
        self.object_keeper = []
        self.alias_key = None

    def represent_data(self, data):
        """Convert one object (and, recursively, its contents) to a node."""
        if self.ignore_aliases(data):
            self.alias_key = None
        else:
            self.alias_key = id(data)
        if self.alias_key is not None:
            if self.alias_key in self.represented_objects:
                # Object seen before: reuse the node so an alias is emitted.
                node = self.represented_objects[self.alias_key]
                #if node is None:
                #    raise RepresenterError("recursive objects are not allowed: %r" % data)
                return node
            #self.represented_objects[alias_key] = None
            self.object_keeper.append(data)
        data_types = type(data).__mro__
        if data_types[0] in self.yaml_representers:
            # Exact type match takes precedence.
            node = self.yaml_representers[data_types[0]](self, data)
        else:
            # Walk the MRO looking for a multi-representer; fall back to
            # the None-keyed catch-all, then to a bare str() scalar.
            for data_type in data_types:
                if data_type in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[data_type](self, data)
                    break
            else:
                if None in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[None](self, data)
                elif None in self.yaml_representers:
                    node = self.yaml_representers[None](self, data)
                else:
                    node = ScalarNode(None, str(data))
        #if alias_key is not None:
        #    self.represented_objects[alias_key] = node
        return node

    @classmethod
    def add_representer(cls, data_type, representer):
        """Register `representer` for the exact type `data_type`."""
        if not 'yaml_representers' in cls.__dict__:
            # Copy-on-write: give this class its own registry first.
            cls.yaml_representers = cls.yaml_representers.copy()
        cls.yaml_representers[data_type] = representer

    @classmethod
    def add_multi_representer(cls, data_type, representer):
        """Register `representer` for `data_type` and its subclasses."""
        if not 'yaml_multi_representers' in cls.__dict__:
            # Copy-on-write: give this class its own registry first.
            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
        cls.yaml_multi_representers[data_type] = representer

    def represent_scalar(self, tag, value, style=None):
        """Build a ScalarNode, recording it for alias detection."""
        if style is None:
            style = self.default_style
        node = ScalarNode(tag, value, style=style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        return node

    def represent_sequence(self, tag, sequence, flow_style=None):
        """Build a SequenceNode by representing each item of `sequence`."""
        value = []
        node = SequenceNode(tag, value, flow_style=flow_style)
        # Register the node BEFORE recursing so cycles resolve to aliases.
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        for item in sequence:
            node_item = self.represent_data(item)
            # Flow style is only "best" if every item is a plain scalar.
            if not (isinstance(node_item, ScalarNode) and not node_item.style):
                best_style = False
            value.append(node_item)
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def represent_mapping(self, tag, mapping, flow_style=None):
        """Build a MappingNode by representing each key/value pair."""
        value = []
        node = MappingNode(tag, value, flow_style=flow_style)
        # Register the node BEFORE recursing so cycles resolve to aliases.
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        if hasattr(mapping, 'items'):
            mapping = list(mapping.items())
            if self.sort_keys:
                try:
                    mapping = sorted(mapping)
                except TypeError:
                    # Keys are not mutually comparable: keep original order.
                    pass
        for item_key, item_value in mapping:
            node_key = self.represent_data(item_key)
            node_value = self.represent_data(item_value)
            if not (isinstance(node_key, ScalarNode) and not node_key.style):
                best_style = False
            if not (isinstance(node_value, ScalarNode) and not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def ignore_aliases(self, data):
        """Return a truthy value for objects that should never be aliased."""
        return False
|
||||
|
||||
class SafeRepresenter(BaseRepresenter):
    """Representer limited to standard YAML tags for built-in types."""

    def ignore_aliases(self, data):
        """Skip anchors/aliases for immutable primitives and empty tuples.

        Note: falls through (returning None, which is falsy) for every
        other type, so only the listed cases suppress aliasing.
        """
        if data is None:
            return True
        if isinstance(data, tuple) and data == ():
            return True
        if isinstance(data, (str, bytes, bool, int, float)):
            return True

    def represent_none(self, data):
        return self.represent_scalar('tag:yaml.org,2002:null', 'null')

    def represent_str(self, data):
        return self.represent_scalar('tag:yaml.org,2002:str', data)

    def represent_binary(self, data):
        """Emit bytes as base64 with the !!binary tag, in literal style."""
        if hasattr(base64, 'encodebytes'):
            data = base64.encodebytes(data).decode('ascii')
        else:
            # Fallback for Python versions without encodebytes.
            data = base64.encodestring(data).decode('ascii')
        return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')

    def represent_bool(self, data):
        if data:
            value = 'true'
        else:
            value = 'false'
        return self.represent_scalar('tag:yaml.org,2002:bool', value)

    def represent_int(self, data):
        return self.represent_scalar('tag:yaml.org,2002:int', str(data))

    # Compute the platform's float infinity by repeated squaring; used to
    # detect +/-inf below without importing math.
    inf_value = 1e300
    while repr(inf_value) != repr(inf_value*inf_value):
        inf_value *= inf_value

    def represent_float(self, data):
        # `data != data` is the standard NaN test; the second clause is an
        # extra guard (it is never true for a well-behaved float).
        if data != data or (data == 0.0 and data == 1.0):
            value = '.nan'
        elif data == self.inf_value:
            value = '.inf'
        elif data == -self.inf_value:
            value = '-.inf'
        else:
            value = repr(data).lower()
            # Note that in some cases `repr(data)` represents a float number
            # without the decimal parts.  For instance:
            #   >>> repr(1e17)
            #   '1e17'
            # Unfortunately, this is not a valid float representation according
            # to the definition of the `!!float` tag.  We fix this by adding
            # '.0' before the 'e' symbol.
            if '.' not in value and 'e' in value:
                value = value.replace('e', '.0e', 1)
        return self.represent_scalar('tag:yaml.org,2002:float', value)

    def represent_list(self, data):
        #pairs = (len(data) > 0 and isinstance(data, list))
        #if pairs:
        #    for item in data:
        #        if not isinstance(item, tuple) or len(item) != 2:
        #            pairs = False
        #            break
        #if not pairs:
        return self.represent_sequence('tag:yaml.org,2002:seq', data)
        #value = []
        #for item_key, item_value in data:
        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
        #        [(item_key, item_value)]))
        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)

    def represent_dict(self, data):
        return self.represent_mapping('tag:yaml.org,2002:map', data)

    def represent_set(self, data):
        # A YAML set is a mapping whose values are all null.
        value = {}
        for key in data:
            value[key] = None
        return self.represent_mapping('tag:yaml.org,2002:set', value)

    def represent_date(self, data):
        value = data.isoformat()
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_datetime(self, data):
        value = data.isoformat(' ')
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_yaml_object(self, tag, data, cls, flow_style=None):
        """Represent a YAMLObject instance as a mapping of its state."""
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__.copy()
        return self.represent_mapping(tag, state, flow_style=flow_style)

    def represent_undefined(self, data):
        # Catch-all: the safe representer refuses unknown object types.
        raise RepresenterError("cannot represent an object", data)
|
||||
|
||||
# Register the safe representers for the built-in Python types.
SafeRepresenter.add_representer(type(None),
        SafeRepresenter.represent_none)

SafeRepresenter.add_representer(str,
        SafeRepresenter.represent_str)

SafeRepresenter.add_representer(bytes,
        SafeRepresenter.represent_binary)

SafeRepresenter.add_representer(bool,
        SafeRepresenter.represent_bool)

SafeRepresenter.add_representer(int,
        SafeRepresenter.represent_int)

SafeRepresenter.add_representer(float,
        SafeRepresenter.represent_float)

SafeRepresenter.add_representer(list,
        SafeRepresenter.represent_list)

# Tuples are represented just like lists (plain !!seq) in safe mode.
SafeRepresenter.add_representer(tuple,
        SafeRepresenter.represent_list)

SafeRepresenter.add_representer(dict,
        SafeRepresenter.represent_dict)

SafeRepresenter.add_representer(set,
        SafeRepresenter.represent_set)

SafeRepresenter.add_representer(datetime.date,
        SafeRepresenter.represent_date)

SafeRepresenter.add_representer(datetime.datetime,
        SafeRepresenter.represent_datetime)

# Fallback (None key): reject anything not explicitly registered.
SafeRepresenter.add_representer(None,
        SafeRepresenter.represent_undefined)
|
||||
|
||||
class Representer(SafeRepresenter):
    """Full (unsafe) representer: adds python-specific tags for arbitrary
    objects via the pickle reduce protocol."""

    def represent_complex(self, data):
        # Choose the shortest textual form for the complex number.
        if data.imag == 0.0:
            data = '%r' % data.real
        elif data.real == 0.0:
            data = '%rj' % data.imag
        elif data.imag > 0:
            data = '%r+%rj' % (data.real, data.imag)
        else:
            data = '%r%rj' % (data.real, data.imag)
        return self.represent_scalar('tag:yaml.org,2002:python/complex', data)

    def represent_tuple(self, data):
        return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)

    def represent_name(self, data):
        """Represent a class or function by its dotted import path."""
        name = '%s.%s' % (data.__module__, data.__name__)
        return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')

    def represent_module(self, data):
        return self.represent_scalar(
                'tag:yaml.org,2002:python/module:'+data.__name__, '')

    def represent_object(self, data):
        # We use __reduce__ API to save the data. data.__reduce__ returns
        # a tuple of length 2-5:
        #   (function, args, state, listitems, dictitems)

        # For reconstructing, we calls function(*args), then set its state,
        # listitems, and dictitems if they are not None.

        # A special case is when function.__name__ == '__newobj__'. In this
        # case we create the object with args[0].__new__(*args).

        # Another special case is when __reduce__ returns a string - we don't
        # support it.

        # We produce a !!python/object, !!python/object/new or
        # !!python/object/apply node.

        cls = type(data)
        if cls in copyreg.dispatch_table:
            reduce = copyreg.dispatch_table[cls](data)
        elif hasattr(data, '__reduce_ex__'):
            reduce = data.__reduce_ex__(2)
        elif hasattr(data, '__reduce__'):
            reduce = data.__reduce__()
        else:
            raise RepresenterError("cannot represent an object", data)
        # Pad the reduce tuple to exactly five elements.
        reduce = (list(reduce)+[None]*5)[:5]
        function, args, state, listitems, dictitems = reduce
        args = list(args)
        if state is None:
            state = {}
        if listitems is not None:
            listitems = list(listitems)
        if dictitems is not None:
            dictitems = dict(dictitems)
        if function.__name__ == '__newobj__':
            function = args[0]
            args = args[1:]
            tag = 'tag:yaml.org,2002:python/object/new:'
            newobj = True
        else:
            tag = 'tag:yaml.org,2002:python/object/apply:'
            newobj = False
        function_name = '%s.%s' % (function.__module__, function.__name__)
        if not args and not listitems and not dictitems \
                and isinstance(state, dict) and newobj:
            # Simple case: a plain object with only a state dict.
            return self.represent_mapping(
                    'tag:yaml.org,2002:python/object:'+function_name, state)
        if not listitems and not dictitems \
                and isinstance(state, dict) and not state:
            # Only constructor args: represent them as a sequence.
            return self.represent_sequence(tag+function_name, args)
        # General case: explicit mapping of the reduce components.
        value = {}
        if args:
            value['args'] = args
        if state or not isinstance(state, dict):
            value['state'] = state
        if listitems:
            value['listitems'] = listitems
        if dictitems:
            value['dictitems'] = dictitems
        return self.represent_mapping(tag+function_name, value)

    def represent_ordered_dict(self, data):
        # Provide uniform representation across different Python versions.
        data_type = type(data)
        tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \
                % (data_type.__module__, data_type.__name__)
        items = [[key, value] for key, value in data.items()]
        return self.represent_sequence(tag, [items])
|
||||
|
||||
# Register the python-specific representers for the full Representer.
Representer.add_representer(complex,
        Representer.represent_complex)

# Overrides the safe list-style tuple representer with !!python/tuple.
Representer.add_representer(tuple,
        Representer.represent_tuple)

Representer.add_representer(type,
        Representer.represent_name)

Representer.add_representer(collections.OrderedDict,
        Representer.represent_ordered_dict)

Representer.add_representer(types.FunctionType,
        Representer.represent_name)

Representer.add_representer(types.BuiltinFunctionType,
        Representer.represent_name)

Representer.add_representer(types.ModuleType,
        Representer.represent_module)

# Catch-all for any other object: use the __reduce__ protocol.
Representer.add_multi_representer(object,
        Representer.represent_object)
|
||||
|
||||
|
|
@ -1,227 +0,0 @@
|
|||
|
||||
__all__ = ['BaseResolver', 'Resolver']
|
||||
|
||||
from .error import *
|
||||
from .nodes import *
|
||||
|
||||
import re
|
||||
|
||||
class ResolverError(YAMLError):
    """Raised for invalid resolver registrations (bad path or node kind)."""
    pass
|
||||
|
||||
class BaseResolver:
    """Resolves the tag of a node using implicit (regexp-based) resolvers
    and experimental path resolvers."""

    # Default tags applied when nothing more specific matches.
    DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
    DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
    DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'

    # Class-level registries; copied on first per-class mutation by the
    # add_* classmethods so subclasses do not mutate their parent's maps.
    yaml_implicit_resolvers = {}
    yaml_path_resolvers = {}

    def __init__(self):
        # Per-document stacks maintained by descend_resolver/ascend_resolver.
        self.resolver_exact_paths = []
        self.resolver_prefix_paths = []
|
||||
|
||||
@classmethod
def add_implicit_resolver(cls, tag, regexp, first):
    """Register `regexp` -> `tag` for plain scalars whose first character
    is in `first`.

    `first` may be None, meaning the resolver is tried for any first
    character (stored under the None key).
    """
    if not 'yaml_implicit_resolvers' in cls.__dict__:
        # Copy-on-write (including the per-character lists) so that
        # registering on a subclass does not mutate the parent class.
        implicit_resolvers = {}
        for key in cls.yaml_implicit_resolvers:
            implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
        cls.yaml_implicit_resolvers = implicit_resolvers
    if first is None:
        first = [None]
    for ch in first:
        cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
|
||||
|
||||
@classmethod
def add_path_resolver(cls, tag, path, kind=None):
    """Register `tag` for nodes of `kind` reached through `path` from the root.

    Raises ResolverError for malformed path elements or kinds.
    """
    # Note: `add_path_resolver` is experimental.  The API could be changed.
    # `new_path` is a pattern that is matched against the path from the
    # root to the node that is being considered.  `node_path` elements are
    # tuples `(node_check, index_check)`.  `node_check` is a node class:
    # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`.  `None`
    # matches any kind of a node.  `index_check` could be `None`, a boolean
    # value, a string value, or a number.  `None` and `False` match against
    # any _value_ of sequence and mapping nodes.  `True` matches against
    # any _key_ of a mapping node.  A string `index_check` matches against
    # a mapping value that corresponds to a scalar key which content is
    # equal to the `index_check` value.  An integer `index_check` matches
    # against a sequence value with the index equal to `index_check`.
    if not 'yaml_path_resolvers' in cls.__dict__:
        # Copy-on-write: give this class its own registry first.
        cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
    new_path = []
    for element in path:
        # Normalize each element to a (node_check, index_check) pair.
        if isinstance(element, (list, tuple)):
            if len(element) == 2:
                node_check, index_check = element
            elif len(element) == 1:
                node_check = element[0]
                index_check = True
            else:
                raise ResolverError("Invalid path element: %s" % element)
        else:
            node_check = None
            index_check = element
        # Allow the Python types str/list/dict as shorthand node kinds.
        if node_check is str:
            node_check = ScalarNode
        elif node_check is list:
            node_check = SequenceNode
        elif node_check is dict:
            node_check = MappingNode
        elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
                and not isinstance(node_check, str) \
                and node_check is not None:
            raise ResolverError("Invalid node checker: %s" % node_check)
        if not isinstance(index_check, (str, int)) \
                and index_check is not None:
            raise ResolverError("Invalid index checker: %s" % index_check)
        new_path.append((node_check, index_check))
    # The same shorthand applies to the target node kind.
    if kind is str:
        kind = ScalarNode
    elif kind is list:
        kind = SequenceNode
    elif kind is dict:
        kind = MappingNode
    elif kind not in [ScalarNode, SequenceNode, MappingNode] \
            and kind is not None:
        raise ResolverError("Invalid node kind: %s" % kind)
    cls.yaml_path_resolvers[tuple(new_path), kind] = tag
|
||||
|
||||
def descend_resolver(self, current_node, current_index):
    """Push path-resolver matching state for one step down the node tree.

    Called as the composer/serializer enters the child at `current_index`
    of `current_node` (both None at the document root).  Partitions the
    candidate paths into those fully matched at this depth (recorded in
    `resolver_exact_paths` keyed by node kind) and those that may still
    match deeper (kept in `resolver_prefix_paths` for the next descent).
    A no-op when no path resolvers are registered.
    """
    if not self.yaml_path_resolvers:
        return
    matched_exact = {}
    still_open = []
    if current_node:
        depth = len(self.resolver_prefix_paths)
        for path, node_kind in self.resolver_prefix_paths[-1]:
            matched = self.check_resolver_prefix(
                depth, path, node_kind, current_node, current_index)
            if not matched:
                continue
            if len(path) > depth:
                # Path extends beyond this depth: keep it as a live prefix.
                still_open.append((path, node_kind))
            else:
                # Path fully consumed: its tag applies at this position.
                matched_exact[node_kind] = \
                    self.yaml_path_resolvers[path, node_kind]
    else:
        # At the root: empty paths match immediately, the rest stay open.
        for path, node_kind in self.yaml_path_resolvers:
            if path:
                still_open.append((path, node_kind))
            else:
                matched_exact[node_kind] = \
                    self.yaml_path_resolvers[path, node_kind]
    self.resolver_exact_paths.append(matched_exact)
    self.resolver_prefix_paths.append(still_open)
|
||||
|
||||
def ascend_resolver(self):
    """Pop the resolver state pushed by the matching descend_resolver().

    Must be called exactly once per descend_resolver() as the tree walk
    leaves a node.  A no-op when no path resolvers are registered.
    """
    if self.yaml_path_resolvers:
        self.resolver_exact_paths.pop()
        self.resolver_prefix_paths.pop()
|
||||
|
||||
def check_resolver_prefix(self, depth, path, kind,
        current_node, current_index):
    """Return True if path element `depth-1` matches (node, index), else None.

    `path[depth-1]` is a `(node_check, index_check)` pair as normalized by
    add_path_resolver: node_check is a Node class, a tag string, or None
    (match any node); index_check is True (match any mapping key), False or
    None (match any value position), a string (match the value under that
    scalar key), or an int (match that sequence index).
    """
    node_check, index_check = path[depth-1]
    # --- node check ---
    if isinstance(node_check, str):
        # A string node_check is a tag to compare against.
        if current_node.tag != node_check:
            return
    elif node_check is not None:
        # Otherwise it is a Node class (ScalarNode/SequenceNode/MappingNode).
        if not isinstance(current_node, node_check):
            return
    # --- index check ---
    # True matches only key positions (current_index is None there).
    if index_check is True and current_index is not None:
        return
    # False/None match only value positions (current_index is not None).
    if (index_check is False or index_check is None) \
            and current_index is None:
        return
    if isinstance(index_check, str):
        # Match the value whose key is a scalar with this exact content.
        if not (isinstance(current_index, ScalarNode)
                and index_check == current_index.value):
            return
    elif isinstance(index_check, int) and not isinstance(index_check, bool):
        # Match a specific sequence index; bool is excluded because True is
        # an int in Python and was already handled above.
        if index_check != current_index:
            return
    return True
|
||||
|
||||
def resolve(self, kind, value, implicit):
    """Resolve the tag for a node of class `kind` with content `value`.

    `implicit` is a pair of flags: implicit[0] selects plain-scalar
    implicit resolution (regexp matching), implicit[1] the non-plain
    fallback.  Resolution order: implicit resolvers (plain scalars only),
    then any matching path resolver, then the kind's default tag.
    Returns a tag string, or None for an unknown kind with no match.
    """
    if kind is ScalarNode and implicit[0]:
        # Candidate resolvers are indexed by the scalar's first character;
        # '' is a dedicated bucket for the empty scalar.
        if value == '':
            resolvers = self.yaml_implicit_resolvers.get('', [])
        else:
            resolvers = self.yaml_implicit_resolvers.get(value[0], [])
        # BUG FIX: the original did `resolvers += ...get(None, [])`.
        # dict.get returns the list object stored in the registry, and
        # `+=` extends it in place, so the wildcard resolvers were
        # permanently appended to the per-character bucket on every call,
        # growing it without bound.  Concatenate into a new list instead.
        wildcard_resolvers = self.yaml_implicit_resolvers.get(None, [])
        for tag, regexp in resolvers + wildcard_resolvers:
            if regexp.match(value):
                return tag
        # No implicit match: fall through using the non-plain flag.
        implicit = implicit[1]
    if self.yaml_path_resolvers:
        exact_paths = self.resolver_exact_paths[-1]
        if kind in exact_paths:
            return exact_paths[kind]
        if None in exact_paths:
            return exact_paths[None]
    # Fall back to the default tag for the node kind.
    if kind is ScalarNode:
        return self.DEFAULT_SCALAR_TAG
    elif kind is SequenceNode:
        return self.DEFAULT_SEQUENCE_TAG
    elif kind is MappingNode:
        return self.DEFAULT_MAPPING_TAG
|
||||
|
||||
class Resolver(BaseResolver):
    """BaseResolver preloaded with the standard YAML 1.1 implicit scalar
    resolvers (bool, float, int, merge, null, timestamp, value), which are
    registered on this class immediately below.
    """
    pass
|
||||
|
||||
# Standard YAML 1.1 implicit scalar resolvers.  Each registration supplies
# the tag, a verbose (re.X) regexp that the whole scalar must match, and the
# list of characters a matching scalar can start with -- resolve() uses the
# first character to index candidate resolvers.

# Booleans: yes/no, true/false, on/off in the three YAML 1.1 capitalizations.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:bool',
        re.compile(r'''^(?:yes|Yes|YES|no|No|NO
                    |true|True|TRUE|false|False|FALSE
                    |on|On|ON|off|Off|OFF)$''', re.X),
        list('yYnNtTfFoO'))

# Floats: decimal (with optional exponent), sexagesimal (colon-separated),
# and the special infinity/NaN forms.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:float',
        re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
                    |\.[0-9_]+(?:[eE][-+][0-9]+)?
                    |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
                    |[-+]?\.(?:inf|Inf|INF)
                    |\.(?:nan|NaN|NAN))$''', re.X),
        list('-+0123456789.'))

# Integers: binary (0b), octal (leading 0), decimal, hex (0x), and
# sexagesimal; underscores are allowed as digit separators.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:int',
        re.compile(r'''^(?:[-+]?0b[0-1_]+
                    |[-+]?0[0-7_]+
                    |[-+]?(?:0|[1-9][0-9_]*)
                    |[-+]?0x[0-9a-fA-F_]+
                    |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
        list('-+0123456789'))

# The mapping merge key '<<'.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:merge',
        re.compile(r'^(?:<<)$'),
        ['<'])

# Null: '~', the null/Null/NULL keywords, or the empty scalar.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:null',
        re.compile(r'''^(?: ~
                    |null|Null|NULL
                    | )$''', re.X),
        ['~', 'n', 'N', ''])

# Timestamps: ISO dates, optionally with a time, fraction, and timezone.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:timestamp',
        re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
                    |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
                     (?:[Tt]|[ \t]+)[0-9][0-9]?
                     :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
                     (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
        list('0123456789'))

# The default-value key '='.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:value',
        re.compile(r'^(?:=)$'),
        ['='])

# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
Resolver.add_implicit_resolver(
        'tag:yaml.org,2002:yaml',
        re.compile(r'^(?:!|&|\*)$'),
        list('!&*'))
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,111 +0,0 @@
|
|||
|
||||
__all__ = ['Serializer', 'SerializerError']
|
||||
|
||||
from .error import YAMLError
|
||||
from .events import *
|
||||
from .nodes import *
|
||||
|
||||
class SerializerError(YAMLError):
    """Raised when the Serializer lifecycle is violated, e.g. serialize()
    or close() before open(), open() twice, or use after close().
    """
    pass
|
||||
|
||||
class Serializer:
    """Turn a representation tree of Nodes into a stream of emitter events.

    Mixin: relies on `self.emit` (emitter), and `self.descend_resolver` /
    `self.ascend_resolver` / `self.resolve` (resolver) being provided by
    sibling base classes.  Lifecycle: open() once, serialize() one or more
    documents, close() once.
    """

    # printf template for generated anchor names: id001, id002, ...
    ANCHOR_TEMPLATE = 'id%03d'

    def __init__(self, encoding=None,
            explicit_start=None, explicit_end=None, version=None, tags=None):
        # Stream/document options forwarded to the emitted events.
        self.use_encoding = encoding
        self.use_explicit_start = explicit_start
        self.use_explicit_end = explicit_end
        self.use_version = version
        self.use_tags = tags
        # Per-document state: nodes already emitted, node -> anchor name
        # (None if the node needs no anchor), and the anchor counter.
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0
        # Tri-state lifecycle flag: None = never opened, False = open,
        # True = closed.
        self.closed = None

    def open(self):
        """Emit the stream start event; may be called exactly once."""
        if self.closed is None:
            self.emit(StreamStartEvent(encoding=self.use_encoding))
            self.closed = False
        elif self.closed:
            raise SerializerError("serializer is closed")
        else:
            raise SerializerError("serializer is already opened")

    def close(self):
        """Emit the stream end event; idempotent once closed."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif not self.closed:
            self.emit(StreamEndEvent())
            self.closed = True

    #def __del__(self):
    #    self.close()

    def serialize(self, node):
        """Serialize one document rooted at `node` between open and close."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif self.closed:
            raise SerializerError("serializer is closed")
        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
            version=self.use_version, tags=self.use_tags))
        # First pass assigns anchors to shared nodes, second pass emits.
        self.anchor_node(node)
        self.serialize_node(node, None, None)
        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
        # Reset per-document state so anchors do not leak across documents.
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0

    def anchor_node(self, node):
        """First pass: give an anchor to every node reached more than once.

        On the first visit a node is recorded with anchor None and its
        children are walked; on a second visit (an aliased/shared node) it
        receives a generated anchor name.
        """
        if node in self.anchors:
            if self.anchors[node] is None:
                self.anchors[node] = self.generate_anchor(node)
        else:
            self.anchors[node] = None
            if isinstance(node, SequenceNode):
                for item in node.value:
                    self.anchor_node(item)
            elif isinstance(node, MappingNode):
                for key, value in node.value:
                    self.anchor_node(key)
                    self.anchor_node(value)

    def generate_anchor(self, node):
        """Return the next sequential anchor name (ANCHOR_TEMPLATE)."""
        self.last_anchor_id += 1
        return self.ANCHOR_TEMPLATE % self.last_anchor_id

    def serialize_node(self, node, parent, index):
        """Second pass: emit events for `node` (child of `parent` at `index`).

        A node already serialized in this document is emitted as an alias.
        Otherwise the resolver state is descended so implicit-tag decisions
        can take the node's path into account.
        """
        alias = self.anchors[node]
        if node in self.serialized_nodes:
            self.emit(AliasEvent(alias))
        else:
            self.serialized_nodes[node] = True
            self.descend_resolver(parent, index)
            if isinstance(node, ScalarNode):
                # implicit = (tag matches plain resolution,
                #             tag matches non-plain resolution)
                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
                default_tag = self.resolve(ScalarNode, node.value, (False, True))
                implicit = (node.tag == detected_tag), (node.tag == default_tag)
                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
                    style=node.style))
            elif isinstance(node, SequenceNode):
                implicit = (node.tag
                            == self.resolve(SequenceNode, node.value, True))
                self.emit(SequenceStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                index = 0
                for item in node.value:
                    self.serialize_node(item, node, index)
                    index += 1
                self.emit(SequenceEndEvent())
            elif isinstance(node, MappingNode):
                implicit = (node.tag
                            == self.resolve(MappingNode, node.value, True))
                self.emit(MappingStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                for key, value in node.value:
                    # Keys descend with index None; values descend with the
                    # key node as their index, matching path-resolver rules.
                    self.serialize_node(key, node, None)
                    self.serialize_node(value, node, key)
                self.emit(MappingEndEvent())
            self.ascend_resolver()
|
||||
|
|
@ -1,104 +0,0 @@
|
|||
|
||||
class Token(object):
    """Base class for scanner tokens.

    Carries the source positions of the token; __repr__ shows every
    instance attribute except the *_mark positions, sorted by name.
    """

    def __init__(self, start_mark, end_mark):
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        shown = sorted(k for k in self.__dict__ if not k.endswith('_mark'))
        rendered = ', '.join('%s=%r' % (k, getattr(self, k)) for k in shown)
        return '%s(%s)' % (self.__class__.__name__, rendered)
|
||||
|
||||
#class BOMToken(Token):
#    id = '<byte order mark>'

# Concrete token classes.  Each carries a class-level `id` used by the
# parser for error messages; punctuation-like tokens use the literal
# character itself as the id.

class DirectiveToken(Token):
    id = '<directive>'
    def __init__(self, name, value, start_mark, end_mark):
        # name is the directive name (e.g. 'YAML', 'TAG'), value its payload.
        self.name = name
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class DocumentStartToken(Token):
    id = '<document start>'

class DocumentEndToken(Token):
    id = '<document end>'

class StreamStartToken(Token):
    id = '<stream start>'
    def __init__(self, start_mark=None, end_mark=None,
            encoding=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
        # Character encoding detected for the input stream, if any.
        self.encoding = encoding

class StreamEndToken(Token):
    id = '<stream end>'

class BlockSequenceStartToken(Token):
    id = '<block sequence start>'

class BlockMappingStartToken(Token):
    id = '<block mapping start>'

class BlockEndToken(Token):
    id = '<block end>'

class FlowSequenceStartToken(Token):
    id = '['

class FlowMappingStartToken(Token):
    id = '{'

class FlowSequenceEndToken(Token):
    id = ']'

class FlowMappingEndToken(Token):
    id = '}'

class KeyToken(Token):
    id = '?'

class ValueToken(Token):
    id = ':'

class BlockEntryToken(Token):
    id = '-'

class FlowEntryToken(Token):
    id = ','

class AliasToken(Token):
    id = '<alias>'
    def __init__(self, value, start_mark, end_mark):
        # value is the alias name (without the leading '*').
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class AnchorToken(Token):
    id = '<anchor>'
    def __init__(self, value, start_mark, end_mark):
        # value is the anchor name (without the leading '&').
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class TagToken(Token):
    id = '<tag>'
    def __init__(self, value, start_mark, end_mark):
        # value is the (handle, suffix) form of the tag.
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

class ScalarToken(Token):
    id = '<scalar>'
    def __init__(self, value, plain, start_mark, end_mark, style=None):
        # plain: whether the scalar was unquoted; style: quoting/block style.
        self.value = value
        self.plain = plain
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.style = style
|
||||
|
|
@ -170,6 +170,17 @@ def get_ingress_address(endpoint_name, ignore_addresses=None):
|
|||
# doesn't support spaces, so just return the private address
|
||||
return hookenv.unit_get("private-address")
|
||||
|
||||
excluded_ips = []
|
||||
excluded_interfaces = ["vxlan", "kube", "wg", "docker", "cali", "virbr", "cni", "flannel"]
|
||||
for addr in network_info["bind-addresses"]:
|
||||
for prefix in excluded_interfaces:
|
||||
if addr["interface-name"].startswith(prefix):
|
||||
for ip in addr["addresses"]:
|
||||
excluded_ips.append(ip["value"])
|
||||
|
||||
ingress_addresses = network_info["ingress-addresses"]
|
||||
network_info["ingress-addresses"] = [ip for ip in ingress_addresses if ip not in excluded_ips]
|
||||
|
||||
addresses = network_info["ingress-addresses"]
|
||||
|
||||
if ignore_addresses:
|
||||
|
|
|
|||
|
|
@ -170,6 +170,17 @@ def get_ingress_address(endpoint_name, ignore_addresses=None):
|
|||
# doesn't support spaces, so just return the private address
|
||||
return hookenv.unit_get("private-address")
|
||||
|
||||
excluded_ips = []
|
||||
excluded_interfaces = ["vxlan", "kube", "wg", "docker", "cali", "virbr", "cni", "flannel"]
|
||||
for addr in network_info["bind-addresses"]:
|
||||
for prefix in excluded_interfaces:
|
||||
if addr["interface-name"].startswith(prefix):
|
||||
for ip in addr["addresses"]:
|
||||
excluded_ips.append(ip["value"])
|
||||
|
||||
ingress_addresses = network_info["ingress-addresses"]
|
||||
network_info["ingress-addresses"] = [ip for ip in ingress_addresses if ip not in excluded_ips]
|
||||
|
||||
addresses = network_info["ingress-addresses"]
|
||||
|
||||
if ignore_addresses:
|
||||
|
|
|
|||
|
|
@ -170,6 +170,17 @@ def get_ingress_address(endpoint_name, ignore_addresses=None):
|
|||
# doesn't support spaces, so just return the private address
|
||||
return hookenv.unit_get("private-address")
|
||||
|
||||
excluded_ips = []
|
||||
excluded_interfaces = ["vxlan", "kube", "wg", "docker", "cali", "virbr", "cni", "flannel"]
|
||||
for addr in network_info["bind-addresses"]:
|
||||
for prefix in excluded_interfaces:
|
||||
if addr["interface-name"].startswith(prefix):
|
||||
for ip in addr["addresses"]:
|
||||
excluded_ips.append(ip["value"])
|
||||
|
||||
ingress_addresses = network_info["ingress-addresses"]
|
||||
network_info["ingress-addresses"] = [ip for ip in ingress_addresses if ip not in excluded_ips]
|
||||
|
||||
addresses = network_info["ingress-addresses"]
|
||||
|
||||
if ignore_addresses:
|
||||
|
|
|
|||
|
|
@ -1,75 +0,0 @@
|
|||
PYTHON := /usr/bin/python3

# Absolute path of the directory containing this Makefile.
PROJECTPATH=$(dir $(realpath $(MAKEFILE_LIST)))
# Build output defaults to .build under the project unless overridden.
ifndef CHARM_BUILD_DIR
	CHARM_BUILD_DIR=${PROJECTPATH}.build
endif
METADATA_FILE="metadata.yaml"
# Charm name is read from the 'name:' field of metadata.yaml.
CHARM_NAME=$(shell cat ${PROJECTPATH}/${METADATA_FILE} | grep -E '^name:' | awk '{print $$2}')

help:
	@echo "This project supports the following targets"
	@echo ""
	@echo " make help - show this text"
	@echo " make clean - remove unneeded files"
	@echo " make submodules - make sure that the submodules are up-to-date"
	@echo " make submodules-update - update submodules to latest changes on remote branch"
	@echo " make build - build the charm"
	@echo " make release - run clean and build targets"
	@echo " make lint - run flake8 and black --check"
	@echo " make black - run black and reformat files"
	@echo " make proof - run charm proof"
	@echo " make unittests - run the tests defined in the unittest subdirectory"
	@echo " make functional - run the tests defined in the functional subdirectory"
	@echo " make test - run lint, proof, unittests and functional targets"
	@echo ""

clean:
	@echo "Cleaning files"
	@git clean -ffXd -e '!.idea'
	@echo "Cleaning existing build"
	@rm -rf ${CHARM_BUILD_DIR}/${CHARM_NAME}

submodules:
	@echo "Cloning submodules"
	@git submodule update --init --recursive

submodules-update:
	@echo "Pulling latest updates for submodules"
	@git submodule update --init --recursive --remote --merge

build: submodules-update
	@echo "Building charm to base directory ${CHARM_BUILD_DIR}/${CHARM_NAME}"
	@-git rev-parse --abbrev-ref HEAD > ./repo-info
	@-git describe --always > ./version
	@mkdir -p ${CHARM_BUILD_DIR}/${CHARM_NAME}
	@cp -a ./* ${CHARM_BUILD_DIR}/${CHARM_NAME}

release: clean build
	@echo "Charm is built at ${CHARM_BUILD_DIR}/${CHARM_NAME}"

lint:
	@echo "Running lint checks"
	@tox -e lint

black:
	@echo "Reformat files with black"
	@tox -e black

proof:
	@echo "Running charm proof"
	@-charm proof

unittests: submodules-update
	@echo "Running unit tests"
	@tox -e unit

functional: build
	@echo "Executing functional tests in ${CHARM_BUILD_DIR}"
	@CHARM_BUILD_DIR=${CHARM_BUILD_DIR} tox -e func

test: lint proof unittests functional
	@echo "Charm ${CHARM_NAME} has been tested"

# The targets below don't depend on a file
.PHONY: help submodules submodules-update clean build release lint black proof unittests functional test
|
||||
225
nrpe/README.md
225
nrpe/README.md
|
|
@ -1,225 +0,0 @@
|
|||
Introduction
|
||||
============
|
||||
|
||||
This subordinate charm is used to configure nrpe (Nagios Remote Plugin
|
||||
Executor). It can be related to the nagios charm via the monitors relation and
|
||||
will pass a monitors yaml to nagios informing it of what checks to monitor.
|
||||
|
||||
Principal Relations
|
||||
===================
|
||||
|
||||
This charm can be attached to any principal charm (via the juju-info relation)
|
||||
regardless of whether it has implemented the local-monitors or
|
||||
nrpe-external-master relations. For example:
|
||||
|
||||
juju deploy ubuntu
|
||||
juju deploy nrpe
|
||||
juju deploy nagios
|
||||
juju add-relation ubuntu nrpe
|
||||
juju add-relation nrpe:monitors nagios:monitors
|
||||
|
||||
If joined via the juju-info relation the default checks are configured and
|
||||
additional checks can be added via the monitors config option (see below).
|
||||
|
||||
The local-monitors relations allows the principal to request checks to be setup
|
||||
by passing a monitors yaml and listing them in the 'local' section. It can
|
||||
also list checks that is has configured by listing them in the remote nrpe
|
||||
section and finally it can request external monitors are setup by using one of
|
||||
the other remote types. See "Monitors yaml" below.
|
||||
|
||||
Other Subordinate Charms
|
||||
========================
|
||||
|
||||
If another subordinate charm deployed to the same principal has a
|
||||
local-monitors or nrpe-external-master relation then it can also be related to
|
||||
the local nrpe charm. For example:
|
||||
|
||||
echo -e "glance:\n vip: 10.5.106.1" > glance.yaml
|
||||
juju deploy -n3 --config glance.yaml glance
|
||||
juju deploy hacluster glance-hacluster
|
||||
juju deploy nrpe glance-nrpe
|
||||
juju deploy nagios
|
||||
juju add-relation glance glance-hacluster
|
||||
juju add-relation glance-nrpe:monitors nagios:monitors
|
||||
juju add-relation glance glance-nrpe
|
||||
juju add-relation glance-hacluster glance-nrpe
|
||||
|
||||
The glance-hacluster charm will pass monitoring information to glance-nrpe
|
||||
which will amalgamate all monitor definitions before passing them to nagios.
|
||||
|
||||
Check sources
|
||||
=============
|
||||
|
||||
Check definitions can come from three places:
|
||||
|
||||
Default Checks
|
||||
--------------
|
||||
|
||||
This charm creates a base set of checks in /etc/nagios/nrpe.d, including
|
||||
check\_load, check\_users, check\_disk\_root. All of the options for these are
|
||||
configurable but sensible defaults have been set in config.yaml.
|
||||
For example to increase the alert threshold for number of processes:
|
||||
|
||||
juju config nrpe load="-w 10,10,10 -c 25,25,25"
|
||||
|
||||
Default checks may be disabled by setting them to the empty string.
|
||||
|
||||
Principal Requested Checks
|
||||
--------------------------
|
||||
|
||||
Monitors passed to this charm by the principal charm via the local-monitors
|
||||
or nrpe-external-master relation. The principal charm can write its own
|
||||
check definition into */etc/nagios/nrpe.d* and then inform this charm via the
|
||||
monitors setting. It can also request a direct external check of a service
|
||||
without using nrpe. See "Monitors yaml" below for examples.
|
||||
|
||||
User Requested Checks
|
||||
---------------------
|
||||
|
||||
This works in the same way as the Principal requested except the monitors yaml
|
||||
is set by the user via the monitors config option. For example to add a monitor
|
||||
for the rsyslog process:
|
||||
|
||||
juju config nrpe monitors="
|
||||
monitors:
|
||||
local:
|
||||
procrunning:
|
||||
rsyslogd:
|
||||
min: 1
|
||||
max: 1
|
||||
executable: rsyslogd
|
||||
"
|
||||
|
||||
|
||||
|
||||
External Nagios
|
||||
===============
|
||||
|
||||
If the nagios server is not deployed in the juju environment then the charm can
|
||||
be configured, via the export\_nagios\_definitions, to write out nagios config
|
||||
fragments to /var/lib/nagios/export. Rsync is then configured to allow a host
|
||||
(specified by nagios\_master) to collect the fragments. An rsync stanza is created
|
||||
allowing the Nagios server to pick up configs from /var/lib/nagios/export (as
|
||||
a target called "external-nagios"), which will also be configured to allow
|
||||
connections from the hostname or IP address as specified for the
|
||||
"nagios\_master" variable.
|
||||
|
||||
It is up to you to configure the Nagios master to pull the configs needed, which
|
||||
will then cause it to connect back to the instances in question to run the nrpe
|
||||
checks you have defined.
|
||||
|
||||
Monitors yaml
|
||||
=============
|
||||
|
||||
The list of monitors passed down the monitors relation is an amalgamation of the
|
||||
lists provided via the principal, the user and the default checks.
|
||||
|
||||
The monitors yaml is of the following form:
|
||||
|
||||
|
||||
# Version of the spec, mostly ignored but 0.3 is the current one
|
||||
version: '0.3'
|
||||
# Dict with just 'local' and 'remote' as parts
|
||||
monitors:
|
||||
# local monitors need an agent to be handled. See nrpe charm for
|
||||
# some example implementations
|
||||
local:
|
||||
# procrunning checks for a running process named X (no path)
|
||||
procrunning:
|
||||
# Multiple procrunning can be defined, this is the "name" of it
|
||||
nagios3:
|
||||
min: 1
|
||||
max: 1
|
||||
executable: nagios3
|
||||
# Remote monitors can be polled directly by a remote system
|
||||
remote:
|
||||
# do a request on the HTTP protocol
|
||||
http:
|
||||
nagios:
|
||||
port: 80
|
||||
path: /nagios3/
|
||||
# expected status response (otherwise just look for 200)
|
||||
status: 'HTTP/1.1 401'
|
||||
# Use as the Host: header (the server address will still be used to connect() to)
|
||||
host: www.fewbar.com
|
||||
mysql:
|
||||
# Named basic check
|
||||
basic:
|
||||
username: monitors
|
||||
password: abcdefg123456
|
||||
nrpe:
|
||||
apache2:
|
||||
command: check_apache2
|
||||
|
||||
|
||||
|
||||
Before a monitor is added it is checked to see if it is in the 'local' section.
|
||||
If it is, this charm needs to convert it into an nrpe check. Only a small
|
||||
number of check types is currently supported (see below). These checks can
|
||||
then be called by the nagios charm via the nrpe service. So for each check
|
||||
listed in the local section:
|
||||
|
||||
1. The definition is read and a check definition it written /etc/nagios/nrpe.d
|
||||
2. The check is defined as a remote nrpe check in the yaml passed to nagios
|
||||
|
||||
In the example above a check\_proc\_nagios3\_user.cfg file would be written
|
||||
out which contains:
|
||||
|
||||
# Check process nagios3 is running (user)
|
||||
command[check_proc_nagios3_user]=/usr/lib/nagios/plugins/check_procs -w 1 -c 1 -C nagios3
|
||||
|
||||
And the monitors yaml passed to nagios would include:
|
||||
|
||||
monitors:
|
||||
nrpe:
|
||||
check_proc_nagios3_user:
|
||||
command: check_proc_nagios3_user
|
||||
|
||||
The principal charm, or the user via the monitors config option, can request an
|
||||
external check by adding it to the remote section of the monitors yaml. In the
|
||||
example above direct checks of a webserver and of mysql are being requested.
|
||||
This charm passes those on to nagios unaltered.
|
||||
|
||||
Local check types
|
||||
-----------------
|
||||
|
||||
Supported nrpe checks are:
|
||||
|
||||
procrunning:
|
||||
min: Minimum number of 'executable' processes
|
||||
max: Maximum number of 'executable' processes
|
||||
executable: Name of executable to look for in process list
|
||||
processcount:
|
||||
min: Minimum total number processes
|
||||
max: Maximum total number processes
|
||||
executable: Name of executable to look for in process list
|
||||
disk:
|
||||
path: Directory to monitor space usage of
|
||||
custom:
|
||||
check: the name of the check to execute
|
||||
plugin_path: (optional) Absolute path to the directory containing the
|
||||
custom plugin. Default value is /var/lib/nagios/plugins
|
||||
description: (optional) Description of the check
|
||||
params: (optional) Parameters to pass to the check on invocation
|
||||
|
||||
Remote check types
|
||||
------------------
|
||||
|
||||
Supported remote types:
|
||||
http, mysql, nrpe, tcp, rpc, pgsql
|
||||
(See Nagios charm for up-to-date list and options)
|
||||
|
||||
Spaces
|
||||
======
|
||||
|
||||
By defining 'monitors' binding, you can influence which nrpe's IP will be reported
|
||||
back to Nagios. This can be very handy if nrpe is placed on machines with multiple
|
||||
IPs/networks.
|
||||
|
||||
Actions
|
||||
=======
|
||||
|
||||
The charm defines 2 actions, 'list-nrpe-checks' that gives a list of all the
|
||||
nrpe checks defined for this unit and what commands they use. The other is
|
||||
run-nrpe-check, which allows you to run a specified nrpe check and get the
|
||||
output. This is useful to confirm if an alert is actually resolved.
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
# Juju actions exposed by the nrpe charm.
list-nrpe-checks:
  description: Lists all NRPE checks defined on this unit
run-nrpe-check:
  description: Run a specific NRPE check defined on this unit
  params:
    name:
      type: string
      description: Check name to run
  required: [name]
|
||||
|
|
@ -1,16 +0,0 @@
|
|||
#!/bin/bash

# Juju action: list every NRPE check defined under /etc/nagios/nrpe.d,
# reporting each check name (underscores mapped to dashes, since action
# result keys may not contain underscores) and the command line it runs.

nrpedir=/etc/nagios/nrpe.d

# BUG FIX: the original left $nrpedir and $i unquoted, which breaks on
# paths containing whitespace or glob characters; all expansions are now
# quoted (the derived $check key cannot contain whitespace).
if [ ! -d "$nrpedir" ]; then
    action-fail "No $nrpedir exists"
    exit 1
else
    for i in "$nrpedir"/*.cfg; do
        # Check name: the text inside command[...], with _ -> - for keys.
        check=$(grep command "$i" | awk -F "=" '{ print $1 }' | sed -e 's/command\[//' | sed -e 's/\]//' | sed -e 's/_/-/g');
        # Command: everything after the '=' in the command[...] line.
        command=$(grep command "$i" | awk -F "=" '{ print $2 }');
        action-set checks.$check="$command";
    done
fi

action-set timestamp="$(date)"
|
||||
|
|
@ -1,15 +0,0 @@
|
|||
#!/bin/bash

# Juju action: run one NRPE check by name and report its output.
# The action's 'name' parameter uses dashes; the on-disk config files use
# underscores, so the name is translated before the lookup.

check=$(action-get name | sed -e 's/-/_/g')

nrpedir="/etc/nagios/nrpe.d"
checkfile="$nrpedir/${check}.cfg"

# BUG FIX: the original left $checkfile unquoted in the test and the awk
# invocation, which breaks (word splitting / globbing) on unusual names.
if [ -f "$checkfile" ]; then
    # Extract the command after '=' in the command[...] definition.
    command=$(awk -F "=" '{ print $2 }' "$checkfile")
    # $command stays unquoted deliberately: it is a command plus arguments.
    output=$(sudo -u nagios $command)
    action-set check-output="$output"
else
    action-fail "$checkfile does not exist"
fi
|
||||
|
||||
210
nrpe/config.yaml
210
nrpe/config.yaml
|
|
@ -1,210 +0,0 @@
|
|||
options:
|
||||
nagios_master:
|
||||
default: "None"
|
||||
type: string
|
||||
description: |
|
||||
IP address of the nagios master from which to allow rsync access
|
||||
server_port:
|
||||
default: 5666
|
||||
type: int
|
||||
description: |
|
||||
Port on which nagios-nrpe-server will listen
|
||||
nagios_address_type:
|
||||
default: "private"
|
||||
type: string
|
||||
description: |
|
||||
Determines whether the nagios host check should use the private
|
||||
or public IP address of an instance. Can be "private" or "public".
|
||||
nagios_host_context:
|
||||
default: "juju"
|
||||
type: string
|
||||
description: |
|
||||
A string which will be prepended to instance name to set the host name
|
||||
in nagios. So for instance the hostname would be something like:
|
||||
juju-postgresql-0
|
||||
If you're running multiple environments with the same services in them
|
||||
this allows you to differentiate between them.
|
||||
nagios_hostname_type:
|
||||
default: "auto"
|
||||
type: string
|
||||
description: |
|
||||
Determines whether a server is identified by its unit name or
|
||||
host name. If you're in a virtual environment, "unit" is
|
||||
probably best. If you're using MaaS, you may prefer "host".
|
||||
Use "auto" to have nrpe automatically distinguish between
|
||||
metal and non-metal hosts.
|
||||
dont_blame_nrpe:
|
||||
default: False
|
||||
type: boolean
|
||||
description: |
|
||||
Setting dont_blame_nrpe to True sets dont_blame_nrpe=1 in nrpe.cfg
|
||||
This config option which allows specifying arguments to nrpe scripts.
|
||||
This can be a security risk so it is disabled by default. Nrpe is
|
||||
compiled with --enable-command-args option by default, which this
|
||||
option enables.
|
||||
debug:
|
||||
default: False
|
||||
type: boolean
|
||||
description: |
|
||||
Setting debug to True enables debug=1 in nrpe.cfg
|
||||
disk_root:
|
||||
default: "-u GB -w 25% -c 20% -K 5%"
|
||||
type: string
|
||||
description: |
|
||||
Root disk check. This can be made to also check non-root disk systems
|
||||
as follows:
|
||||
-u GB -w 20% -c 15% -r '/srv/juju/vol-' -C -u GB -w 25% -c 20%
|
||||
The string '-p /' will be appended to this check, so you must finish
|
||||
the string taking that into account. See the nagios check_disk plugin
|
||||
help for further details.
|
||||
.
|
||||
Set to '' in order to disable this check.
|
||||
zombies:
|
||||
default: ""
|
||||
type: string
|
||||
description: |
|
||||
Zombie processes check; defaults to disabled. To enable, set the desired
|
||||
check_procs arguments pertaining to zombies, for example: "-w 3 -c 6 -s Z"
|
||||
procs:
|
||||
default: ""
|
||||
type: string
|
||||
description: |
|
||||
Set thresholds for number of running processes. Defaults to disabled;
|
||||
to enable, specify 'auto' for the charm to generate thresholds based
|
||||
on processor count, or manually provide arguments for check_procs, for
|
||||
example: "-k -w 250 -c 300" to set warning and critical levels
|
||||
manually and exclude kernel threads.
|
||||
load:
|
||||
default: "auto"
|
||||
type: string
|
||||
description: |
|
||||
Load check arguments (e.g. "-w 8,8,8 -c 15,15,15"); if 'auto' is set,
|
||||
thresholds will be set to multipliers of processor count for 1m, 5m
|
||||
and 15m thresholds, with warning as "(4, 2, 1)", and critical set to
|
||||
"(8, 4, 2)". So if you have two processors, you'd get thresholds of
|
||||
"-w 8,4,2 -c 16,8,4".
|
||||
.
|
||||
Set to '' in order to disable this check.
|
||||
conntrack:
|
||||
default: "-w 80 -c 90"
|
||||
type: string
|
||||
description: |
|
||||
Check conntrack (net.netfilter.nf_conntrack_count) against thresholds.
|
||||
.
|
||||
Set to '' in order to disable this check.
|
||||
users:
|
||||
default: ""
|
||||
type: string
|
||||
description: |
|
||||
Set thresholds for number of logged-in users. Defaults to disabled;
|
||||
to enable, manually provide arguments for check_user, for example:
|
||||
"-w 20 -c 25"
|
||||
swap:
|
||||
default: ''
|
||||
type: string
|
||||
description: |
|
||||
Check swap utilisation. See the nagios check_swap plugin help for
|
||||
further details. The format looks like "-w 40% -c 25%"
|
||||
.
|
||||
Set to '' in order to disable this check.
|
||||
swap_activity:
|
||||
default: "-i 5 -w 10240 -c 40960"
|
||||
type: string
|
||||
description: |
|
||||
Swapout activity check. Thresholds are expressed in kB, interval in
|
||||
seconds.
|
||||
.
|
||||
Set to '' in order to disable this check.
|
||||
mem:
|
||||
default: "-C -h -u -w 85 -c 90"
|
||||
type: string
|
||||
description: |
|
||||
Check memory % used.
|
||||
By default, thresholds are applied to the non-hugepages portion of the
|
||||
memory.
|
||||
.
|
||||
Set to '' in order to disable this check.
|
||||
lacp_bonds:
|
||||
default: ''
|
||||
type: string
|
||||
description: |
|
||||
LACP bond interfaces, space-delimited (ie. 'bond0 bond1')
|
||||
netlinks:
|
||||
default: ''
|
||||
type: string
|
||||
description: |
|
||||
Network interfaces to monitor for correct link state, MTU size
|
||||
and speed negotiated. The first argument is either an interface name or
|
||||
a CIDR expression. Parsed keywords are "mtu", "speed", and "op". Other
|
||||
keywords are ignored.
|
||||
.
|
||||
Note that CIDR expressions can match multiple devices.
|
||||
.
|
||||
For example (multi-line starts with pipe):
|
||||
- 10.1.2.0/24 mtu:9000 speed:25000
|
||||
- eth0 mtu:9000 speed:25000
|
||||
- lo mtu:65536 op:unknown
|
||||
- br0-mgmt mtu:9000
|
||||
- br0-sta mtu:9000
|
||||
- br0-stc mtu:9000
|
||||
- br0-api mtu:1500
|
||||
- bond0 mtu:9000 speed:50000
|
||||
- bond0.25 mtu:1500 speed:50000
|
||||
- ens3 mtu:1500 speed:-1 desc:openstack_iface
|
||||
- ...
|
||||
netlinks_skip_unfound_ifaces:
|
||||
default: False
|
||||
type: boolean
|
||||
description: |
|
||||
add --skip-unfound-ifaces to check_netlinks.py.
|
||||
monitors:
|
||||
default: ''
|
||||
type: string
|
||||
description: |
|
||||
Additional monitors defined in the monitors yaml format (see README)
|
||||
hostgroups:
|
||||
default: ""
|
||||
type: string
|
||||
description: Comma separated list of hostgroups to add for these hosts
|
||||
hostcheck_inherit:
|
||||
default: "server"
|
||||
type: string
|
||||
description: Hostcheck to inherit
|
||||
export_nagios_definitions:
|
||||
default: False
|
||||
type: boolean
|
||||
description: |
|
||||
If True nagios check definitions are written to
|
||||
'/var/lib/nagios/export' and rsync is configured to allow nagios_master
|
||||
to collect them. Useful when Nagios is outside of the juju environment
|
||||
sub_postfix:
|
||||
default: ""
|
||||
type: string
|
||||
description: |
|
||||
A string to be appended onto all the nrpe checks created by this charm
|
||||
to avoid potential clashes with existing checks
|
||||
xfs_errors:
|
||||
default: ""
|
||||
type: string
|
||||
description: |
|
||||
dmesg history length to check for xfs errors, in minutes
|
||||
.
|
||||
Defaults to disabled, set the time to enable.
|
||||
ro_filesystem_excludes:
|
||||
default: "/snap/,/sys/fs/cgroup,/run/containerd,/var/lib/docker"
|
||||
type: string
|
||||
description: |
|
||||
Comma separated list of mount points to exclude from checks for readonly filesystem.
|
||||
Can be a substring rather than the entire mount point, e.g. /sys will match all filesystems
|
||||
beginning with the string /sys.
|
||||
The check is disabled on all LXD units, and also for non-container units if this parameter is
|
||||
set to ''.
|
||||
cpu_governor:
|
||||
default: ""
|
||||
type: string
|
||||
description: |
|
||||
CPU governor check. The string value here will be checked against all CPUs in
|
||||
/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor. The supported values are
|
||||
'ondemand', 'performance', 'powersave'. Unset value means the check will be disabled.
|
||||
There is a relation key called requested_cpu_governor='string', but the charm config value
|
||||
will take precedence over the relation data.
|
||||
|
|
@ -1,53 +0,0 @@
|
|||
Format: http://dep.debian.net/deps/dep5/
|
||||
|
||||
Files: *
|
||||
Copyright: Copyright 2012, Canonical Ltd., All Rights Reserved.
|
||||
License: GPL-3
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
.
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
.
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Files: files/plugins/check_exit_status.pl
|
||||
Copyright: Copyright (C) 2011 Chad Columbus <ccolumbu@hotmail.com>
|
||||
License: GPL-2
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
.
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
.
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
Files: files/plugins/check_mem.pl
|
||||
Copyright: Copyright (c) 2011 justin@techadvise.com
|
||||
License: MIT/X11
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this
|
||||
software and associated documentation files (the "Software"), to deal in the Software
|
||||
without restriction, including without limitation the rights to use, copy, modify,
|
||||
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
.
|
||||
The above copyright notice and this permission notice shall be included in all copies
|
||||
or substantial portions of the Software.
|
||||
.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
|
||||
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
|
||||
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
|
||||
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
|
||||
OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
#------------------------------------------------
|
||||
# This file is juju managed
|
||||
#------------------------------------------------
|
||||
|
||||
RSYNC_ENABLE=true
|
||||
RSYNC_NICE=''
|
||||
RSYNC_OPTS=''
|
||||
|
|
@ -1,84 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
"""Nagios plugin for python2.7."""
|
||||
# Copyright (C) 2005, 2006, 2007, 2012 James Troup <james.troup@canonical.com>
|
||||
|
||||
import os
|
||||
import stat
|
||||
import time
|
||||
import traceback
|
||||
import sys
|
||||
|
||||
|
||||
################################################################################
|
||||
|
||||
|
||||
class CriticalError(Exception):
|
||||
"""This indicates a critical error."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class WarnError(Exception):
|
||||
"""This indicates a warning condition."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class UnknownError(Exception):
|
||||
"""This indicates a unknown error was encountered."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
def try_check(function, *args, **kwargs):
|
||||
"""Perform a check with error/warn/unknown handling."""
|
||||
try:
|
||||
function(*args, **kwargs)
|
||||
except UnknownError, msg: # noqa: E999
|
||||
print msg
|
||||
sys.exit(3)
|
||||
except CriticalError, msg: # noqa: E999
|
||||
print msg
|
||||
sys.exit(2)
|
||||
except WarnError, msg: # noqa: E999
|
||||
print msg
|
||||
sys.exit(1)
|
||||
except: # noqa: E722
|
||||
print "%s raised unknown exception '%s'" % (function, sys.exc_info()[0])
|
||||
print "=" * 60
|
||||
traceback.print_exc(file=sys.stdout)
|
||||
print "=" * 60
|
||||
sys.exit(3)
|
||||
|
||||
|
||||
################################################################################
|
||||
|
||||
|
||||
def check_file_freshness(filename, newer_than=600):
|
||||
"""Check a file.
|
||||
|
||||
It check that file exists, is readable and is newer than <n> seconds (where
|
||||
<n> defaults to 600).
|
||||
"""
|
||||
# First check the file exists and is readable
|
||||
if not os.path.exists(filename):
|
||||
raise CriticalError("%s: does not exist." % (filename))
|
||||
if os.access(filename, os.R_OK) == 0:
|
||||
raise CriticalError("%s: is not readable." % (filename))
|
||||
|
||||
# Then ensure the file is up-to-date enough
|
||||
mtime = os.stat(filename)[stat.ST_MTIME]
|
||||
last_modified = time.time() - mtime
|
||||
if last_modified > newer_than:
|
||||
raise CriticalError(
|
||||
"%s: was last modified on %s and is too old (> %s seconds)."
|
||||
% (filename, time.ctime(mtime), newer_than)
|
||||
)
|
||||
if last_modified < 0:
|
||||
raise CriticalError(
|
||||
"%s: was last modified on %s which is in the future."
|
||||
% (filename, time.ctime(mtime))
|
||||
)
|
||||
|
||||
|
||||
################################################################################
|
||||
|
|
@ -1,85 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""Nagios plugin for python3."""
|
||||
|
||||
# Copyright (C) 2005, 2006, 2007, 2012, 2017 James Troup <james.troup@canonical.com>
|
||||
|
||||
import os
|
||||
import stat
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
|
||||
|
||||
###############################################################################
|
||||
|
||||
|
||||
class CriticalError(Exception):
|
||||
"""This indicates a critical error."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class WarnError(Exception):
|
||||
"""This indicates a warning condition."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class UnknownError(Exception):
|
||||
"""This indicates a unknown error was encountered."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
def try_check(function, *args, **kwargs):
|
||||
"""Perform a check with error/warn/unknown handling."""
|
||||
try:
|
||||
function(*args, **kwargs)
|
||||
except UnknownError as msg:
|
||||
print(msg)
|
||||
sys.exit(3)
|
||||
except CriticalError as msg:
|
||||
print(msg)
|
||||
sys.exit(2)
|
||||
except WarnError as msg:
|
||||
print(msg)
|
||||
sys.exit(1)
|
||||
except: # noqa: E722
|
||||
print("{} raised unknown exception '{}'".format(function, sys.exc_info()[0]))
|
||||
print("=" * 60)
|
||||
traceback.print_exc(file=sys.stdout)
|
||||
print("=" * 60)
|
||||
sys.exit(3)
|
||||
|
||||
|
||||
###############################################################################
|
||||
|
||||
|
||||
def check_file_freshness(filename, newer_than=600):
|
||||
"""Check a file.
|
||||
|
||||
It check that file exists, is readable and is newer than <n> seconds (where
|
||||
<n> defaults to 600).
|
||||
"""
|
||||
# First check the file exists and is readable
|
||||
if not os.path.exists(filename):
|
||||
raise CriticalError("%s: does not exist." % (filename))
|
||||
if os.access(filename, os.R_OK) == 0:
|
||||
raise CriticalError("%s: is not readable." % (filename))
|
||||
|
||||
# Then ensure the file is up-to-date enough
|
||||
mtime = os.stat(filename)[stat.ST_MTIME]
|
||||
last_modified = time.time() - mtime
|
||||
if last_modified > newer_than:
|
||||
raise CriticalError(
|
||||
"%s: was last modified on %s and is too old (> %s "
|
||||
"seconds)." % (filename, time.ctime(mtime), newer_than)
|
||||
)
|
||||
if last_modified < 0:
|
||||
raise CriticalError(
|
||||
"%s: was last modified on %s which is in the "
|
||||
"future." % (filename, time.ctime(mtime))
|
||||
)
|
||||
|
||||
|
||||
###############################################################################
|
||||
|
|
@ -1,89 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""Check arp cache usage and alert."""
|
||||
# -*- coding: us-ascii -*-
|
||||
|
||||
# Copyright (C) 2019 Canonical
|
||||
# All rights reserved
|
||||
|
||||
import argparse
|
||||
import os
|
||||
|
||||
from nagios_plugin3 import (
|
||||
CriticalError,
|
||||
UnknownError,
|
||||
WarnError,
|
||||
try_check,
|
||||
)
|
||||
|
||||
|
||||
def check_arp_cache(warn, crit):
|
||||
"""Check the usage of arp cache against gc_thresh.
|
||||
|
||||
Alerts when the number of arp entries exceeds a threshold of gc_thresh3.
|
||||
See https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt for
|
||||
full details.
|
||||
|
||||
:param warn: integer, % level of hard limit at which to raise Warning
|
||||
:param crit: integer, % level of hard limit at which to raise Critical
|
||||
"""
|
||||
arp_table_entries = "/proc/net/arp"
|
||||
gc_thresh_location = "/proc/sys/net/ipv4/neigh/default/gc_thresh3"
|
||||
|
||||
if not os.path.exists(arp_table_entries):
|
||||
raise UnknownError("No arp table found!")
|
||||
if not os.path.exists(gc_thresh_location):
|
||||
raise UnknownError("sysctl entry net.ipv4.neigh.default.gc_thresh3 not found!")
|
||||
|
||||
with open(gc_thresh_location) as fd:
|
||||
gc_thresh3 = int(fd.read())
|
||||
|
||||
with open(arp_table_entries) as fd:
|
||||
arp_cache = fd.read().count("\n") - 1 # remove header
|
||||
extra_info = "arp cache entries: {}".format(arp_cache)
|
||||
|
||||
warn_threshold = gc_thresh3 * warn / 100
|
||||
crit_threshold = gc_thresh3 * crit / 100
|
||||
|
||||
if arp_cache >= crit_threshold:
|
||||
message = "CRITICAL: arp cache is more than {} of limit, {}".format(
|
||||
crit, extra_info
|
||||
)
|
||||
raise CriticalError(message)
|
||||
if arp_cache >= warn_threshold:
|
||||
message = "WARNING: arp cache is more than {} of limit, {}".format(
|
||||
warn, extra_info
|
||||
)
|
||||
raise WarnError(message)
|
||||
|
||||
print("OK: arp cache is healthy: {}".format(extra_info))
|
||||
|
||||
|
||||
def parse_args():
|
||||
"""Parse command-line options."""
|
||||
parser = argparse.ArgumentParser(description="Check bond status")
|
||||
parser.add_argument(
|
||||
"--warn",
|
||||
"-w",
|
||||
type=int,
|
||||
help="% of gc_thresh3 to exceed for warning",
|
||||
default=60,
|
||||
)
|
||||
parser.add_argument(
|
||||
"--crit",
|
||||
"-c",
|
||||
type=int,
|
||||
help="% of gc_thresh3 to exceed for critical",
|
||||
default=80,
|
||||
)
|
||||
args = parser.parse_args()
|
||||
return args
|
||||
|
||||
|
||||
def main():
|
||||
"""Parse args and check the arp cache."""
|
||||
args = parse_args()
|
||||
try_check(check_arp_cache, args.warn, args.crit)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -1,79 +0,0 @@
|
|||
#!/bin/sh
|
||||
# This file is managed by juju. Do not make local changes.
|
||||
|
||||
# Copyright (C) 2013, 2016 Canonical Ltd.
|
||||
# Author: Haw Loeung <haw.loeung@canonical.com>
|
||||
# Paul Gear <paul.gear@canonical.com>
|
||||
|
||||
# Alert when current conntrack entries exceeds certain percentage of max. to
|
||||
# detect when we're about to fill it up and start dropping packets.
|
||||
|
||||
set -eu
|
||||
|
||||
STATE_OK=0
|
||||
STATE_WARNING=1
|
||||
STATE_CRITICAL=2
|
||||
STATE_UNKNOWN=3
|
||||
|
||||
if ! lsmod | grep -q conntrack; then
|
||||
echo "OK: no conntrack modules present"
|
||||
exit $STATE_OK
|
||||
fi
|
||||
|
||||
if ! [ -e /proc/sys/net/netfilter/nf_conntrack_max ]; then
|
||||
echo "OK: conntrack not available"
|
||||
exit $STATE_OK
|
||||
fi
|
||||
|
||||
max=$(sysctl net.netfilter.nf_conntrack_max 2>/dev/null | awk '{ print $3 }')
|
||||
if [ -z "$max" ]; then
|
||||
echo "UNKNOWN: unable to retrieve value of net.netfilter.nf_conntrack_max"
|
||||
exit $STATE_UNKNOWN
|
||||
fi
|
||||
current=$(sysctl net.netfilter.nf_conntrack_count 2>/dev/null | awk '{ print $3 }')
|
||||
if [ -z "$current" ]; then
|
||||
echo "UNKNOWN: unable to retrieve value of net.netfilter.nf_conntrack_count"
|
||||
exit $STATE_UNKNOWN
|
||||
fi
|
||||
|
||||
# default thresholds
|
||||
crit=90
|
||||
warn=80
|
||||
|
||||
# parse command line
|
||||
set +e
|
||||
OPTIONS=$(getopt w:c: "$@")
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Usage: $0 [-w warningpercent] [-c criticalpercent]" >&2
|
||||
echo " Check nf_conntrack_count against nf_conntrack_max" >&2
|
||||
exit $STATE_UNKNOWN
|
||||
fi
|
||||
set -e
|
||||
|
||||
set -- $OPTIONS
|
||||
while true; do
|
||||
case "$1" in
|
||||
-w) warn=$2; shift 2 ;;
|
||||
-c) crit=$2; shift 2 ;;
|
||||
--) shift; break ;;
|
||||
*) break ;;
|
||||
esac
|
||||
done
|
||||
|
||||
percent=$((current * 100 / max))
|
||||
stats="| current=$current max=$max percent=$percent;$warn;$crit"
|
||||
|
||||
threshold=$((max * crit / 100))
|
||||
if [ $current -gt $threshold ]; then
|
||||
echo "CRITICAL: conntrack table nearly full. $stats"
|
||||
exit $STATE_CRITICAL
|
||||
fi
|
||||
|
||||
threshold=$((max * warn / 100))
|
||||
if [ $current -gt $threshold ]; then
|
||||
echo "WARNING: conntrack table filling. $stats"
|
||||
exit $STATE_WARNING
|
||||
fi
|
||||
|
||||
echo "OK: conntrack table normal $stats"
|
||||
exit $STATE_OK
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""Check CPU governor scaling and alert."""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import re
|
||||
|
||||
from nagios_plugin3 import (
|
||||
CriticalError,
|
||||
try_check,
|
||||
)
|
||||
|
||||
|
||||
def wanted_governor(governor):
|
||||
"""Check /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor."""
|
||||
cpu_path = os.listdir("/sys/devices/system/cpu")
|
||||
regex = re.compile("(cpu[0-9][0-9]*)")
|
||||
numcpus = sum(1 for x in cpu_path if regex.match(x))
|
||||
error_cpus = set()
|
||||
for cpu in range(0, numcpus):
|
||||
path = f"/sys/devices/system/cpu/cpu{cpu}/cpufreq/scaling_governor"
|
||||
with open(path) as f:
|
||||
out = f.readline().strip()
|
||||
|
||||
if governor in out:
|
||||
continue
|
||||
else:
|
||||
error_cpus.add(f"CPU{cpu}")
|
||||
|
||||
if error_cpus:
|
||||
error_cpus = ",".join(error_cpus)
|
||||
raise CriticalError(f"CRITICAL: {error_cpus} not set to {governor}")
|
||||
|
||||
print(f"OK: All CPUs set to {governor}.")
|
||||
|
||||
|
||||
def parse_args():
|
||||
"""Parse command-line options."""
|
||||
parser = argparse.ArgumentParser(description="Check CPU governor")
|
||||
parser.add_argument(
|
||||
"--governor",
|
||||
"-g",
|
||||
type=str,
|
||||
help="The requested governor to check for each CPU",
|
||||
default="performance",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
return args
|
||||
|
||||
|
||||
def main():
|
||||
"""Check the CPU governors."""
|
||||
args = parse_args()
|
||||
try_check(wanted_governor, args.governor)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -1,189 +0,0 @@
|
|||
#!/usr/bin/perl
|
||||
################################################################################
|
||||
# #
|
||||
# Copyright (C) 2011 Chad Columbus <ccolumbu@hotmail.com> #
|
||||
# #
|
||||
# This program is free software; you can redistribute it and/or modify #
|
||||
# it under the terms of the GNU General Public License as published by #
|
||||
# the Free Software Foundation; either version 2 of the License, or #
|
||||
# (at your option) any later version. #
|
||||
# #
|
||||
# This program is distributed in the hope that it will be useful, #
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
|
||||
# GNU General Public License for more details. #
|
||||
# #
|
||||
# You should have received a copy of the GNU General Public License #
|
||||
# along with this program; if not, write to the Free Software #
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #
|
||||
# #
|
||||
################################################################################
|
||||
|
||||
use strict;
|
||||
use Getopt::Std;
|
||||
$| = 1;
|
||||
|
||||
my %opts;
|
||||
getopts('heronp:s:', \%opts);
|
||||
|
||||
my $VERSION = "Version 1.0";
|
||||
my $AUTHOR = '(c) 2011 Chad Columbus <ccolumbu@hotmail.com>';
|
||||
|
||||
# Default values:
|
||||
my $script_to_check;
|
||||
my $pattern = 'is running';
|
||||
my $cmd;
|
||||
my $message;
|
||||
my $error;
|
||||
|
||||
# Exit codes
|
||||
my $STATE_OK = 0;
|
||||
my $STATE_WARNING = 1;
|
||||
my $STATE_CRITICAL = 2;
|
||||
my $STATE_UNKNOWN = 3;
|
||||
|
||||
# Parse command line options
|
||||
if ($opts{'h'} || scalar(%opts) == 0) {
|
||||
&print_help();
|
||||
exit($STATE_OK);
|
||||
}
|
||||
|
||||
# Make sure scipt is provided:
|
||||
if ($opts{'s'} eq '') {
|
||||
# Script to run not provided
|
||||
print "\nYou must provide a script to run. Example: -s /etc/init.d/httpd\n";
|
||||
exit($STATE_UNKNOWN);
|
||||
} else {
|
||||
$script_to_check = $opts{'s'};
|
||||
}
|
||||
|
||||
# Make sure only a-z, 0-9, /, _, and - are used in the script.
|
||||
if ($script_to_check =~ /[^a-z0-9\_\-\/\.]/) {
|
||||
# Script contains illegal characters exit.
|
||||
print "\nScript to check can only contain Letters, Numbers, Periods, Underscores, Hyphens, and/or Slashes\n";
|
||||
exit($STATE_UNKNOWN);
|
||||
}
|
||||
|
||||
# See if script is executable
|
||||
if (! -x "$script_to_check") {
|
||||
print "\nIt appears you can't execute $script_to_check, $!\n";
|
||||
exit($STATE_UNKNOWN);
|
||||
}
|
||||
|
||||
# If a pattern is provided use it:
|
||||
if ($opts{'p'} ne '') {
|
||||
$pattern = $opts{'p'};
|
||||
}
|
||||
|
||||
# If -r run command via sudo as root:
|
||||
if ($opts{'r'}) {
|
||||
$cmd = "sudo -n $script_to_check status" . ' 2>&1';
|
||||
} else {
|
||||
$cmd = "$script_to_check status" . ' 2>&1';
|
||||
}
|
||||
|
||||
my $cmd_result = `$cmd`;
|
||||
chomp($cmd_result);
|
||||
if ($cmd_result =~ /sudo/i) {
|
||||
# This means it could not run the sudo command
|
||||
$message = "$script_to_check CRITICAL - Could not run: 'sudo -n $script_to_check status'. Result is $cmd_result";
|
||||
$error = $STATE_UNKNOWN;
|
||||
} else {
|
||||
# Check exitstatus instead of output:
|
||||
if ($opts{'e'} == 1) {
|
||||
if ($? != 0) {
|
||||
# error
|
||||
$message = "$script_to_check CRITICAL - Exit code: $?\.";
|
||||
if ($opts{'o'} == 0) {
|
||||
$message .= " $cmd_result";
|
||||
}
|
||||
$error = $STATE_CRITICAL;
|
||||
} else {
|
||||
# success
|
||||
$message = "$script_to_check OK - Exit code: $?\.";
|
||||
if ($opts{'o'} == 0) {
|
||||
$message .= " $cmd_result";
|
||||
}
|
||||
$error = $STATE_OK;
|
||||
}
|
||||
} else {
|
||||
my $not_check = 1;
|
||||
if ($opts{'n'} == 1) {
|
||||
$not_check = 0;
|
||||
}
|
||||
if (($cmd_result =~ /$pattern/i) == $not_check) {
|
||||
$message = "$script_to_check OK";
|
||||
if ($opts{'o'} == 0) {
|
||||
$message .= " - $cmd_result";
|
||||
}
|
||||
$error = $STATE_OK;
|
||||
} else {
|
||||
$message = "$script_to_check CRITICAL";
|
||||
if ($opts{'o'} == 0) {
|
||||
$message .= " - $cmd_result";
|
||||
}
|
||||
$error = $STATE_CRITICAL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ($message eq '') {
|
||||
print "Error: program failed in an unknown way\n";
|
||||
exit($STATE_UNKNOWN);
|
||||
}
|
||||
|
||||
if ($error) {
|
||||
print "$message\n";
|
||||
exit($error);
|
||||
} else {
|
||||
# If we get here we are OK
|
||||
print "$message\n";
|
||||
exit($STATE_OK);
|
||||
}
|
||||
|
||||
####################################
|
||||
# Start Subs:
|
||||
####################################
|
||||
sub print_help() {
|
||||
print << "EOF";
|
||||
Check the output or exit status of a script.
|
||||
$VERSION
|
||||
$AUTHOR
|
||||
|
||||
Options:
|
||||
-h
|
||||
Print detailed help screen
|
||||
|
||||
-s
|
||||
'FULL PATH TO SCRIPT' (required)
|
||||
This is the script to run, the script is designed to run scripts in the
|
||||
/etc/init.d dir (but can run any script) and will call the script with
|
||||
a 'status' argument. So if you use another script make sure it will
|
||||
work with /path/script status, example: /etc/init.d/httpd status
|
||||
|
||||
-e
|
||||
This is the "exitstaus" flag, it means check the exit status
|
||||
code instead of looking for a pattern in the output of the script.
|
||||
|
||||
-p 'REGEX'
|
||||
This is a pattern to look for in the output of the script to confirm it
|
||||
is running, default is 'is running', but not all init.d scripts output
|
||||
(iptables), so you can specify an arbitrary pattern.
|
||||
All patterns are case insensitive.
|
||||
|
||||
-n
|
||||
This is the "NOT" flag, it means not the -p pattern, so if you want to
|
||||
make sure the output of the script does NOT contain -p 'REGEX'
|
||||
|
||||
-r
|
||||
This is the "ROOT" flag, it means run as root via sudo. You will need a
|
||||
line in your /etc/sudoers file like:
|
||||
nagios ALL=(root) NOPASSWD: /etc/init.d/* status
|
||||
|
||||
-o
|
||||
This is the "SUPPRESS OUTPUT" flag. Some programs have a long output
|
||||
(like iptables), this flag suppresses that output so it is not printed
|
||||
as a part of the nagios message.
|
||||
EOF
|
||||
}
|
||||
|
||||
|
|
@ -1,133 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""Check lacp bonds and alert."""
|
||||
# -*- coding: us-ascii -*-
|
||||
|
||||
# Copyright (C) 2017 Canonical
|
||||
# All rights reserved
|
||||
# Author: Alvaro Uria <alvaro.uria@canonical.com>
|
||||
|
||||
import argparse
|
||||
import glob
|
||||
import os
|
||||
import sys
|
||||
|
||||
from nagios_plugin3 import CriticalError, WarnError, try_check
|
||||
|
||||
# LACPDU port states in binary
|
||||
LACPDU_ACTIVE = 0b1 # 1 = Active, 0 = Passive
|
||||
LACPDU_RATE = 0b10 # 1 = Short Timeout, 0 = Long Timeout
|
||||
LACPDU_AGGREGATED = 0b100 # 1 = Yes, 0 = No (individual link)
|
||||
LACPDU_SYNC = 0b1000 # 1 = In sync, 0 = Not in sync
|
||||
LACPDU_COLLECT = 0b10000 # Mux is accepting traffic received on this port
|
||||
LACPDU_DIST = 0b100000 # Mux is sending traffic using this port
|
||||
LACPDU_DEFAULT = 0b1000000 # 1 = default settings, 0 = via LACP PDU
|
||||
LACPDU_EXPIRED = 0b10000000 # In an expired state
|
||||
|
||||
|
||||
def check_lacpdu_port(actor_port, partner_port):
|
||||
"""Return message for LACPDU port state mismatch."""
|
||||
diff = int(actor_port) ^ int(partner_port)
|
||||
msg = []
|
||||
if diff & LACPDU_RATE:
|
||||
msg.append("lacp rate mismatch")
|
||||
if diff & LACPDU_AGGREGATED:
|
||||
msg.append("not aggregated")
|
||||
if diff & LACPDU_SYNC:
|
||||
msg.append("not in sync")
|
||||
if diff & LACPDU_COLLECT:
|
||||
msg.append("not collecting")
|
||||
return ", ".join(msg)
|
||||
|
||||
|
||||
def check_lacp_bond(iface):
|
||||
"""Check LACP bonds are correctly configured (AD Aggregator IDs match)."""
|
||||
bond_aggr_template = "/sys/class/net/{0}/bonding/ad_aggregator"
|
||||
bond_slaves_template = "/sys/class/net/{0}/bonding/slaves"
|
||||
bond_mode_template = "/sys/class/net/{0}/bonding/mode"
|
||||
slave_template = "/sys/class/net/{0}/bonding_slave/ad_aggregator_id"
|
||||
actor_port_state = "/sys/class/net/{0}/bonding_slave/ad_actor_oper_port_state"
|
||||
partnet_port_state = "/sys/class/net/{0}/bonding_slave/ad_partner_oper_port_state"
|
||||
|
||||
bond_aggr = bond_aggr_template.format(iface)
|
||||
bond_slaves = bond_slaves_template.format(iface)
|
||||
|
||||
if os.path.exists(bond_aggr):
|
||||
with open(bond_mode_template.format(iface)) as fd:
|
||||
bond_mode = fd.readline()
|
||||
|
||||
if "802.3ad" not in bond_mode:
|
||||
msg = "WARNING: {} is not in lacp mode".format(iface)
|
||||
raise WarnError(msg)
|
||||
|
||||
with open(bond_aggr) as fd:
|
||||
bond_aggr_value = fd.readline().strip()
|
||||
|
||||
d_bond = {iface: bond_aggr_value}
|
||||
|
||||
with open(bond_slaves) as fd:
|
||||
slaves = fd.readline().strip().split(" ")
|
||||
for slave in slaves:
|
||||
# Check aggregator ID
|
||||
with open(slave_template.format(slave)) as fd:
|
||||
slave_aggr_value = fd.readline().strip()
|
||||
|
||||
d_bond[slave] = slave_aggr_value
|
||||
|
||||
if slave_aggr_value != bond_aggr_value:
|
||||
# If we can report then only 1/2 the bond is down
|
||||
msg = "WARNING: aggregator_id mismatch "
|
||||
msg += "({0}:{1} - {2}:{3})"
|
||||
msg = msg.format(iface, bond_aggr_value, slave, slave_aggr_value)
|
||||
raise WarnError(msg)
|
||||
# Check LACPDU port state
|
||||
with open(actor_port_state.format(slave)) as fd:
|
||||
actor_port_value = fd.readline().strip()
|
||||
with open(partnet_port_state.format(slave)) as fd:
|
||||
partner_port_value = fd.readline().strip()
|
||||
if actor_port_value != partner_port_value:
|
||||
res = check_lacpdu_port(actor_port_value, partner_port_value)
|
||||
msg = (
|
||||
"WARNING: LACPDU port state mismatch "
|
||||
"({0}: {1} - actor_port_state={2}, "
|
||||
"partner_port_state={3})".format(
|
||||
res, slave, actor_port_value, partner_port_value
|
||||
)
|
||||
)
|
||||
raise WarnError(msg)
|
||||
|
||||
else:
|
||||
msg = "CRITICAL: {} is not a bonding interface".format(iface)
|
||||
raise CriticalError(msg)
|
||||
|
||||
extra_info = "{0}:{1}".format(iface, d_bond[iface])
|
||||
for k_iface, v_aggrid in d_bond.items():
|
||||
if k_iface == iface:
|
||||
continue
|
||||
extra_info += ", {0}:{1}".format(k_iface, v_aggrid)
|
||||
print("OK: bond config is healthy: {}".format(extra_info))
|
||||
|
||||
|
||||
def parse_args():
|
||||
"""Parse command-line options."""
|
||||
parser = argparse.ArgumentParser(description="Check bond status")
|
||||
parser.add_argument("--iface", "-i", help="bond iface name")
|
||||
args = parser.parse_args()
|
||||
|
||||
if not args.iface:
|
||||
ifaces = map(os.path.basename, glob.glob("/sys/class/net/bond?"))
|
||||
print(
|
||||
"UNKNOWN: Please specify one of these bond "
|
||||
"ifaces: {}".format(",".join(ifaces))
|
||||
)
|
||||
sys.exit(1)
|
||||
return args
|
||||
|
||||
|
||||
def main():
|
||||
"""Parse args and check the lacp bonds."""
|
||||
args = parse_args()
|
||||
try_check(check_lacp_bond, args.iface)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -1,412 +0,0 @@
|
|||
#!/usr/bin/perl -w
|
||||
|
||||
# Heavily based on the script from:
|
||||
# check_mem.pl Copyright (C) 2000 Dan Larsson <dl@tyfon.net>
|
||||
# heavily modified by
|
||||
# Justin Ellison <justin@techadvise.com>
|
||||
#
|
||||
# The MIT License (MIT)
|
||||
# Copyright (c) 2011 justin@techadvise.com
|
||||
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
|
||||
# software and associated documentation files (the "Software"), to deal in the Software
|
||||
# without restriction, including without limitation the rights to use, copy, modify,
|
||||
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
|
||||
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
# The above copyright notice and this permission notice shall be included in all copies
|
||||
# or substantial portions of the Software.
|
||||
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
|
||||
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
|
||||
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
|
||||
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
|
||||
# OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
# OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
# Tell Perl what we need to use
|
||||
use strict;
|
||||
use Getopt::Std;
|
||||
|
||||
#TODO - Convert to Nagios::Plugin
|
||||
#TODO - Use an alarm
|
||||
|
||||
# Predefined exit codes for Nagios
|
||||
use vars qw($opt_c $opt_f $opt_u $opt_w $opt_C $opt_v $opt_h %exit_codes);
|
||||
%exit_codes = ('UNKNOWN' , 3,
|
||||
'OK' , 0,
|
||||
'WARNING' , 1,
|
||||
'CRITICAL', 2,
|
||||
);
|
||||
|
||||
# Get our variables, do our checking:
|
||||
init();
|
||||
|
||||
# Get the numbers:
|
||||
my ($free_memory_kb,$used_memory_kb,$caches_kb,$hugepages_kb) = get_memory_info();
|
||||
print "$free_memory_kb Free\n$used_memory_kb Used\n$caches_kb Cache\n" if ($opt_v);
|
||||
print "$hugepages_kb Hugepages\n" if ($opt_v and $opt_h);
|
||||
|
||||
if ($opt_C) { #Do we count caches as free?
|
||||
$used_memory_kb -= $caches_kb;
|
||||
$free_memory_kb += $caches_kb;
|
||||
}
|
||||
|
||||
if ($opt_h) {
|
||||
$used_memory_kb -= $hugepages_kb;
|
||||
}
|
||||
|
||||
print "$used_memory_kb Used (after Hugepages)\n" if ($opt_v);
|
||||
|
||||
# Round to the nearest KB
|
||||
$free_memory_kb = sprintf('%d',$free_memory_kb);
|
||||
$used_memory_kb = sprintf('%d',$used_memory_kb);
|
||||
$caches_kb = sprintf('%d',$caches_kb);
|
||||
|
||||
# Tell Nagios what we came up with
|
||||
tell_nagios($used_memory_kb,$free_memory_kb,$caches_kb,$hugepages_kb);
|
||||
|
||||
|
||||
sub tell_nagios {
|
||||
my ($used,$free,$caches,$hugepages) = @_;
|
||||
|
||||
# Calculate Total Memory
|
||||
my $total = $free + $used;
|
||||
print "$total Total\n" if ($opt_v);
|
||||
|
||||
my $perf_warn;
|
||||
my $perf_crit;
|
||||
if ( $opt_u ) {
|
||||
$perf_warn = int(${total} * $opt_w / 100);
|
||||
$perf_crit = int(${total} * $opt_c / 100);
|
||||
} else {
|
||||
$perf_warn = int(${total} * ( 100 - $opt_w ) / 100);
|
||||
$perf_crit = int(${total} * ( 100 - $opt_c ) / 100);
|
||||
}
|
||||
|
||||
my $perfdata = "|TOTAL=${total}KB;;;; USED=${used}KB;${perf_warn};${perf_crit};; FREE=${free}KB;;;; CACHES=${caches}KB;;;;";
|
||||
$perfdata .= " HUGEPAGES=${hugepages}KB;;;;" if ($opt_h);
|
||||
|
||||
if ($opt_f) {
|
||||
my $percent = sprintf "%.1f", ($free / $total * 100);
|
||||
if ($percent <= $opt_c) {
|
||||
finish("CRITICAL - $percent% ($free kB) free!$perfdata",$exit_codes{'CRITICAL'});
|
||||
}
|
||||
elsif ($percent <= $opt_w) {
|
||||
finish("WARNING - $percent% ($free kB) free!$perfdata",$exit_codes{'WARNING'});
|
||||
}
|
||||
else {
|
||||
finish("OK - $percent% ($free kB) free.$perfdata",$exit_codes{'OK'});
|
||||
}
|
||||
}
|
||||
elsif ($opt_u) {
|
||||
my $percent = sprintf "%.1f", ($used / $total * 100);
|
||||
if ($percent >= $opt_c) {
|
||||
finish("CRITICAL - $percent% ($used kB) used!$perfdata",$exit_codes{'CRITICAL'});
|
||||
}
|
||||
elsif ($percent >= $opt_w) {
|
||||
finish("WARNING - $percent% ($used kB) used!$perfdata",$exit_codes{'WARNING'});
|
||||
}
|
||||
else {
|
||||
finish("OK - $percent% ($used kB) used.$perfdata",$exit_codes{'OK'});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Show usage
|
||||
sub usage() {
|
||||
print "\ncheck_mem.pl v1.0 - Nagios Plugin\n\n";
|
||||
print "usage:\n";
|
||||
print " check_mem.pl -<f|u> -w <warnlevel> -c <critlevel>\n\n";
|
||||
print "options:\n";
|
||||
print " -f Check FREE memory\n";
|
||||
print " -u Check USED memory\n";
|
||||
print " -C Count OS caches as FREE memory\n";
|
||||
print " -h Remove hugepages from the total memory count\n";
|
||||
print " -w PERCENT Percent free/used when to warn\n";
|
||||
print " -c PERCENT Percent free/used when critical\n";
|
||||
print "\nCopyright (C) 2000 Dan Larsson <dl\@tyfon.net>\n";
|
||||
print "check_mem.pl comes with absolutely NO WARRANTY either implied or explicit\n";
|
||||
print "This program is licensed under the terms of the\n";
|
||||
print "MIT License (check source code for details)\n";
|
||||
exit $exit_codes{'UNKNOWN'};
|
||||
}
|
||||
|
||||
sub get_memory_info {
|
||||
my $used_memory_kb = 0;
|
||||
my $free_memory_kb = 0;
|
||||
my $total_memory_kb = 0;
|
||||
my $caches_kb = 0;
|
||||
my $hugepages_nr = 0;
|
||||
my $hugepages_size = 0;
|
||||
my $hugepages_kb = 0;
|
||||
|
||||
my $uname;
|
||||
if ( -e '/usr/bin/uname') {
|
||||
$uname = `/usr/bin/uname -a`;
|
||||
}
|
||||
elsif ( -e '/bin/uname') {
|
||||
$uname = `/bin/uname -a`;
|
||||
}
|
||||
else {
|
||||
die "Unable to find uname in /usr/bin or /bin!\n";
|
||||
}
|
||||
print "uname returns $uname" if ($opt_v);
|
||||
if ( $uname =~ /Linux/ ) {
|
||||
my @meminfo = `/bin/cat /proc/meminfo`;
|
||||
foreach (@meminfo) {
|
||||
chomp;
|
||||
if (/^Mem(Total|Free):\s+(\d+) kB/) {
|
||||
my $counter_name = $1;
|
||||
if ($counter_name eq 'Free') {
|
||||
$free_memory_kb = $2;
|
||||
}
|
||||
elsif ($counter_name eq 'Total') {
|
||||
$total_memory_kb = $2;
|
||||
}
|
||||
}
|
||||
elsif (/^MemAvailable:\s+(\d+) kB/) {
|
||||
$caches_kb += $1;
|
||||
}
|
||||
elsif (/^(Buffers|Cached|SReclaimable):\s+(\d+) kB/) {
|
||||
$caches_kb += $2;
|
||||
}
|
||||
elsif (/^Shmem:\s+(\d+) kB/) {
|
||||
$caches_kb -= $1;
|
||||
}
|
||||
# These variables will most likely be overwritten once we look into
|
||||
# /sys/kernel/mm/hugepages, unless we are running on linux <2.6.27
|
||||
# and have to rely on them
|
||||
elsif (/^HugePages_Total:\s+(\d+)/) {
|
||||
$hugepages_nr = $1;
|
||||
}
|
||||
elsif (/^Hugepagesize:\s+(\d+) kB/) {
|
||||
$hugepages_size = $1;
|
||||
}
|
||||
}
|
||||
$hugepages_kb = $hugepages_nr * $hugepages_size;
|
||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
||||
|
||||
# Read hugepages info from the newer sysfs interface if available
|
||||
my $hugepages_sysfs_dir = '/sys/kernel/mm/hugepages';
|
||||
if ( -d $hugepages_sysfs_dir ) {
|
||||
# Reset what we read from /proc/meminfo
|
||||
$hugepages_kb = 0;
|
||||
opendir(my $dh, $hugepages_sysfs_dir)
|
||||
|| die "Can't open $hugepages_sysfs_dir: $!";
|
||||
while (my $entry = readdir $dh) {
|
||||
if ($entry =~ /^hugepages-(\d+)kB/) {
|
||||
$hugepages_size = $1;
|
||||
my $hugepages_nr_file = "$hugepages_sysfs_dir/$entry/nr_hugepages";
|
||||
open(my $fh, '<', $hugepages_nr_file)
|
||||
|| die "Can't open $hugepages_nr_file for reading: $!";
|
||||
$hugepages_nr = <$fh>;
|
||||
close($fh);
|
||||
$hugepages_kb += $hugepages_nr * $hugepages_size;
|
||||
}
|
||||
}
|
||||
closedir($dh);
|
||||
}
|
||||
}
|
||||
elsif ( $uname =~ /HP-UX/ ) {
|
||||
# HP-UX, thanks to Christoph Fürstaller
|
||||
my @meminfo = `/usr/bin/sudo /usr/local/bin/kmeminfo`;
|
||||
foreach (@meminfo) {
|
||||
chomp;
|
||||
if (/^Physical memory\s\s+=\s+(\d+)\s+(\d+.\d)g/) {
|
||||
$total_memory_kb = ($2 * 1024 * 1024);
|
||||
}
|
||||
elsif (/^Free memory\s\s+=\s+(\d+)\s+(\d+.\d)g/) {
|
||||
$free_memory_kb = ($2 * 1024 * 1024);
|
||||
}
|
||||
}
|
||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
||||
}
|
||||
elsif ( $uname =~ /FreeBSD/ ) {
|
||||
# The FreeBSD case. 2013-03-19 www.claudiokuenzler.com
|
||||
# free mem = Inactive*Page Size + Cache*Page Size + Free*Page Size
|
||||
my $pagesize = `sysctl vm.stats.vm.v_page_size`;
|
||||
$pagesize =~ s/[^0-9]//g;
|
||||
my $mem_inactive = 0;
|
||||
my $mem_cache = 0;
|
||||
my $mem_free = 0;
|
||||
my $mem_total = 0;
|
||||
my $free_memory = 0;
|
||||
my @meminfo = `/sbin/sysctl vm.stats.vm`;
|
||||
foreach (@meminfo) {
|
||||
chomp;
|
||||
if (/^vm.stats.vm.v_inactive_count:\s+(\d+)/) {
|
||||
$mem_inactive = ($1 * $pagesize);
|
||||
}
|
||||
elsif (/^vm.stats.vm.v_cache_count:\s+(\d+)/) {
|
||||
$mem_cache = ($1 * $pagesize);
|
||||
}
|
||||
elsif (/^vm.stats.vm.v_free_count:\s+(\d+)/) {
|
||||
$mem_free = ($1 * $pagesize);
|
||||
}
|
||||
elsif (/^vm.stats.vm.v_page_count:\s+(\d+)/) {
|
||||
$mem_total = ($1 * $pagesize);
|
||||
}
|
||||
}
|
||||
$free_memory = $mem_inactive + $mem_cache + $mem_free;
|
||||
$free_memory_kb = ( $free_memory / 1024);
|
||||
$total_memory_kb = ( $mem_total / 1024);
|
||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
||||
$caches_kb = ($mem_cache / 1024);
|
||||
}
|
||||
elsif ( $uname =~ /joyent/ ) {
|
||||
# The SmartOS case. 2014-01-10 www.claudiokuenzler.com
|
||||
# free mem = pagesfree * pagesize
|
||||
my $pagesize = `pagesize`;
|
||||
my $phys_pages = `kstat -p unix:0:system_pages:pagestotal | awk '{print \$NF}'`;
|
||||
my $free_pages = `kstat -p unix:0:system_pages:pagesfree | awk '{print \$NF}'`;
|
||||
my $arc_size = `kstat -p zfs:0:arcstats:size | awk '{print \$NF}'`;
|
||||
my $arc_size_kb = $arc_size / 1024;
|
||||
|
||||
print "Pagesize is $pagesize" if ($opt_v);
|
||||
print "Total pages is $phys_pages" if ($opt_v);
|
||||
print "Free pages is $free_pages" if ($opt_v);
|
||||
print "Arc size is $arc_size" if ($opt_v);
|
||||
|
||||
$caches_kb += $arc_size_kb;
|
||||
|
||||
$total_memory_kb = $phys_pages * $pagesize / 1024;
|
||||
$free_memory_kb = $free_pages * $pagesize / 1024;
|
||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
||||
}
|
||||
elsif ( $uname =~ /SunOS/ ) {
|
||||
eval "use Sun::Solaris::Kstat";
|
||||
if ($@) { #Kstat not available
|
||||
if ($opt_C) {
|
||||
print "You can't report on Solaris caches without Sun::Solaris::Kstat available!\n";
|
||||
exit $exit_codes{UNKNOWN};
|
||||
}
|
||||
my @vmstat = `/usr/bin/vmstat 1 2`;
|
||||
my $line;
|
||||
foreach (@vmstat) {
|
||||
chomp;
|
||||
$line = $_;
|
||||
}
|
||||
$free_memory_kb = (split(/ /,$line))[5] / 1024;
|
||||
my @prtconf = `/usr/sbin/prtconf`;
|
||||
foreach (@prtconf) {
|
||||
if (/^Memory size: (\d+) Megabytes/) {
|
||||
$total_memory_kb = $1 * 1024;
|
||||
}
|
||||
}
|
||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
||||
|
||||
}
|
||||
else { # We have kstat
|
||||
my $kstat = Sun::Solaris::Kstat->new();
|
||||
my $phys_pages = ${kstat}->{unix}->{0}->{system_pages}->{physmem};
|
||||
my $free_pages = ${kstat}->{unix}->{0}->{system_pages}->{freemem};
|
||||
# We probably should account for UFS caching here, but it's unclear
|
||||
# to me how to determine UFS's cache size. There's inode_cache,
|
||||
# and maybe the physmem variable in the system_pages module??
|
||||
# In the real world, it looks to be so small as not to really matter,
|
||||
# so we don't grab it. If someone can give me code that does this,
|
||||
# I'd be glad to put it in.
|
||||
my $arc_size = (exists ${kstat}->{zfs} && ${kstat}->{zfs}->{0}->{arcstats}->{size}) ?
|
||||
${kstat}->{zfs}->{0}->{arcstats}->{size} / 1024
|
||||
: 0;
|
||||
$caches_kb += $arc_size;
|
||||
my $pagesize = `pagesize`;
|
||||
|
||||
$total_memory_kb = $phys_pages * $pagesize / 1024;
|
||||
$free_memory_kb = $free_pages * $pagesize / 1024;
|
||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
||||
}
|
||||
}
|
||||
elsif ( $uname =~ /Darwin/ ) {
|
||||
$total_memory_kb = (split(/ /,`/usr/sbin/sysctl hw.memsize`))[1]/1024;
|
||||
my $pagesize = (split(/ /,`/usr/sbin/sysctl hw.pagesize`))[1];
|
||||
$caches_kb = 0;
|
||||
my @vm_stat = `/usr/bin/vm_stat`;
|
||||
foreach (@vm_stat) {
|
||||
chomp;
|
||||
if (/^(Pages free):\s+(\d+)\.$/) {
|
||||
$free_memory_kb = $2*$pagesize/1024;
|
||||
}
|
||||
# 'caching' concept works different on MACH
|
||||
# this should be a reasonable approximation
|
||||
elsif (/^Pages (inactive|purgable):\s+(\d+).$/) {
|
||||
$caches_kb += $2*$pagesize/1024;
|
||||
}
|
||||
}
|
||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
||||
}
|
||||
elsif ( $uname =~ /AIX/ ) {
|
||||
my @meminfo = `/usr/bin/vmstat -vh`;
|
||||
foreach (@meminfo) {
|
||||
chomp;
|
||||
if (/^\s*([0-9.]+)\s+(.*)/) {
|
||||
my $counter_name = $2;
|
||||
if ($counter_name eq 'memory pages') {
|
||||
$total_memory_kb = $1*4;
|
||||
}
|
||||
if ($counter_name eq 'free pages') {
|
||||
$free_memory_kb = $1*4;
|
||||
}
|
||||
if ($counter_name eq 'file pages') {
|
||||
$caches_kb = $1*4;
|
||||
}
|
||||
if ($counter_name eq 'Number of 4k page frames loaned') {
|
||||
$free_memory_kb += $1*4;
|
||||
}
|
||||
}
|
||||
}
|
||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
||||
}
|
||||
else {
|
||||
if ($opt_C) {
|
||||
print "You can't report on $uname caches!\n";
|
||||
exit $exit_codes{UNKNOWN};
|
||||
}
|
||||
my $command_line = `vmstat | tail -1 | awk '{print \$4,\$5}'`;
|
||||
chomp $command_line;
|
||||
my @memlist = split(/ /, $command_line);
|
||||
|
||||
# Define the calculating scalars
|
||||
$used_memory_kb = $memlist[0]/1024;
|
||||
$free_memory_kb = $memlist[1]/1024;
|
||||
$total_memory_kb = $used_memory_kb + $free_memory_kb;
|
||||
}
|
||||
return ($free_memory_kb,$used_memory_kb,$caches_kb,$hugepages_kb);
|
||||
}
|
||||
|
||||
sub init {
|
||||
# Get the options
|
||||
if ($#ARGV le 0) {
|
||||
&usage;
|
||||
}
|
||||
else {
|
||||
getopts('c:fuChvw:');
|
||||
}
|
||||
|
||||
# Shortcircuit the switches
|
||||
if (!$opt_w or $opt_w == 0 or !$opt_c or $opt_c == 0) {
|
||||
print "*** You must define WARN and CRITICAL levels!\n";
|
||||
&usage;
|
||||
}
|
||||
elsif (!$opt_f and !$opt_u) {
|
||||
print "*** You must select to monitor either USED or FREE memory!\n";
|
||||
&usage;
|
||||
}
|
||||
|
||||
# Check if levels are sane
|
||||
if ($opt_w <= $opt_c and $opt_f) {
|
||||
print "*** WARN level must not be less than CRITICAL when checking FREE memory!\n";
|
||||
&usage;
|
||||
}
|
||||
elsif ($opt_w >= $opt_c and $opt_u) {
|
||||
print "*** WARN level must not be greater than CRITICAL when checking USED memory!\n";
|
||||
&usage;
|
||||
}
|
||||
}
|
||||
|
||||
sub finish {
|
||||
my ($msg,$state) = @_;
|
||||
print "$msg\n";
|
||||
exit $state;
|
||||
}
|
||||
|
|
@ -1,134 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""Check netlinks and alert."""
|
||||
# -*- coding: us-ascii -*-
|
||||
|
||||
# Copyright (C) 2017 Canonical
|
||||
# All rights reserved
|
||||
# Author: Alvaro Uria <alvaro.uria@canonical.com>
|
||||
#
|
||||
# check_netlinks.py -i eth0 -o up -m 1500 -s 1000
|
||||
|
||||
|
||||
import argparse
|
||||
import glob
|
||||
import os
|
||||
import sys
|
||||
|
||||
from nagios_plugin3 import (
|
||||
CriticalError,
|
||||
WarnError,
|
||||
try_check,
|
||||
)
|
||||
|
||||
FILTER = ("operstate", "mtu", "speed")
|
||||
|
||||
|
||||
def check_iface(iface, skiperror, crit_thr):
|
||||
"""Return /sys/class/net/<iface>/<FILTER> values."""
|
||||
file_path = "/sys/class/net/{0}/{1}"
|
||||
filter = ["operstate", "mtu"]
|
||||
if not os.path.exists(file_path.format(iface, "bridge")) and iface != "lo":
|
||||
filter.append("speed")
|
||||
|
||||
for metric_key in filter:
|
||||
try:
|
||||
with open(file_path.format(iface, metric_key)) as fd:
|
||||
metric_value = fd.readline().strip()
|
||||
except FileNotFoundError:
|
||||
if not skiperror:
|
||||
raise WarnError("WARNING: {} iface does not exist".format(iface))
|
||||
return
|
||||
except OSError as e:
|
||||
if (
|
||||
metric_key == "speed"
|
||||
and "Invalid argument" in str(e)
|
||||
and crit_thr["operstate"] == "down"
|
||||
):
|
||||
filter = [f for f in filter if f != "speed"]
|
||||
continue
|
||||
else:
|
||||
raise CriticalError(
|
||||
"CRITICAL: {} ({} returns "
|
||||
"invalid argument)".format(iface, metric_key)
|
||||
)
|
||||
|
||||
if metric_key == "operstate" and metric_value != "up":
|
||||
if metric_value != crit_thr["operstate"]:
|
||||
raise CriticalError(
|
||||
"CRITICAL: {} link state is {}".format(iface, metric_value)
|
||||
)
|
||||
|
||||
if metric_value != crit_thr[metric_key]:
|
||||
raise CriticalError(
|
||||
"CRITICAL: {}/{} is {} (target: "
|
||||
"{})".format(iface, metric_key, metric_value, crit_thr[metric_key])
|
||||
)
|
||||
|
||||
for metric in crit_thr:
|
||||
if metric not in filter:
|
||||
crit_thr[metric] = "n/a"
|
||||
crit_thr["iface"] = iface
|
||||
print(
|
||||
"OK: {iface} matches thresholds: "
|
||||
"o:{operstate}, m:{mtu}, s:{speed}".format(**crit_thr)
|
||||
)
|
||||
|
||||
|
||||
def parse_args():
|
||||
"""Parse command-line options."""
|
||||
parser = argparse.ArgumentParser(description="check ifaces status")
|
||||
parser.add_argument(
|
||||
"--iface",
|
||||
"-i",
|
||||
type=str,
|
||||
help="interface to monitor; listed in /sys/class/net/*)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--skip-unfound-ifaces",
|
||||
"-q",
|
||||
default=False,
|
||||
action="store_true",
|
||||
help="ignores unfound ifaces; otherwise, alert will be triggered",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--operstate",
|
||||
"-o",
|
||||
default="up",
|
||||
type=str,
|
||||
help="operstate: up, down, unknown (default: up)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--mtu", "-m", default="1500", type=str, help="mtu size (default: 1500)"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--speed",
|
||||
"-s",
|
||||
default="10000",
|
||||
type=str,
|
||||
help="link speed in Mbps (default 10000)",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
if not args.iface:
|
||||
ifaces = map(os.path.basename, glob.glob("/sys/class/net/*"))
|
||||
print(
|
||||
"UNKNOWN: Please specify one of these "
|
||||
"ifaces: {}".format(",".join(ifaces))
|
||||
)
|
||||
sys.exit(1)
|
||||
return args
|
||||
|
||||
|
||||
def main():
|
||||
"""Parse args and check the netlinks."""
|
||||
args = parse_args()
|
||||
crit_thr = {
|
||||
"operstate": args.operstate.lower(),
|
||||
"mtu": args.mtu,
|
||||
"speed": args.speed,
|
||||
}
|
||||
try_check(check_iface, args.iface, args.skip_unfound_ifaces, crit_thr)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -1,52 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright (c) 2014 Canonical, Ltd
|
||||
# Author: Brad Marshall <brad.marshall@canonical.com>
|
||||
|
||||
# Checks if a network namespace is responding by doing an ip a in each one.
|
||||
|
||||
. /usr/lib/nagios/plugins/utils.sh
|
||||
|
||||
check_ret_value() {
|
||||
RET=$1
|
||||
if [[ $RET -ne 0 ]];then
|
||||
echo "CRIT: $2"
|
||||
exit $STATE_CRIT
|
||||
fi
|
||||
}
|
||||
|
||||
check_netns_create() {
|
||||
RET_VAL=$(ip netns add nrpe-check 2>&1)
|
||||
check_ret_value $? "$RET_VAL"
|
||||
RET_VAL=$(ip netns delete nrpe-check 2>&1)
|
||||
check_ret_value $? "$RET_VAL"
|
||||
}
|
||||
|
||||
|
||||
netnsok=()
|
||||
netnscrit=()
|
||||
|
||||
for ns in $(ip netns list |awk '!/^nrpe-check$/ {print $1}'); do
|
||||
output=$(ip netns exec $ns ip a 2>/dev/null)
|
||||
err=$?
|
||||
if [ $err -eq 0 ]; then
|
||||
netnsok=("${netnsok[@]}" $ns)
|
||||
else
|
||||
netnscrit=("${netnscrit[@]}" $ns)
|
||||
fi
|
||||
done
|
||||
|
||||
if [ ${#netnscrit[@]} -eq 0 ]; then
|
||||
if [ ${#netnsok[@]} -eq 0 ]; then
|
||||
check_netns_create
|
||||
echo "OK: no namespaces defined"
|
||||
exit $STATE_OK
|
||||
else
|
||||
echo "OK: ${netnsok[@]} are responding"
|
||||
exit $STATE_OK
|
||||
fi
|
||||
else
|
||||
echo "CRIT: ${netnscrit[@]} aren't responding"
|
||||
exit $STATE_CRIT
|
||||
fi
|
||||
|
||||
|
|
@ -1,80 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""Check readonly filesystems and alert."""
|
||||
# -*- coding: us-ascii -*-
|
||||
|
||||
# Copyright (C) 2020 Canonical
|
||||
# All rights reserved
|
||||
#
|
||||
|
||||
import argparse
|
||||
|
||||
from nagios_plugin3 import (
|
||||
CriticalError,
|
||||
UnknownError,
|
||||
try_check,
|
||||
)
|
||||
|
||||
EXCLUDE = {"/snap/", "/sys/fs/cgroup"}
|
||||
|
||||
|
||||
def check_ro_filesystem(excludes=""):
|
||||
"""Loop /proc/mounts looking for readonly mounts.
|
||||
|
||||
:param excludes: list of mount points to exclude from checks
|
||||
"""
|
||||
# read /proc/mounts, add each line to a list
|
||||
try:
|
||||
with open("/proc/mounts") as fd:
|
||||
mounts = [mount.strip() for mount in fd.readlines()]
|
||||
except Exception as e:
|
||||
raise UnknownError("UNKNOWN: unable to read mounts with {}".format(e))
|
||||
|
||||
exclude_mounts = EXCLUDE
|
||||
ro_filesystems = []
|
||||
# if excludes != "" and excludes is not None:
|
||||
if excludes:
|
||||
try:
|
||||
exclude_mounts = EXCLUDE.union(set(excludes.split(",")))
|
||||
except Exception as e:
|
||||
msg = "UNKNOWN: unable to read list of mounts to exclude {}".format(e)
|
||||
raise UnknownError(msg)
|
||||
for mount in mounts:
|
||||
# for each line in the list, split by space to a new list
|
||||
split_mount = mount.split()
|
||||
# if mount[1] matches EXCLUDE_FS then next, else check it's not readonly
|
||||
if not any(
|
||||
split_mount[1].startswith(exclusion.strip()) for exclusion in exclude_mounts
|
||||
):
|
||||
mount_options = split_mount[3].split(",")
|
||||
if "ro" in mount_options:
|
||||
ro_filesystems.append(split_mount[1])
|
||||
if len(ro_filesystems) > 0:
|
||||
msg = "CRITICAL: filesystem(s) {} readonly".format(",".join(ro_filesystems))
|
||||
raise CriticalError(msg)
|
||||
|
||||
print("OK: no readonly filesystems found")
|
||||
|
||||
|
||||
def parse_args():
|
||||
"""Parse command-line options."""
|
||||
parser = argparse.ArgumentParser(description="Check for readonly filesystems")
|
||||
parser.add_argument(
|
||||
"--exclude",
|
||||
"-e",
|
||||
type=str,
|
||||
help="""Comma separated list of mount points to exclude from checks for readonly filesystem.
|
||||
Can be just a substring of the whole mount point.""",
|
||||
default="",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
return args
|
||||
|
||||
|
||||
def main():
|
||||
"""Parse args and check the readonly filesystem."""
|
||||
args = parse_args()
|
||||
try_check(check_ro_filesystem, args.exclude)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -1,71 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""Read file and return nagios status based on its content."""
|
||||
# --------------------------------------------------------
|
||||
# This file is managed by Juju
|
||||
# --------------------------------------------------------
|
||||
|
||||
#
|
||||
# Copyright 2014 Canonical Ltd.
|
||||
#
|
||||
# Author: Jacek Nykis <jacek.nykis@canonical.com>
|
||||
#
|
||||
|
||||
import re
|
||||
|
||||
import nagios_plugin3 as nagios_plugin
|
||||
|
||||
|
||||
def parse_args():
|
||||
"""Parse command-line options."""
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Read file and return nagios status based on its content",
|
||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
|
||||
)
|
||||
parser.add_argument("-f", "--status-file", required=True, help="Status file path")
|
||||
parser.add_argument(
|
||||
"-c",
|
||||
"--critical-text",
|
||||
default="CRITICAL",
|
||||
help="String indicating critical status",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-w",
|
||||
"--warning-text",
|
||||
default="WARNING",
|
||||
help="String indicating warning status",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-o", "--ok-text", default="OK", help="String indicating OK status"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-u",
|
||||
"--unknown-text",
|
||||
default="UNKNOWN",
|
||||
help="String indicating unknown status",
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def check_status(args):
|
||||
"""Return nagios status."""
|
||||
nagios_plugin.check_file_freshness(args.status_file, 43200)
|
||||
|
||||
with open(args.status_file, "r") as f:
|
||||
content = [line.strip() for line in f.readlines()]
|
||||
|
||||
for line in content:
|
||||
if re.search(args.critical_text, line):
|
||||
raise nagios_plugin.CriticalError(line)
|
||||
elif re.search(args.warning_text, line):
|
||||
raise nagios_plugin.WarnError(line)
|
||||
elif re.search(args.unknown_text, line):
|
||||
raise nagios_plugin.UnknownError(line)
|
||||
else:
|
||||
print(line)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = parse_args()
|
||||
nagios_plugin.try_check(check_status, args)
|
||||
|
|
@ -1,78 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This script checks swap pageouts and reports number of kbytes moved
|
||||
# from physical ram to swap space in a given number of seconds
|
||||
#
|
||||
# Usage: "check_swap_activity -i interval -w warning_kbyts -c critical_kbytes
|
||||
#
|
||||
#
|
||||
|
||||
set -eu
|
||||
|
||||
. /usr/lib/nagios/plugins/utils.sh
|
||||
|
||||
|
||||
help() {
|
||||
cat << EOH
|
||||
usage: $0 [ -i ## ] -w ## -c ##
|
||||
|
||||
Measures page-outs to swap over a given interval, by default 5 seconds.
|
||||
|
||||
-i time in seconds to monitor (defaults to 5 seconds)
|
||||
-w warning Level in kbytes
|
||||
-c critical Level in kbytes
|
||||
|
||||
EOH
|
||||
}
|
||||
|
||||
TIMEWORD=seconds
|
||||
WARN_LVL=
|
||||
CRIT_LVL=
|
||||
INTERVAL=5
|
||||
## FETCH ARGUMENTS
|
||||
while getopts "i:w:c:" OPTION; do
|
||||
case "${OPTION}" in
|
||||
i)
|
||||
INTERVAL=${OPTARG}
|
||||
if [ $INTERVAL -eq 1 ]; then
|
||||
TIMEWORD=second
|
||||
fi
|
||||
;;
|
||||
w)
|
||||
WARN_LVL=${OPTARG}
|
||||
;;
|
||||
c)
|
||||
CRIT_LVL=${OPTARG}
|
||||
;;
|
||||
?)
|
||||
help
|
||||
exit 3
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ -z ${WARN_LVL} ] || [ -z ${CRIT_LVL} ] ; then
|
||||
help
|
||||
exit 3
|
||||
fi
|
||||
|
||||
## Get swap pageouts over $INTERVAL
|
||||
PAGEOUTS=$(vmstat -w ${INTERVAL} 2 | tail -n 1 | awk '{print $8}')
|
||||
|
||||
SUMMARY="| swapout_size=${PAGEOUTS}KB;${WARN_LVL};${CRIT_LVL};"
|
||||
if [ ${PAGEOUTS} -lt ${WARN_LVL} ]; then
|
||||
# pageouts are below threshold
|
||||
echo "OK - ${PAGEOUTS} kb swapped out in last ${INTERVAL} ${TIMEWORD} $SUMMARY"
|
||||
exit $STATE_OK
|
||||
elif [ ${PAGEOUTS} -ge ${CRIT_LVL} ]; then
|
||||
## SWAP IS IN CRITICAL STATE
|
||||
echo "CRITICAL - ${PAGEOUTS} kb swapped out in last ${INTERVAL} ${TIMEWORD} $SUMMARY"
|
||||
exit $STATE_CRITICAL
|
||||
elif [ ${PAGEOUTS} -ge ${WARN_LVL} ] && [ ${PAGEOUTS} -lt ${CRIT_LVL} ]; then
|
||||
## SWAP IS IN WARNING STATE
|
||||
echo "WARNING - ${PAGEOUTS} kb swapped out in last ${INTERVAL} ${TIMEWORD} $SUMMARY"
|
||||
exit $STATE_WARNING
|
||||
else
|
||||
echo "CRITICAL: Failure to process pageout information $SUMMARY"
|
||||
exit $STATE_UNKNOWN
|
||||
fi
|
||||
|
|
@ -1,48 +0,0 @@
|
|||
#!/usr/bin/python3
|
||||
"""Check systemd service and alert."""
|
||||
#
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Author: Brad Marshall <brad.marshall@canonical.com>
|
||||
#
|
||||
# Based on check_upstart_job and
|
||||
# https://zignar.net/2014/09/08/getting-started-with-dbus-python-systemd/
|
||||
#
|
||||
import sys
|
||||
|
||||
import dbus
|
||||
|
||||
|
||||
service_arg = sys.argv[1]
|
||||
service_name = "%s.service" % service_arg
|
||||
|
||||
try:
|
||||
bus = dbus.SystemBus()
|
||||
systemd = bus.get_object("org.freedesktop.systemd1", "/org/freedesktop/systemd1")
|
||||
manager = dbus.Interface(systemd, dbus_interface="org.freedesktop.systemd1.Manager")
|
||||
try:
|
||||
service_unit = manager.LoadUnit(service_name)
|
||||
service_proxy = bus.get_object("org.freedesktop.systemd1", str(service_unit))
|
||||
service = dbus.Interface(
|
||||
service_proxy, dbus_interface="org.freedesktop.systemd1.Unit"
|
||||
)
|
||||
service_res = service_proxy.Get(
|
||||
"org.freedesktop.systemd1.Unit",
|
||||
"SubState",
|
||||
dbus_interface="org.freedesktop.DBus.Properties",
|
||||
)
|
||||
|
||||
if service_res == "running":
|
||||
print("OK: %s is running" % service_name)
|
||||
sys.exit(0)
|
||||
else:
|
||||
print("CRITICAL: %s is not running" % service_name)
|
||||
sys.exit(2)
|
||||
|
||||
except dbus.DBusException:
|
||||
print("CRITICAL: unable to find %s in systemd" % service_name)
|
||||
sys.exit(2)
|
||||
|
||||
except dbus.DBusException:
|
||||
print("CRITICAL: unable to connect to system for %s" % service_name)
|
||||
sys.exit(2)
|
||||
|
|
@ -1,72 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
#
|
||||
# Copyright 2012, 2013 Canonical Ltd.
|
||||
#
|
||||
# Author: Paul Collins <paul.collins@canonical.com>
|
||||
#
|
||||
# Based on http://www.eurion.net/python-snippets/snippet/Upstart%20service%20status.html
|
||||
#
|
||||
|
||||
import sys
|
||||
|
||||
import dbus
|
||||
|
||||
|
||||
class Upstart(object):
|
||||
def __init__(self):
|
||||
self._bus = dbus.SystemBus()
|
||||
self._upstart = self._bus.get_object('com.ubuntu.Upstart',
|
||||
'/com/ubuntu/Upstart')
|
||||
def get_job(self, job_name):
|
||||
path = self._upstart.GetJobByName(job_name,
|
||||
dbus_interface='com.ubuntu.Upstart0_6')
|
||||
return self._bus.get_object('com.ubuntu.Upstart', path)
|
||||
|
||||
def get_properties(self, job):
|
||||
path = job.GetInstance([], dbus_interface='com.ubuntu.Upstart0_6.Job')
|
||||
instance = self._bus.get_object('com.ubuntu.Upstart', path)
|
||||
return instance.GetAll('com.ubuntu.Upstart0_6.Instance',
|
||||
dbus_interface=dbus.PROPERTIES_IFACE)
|
||||
|
||||
def get_job_instances(self, job_name):
|
||||
job = self.get_job(job_name)
|
||||
paths = job.GetAllInstances([], dbus_interface='com.ubuntu.Upstart0_6.Job')
|
||||
return [self._bus.get_object('com.ubuntu.Upstart', path) for path in paths]
|
||||
|
||||
def get_job_instance_properties(self, job):
|
||||
return job.GetAll('com.ubuntu.Upstart0_6.Instance',
|
||||
dbus_interface=dbus.PROPERTIES_IFACE)
|
||||
|
||||
try:
|
||||
upstart = Upstart()
|
||||
try:
|
||||
job = upstart.get_job(sys.argv[1])
|
||||
props = upstart.get_properties(job)
|
||||
|
||||
if props['state'] == 'running':
|
||||
print 'OK: %s is running' % sys.argv[1]
|
||||
sys.exit(0)
|
||||
else:
|
||||
print 'CRITICAL: %s is not running' % sys.argv[1]
|
||||
sys.exit(2)
|
||||
|
||||
except dbus.DBusException as e:
|
||||
instances = upstart.get_job_instances(sys.argv[1])
|
||||
propses = [upstart.get_job_instance_properties(instance) for instance in instances]
|
||||
states = dict([(props['name'], props['state']) for props in propses])
|
||||
if len(states) != states.values().count('running'):
|
||||
not_running = []
|
||||
for name in states.keys():
|
||||
if states[name] != 'running':
|
||||
not_running.append(name)
|
||||
print 'CRITICAL: %d instances of %s not running: %s' % \
|
||||
(len(not_running), sys.argv[1], not_running.join(', '))
|
||||
sys.exit(2)
|
||||
else:
|
||||
print 'OK: %d instances of %s running' % (len(states), sys.argv[1])
|
||||
|
||||
except dbus.DBusException as e:
|
||||
print 'CRITICAL: failed to get properties of \'%s\' from upstart' % sys.argv[1]
|
||||
sys.exit(2)
|
||||
|
||||
|
|
@ -1,47 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
"""Check for xfs errors and alert."""
|
||||
#
|
||||
# Copyright 2017 Canonical Ltd
|
||||
#
|
||||
# Author: Jill Rouleau <jill.rouleau@canonical.com>
|
||||
#
|
||||
# Check for xfs errors and alert
|
||||
#
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
|
||||
# error messages commonly seen in dmesg on xfs errors
|
||||
raw_xfs_errors = [
|
||||
"XFS_WANT_CORRUPTED_",
|
||||
"xfs_error_report",
|
||||
"corruption detected at xfs_",
|
||||
"Unmount and run xfs_repair",
|
||||
]
|
||||
|
||||
xfs_regex = [re.compile(i) for i in raw_xfs_errors]
|
||||
|
||||
# nagios can't read from kern.log, so we look at dmesg - this does present
|
||||
# a known limitation if a node is rebooted or dmesg is otherwise cleared.
|
||||
log_lines = [line for line in subprocess.getoutput(["dmesg -T"]).split("\n")]
|
||||
|
||||
err_results = [line for line in log_lines for rgx in xfs_regex if re.search(rgx, line)]
|
||||
|
||||
# Look for errors within the last N minutes, specified in the check definition
|
||||
check_delta = int(sys.argv[1])
|
||||
|
||||
# dmesg -T formatted timestamps are inside [], so we need to add them
|
||||
datetime_delta = datetime.now() - timedelta(minutes=check_delta)
|
||||
|
||||
recent_logs = [
|
||||
i for i in err_results if datetime.strptime(i[1:25], "%c") >= datetime_delta
|
||||
]
|
||||
|
||||
if recent_logs:
|
||||
print("CRITICAL: Recent XFS errors in kern.log." + "\n" + "{}".format(recent_logs))
|
||||
sys.exit(2)
|
||||
else:
|
||||
print("OK")
|
||||
sys.exit(0)
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
#------------------------------------------------
|
||||
# This file is juju managed
|
||||
#------------------------------------------------
|
||||
|
||||
uid = nobody
|
||||
gid = nogroup
|
||||
pid file = /var/run/rsyncd.pid
|
||||
syslog facility = daemon
|
||||
socket options = SO_KEEPALIVE
|
||||
timeout = 7200
|
||||
|
||||
&merge /etc/rsync-juju.d
|
||||
&include /etc/rsync-juju.d
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue