update
This commit is contained in:
parent
f346ad05c8
commit
8608a73a36
|
|
@ -13,7 +13,4 @@ charm pull cs:~containers/kata-138
|
||||||
charm pull cs:~containers/kubeapi-load-balancer-843
|
charm pull cs:~containers/kubeapi-load-balancer-843
|
||||||
charm pull cs:~containers/keepalived-110
|
charm pull cs:~containers/keepalived-110
|
||||||
charm pull cs:~containers/coredns-20
|
charm pull cs:~containers/coredns-20
|
||||||
# Other
|
|
||||||
charm pull cs:~containers/ubuntu-20
|
|
||||||
charm pull cs:~containers/nrpe-75
|
|
||||||
```
|
```
|
||||||
|
|
@ -1,75 +0,0 @@
|
||||||
PYTHON := /usr/bin/python3
|
|
||||||
|
|
||||||
PROJECTPATH=$(dir $(realpath $(MAKEFILE_LIST)))
|
|
||||||
ifndef CHARM_BUILD_DIR
|
|
||||||
CHARM_BUILD_DIR=${PROJECTPATH}.build
|
|
||||||
endif
|
|
||||||
METADATA_FILE="metadata.yaml"
|
|
||||||
CHARM_NAME=$(shell cat ${PROJECTPATH}/${METADATA_FILE} | grep -E '^name:' | awk '{print $$2}')
|
|
||||||
|
|
||||||
help:
|
|
||||||
@echo "This project supports the following targets"
|
|
||||||
@echo ""
|
|
||||||
@echo " make help - show this text"
|
|
||||||
@echo " make clean - remove unneeded files"
|
|
||||||
@echo " make submodules - make sure that the submodules are up-to-date"
|
|
||||||
@echo " make submodules-update - update submodules to latest changes on remote branch"
|
|
||||||
@echo " make build - build the charm"
|
|
||||||
@echo " make release - run clean and build targets"
|
|
||||||
@echo " make lint - run flake8 and black --check"
|
|
||||||
@echo " make black - run black and reformat files"
|
|
||||||
@echo " make proof - run charm proof"
|
|
||||||
@echo " make unittests - run the tests defined in the unittest subdirectory"
|
|
||||||
@echo " make functional - run the tests defined in the functional subdirectory"
|
|
||||||
@echo " make test - run lint, proof, unittests and functional targets"
|
|
||||||
@echo ""
|
|
||||||
|
|
||||||
clean:
|
|
||||||
@echo "Cleaning files"
|
|
||||||
@git clean -ffXd -e '!.idea'
|
|
||||||
@echo "Cleaning existing build"
|
|
||||||
@rm -rf ${CHARM_BUILD_DIR}/${CHARM_NAME}
|
|
||||||
|
|
||||||
submodules:
|
|
||||||
@echo "Cloning submodules"
|
|
||||||
@git submodule update --init --recursive
|
|
||||||
|
|
||||||
submodules-update:
|
|
||||||
@echo "Pulling latest updates for submodules"
|
|
||||||
@git submodule update --init --recursive --remote --merge
|
|
||||||
|
|
||||||
build: submodules-update
|
|
||||||
@echo "Building charm to base directory ${CHARM_BUILD_DIR}/${CHARM_NAME}"
|
|
||||||
@-git rev-parse --abbrev-ref HEAD > ./repo-info
|
|
||||||
@-git describe --always > ./version
|
|
||||||
@mkdir -p ${CHARM_BUILD_DIR}/${CHARM_NAME}
|
|
||||||
@cp -a ./* ${CHARM_BUILD_DIR}/${CHARM_NAME}
|
|
||||||
|
|
||||||
release: clean build
|
|
||||||
@echo "Charm is built at ${CHARM_BUILD_DIR}/${CHARM_NAME}"
|
|
||||||
|
|
||||||
lint:
|
|
||||||
@echo "Running lint checks"
|
|
||||||
@tox -e lint
|
|
||||||
|
|
||||||
black:
|
|
||||||
@echo "Reformat files with black"
|
|
||||||
@tox -e black
|
|
||||||
|
|
||||||
proof:
|
|
||||||
@echo "Running charm proof"
|
|
||||||
@-charm proof
|
|
||||||
|
|
||||||
unittests: submodules-update
|
|
||||||
@echo "Running unit tests"
|
|
||||||
@tox -e unit
|
|
||||||
|
|
||||||
functional: build
|
|
||||||
@echo "Executing functional tests in ${CHARM_BUILD_DIR}"
|
|
||||||
@CHARM_BUILD_DIR=${CHARM_BUILD_DIR} tox -e func
|
|
||||||
|
|
||||||
test: lint proof unittests functional
|
|
||||||
@echo "Charm ${CHARM_NAME} has been tested"
|
|
||||||
|
|
||||||
# The targets below don't depend on a file
|
|
||||||
.PHONY: help submodules submodules-update clean build release lint black proof unittests functional test
|
|
||||||
225
nrpe/README.md
225
nrpe/README.md
|
|
@ -1,225 +0,0 @@
|
||||||
Introduction
|
|
||||||
============
|
|
||||||
|
|
||||||
This subordinate charm is used to configure nrpe (Nagios Remote Plugin
|
|
||||||
Executor). It can be related to the nagios charm via the monitors relation and
|
|
||||||
will pass a monitors yaml to nagios informing it of what checks to monitor.
|
|
||||||
|
|
||||||
Principal Relations
|
|
||||||
===================
|
|
||||||
|
|
||||||
This charm can be attached to any principal charm (via the juju-info relation)
|
|
||||||
regardless of whether it has implemented the local-monitors or
|
|
||||||
nrpe-external-master relations. For example:
|
|
||||||
|
|
||||||
juju deploy ubuntu
|
|
||||||
juju deploy nrpe
|
|
||||||
juju deploy nagios
|
|
||||||
juju add-relation ubuntu nrpe
|
|
||||||
juju add-relation nrpe:monitors nagios:monitors
|
|
||||||
|
|
||||||
If joined via the juju-info relation the default checks are configured and
|
|
||||||
additional checks can be added via the monitors config option (see below).
|
|
||||||
|
|
||||||
The local-monitors relations allows the principal to request checks to be setup
|
|
||||||
by passing a monitors yaml and listing them in the 'local' section. It can
|
|
||||||
also list checks that is has configured by listing them in the remote nrpe
|
|
||||||
section and finally it can request external monitors are setup by using one of
|
|
||||||
the other remote types. See "Monitors yaml" below.
|
|
||||||
|
|
||||||
Other Subordinate Charms
|
|
||||||
========================
|
|
||||||
|
|
||||||
If another subordinate charm deployed to the same principal has a
|
|
||||||
local-monitors or nrpe-external-master relation then it can also be related to
|
|
||||||
the local nrpe charm. For example:
|
|
||||||
|
|
||||||
echo -e "glance:\n vip: 10.5.106.1" > glance.yaml
|
|
||||||
juju deploy -n3 --config glance.yaml glance
|
|
||||||
juju deploy hacluster glance-hacluster
|
|
||||||
juju deploy nrpe glance-nrpe
|
|
||||||
juju deploy nagios
|
|
||||||
juju add-relation glance glance-hacluster
|
|
||||||
juju add-relation glance-nrpe:monitors nagios:monitors
|
|
||||||
juju add-relation glance glance-nrpe
|
|
||||||
juju add-relation glance-hacluster glance-nrpe
|
|
||||||
|
|
||||||
The glance-hacluster charm will pass monitoring information to glance-nrpe
|
|
||||||
which will amalgamate all monitor definitions before passing them to nagios.
|
|
||||||
|
|
||||||
Check sources
|
|
||||||
=============
|
|
||||||
|
|
||||||
Check definitions can come from three places:
|
|
||||||
|
|
||||||
Default Checks
|
|
||||||
--------------
|
|
||||||
|
|
||||||
This charm creates a base set of checks in /etc/nagios/nrpe.d, including
|
|
||||||
check\_load, check\_users, check\_disk\_root. All of the options for these are
|
|
||||||
configurable but sensible defaults have been set in config.yaml.
|
|
||||||
For example to increase the alert threshold for number of processes:
|
|
||||||
|
|
||||||
juju config nrpe load="-w 10,10,10 -c 25,25,25"
|
|
||||||
|
|
||||||
Default checks maybe disabled by setting them to the empty string.
|
|
||||||
|
|
||||||
Principal Requested Checks
|
|
||||||
--------------------------
|
|
||||||
|
|
||||||
Monitors passed to this charm by the principal charm via the local-monitors
|
|
||||||
or nrpe-external-master relation. The principal charm can write its own
|
|
||||||
check definition into */etc/nagios/nrpe.d* and then inform this charm via the
|
|
||||||
monitors setting. It can also request a direct external check of a service
|
|
||||||
without using nrpe. See "Monitors yaml" below for examples.
|
|
||||||
|
|
||||||
User Requested Checks
|
|
||||||
---------------------
|
|
||||||
|
|
||||||
This works in the same way as the Principal requested except the monitors yaml
|
|
||||||
is set by the user via the monitors config option. For example to add a monitor
|
|
||||||
for the rsyslog process:
|
|
||||||
|
|
||||||
juju config nrpe monitors="
|
|
||||||
monitors:
|
|
||||||
local:
|
|
||||||
procrunning:
|
|
||||||
rsyslogd:
|
|
||||||
min: 1
|
|
||||||
max: 1
|
|
||||||
executable: rsyslogd
|
|
||||||
"
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
External Nagios
|
|
||||||
===============
|
|
||||||
|
|
||||||
If the nagios server is not deployed in the juju environment then the charm can
|
|
||||||
be configured, via the export\_nagios\_definitions, to write out nagios config
|
|
||||||
fragments to /var/lib/nagios/export. Rsync is then configured to allow a host
|
|
||||||
(specified by nagios\_master) to collect the fragments. An rsync stanza is created
|
|
||||||
allowing the Nagios server to pick up configs from /var/lib/nagios/export (as
|
|
||||||
a target called "external-nagios"), which will also be configured to allow
|
|
||||||
connections from the hostname or IP address as specified for the
|
|
||||||
"nagios\_master" variable.
|
|
||||||
|
|
||||||
It is up to you to configure the Nagios master to pull the configs needed, which
|
|
||||||
will then cause it to connect back to the instances in question to run the nrpe
|
|
||||||
checks you have defined.
|
|
||||||
|
|
||||||
Monitors yaml
|
|
||||||
=============
|
|
||||||
|
|
||||||
The list of monitors past down the monitors relation is an amalgamation of the
|
|
||||||
lists provided via the principal, the user and the default checks.
|
|
||||||
|
|
||||||
The monitors yaml is of the following form:
|
|
||||||
|
|
||||||
|
|
||||||
# Version of the spec, mostly ignored but 0.3 is the current one
|
|
||||||
version: '0.3'
|
|
||||||
# Dict with just 'local' and 'remote' as parts
|
|
||||||
monitors:
|
|
||||||
# local monitors need an agent to be handled. See nrpe charm for
|
|
||||||
# some example implementations
|
|
||||||
local:
|
|
||||||
# procrunning checks for a running process named X (no path)
|
|
||||||
procrunning:
|
|
||||||
# Multiple procrunning can be defined, this is the "name" of it
|
|
||||||
nagios3:
|
|
||||||
min: 1
|
|
||||||
max: 1
|
|
||||||
executable: nagios3
|
|
||||||
# Remote monitors can be polled directly by a remote system
|
|
||||||
remote:
|
|
||||||
# do a request on the HTTP protocol
|
|
||||||
http:
|
|
||||||
nagios:
|
|
||||||
port: 80
|
|
||||||
path: /nagios3/
|
|
||||||
# expected status response (otherwise just look for 200)
|
|
||||||
status: 'HTTP/1.1 401'
|
|
||||||
# Use as the Host: header (the server address will still be used to connect() to)
|
|
||||||
host: www.fewbar.com
|
|
||||||
mysql:
|
|
||||||
# Named basic check
|
|
||||||
basic:
|
|
||||||
username: monitors
|
|
||||||
password: abcdefg123456
|
|
||||||
nrpe:
|
|
||||||
apache2:
|
|
||||||
command: check_apache2
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Before a monitor is added it is checked to see if it is in the 'local' section.
|
|
||||||
If it is this charm needs to convert it into an nrpe checks. Only a small
|
|
||||||
number of check types are currently supported (see below) .These checks can
|
|
||||||
then be called by the nagios charm via the nrpe service. So for each check
|
|
||||||
listed in the local section:
|
|
||||||
|
|
||||||
1. The definition is read and a check definition it written /etc/nagios/nrpe.d
|
|
||||||
2. The check is defined as a remote nrpe check in the yaml passed to nagios
|
|
||||||
|
|
||||||
In the example above a check\_proc\_nagios3\_user.cfg file would be written
|
|
||||||
out which contains:
|
|
||||||
|
|
||||||
# Check process nagios3 is running (user)
|
|
||||||
command[check_proc_nagios3_user]=/usr/lib/nagios/plugins/check_procs -w 1 -c 1 -C nagios3
|
|
||||||
|
|
||||||
And the monitors yaml passed to nagios would include:
|
|
||||||
|
|
||||||
monitors:
|
|
||||||
nrpe:
|
|
||||||
check_proc_nagios3_user:
|
|
||||||
command: check_proc_nagios3_user
|
|
||||||
|
|
||||||
The principal charm, or the user via the monitors config option, can request an
|
|
||||||
external check by adding it to the remote section of the monitors yaml. In the
|
|
||||||
example above direct checks of a webserver and of mysql are being requested.
|
|
||||||
This charm passes those on to nagios unaltered.
|
|
||||||
|
|
||||||
Local check types
|
|
||||||
-----------------
|
|
||||||
|
|
||||||
Supported nrpe checks are:
|
|
||||||
|
|
||||||
procrunning:
|
|
||||||
min: Minimum number of 'executable' processes
|
|
||||||
max: Maximum number of 'executable' processes
|
|
||||||
executable: Name of executable to look for in process list
|
|
||||||
processcount:
|
|
||||||
min: Minimum total number processes
|
|
||||||
max: Maximum total number processes
|
|
||||||
executable: Name of executable to look for in process list
|
|
||||||
disk:
|
|
||||||
path: Directory to monitor space usage of
|
|
||||||
custom:
|
|
||||||
check: the name of the check to execute
|
|
||||||
plugin_path: (optional) Absolute path to the directory containing the
|
|
||||||
custom plugin. Default value is /var/lib/nagios/plugins
|
|
||||||
description: (optional) Description of the check
|
|
||||||
params: (optional) Parameters to pass to the check on invocation
|
|
||||||
|
|
||||||
Remote check types
|
|
||||||
------------------
|
|
||||||
|
|
||||||
Supported remote types:
|
|
||||||
http, mysql, nrpe, tcp, rpc, pgsql
|
|
||||||
(See Nagios charm for up-to-date list and options)
|
|
||||||
|
|
||||||
Spaces
|
|
||||||
======
|
|
||||||
|
|
||||||
By defining 'monitors' binding, you can influence which nrpe's IP will be reported
|
|
||||||
back to Nagios. This can be very handy if nrpe is placed on machines with multiple
|
|
||||||
IPs/networks.
|
|
||||||
|
|
||||||
Actions
|
|
||||||
=======
|
|
||||||
|
|
||||||
The charm defines 2 actions, 'list-nrpe-checks' that gives a list of all the
|
|
||||||
nrpe checks defined for this unit and what commands they use. The other is
|
|
||||||
run-nrpe-check, which allows you to run a specified nrpe check and get the
|
|
||||||
output. This is useful to confirm if an alert is actually resolved.
|
|
||||||
|
|
@ -1,9 +0,0 @@
|
||||||
list-nrpe-checks:
|
|
||||||
description: Lists all NRPE checks defined on this unit
|
|
||||||
run-nrpe-check:
|
|
||||||
description: Run a specific NRPE check defined on this unit
|
|
||||||
params:
|
|
||||||
name:
|
|
||||||
type: string
|
|
||||||
description: Check name to run
|
|
||||||
required: [name]
|
|
||||||
|
|
@ -1,16 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
nrpedir=/etc/nagios/nrpe.d
|
|
||||||
|
|
||||||
if [ ! -d $nrpedir ]; then
|
|
||||||
action-fail "No $nrpedir exists"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
for i in $nrpedir/*.cfg; do
|
|
||||||
check=$(grep command $i | awk -F "=" '{ print $1 }' | sed -e 's/command\[//' | sed -e 's/\]//' | sed -e 's/_/-/g');
|
|
||||||
command=$(grep command $i | awk -F "=" '{ print $2 }');
|
|
||||||
action-set checks.$check="$command";
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
|
|
||||||
action-set timestamp="$(date)"
|
|
||||||
|
|
@ -1,15 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
check=$(action-get name | sed -e 's/-/_/g')
|
|
||||||
|
|
||||||
nrpedir="/etc/nagios/nrpe.d"
|
|
||||||
checkfile="$nrpedir/${check}.cfg"
|
|
||||||
|
|
||||||
if [ -f $checkfile ]; then
|
|
||||||
command=$(awk -F "=" '{ print $2 }' $checkfile)
|
|
||||||
output=$(sudo -u nagios $command)
|
|
||||||
action-set check-output="$output"
|
|
||||||
else
|
|
||||||
action-fail "$checkfile does not exist"
|
|
||||||
fi
|
|
||||||
|
|
||||||
210
nrpe/config.yaml
210
nrpe/config.yaml
|
|
@ -1,210 +0,0 @@
|
||||||
options:
|
|
||||||
nagios_master:
|
|
||||||
default: "None"
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
IP address of the nagios master from which to allow rsync access
|
|
||||||
server_port:
|
|
||||||
default: 5666
|
|
||||||
type: int
|
|
||||||
description: |
|
|
||||||
Port on which nagios-nrpe-server will listen
|
|
||||||
nagios_address_type:
|
|
||||||
default: "private"
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Determines whether the nagios host check should use the private
|
|
||||||
or public IP address of an instance. Can be "private" or "public".
|
|
||||||
nagios_host_context:
|
|
||||||
default: "juju"
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
A string which will be prepended to instance name to set the host name
|
|
||||||
in nagios. So for instance the hostname would be something like:
|
|
||||||
juju-postgresql-0
|
|
||||||
If you're running multiple environments with the same services in them
|
|
||||||
this allows you to differentiate between them.
|
|
||||||
nagios_hostname_type:
|
|
||||||
default: "auto"
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Determines whether a server is identified by its unit name or
|
|
||||||
host name. If you're in a virtual environment, "unit" is
|
|
||||||
probably best. If you're using MaaS, you may prefer "host".
|
|
||||||
Use "auto" to have nrpe automatically distinguish between
|
|
||||||
metal and non-metal hosts.
|
|
||||||
dont_blame_nrpe:
|
|
||||||
default: False
|
|
||||||
type: boolean
|
|
||||||
description: |
|
|
||||||
Setting dont_blame_nrpe to True sets dont_blame_nrpe=1 in nrpe.cfg
|
|
||||||
This config option which allows specifying arguments to nrpe scripts.
|
|
||||||
This can be a security risk so it is disabled by default. Nrpe is
|
|
||||||
compiled with --enable-command-args option by default, which this
|
|
||||||
option enables.
|
|
||||||
debug:
|
|
||||||
default: False
|
|
||||||
type: boolean
|
|
||||||
description: |
|
|
||||||
Setting debug to True enables debug=1 in nrpe.cfg
|
|
||||||
disk_root:
|
|
||||||
default: "-u GB -w 25% -c 20% -K 5%"
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Root disk check. This can be made to also check non-root disk systems
|
|
||||||
as follows:
|
|
||||||
-u GB -w 20% -c 15% -r '/srv/juju/vol-' -C -u GB -w 25% -c 20%
|
|
||||||
The string '-p /' will be appended to this check, so you must finish
|
|
||||||
the string taking that into account. See the nagios check_disk plugin
|
|
||||||
help for further details.
|
|
||||||
.
|
|
||||||
Set to '' in order to disable this check.
|
|
||||||
zombies:
|
|
||||||
default: ""
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Zombie processes check; defaults to disabled. To enable, set the desired
|
|
||||||
check_procs arguments pertaining to zombies, for example: "-w 3 -c 6 -s Z"
|
|
||||||
procs:
|
|
||||||
default: ""
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Set thresholds for number of running processes. Defaults to disabled;
|
|
||||||
to enable, specify 'auto' for the charm to generate thresholds based
|
|
||||||
on processor count, or manually provide arguments for check_procs, for
|
|
||||||
example: "-k -w 250 -c 300" to set warning and critical levels
|
|
||||||
manually and exclude kernel threads.
|
|
||||||
load:
|
|
||||||
default: "auto"
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Load check arguments (e.g. "-w 8,8,8 -c 15,15,15"); if 'auto' is set,
|
|
||||||
thresholds will be set to multipliers of processor count for 1m, 5m
|
|
||||||
and 15m thresholds, with warning as "(4, 2, 1)", and critical set to
|
|
||||||
"(8, 4, 2)". So if you have two processors, you'd get thresholds of
|
|
||||||
"-w 8,4,2 -c 16,8,4".
|
|
||||||
.
|
|
||||||
Set to '' in order to disable this check.
|
|
||||||
conntrack:
|
|
||||||
default: "-w 80 -c 90"
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Check conntrack (net.netfilter.nf_conntrack_count) against thresholds.
|
|
||||||
.
|
|
||||||
Set to '' in order to disable this check.
|
|
||||||
users:
|
|
||||||
default: ""
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Set thresholds for number of logged-in users. Defaults to disabled;
|
|
||||||
to enable, manually provide arguments for check_user, for example:
|
|
||||||
"-w 20 -c 25"
|
|
||||||
swap:
|
|
||||||
default: ''
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Check swap utilisation. See the nagios check_swap plugin help for
|
|
||||||
further details. The format looks like "-w 40% -c 25%"
|
|
||||||
.
|
|
||||||
Set to '' in order to disable this check.
|
|
||||||
swap_activity:
|
|
||||||
default: "-i 5 -w 10240 -c 40960"
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Swapout activity check. Thresholds are expressed in kB, interval in
|
|
||||||
seconds.
|
|
||||||
.
|
|
||||||
Set to '' in order to disable this check.
|
|
||||||
mem:
|
|
||||||
default: "-C -h -u -w 85 -c 90"
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Check memory % used.
|
|
||||||
By default, thresholds are applied to the non-hugepages portion of the
|
|
||||||
memory.
|
|
||||||
.
|
|
||||||
Set to '' in order to disable this check.
|
|
||||||
lacp_bonds:
|
|
||||||
default: ''
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
LACP bond interfaces, space-delimited (ie. 'bond0 bond1')
|
|
||||||
netlinks:
|
|
||||||
default: ''
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Network interfaces to monitor for correct link state, MTU size
|
|
||||||
and speed negotiated. The first argument is either an interface name or
|
|
||||||
a CIDR expression. Parsed keywords are "mtu", "speed", and "op". Other
|
|
||||||
keywords are ignored.
|
|
||||||
.
|
|
||||||
Note that CIDR expressions can match multiple devices.
|
|
||||||
.
|
|
||||||
For example (multi-line starts with pipe):
|
|
||||||
- 10.1.2.0/24 mtu:9000 speed:25000
|
|
||||||
- eth0 mtu:9000 speed:25000
|
|
||||||
- lo mtu:65536 op:unknown
|
|
||||||
- br0-mgmt mtu:9000
|
|
||||||
- br0-sta mtu:9000
|
|
||||||
- br0-stc mtu:9000
|
|
||||||
- br0-api mtu:1500
|
|
||||||
- bond0 mtu:9000 speed:50000
|
|
||||||
- bond0.25 mtu:1500 speed:50000
|
|
||||||
- ens3 mtu:1500 speed:-1 desc:openstack_iface
|
|
||||||
- ...
|
|
||||||
netlinks_skip_unfound_ifaces:
|
|
||||||
default: False
|
|
||||||
type: boolean
|
|
||||||
description: |
|
|
||||||
add --skip-unfound-ifaces to check_netlinks.py.
|
|
||||||
monitors:
|
|
||||||
default: ''
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Additional monitors defined in the monitors yaml format (see README)
|
|
||||||
hostgroups:
|
|
||||||
default: ""
|
|
||||||
type: string
|
|
||||||
description: Comma separated list of hostgroups to add for these hosts
|
|
||||||
hostcheck_inherit:
|
|
||||||
default: "server"
|
|
||||||
type: string
|
|
||||||
description: Hostcheck to inherit
|
|
||||||
export_nagios_definitions:
|
|
||||||
default: False
|
|
||||||
type: boolean
|
|
||||||
description: |
|
|
||||||
If True nagios check definitions are written to
|
|
||||||
'/var/lib/nagios/export' and rync is configured to allow nagios_master
|
|
||||||
to collect them. Useful when Nagios is outside of the juju environment
|
|
||||||
sub_postfix:
|
|
||||||
default: ""
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
A string to be appended onto all the nrpe checks created by this charm
|
|
||||||
to avoid potential clashes with existing checks
|
|
||||||
xfs_errors:
|
|
||||||
default: ""
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
dmesg history length to check for xfs errors, in minutes
|
|
||||||
.
|
|
||||||
Defaults to disabled, set the time to enable.
|
|
||||||
ro_filesystem_excludes:
|
|
||||||
default: "/snap/,/sys/fs/cgroup,/run/containerd,/var/lib/docker"
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Comma separated list of mount points to exclude from checks for readonly filesystem.
|
|
||||||
Can be a substring rather than the entire mount point, e.g. /sys will match all filesystems
|
|
||||||
beginning with the string /sys.
|
|
||||||
The check is disabled on all LXD units, and also for non-container units if this parameter is
|
|
||||||
set to ''.
|
|
||||||
cpu_governor:
|
|
||||||
default: ""
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
CPU governor check. The string value here will be checked against all CPUs in
|
|
||||||
/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor. The supported values are
|
|
||||||
'ondemand', 'performance', 'powersave'. Unset value means the check will be disabled.
|
|
||||||
There is a relation key called requested_cpu_governor='string', but the charm config value
|
|
||||||
will take precedence over the relation data.
|
|
||||||
|
|
@ -1,53 +0,0 @@
|
||||||
Format: http://dep.debian.net/deps/dep5/
|
|
||||||
|
|
||||||
Files: *
|
|
||||||
Copyright: Copyright 2012, Canonical Ltd., All Rights Reserved.
|
|
||||||
License: GPL-3
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
|
||||||
it under the terms of the GNU General Public License as published by
|
|
||||||
the Free Software Foundation, either version 3 of the License, or
|
|
||||||
(at your option) any later version.
|
|
||||||
.
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
.
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
Files: files/plugins/check_exit_status.pl
|
|
||||||
Copyright: Copyright (C) 2011 Chad Columbus <ccolumbu@hotmail.com>
|
|
||||||
License: GPL-2
|
|
||||||
This program is free software; you can redistribute it and/or modify
|
|
||||||
it under the terms of the GNU General Public License as published by
|
|
||||||
the Free Software Foundation; either version 2 of the License, or
|
|
||||||
(at your option) any later version.
|
|
||||||
.
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
.
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program; if not, write to the Free Software
|
|
||||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
||||||
|
|
||||||
Files: files/plugins/check_mem.pl
|
|
||||||
Copyright: Copyright (c) 2011 justin@techadvise.com
|
|
||||||
License: MIT/X11
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this
|
|
||||||
software and associated documentation files (the "Software"), to deal in the Software
|
|
||||||
without restriction, including without limitation the rights to use, copy, modify,
|
|
||||||
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
|
||||||
.
|
|
||||||
The above copyright notice and this permission notice shall be included in all copies
|
|
||||||
or substantial portions of the Software.
|
|
||||||
.
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
|
|
||||||
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
|
|
||||||
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
|
|
||||||
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
|
|
||||||
OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
||||||
OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
|
|
@ -1,7 +0,0 @@
|
||||||
#------------------------------------------------
|
|
||||||
# This file is juju managed
|
|
||||||
#------------------------------------------------
|
|
||||||
|
|
||||||
RSYNC_ENABLE=true
|
|
||||||
RSYNC_NICE=''
|
|
||||||
RSYNC_OPTS=''
|
|
||||||
|
|
@ -1,84 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
"""Nagios plugin for python2.7."""
|
|
||||||
# Copyright (C) 2005, 2006, 2007, 2012 James Troup <james.troup@canonical.com>
|
|
||||||
|
|
||||||
import os
|
|
||||||
import stat
|
|
||||||
import time
|
|
||||||
import traceback
|
|
||||||
import sys
|
|
||||||
|
|
||||||
|
|
||||||
################################################################################
|
|
||||||
|
|
||||||
|
|
||||||
class CriticalError(Exception):
|
|
||||||
"""This indicates a critical error."""
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class WarnError(Exception):
|
|
||||||
"""This indicates a warning condition."""
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class UnknownError(Exception):
|
|
||||||
"""This indicates a unknown error was encountered."""
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def try_check(function, *args, **kwargs):
|
|
||||||
"""Perform a check with error/warn/unknown handling."""
|
|
||||||
try:
|
|
||||||
function(*args, **kwargs)
|
|
||||||
except UnknownError, msg: # noqa: E999
|
|
||||||
print msg
|
|
||||||
sys.exit(3)
|
|
||||||
except CriticalError, msg: # noqa: E999
|
|
||||||
print msg
|
|
||||||
sys.exit(2)
|
|
||||||
except WarnError, msg: # noqa: E999
|
|
||||||
print msg
|
|
||||||
sys.exit(1)
|
|
||||||
except: # noqa: E722
|
|
||||||
print "%s raised unknown exception '%s'" % (function, sys.exc_info()[0])
|
|
||||||
print "=" * 60
|
|
||||||
traceback.print_exc(file=sys.stdout)
|
|
||||||
print "=" * 60
|
|
||||||
sys.exit(3)
|
|
||||||
|
|
||||||
|
|
||||||
################################################################################
|
|
||||||
|
|
||||||
|
|
||||||
def check_file_freshness(filename, newer_than=600):
|
|
||||||
"""Check a file.
|
|
||||||
|
|
||||||
It check that file exists, is readable and is newer than <n> seconds (where
|
|
||||||
<n> defaults to 600).
|
|
||||||
"""
|
|
||||||
# First check the file exists and is readable
|
|
||||||
if not os.path.exists(filename):
|
|
||||||
raise CriticalError("%s: does not exist." % (filename))
|
|
||||||
if os.access(filename, os.R_OK) == 0:
|
|
||||||
raise CriticalError("%s: is not readable." % (filename))
|
|
||||||
|
|
||||||
# Then ensure the file is up-to-date enough
|
|
||||||
mtime = os.stat(filename)[stat.ST_MTIME]
|
|
||||||
last_modified = time.time() - mtime
|
|
||||||
if last_modified > newer_than:
|
|
||||||
raise CriticalError(
|
|
||||||
"%s: was last modified on %s and is too old (> %s seconds)."
|
|
||||||
% (filename, time.ctime(mtime), newer_than)
|
|
||||||
)
|
|
||||||
if last_modified < 0:
|
|
||||||
raise CriticalError(
|
|
||||||
"%s: was last modified on %s which is in the future."
|
|
||||||
% (filename, time.ctime(mtime))
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
################################################################################
|
|
||||||
|
|
@ -1,85 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
"""Nagios plugin for python3."""
|
|
||||||
|
|
||||||
# Copyright (C) 2005, 2006, 2007, 2012, 2017 James Troup <james.troup@canonical.com>
|
|
||||||
|
|
||||||
import os
|
|
||||||
import stat
|
|
||||||
import sys
|
|
||||||
import time
|
|
||||||
import traceback
|
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
|
||||||
|
|
||||||
|
|
||||||
class CriticalError(Exception):
|
|
||||||
"""This indicates a critical error."""
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class WarnError(Exception):
|
|
||||||
"""This indicates a warning condition."""
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class UnknownError(Exception):
|
|
||||||
"""This indicates a unknown error was encountered."""
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def try_check(function, *args, **kwargs):
|
|
||||||
"""Perform a check with error/warn/unknown handling."""
|
|
||||||
try:
|
|
||||||
function(*args, **kwargs)
|
|
||||||
except UnknownError as msg:
|
|
||||||
print(msg)
|
|
||||||
sys.exit(3)
|
|
||||||
except CriticalError as msg:
|
|
||||||
print(msg)
|
|
||||||
sys.exit(2)
|
|
||||||
except WarnError as msg:
|
|
||||||
print(msg)
|
|
||||||
sys.exit(1)
|
|
||||||
except: # noqa: E722
|
|
||||||
print("{} raised unknown exception '{}'".format(function, sys.exc_info()[0]))
|
|
||||||
print("=" * 60)
|
|
||||||
traceback.print_exc(file=sys.stdout)
|
|
||||||
print("=" * 60)
|
|
||||||
sys.exit(3)
|
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
|
||||||
|
|
||||||
|
|
||||||
def check_file_freshness(filename, newer_than=600):
    """Check that a file exists, is readable, and is fresh.

    Fresh means the file's modification time is within the last
    *newer_than* seconds and not in the future.

    :param filename: path of the file to check
    :param newer_than: maximum allowed age in seconds (default 600)
    :raises CriticalError: if the file is missing, unreadable, older than
        *newer_than* seconds, or modified in the future
    """
    # First check the file exists and is readable
    if not os.path.exists(filename):
        raise CriticalError("%s: does not exist." % (filename))
    # Idiomatic truthiness test instead of comparing os.access() to 0.
    if not os.access(filename, os.R_OK):
        raise CriticalError("%s: is not readable." % (filename))

    # Then ensure the file is up-to-date enough
    mtime = os.stat(filename)[stat.ST_MTIME]
    last_modified = time.time() - mtime
    if last_modified > newer_than:
        raise CriticalError(
            "%s: was last modified on %s and is too old (> %s "
            "seconds)." % (filename, time.ctime(mtime), newer_than)
        )
    # A negative age means the mtime is ahead of the current clock.
    if last_modified < 0:
        raise CriticalError(
            "%s: was last modified on %s which is in the "
            "future." % (filename, time.ctime(mtime))
        )
|
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
|
||||||
|
|
@ -1,89 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
"""Check arp cache usage and alert."""
|
|
||||||
# -*- coding: us-ascii -*-
|
|
||||||
|
|
||||||
# Copyright (C) 2019 Canonical
|
|
||||||
# All rights reserved
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import os
|
|
||||||
|
|
||||||
from nagios_plugin3 import (
|
|
||||||
CriticalError,
|
|
||||||
UnknownError,
|
|
||||||
WarnError,
|
|
||||||
try_check,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def check_arp_cache(warn, crit):
    """Alert when arp cache usage crosses a percentage of gc_thresh3.

    gc_thresh3 is the hard limit on arp entries; see
    https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
    for full details.

    :param warn: integer, % of the hard limit that raises WarnError
    :param crit: integer, % of the hard limit that raises CriticalError
    """
    arp_table = "/proc/net/arp"
    gc_thresh3_path = "/proc/sys/net/ipv4/neigh/default/gc_thresh3"

    if not os.path.exists(arp_table):
        raise UnknownError("No arp table found!")
    if not os.path.exists(gc_thresh3_path):
        raise UnknownError("sysctl entry net.ipv4.neigh.default.gc_thresh3 not found!")

    with open(gc_thresh3_path) as fh:
        gc_thresh3 = int(fh.read())

    with open(arp_table) as fh:
        arp_cache = fh.read().count("\n") - 1  # remove header
    extra_info = "arp cache entries: {}".format(arp_cache)

    if arp_cache >= gc_thresh3 * crit / 100:
        raise CriticalError(
            "CRITICAL: arp cache is more than {} of limit, {}".format(crit, extra_info)
        )
    if arp_cache >= gc_thresh3 * warn / 100:
        raise WarnError(
            "WARNING: arp cache is more than {} of limit, {}".format(warn, extra_info)
        )

    print("OK: arp cache is healthy: {}".format(extra_info))
|
|
||||||
|
|
||||||
|
|
||||||
def parse_args():
    """Parse command-line options.

    :returns: argparse.Namespace with integer ``warn`` and ``crit``
        percentages (defaults 60 and 80)
    """
    # Description fixed: it previously said "Check bond status", a
    # copy-paste from another plugin.
    parser = argparse.ArgumentParser(description="Check arp cache usage")
    parser.add_argument(
        "--warn",
        "-w",
        type=int,
        # '%%' because argparse %-formats help text; a bare '%' makes
        # '--help' raise "unsupported format character".
        help="%% of gc_thresh3 to exceed for warning",
        default=60,
    )
    parser.add_argument(
        "--crit",
        "-c",
        type=int,
        help="%% of gc_thresh3 to exceed for critical",
        default=80,
    )
    args = parser.parse_args()
    return args
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Entry point: run the arp cache check with parsed options."""
    options = parse_args()
    try_check(check_arp_cache, options.warn, options.crit)


if __name__ == "__main__":
    main()
|
|
||||||
|
|
@ -1,79 +0,0 @@
|
||||||
#!/bin/sh
# This file is managed by juju. Do not make local changes.

# Copyright (C) 2013, 2016 Canonical Ltd.
# Author: Haw Loeung <haw.loeung@canonical.com>
#         Paul Gear <paul.gear@canonical.com>

# Alert when current conntrack entries exceeds certain percentage of max. to
# detect when we're about to fill it up and start dropping packets.
#
# Usage: check_conntrack [-w warningpercent] [-c criticalpercent]

set -eu

# Standard Nagios exit codes.
STATE_OK=0
STATE_WARNING=1
STATE_CRITICAL=2
STATE_UNKNOWN=3

# Nothing to check when no conntrack module is loaded at all.
if ! lsmod | grep -q conntrack; then
    echo "OK: no conntrack modules present"
    exit $STATE_OK
fi

# Module loaded but sysctl interface absent: still nothing to measure.
if ! [ -e /proc/sys/net/netfilter/nf_conntrack_max ]; then
    echo "OK: conntrack not available"
    exit $STATE_OK
fi

# Table limit and current usage, third field of "sysctl key = value".
max=$(sysctl net.netfilter.nf_conntrack_max 2>/dev/null | awk '{ print $3 }')
if [ -z "$max" ]; then
    echo "UNKNOWN: unable to retrieve value of net.netfilter.nf_conntrack_max"
    exit $STATE_UNKNOWN
fi
current=$(sysctl net.netfilter.nf_conntrack_count 2>/dev/null | awk '{ print $3 }')
if [ -z "$current" ]; then
    echo "UNKNOWN: unable to retrieve value of net.netfilter.nf_conntrack_count"
    exit $STATE_UNKNOWN
fi

# default thresholds
crit=90
warn=80

# parse command line
# (set +e so a getopt failure prints usage instead of aborting under -e)
set +e
OPTIONS=$(getopt w:c: "$@")
if [ $? -ne 0 ]; then
    echo "Usage: $0 [-w warningpercent] [-c criticalpercent]" >&2
    echo " Check nf_conntrack_count against nf_conntrack_max" >&2
    exit $STATE_UNKNOWN
fi
set -e

# Re-split the normalised option string; intentionally unquoted.
set -- $OPTIONS
while true; do
    case "$1" in
        -w) warn=$2; shift 2 ;;
        -c) crit=$2; shift 2 ;;
        --) shift; break ;;
        *) break ;;
    esac
done

# Nagios perfdata appendix reported with every status line.
percent=$((current * 100 / max))
stats="| current=$current max=$max percent=$percent;$warn;$crit"

threshold=$((max * crit / 100))
if [ $current -gt $threshold ]; then
    echo "CRITICAL: conntrack table nearly full. $stats"
    exit $STATE_CRITICAL
fi

threshold=$((max * warn / 100))
if [ $current -gt $threshold ]; then
    echo "WARNING: conntrack table filling. $stats"
    exit $STATE_WARNING
fi

echo "OK: conntrack table normal $stats"
exit $STATE_OK
|
|
||||||
|
|
@ -1,58 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
"""Check CPU governor scaling and alert."""
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
|
|
||||||
from nagios_plugin3 import (
|
|
||||||
CriticalError,
|
|
||||||
try_check,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def wanted_governor(governor):
    """Check every CPU's cpufreq scaling_governor matches *governor*.

    Reads /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor and
    raises if any CPU's governor does not contain the requested value
    (substring match, as before).

    :param governor: governor name expected on every CPU
    :raises CriticalError: listing the CPUs not set to *governor*
    """
    cpu_entries = os.listdir("/sys/devices/system/cpu")
    # Strict cpu<N> match; the old prefix-match regex could in principle
    # count unrelated entries.
    numcpus = sum(1 for entry in cpu_entries if re.fullmatch(r"cpu\d+", entry))

    # A list (in CPU order) instead of a set: deterministic alert text,
    # and avoids rebinding the variable from set to str as before.
    error_cpus = []
    for cpu in range(numcpus):
        path = f"/sys/devices/system/cpu/cpu{cpu}/cpufreq/scaling_governor"
        with open(path) as f:
            current = f.readline().strip()
        if governor not in current:
            error_cpus.append(f"CPU{cpu}")

    if error_cpus:
        joined = ",".join(error_cpus)
        raise CriticalError(f"CRITICAL: {joined} not set to {governor}")

    print(f"OK: All CPUs set to {governor}.")
|
|
||||||
|
|
||||||
|
|
||||||
def parse_args():
    """Parse command-line options for the governor check.

    :returns: argparse.Namespace with a ``governor`` string
        (default "performance")
    """
    parser = argparse.ArgumentParser(description="Check CPU governor")
    parser.add_argument(
        "--governor",
        "-g",
        type=str,
        default="performance",
        help="The requested governor to check for each CPU",
    )
    return parser.parse_args()
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Entry point: check every CPU against the requested governor."""
    try_check(wanted_governor, parse_args().governor)


if __name__ == "__main__":
    main()
|
|
||||||
|
|
@ -1,189 +0,0 @@
|
||||||
#!/usr/bin/perl
# Nagios plugin: run "<script> status" and decide OK/CRITICAL either from
# the exit code (-e) or from a pattern match on the output (-p, default
# 'is running'); optionally via sudo (-r). See print_help() below.
################################################################################
#
# Copyright (C) 2011 Chad Columbus <ccolumbu@hotmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################

use strict;
use Getopt::Std;
$| = 1;    # autoflush so Nagios always receives the status line

my %opts;
getopts('heronp:s:', \%opts);

my $VERSION = "Version 1.0";
my $AUTHOR = '(c) 2011 Chad Columbus <ccolumbu@hotmail.com>';

# Default values:
my $script_to_check;
my $pattern = 'is running';    # default pattern searched in the output
my $cmd;
my $message;
my $error;

# Exit codes
my $STATE_OK = 0;
my $STATE_WARNING = 1;
my $STATE_CRITICAL = 2;
my $STATE_UNKNOWN = 3;

# Parse command line options
if ($opts{'h'} || scalar(%opts) == 0) {
    &print_help();
    exit($STATE_OK);
}

# Make sure scipt is provided:
if ($opts{'s'} eq '') {
    # Script to run not provided
    print "\nYou must provide a script to run. Example: -s /etc/init.d/httpd\n";
    exit($STATE_UNKNOWN);
} else {
    $script_to_check = $opts{'s'};
}

# Make sure only a-z, 0-9, /, _, and - are used in the script.
# (Guards against shell metacharacters before the backtick call below.)
if ($script_to_check =~ /[^a-z0-9\_\-\/\.]/) {
    # Script contains illegal characters exit.
    print "\nScript to check can only contain Letters, Numbers, Periods, Underscores, Hyphens, and/or Slashes\n";
    exit($STATE_UNKNOWN);
}

# See if script is executable
if (! -x "$script_to_check") {
    print "\nIt appears you can't execute $script_to_check, $!\n";
    exit($STATE_UNKNOWN);
}

# If a pattern is provided use it:
if ($opts{'p'} ne '') {
    $pattern = $opts{'p'};
}

# If -r run command via sudo as root:
if ($opts{'r'}) {
    $cmd = "sudo -n $script_to_check status" . ' 2>&1';
} else {
    $cmd = "$script_to_check status" . ' 2>&1';
}

my $cmd_result = `$cmd`;
chomp($cmd_result);
# NOTE(review): any "sudo" in the output is treated as a sudo failure —
# a service whose status text mentions sudo would be misreported.
if ($cmd_result =~ /sudo/i) {
    # This means it could not run the sudo command
    $message = "$script_to_check CRITICAL - Could not run: 'sudo -n $script_to_check status'. Result is $cmd_result";
    $error = $STATE_UNKNOWN;
} else {
    # Check exitstatus instead of output:
    if ($opts{'e'} == 1) {
        if ($? != 0) {
            # error
            $message = "$script_to_check CRITICAL - Exit code: $?\.";
            if ($opts{'o'} == 0) {
                $message .= " $cmd_result";
            }
            $error = $STATE_CRITICAL;
        } else {
            # success
            $message = "$script_to_check OK - Exit code: $?\.";
            if ($opts{'o'} == 0) {
                $message .= " $cmd_result";
            }
            $error = $STATE_OK;
        }
    } else {
        # Pattern mode; -n inverts the match.
        my $not_check = 1;
        if ($opts{'n'} == 1) {
            $not_check = 0;
        }
        if (($cmd_result =~ /$pattern/i) == $not_check) {
            $message = "$script_to_check OK";
            if ($opts{'o'} == 0) {
                $message .= " - $cmd_result";
            }
            $error = $STATE_OK;
        } else {
            $message = "$script_to_check CRITICAL";
            if ($opts{'o'} == 0) {
                $message .= " - $cmd_result";
            }
            $error = $STATE_CRITICAL;
        }
    }
}

if ($message eq '') {
    print "Error: program failed in an unknown way\n";
    exit($STATE_UNKNOWN);
}

if ($error) {
    print "$message\n";
    exit($error);
} else {
    # If we get here we are OK
    print "$message\n";
    exit($STATE_OK);
}

####################################
# Start Subs:
####################################
sub print_help() {
    print << "EOF";
Check the output or exit status of a script.
$VERSION
$AUTHOR

Options:
-h
   Print detailed help screen

-s
    'FULL PATH TO SCRIPT' (required)
    This is the script to run, the script is designed to run scripts in the
    /etc/init.d dir (but can run any script) and will call the script with
    a 'status' argument. So if you use another script make sure it will
    work with /path/script status, example: /etc/init.d/httpd status

-e
    This is the "exitstaus" flag, it means check the exit status
    code instead of looking for a pattern in the output of the script.

-p 'REGEX'
    This is a pattern to look for in the output of the script to confirm it
    is running, default is 'is running', but not all init.d scripts output
    (iptables), so you can specify an arbitrary pattern.
    All patterns are case insensitive.

-n
    This is the "NOT" flag, it means not the -p pattern, so if you want to
    make sure the output of the script does NOT contain -p 'REGEX'

-r
    This is the "ROOT" flag, it means run as root via sudo. You will need a
    line in your /etc/sudoers file like:
    nagios ALL=(root) NOPASSWD: /etc/init.d/* status

-o
    This is the "SUPPRESS OUTPUT" flag. Some programs have a long output
    (like iptables), this flag suppresses that output so it is not printed
    as a part of the nagios message.
EOF
}
|
|
||||||
|
|
||||||
|
|
@ -1,133 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
"""Check lacp bonds and alert."""
|
|
||||||
# -*- coding: us-ascii -*-
|
|
||||||
|
|
||||||
# Copyright (C) 2017 Canonical
|
|
||||||
# All rights reserved
|
|
||||||
# Author: Alvaro Uria <alvaro.uria@canonical.com>
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import glob
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
|
|
||||||
from nagios_plugin3 import CriticalError, WarnError, try_check
|
|
||||||
|
|
||||||
# LACPDU port states in binary
|
|
||||||
LACPDU_ACTIVE = 0b1  # 1 = Active, 0 = Passive
LACPDU_RATE = 0b10  # 1 = Short Timeout, 0 = Long Timeout
LACPDU_AGGREGATED = 0b100  # 1 = Yes, 0 = No (individual link)
LACPDU_SYNC = 0b1000  # 1 = In sync, 0 = Not in sync
LACPDU_COLLECT = 0b10000  # Mux is accepting traffic received on this port
LACPDU_DIST = 0b100000  # Mux is sending traffic using this port
LACPDU_DEFAULT = 0b1000000  # 1 = default settings, 0 = via LACP PDU
LACPDU_EXPIRED = 0b10000000  # In an expired state


def check_lacpdu_port(actor_port, partner_port):
    """Summarise which LACPDU state flags differ between the two ends.

    Only the flags that matter for link health are reported: rate,
    aggregation, sync and collecting.

    :param actor_port: local port state bitmask (int or numeric string)
    :param partner_port: remote port state bitmask (int or numeric string)
    :returns: comma-separated mismatch descriptions, "" if none apply
    """
    mismatch = int(actor_port) ^ int(partner_port)
    flag_messages = (
        (LACPDU_RATE, "lacp rate mismatch"),
        (LACPDU_AGGREGATED, "not aggregated"),
        (LACPDU_SYNC, "not in sync"),
        (LACPDU_COLLECT, "not collecting"),
    )
    return ", ".join(text for flag, text in flag_messages if mismatch & flag)
|
|
||||||
|
|
||||||
|
|
||||||
def check_lacp_bond(iface):
    """Check LACP bonds are correctly configured (AD Aggregator IDs match).

    Reads the bond's sysfs attributes, verifies the bond is in 802.3ad
    mode, that every slave shares the bond's aggregator id, and that each
    slave's actor/partner LACPDU port states agree; prints an OK summary
    otherwise.

    :param iface: bond interface name, e.g. "bond0"
    :raises WarnError: wrong mode, aggregator mismatch, or port-state
        mismatch
    :raises CriticalError: if *iface* is not a bonding interface
    """
    # sysfs locations for bond-level and per-slave LACP attributes.
    bond_aggr_template = "/sys/class/net/{0}/bonding/ad_aggregator"
    bond_slaves_template = "/sys/class/net/{0}/bonding/slaves"
    bond_mode_template = "/sys/class/net/{0}/bonding/mode"
    slave_template = "/sys/class/net/{0}/bonding_slave/ad_aggregator_id"
    actor_port_state = "/sys/class/net/{0}/bonding_slave/ad_actor_oper_port_state"
    # NOTE(review): "partnet" looks like a typo for "partner" (local name
    # only; the sysfs path itself is correct).
    partnet_port_state = "/sys/class/net/{0}/bonding_slave/ad_partner_oper_port_state"

    bond_aggr = bond_aggr_template.format(iface)
    bond_slaves = bond_slaves_template.format(iface)

    # Presence of the ad_aggregator attribute is what marks a bond iface.
    if os.path.exists(bond_aggr):
        with open(bond_mode_template.format(iface)) as fd:
            bond_mode = fd.readline()

        if "802.3ad" not in bond_mode:
            msg = "WARNING: {} is not in lacp mode".format(iface)
            raise WarnError(msg)

        with open(bond_aggr) as fd:
            bond_aggr_value = fd.readline().strip()

        # iface/slave name -> aggregator id, reused for the OK summary.
        d_bond = {iface: bond_aggr_value}

        with open(bond_slaves) as fd:
            slaves = fd.readline().strip().split(" ")
        for slave in slaves:
            # Check aggregator ID
            with open(slave_template.format(slave)) as fd:
                slave_aggr_value = fd.readline().strip()

            d_bond[slave] = slave_aggr_value

            if slave_aggr_value != bond_aggr_value:
                # If we can report then only 1/2 the bond is down
                msg = "WARNING: aggregator_id mismatch "
                msg += "({0}:{1} - {2}:{3})"
                msg = msg.format(iface, bond_aggr_value, slave, slave_aggr_value)
                raise WarnError(msg)
            # Check LACPDU port state
            with open(actor_port_state.format(slave)) as fd:
                actor_port_value = fd.readline().strip()
            with open(partnet_port_state.format(slave)) as fd:
                partner_port_value = fd.readline().strip()
            if actor_port_value != partner_port_value:
                res = check_lacpdu_port(actor_port_value, partner_port_value)
                # NOTE(review): 'res' (the mismatch description) fills the
                # slot where the iface name would be expected, and 'slave'
                # the second — confirm the intended message order.
                msg = (
                    "WARNING: LACPDU port state mismatch "
                    "({0}: {1} - actor_port_state={2}, "
                    "partner_port_state={3})".format(
                        res, slave, actor_port_value, partner_port_value
                    )
                )
                raise WarnError(msg)

    else:
        msg = "CRITICAL: {} is not a bonding interface".format(iface)
        raise CriticalError(msg)

    # Reachable only on the healthy path: summarise all aggregator ids.
    extra_info = "{0}:{1}".format(iface, d_bond[iface])
    for k_iface, v_aggrid in d_bond.items():
        if k_iface == iface:
            continue
        extra_info += ", {0}:{1}".format(k_iface, v_aggrid)
    print("OK: bond config is healthy: {}".format(extra_info))
|
|
||||||
|
|
||||||
|
|
||||||
def parse_args():
    """Parse command-line options for the bond check.

    Exits with status 1 (listing candidate bond interfaces) when no
    ``--iface`` is given.

    :returns: argparse.Namespace with a non-empty ``iface``
    """
    parser = argparse.ArgumentParser(description="Check bond status")
    parser.add_argument("--iface", "-i", help="bond iface name")
    args = parser.parse_args()

    if args.iface:
        return args

    bond_names = map(os.path.basename, glob.glob("/sys/class/net/bond?"))
    print(
        "UNKNOWN: Please specify one of these bond "
        "ifaces: {}".format(",".join(bond_names))
    )
    sys.exit(1)
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Entry point: check the LACP bond named on the command line."""
    try_check(check_lacp_bond, parse_args().iface)


if __name__ == "__main__":
    main()
|
|
||||||
|
|
@ -1,412 +0,0 @@
|
||||||
#!/usr/bin/perl -w
# Nagios plugin reporting free (-f) or used (-u) memory percentage,
# optionally counting OS caches as free (-C) and excluding hugepages (-h).

# Heavily based on the script from:
# check_mem.pl Copyright (C) 2000 Dan Larsson <dl@tyfon.net>
# heavily modified by
# Justin Ellison <justin@techadvise.com>
#
# The MIT License (MIT)
# Copyright (c) 2011 justin@techadvise.com

# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
# OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.

# Tell Perl what we need to use
use strict;
use Getopt::Std;

#TODO - Convert to Nagios::Plugin
#TODO - Use an alarm

# Predefined exit codes for Nagios
use vars qw($opt_c $opt_f $opt_u $opt_w $opt_C $opt_v $opt_h %exit_codes);
%exit_codes = ('UNKNOWN' , 3,
               'OK' , 0,
               'WARNING' , 1,
               'CRITICAL', 2,
               );

# Get our variables, do our checking:
init();

# Get the numbers:
my ($free_memory_kb,$used_memory_kb,$caches_kb,$hugepages_kb) = get_memory_info();
print "$free_memory_kb Free\n$used_memory_kb Used\n$caches_kb Cache\n" if ($opt_v);
print "$hugepages_kb Hugepages\n" if ($opt_v and $opt_h);

if ($opt_C) { #Do we count caches as free?
    $used_memory_kb -= $caches_kb;
    $free_memory_kb += $caches_kb;
}

# -h: hugepages are reserved, not really "used" by processes.
if ($opt_h) {
    $used_memory_kb -= $hugepages_kb;
}

print "$used_memory_kb Used (after Hugepages)\n" if ($opt_v);

# Round to the nearest KB
$free_memory_kb = sprintf('%d',$free_memory_kb);
$used_memory_kb = sprintf('%d',$used_memory_kb);
$caches_kb = sprintf('%d',$caches_kb);

# Tell Nagios what we came up with
tell_nagios($used_memory_kb,$free_memory_kb,$caches_kb,$hugepages_kb);
|
|
||||||
|
|
||||||
|
|
||||||
# Compare used/free percentages against -w/-c thresholds and finish()
# with the matching Nagios status line plus perfdata.
sub tell_nagios {
    my ($used,$free,$caches,$hugepages) = @_;

    # Calculate Total Memory
    my $total = $free + $used;
    print "$total Total\n" if ($opt_v);

    # Perfdata warn/crit are absolute KB derived from the percentage
    # thresholds; direction depends on whether we watch used or free.
    my $perf_warn;
    my $perf_crit;
    if ( $opt_u ) {
        $perf_warn = int(${total} * $opt_w / 100);
        $perf_crit = int(${total} * $opt_c / 100);
    } else {
        $perf_warn = int(${total} * ( 100 - $opt_w ) / 100);
        $perf_crit = int(${total} * ( 100 - $opt_c ) / 100);
    }

    my $perfdata = "|TOTAL=${total}KB;;;; USED=${used}KB;${perf_warn};${perf_crit};; FREE=${free}KB;;;; CACHES=${caches}KB;;;;";
    $perfdata .= " HUGEPAGES=${hugepages}KB;;;;" if ($opt_h);

    if ($opt_f) {
        # Free-memory mode: alert when the free percentage drops too low.
        my $percent = sprintf "%.1f", ($free / $total * 100);
        if ($percent <= $opt_c) {
            finish("CRITICAL - $percent% ($free kB) free!$perfdata",$exit_codes{'CRITICAL'});
        }
        elsif ($percent <= $opt_w) {
            finish("WARNING - $percent% ($free kB) free!$perfdata",$exit_codes{'WARNING'});
        }
        else {
            finish("OK - $percent% ($free kB) free.$perfdata",$exit_codes{'OK'});
        }
    }
    elsif ($opt_u) {
        # Used-memory mode: alert when the used percentage climbs too high.
        my $percent = sprintf "%.1f", ($used / $total * 100);
        if ($percent >= $opt_c) {
            finish("CRITICAL - $percent% ($used kB) used!$perfdata",$exit_codes{'CRITICAL'});
        }
        elsif ($percent >= $opt_w) {
            finish("WARNING - $percent% ($used kB) used!$perfdata",$exit_codes{'WARNING'});
        }
        else {
            finish("OK - $percent% ($used kB) used.$perfdata",$exit_codes{'OK'});
        }
    }
}
|
|
||||||
|
|
||||||
# Show usage
|
|
||||||
# Print usage/help text and exit UNKNOWN (called on bad invocation).
sub usage() {
    print "\ncheck_mem.pl v1.0 - Nagios Plugin\n\n";
    print "usage:\n";
    print " check_mem.pl -<f|u> -w <warnlevel> -c <critlevel>\n\n";
    print "options:\n";
    print " -f Check FREE memory\n";
    print " -u Check USED memory\n";
    print " -C Count OS caches as FREE memory\n";
    print " -h Remove hugepages from the total memory count\n";
    print " -w PERCENT Percent free/used when to warn\n";
    print " -c PERCENT Percent free/used when critical\n";
    print "\nCopyright (C) 2000 Dan Larsson <dl\@tyfon.net>\n";
    print "check_mem.pl comes with absolutely NO WARRANTY either implied or explicit\n";
    print "This program is licensed under the terms of the\n";
    print "MIT License (check source code for details)\n";
    exit $exit_codes{'UNKNOWN'};
}
|
|
||||||
|
|
||||||
sub get_memory_info {
|
|
||||||
my $used_memory_kb = 0;
|
|
||||||
my $free_memory_kb = 0;
|
|
||||||
my $total_memory_kb = 0;
|
|
||||||
my $caches_kb = 0;
|
|
||||||
my $hugepages_nr = 0;
|
|
||||||
my $hugepages_size = 0;
|
|
||||||
my $hugepages_kb = 0;
|
|
||||||
|
|
||||||
my $uname;
|
|
||||||
if ( -e '/usr/bin/uname') {
|
|
||||||
$uname = `/usr/bin/uname -a`;
|
|
||||||
}
|
|
||||||
elsif ( -e '/bin/uname') {
|
|
||||||
$uname = `/bin/uname -a`;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
die "Unable to find uname in /usr/bin or /bin!\n";
|
|
||||||
}
|
|
||||||
print "uname returns $uname" if ($opt_v);
|
|
||||||
if ( $uname =~ /Linux/ ) {
|
|
||||||
my @meminfo = `/bin/cat /proc/meminfo`;
|
|
||||||
foreach (@meminfo) {
|
|
||||||
chomp;
|
|
||||||
if (/^Mem(Total|Free):\s+(\d+) kB/) {
|
|
||||||
my $counter_name = $1;
|
|
||||||
if ($counter_name eq 'Free') {
|
|
||||||
$free_memory_kb = $2;
|
|
||||||
}
|
|
||||||
elsif ($counter_name eq 'Total') {
|
|
||||||
$total_memory_kb = $2;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
elsif (/^MemAvailable:\s+(\d+) kB/) {
|
|
||||||
$caches_kb += $1;
|
|
||||||
}
|
|
||||||
elsif (/^(Buffers|Cached|SReclaimable):\s+(\d+) kB/) {
|
|
||||||
$caches_kb += $2;
|
|
||||||
}
|
|
||||||
elsif (/^Shmem:\s+(\d+) kB/) {
|
|
||||||
$caches_kb -= $1;
|
|
||||||
}
|
|
||||||
# These variables will most likely be overwritten once we look into
|
|
||||||
# /sys/kernel/mm/hugepages, unless we are running on linux <2.6.27
|
|
||||||
# and have to rely on them
|
|
||||||
elsif (/^HugePages_Total:\s+(\d+)/) {
|
|
||||||
$hugepages_nr = $1;
|
|
||||||
}
|
|
||||||
elsif (/^Hugepagesize:\s+(\d+) kB/) {
|
|
||||||
$hugepages_size = $1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
$hugepages_kb = $hugepages_nr * $hugepages_size;
|
|
||||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
|
||||||
|
|
||||||
# Read hugepages info from the newer sysfs interface if available
|
|
||||||
my $hugepages_sysfs_dir = '/sys/kernel/mm/hugepages';
|
|
||||||
if ( -d $hugepages_sysfs_dir ) {
|
|
||||||
# Reset what we read from /proc/meminfo
|
|
||||||
$hugepages_kb = 0;
|
|
||||||
opendir(my $dh, $hugepages_sysfs_dir)
|
|
||||||
|| die "Can't open $hugepages_sysfs_dir: $!";
|
|
||||||
while (my $entry = readdir $dh) {
|
|
||||||
if ($entry =~ /^hugepages-(\d+)kB/) {
|
|
||||||
$hugepages_size = $1;
|
|
||||||
my $hugepages_nr_file = "$hugepages_sysfs_dir/$entry/nr_hugepages";
|
|
||||||
open(my $fh, '<', $hugepages_nr_file)
|
|
||||||
|| die "Can't open $hugepages_nr_file for reading: $!";
|
|
||||||
$hugepages_nr = <$fh>;
|
|
||||||
close($fh);
|
|
||||||
$hugepages_kb += $hugepages_nr * $hugepages_size;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
closedir($dh);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
elsif ( $uname =~ /HP-UX/ ) {
|
|
||||||
# HP-UX, thanks to Christoph Fürstaller
|
|
||||||
my @meminfo = `/usr/bin/sudo /usr/local/bin/kmeminfo`;
|
|
||||||
foreach (@meminfo) {
|
|
||||||
chomp;
|
|
||||||
if (/^Physical memory\s\s+=\s+(\d+)\s+(\d+.\d)g/) {
|
|
||||||
$total_memory_kb = ($2 * 1024 * 1024);
|
|
||||||
}
|
|
||||||
elsif (/^Free memory\s\s+=\s+(\d+)\s+(\d+.\d)g/) {
|
|
||||||
$free_memory_kb = ($2 * 1024 * 1024);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
|
||||||
}
|
|
||||||
elsif ( $uname =~ /FreeBSD/ ) {
|
|
||||||
# The FreeBSD case. 2013-03-19 www.claudiokuenzler.com
|
|
||||||
# free mem = Inactive*Page Size + Cache*Page Size + Free*Page Size
|
|
||||||
my $pagesize = `sysctl vm.stats.vm.v_page_size`;
|
|
||||||
$pagesize =~ s/[^0-9]//g;
|
|
||||||
my $mem_inactive = 0;
|
|
||||||
my $mem_cache = 0;
|
|
||||||
my $mem_free = 0;
|
|
||||||
my $mem_total = 0;
|
|
||||||
my $free_memory = 0;
|
|
||||||
my @meminfo = `/sbin/sysctl vm.stats.vm`;
|
|
||||||
foreach (@meminfo) {
|
|
||||||
chomp;
|
|
||||||
if (/^vm.stats.vm.v_inactive_count:\s+(\d+)/) {
|
|
||||||
$mem_inactive = ($1 * $pagesize);
|
|
||||||
}
|
|
||||||
elsif (/^vm.stats.vm.v_cache_count:\s+(\d+)/) {
|
|
||||||
$mem_cache = ($1 * $pagesize);
|
|
||||||
}
|
|
||||||
elsif (/^vm.stats.vm.v_free_count:\s+(\d+)/) {
|
|
||||||
$mem_free = ($1 * $pagesize);
|
|
||||||
}
|
|
||||||
elsif (/^vm.stats.vm.v_page_count:\s+(\d+)/) {
|
|
||||||
$mem_total = ($1 * $pagesize);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
$free_memory = $mem_inactive + $mem_cache + $mem_free;
|
|
||||||
$free_memory_kb = ( $free_memory / 1024);
|
|
||||||
$total_memory_kb = ( $mem_total / 1024);
|
|
||||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
|
||||||
$caches_kb = ($mem_cache / 1024);
|
|
||||||
}
|
|
||||||
elsif ( $uname =~ /joyent/ ) {
|
|
||||||
# The SmartOS case. 2014-01-10 www.claudiokuenzler.com
|
|
||||||
# free mem = pagesfree * pagesize
|
|
||||||
my $pagesize = `pagesize`;
|
|
||||||
my $phys_pages = `kstat -p unix:0:system_pages:pagestotal | awk '{print \$NF}'`;
|
|
||||||
my $free_pages = `kstat -p unix:0:system_pages:pagesfree | awk '{print \$NF}'`;
|
|
||||||
my $arc_size = `kstat -p zfs:0:arcstats:size | awk '{print \$NF}'`;
|
|
||||||
my $arc_size_kb = $arc_size / 1024;
|
|
||||||
|
|
||||||
print "Pagesize is $pagesize" if ($opt_v);
|
|
||||||
print "Total pages is $phys_pages" if ($opt_v);
|
|
||||||
print "Free pages is $free_pages" if ($opt_v);
|
|
||||||
print "Arc size is $arc_size" if ($opt_v);
|
|
||||||
|
|
||||||
$caches_kb += $arc_size_kb;
|
|
||||||
|
|
||||||
$total_memory_kb = $phys_pages * $pagesize / 1024;
|
|
||||||
$free_memory_kb = $free_pages * $pagesize / 1024;
|
|
||||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
|
||||||
}
|
|
||||||
elsif ( $uname =~ /SunOS/ ) {
|
|
||||||
eval "use Sun::Solaris::Kstat";
|
|
||||||
if ($@) { #Kstat not available
|
|
||||||
if ($opt_C) {
|
|
||||||
print "You can't report on Solaris caches without Sun::Solaris::Kstat available!\n";
|
|
||||||
exit $exit_codes{UNKNOWN};
|
|
||||||
}
|
|
||||||
my @vmstat = `/usr/bin/vmstat 1 2`;
|
|
||||||
my $line;
|
|
||||||
foreach (@vmstat) {
|
|
||||||
chomp;
|
|
||||||
$line = $_;
|
|
||||||
}
|
|
||||||
$free_memory_kb = (split(/ /,$line))[5] / 1024;
|
|
||||||
my @prtconf = `/usr/sbin/prtconf`;
|
|
||||||
foreach (@prtconf) {
|
|
||||||
if (/^Memory size: (\d+) Megabytes/) {
|
|
||||||
$total_memory_kb = $1 * 1024;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
|
||||||
|
|
||||||
}
|
|
||||||
else { # We have kstat
|
|
||||||
my $kstat = Sun::Solaris::Kstat->new();
|
|
||||||
my $phys_pages = ${kstat}->{unix}->{0}->{system_pages}->{physmem};
|
|
||||||
my $free_pages = ${kstat}->{unix}->{0}->{system_pages}->{freemem};
|
|
||||||
# We probably should account for UFS caching here, but it's unclear
|
|
||||||
# to me how to determine UFS's cache size. There's inode_cache,
|
|
||||||
# and maybe the physmem variable in the system_pages module??
|
|
||||||
# In the real world, it looks to be so small as not to really matter,
|
|
||||||
# so we don't grab it. If someone can give me code that does this,
|
|
||||||
# I'd be glad to put it in.
|
|
||||||
my $arc_size = (exists ${kstat}->{zfs} && ${kstat}->{zfs}->{0}->{arcstats}->{size}) ?
|
|
||||||
${kstat}->{zfs}->{0}->{arcstats}->{size} / 1024
|
|
||||||
: 0;
|
|
||||||
$caches_kb += $arc_size;
|
|
||||||
my $pagesize = `pagesize`;
|
|
||||||
|
|
||||||
$total_memory_kb = $phys_pages * $pagesize / 1024;
|
|
||||||
$free_memory_kb = $free_pages * $pagesize / 1024;
|
|
||||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
elsif ( $uname =~ /Darwin/ ) {
|
|
||||||
$total_memory_kb = (split(/ /,`/usr/sbin/sysctl hw.memsize`))[1]/1024;
|
|
||||||
my $pagesize = (split(/ /,`/usr/sbin/sysctl hw.pagesize`))[1];
|
|
||||||
$caches_kb = 0;
|
|
||||||
my @vm_stat = `/usr/bin/vm_stat`;
|
|
||||||
foreach (@vm_stat) {
|
|
||||||
chomp;
|
|
||||||
if (/^(Pages free):\s+(\d+)\.$/) {
|
|
||||||
$free_memory_kb = $2*$pagesize/1024;
|
|
||||||
}
|
|
||||||
# 'caching' concept works different on MACH
|
|
||||||
# this should be a reasonable approximation
|
|
||||||
elsif (/^Pages (inactive|purgable):\s+(\d+).$/) {
|
|
||||||
$caches_kb += $2*$pagesize/1024;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
|
||||||
}
|
|
||||||
elsif ( $uname =~ /AIX/ ) {
|
|
||||||
my @meminfo = `/usr/bin/vmstat -vh`;
|
|
||||||
foreach (@meminfo) {
|
|
||||||
chomp;
|
|
||||||
if (/^\s*([0-9.]+)\s+(.*)/) {
|
|
||||||
my $counter_name = $2;
|
|
||||||
if ($counter_name eq 'memory pages') {
|
|
||||||
$total_memory_kb = $1*4;
|
|
||||||
}
|
|
||||||
if ($counter_name eq 'free pages') {
|
|
||||||
$free_memory_kb = $1*4;
|
|
||||||
}
|
|
||||||
if ($counter_name eq 'file pages') {
|
|
||||||
$caches_kb = $1*4;
|
|
||||||
}
|
|
||||||
if ($counter_name eq 'Number of 4k page frames loaned') {
|
|
||||||
$free_memory_kb += $1*4;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
$used_memory_kb = $total_memory_kb - $free_memory_kb;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
if ($opt_C) {
|
|
||||||
print "You can't report on $uname caches!\n";
|
|
||||||
exit $exit_codes{UNKNOWN};
|
|
||||||
}
|
|
||||||
my $command_line = `vmstat | tail -1 | awk '{print \$4,\$5}'`;
|
|
||||||
chomp $command_line;
|
|
||||||
my @memlist = split(/ /, $command_line);
|
|
||||||
|
|
||||||
# Define the calculating scalars
|
|
||||||
$used_memory_kb = $memlist[0]/1024;
|
|
||||||
$free_memory_kb = $memlist[1]/1024;
|
|
||||||
$total_memory_kb = $used_memory_kb + $free_memory_kb;
|
|
||||||
}
|
|
||||||
return ($free_memory_kb,$used_memory_kb,$caches_kb,$hugepages_kb);
|
|
||||||
}
|
|
||||||
|
|
||||||
sub init {
    # Parse command-line flags into the $opt_* package globals and
    # validate the warning/critical thresholds before any checking runs.

    # Get the options
    if ($#ARGV le 0) {
        # NOTE(review): `le` is the string comparison operator; it works
        # for the no-argument case but `<=` would be the numeric form.
        &usage;
    }
    else {
        getopts('c:fuChvw:');
    }

    # Shortcircuit the switches
    if (!$opt_w or $opt_w == 0 or !$opt_c or $opt_c == 0) {
        # Both -w and -c are mandatory and must be non-zero.
        print "*** You must define WARN and CRITICAL levels!\n";
        &usage;
    }
    elsif (!$opt_f and !$opt_u) {
        # One of -f (free) or -u (used) selects the check mode.
        print "*** You must select to monitor either USED or FREE memory!\n";
        &usage;
    }

    # Check if levels are sane
    if ($opt_w <= $opt_c and $opt_f) {
        # Free-memory mode alerts as the value drops, so WARN must be
        # the higher threshold.
        print "*** WARN level must not be less than CRITICAL when checking FREE memory!\n";
        &usage;
    }
    elsif ($opt_w >= $opt_c and $opt_u) {
        # Used-memory mode alerts as the value grows, so WARN must be
        # the lower threshold.
        print "*** WARN level must not be greater than CRITICAL when checking USED memory!\n";
        &usage;
    }
}
|
|
||||||
|
|
||||||
# Emit the plugin status line and terminate with the given Nagios code.
sub finish {
    my ($message, $exit_state) = @_;

    print $message, "\n";
    exit $exit_state;
}
|
|
||||||
|
|
@ -1,134 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
"""Check netlinks and alert."""
|
|
||||||
# -*- coding: us-ascii -*-
|
|
||||||
|
|
||||||
# Copyright (C) 2017 Canonical
|
|
||||||
# All rights reserved
|
|
||||||
# Author: Alvaro Uria <alvaro.uria@canonical.com>
|
|
||||||
#
|
|
||||||
# check_netlinks.py -i eth0 -o up -m 1500 -s 1000
|
|
||||||
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import glob
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
|
|
||||||
from nagios_plugin3 import (
|
|
||||||
CriticalError,
|
|
||||||
WarnError,
|
|
||||||
try_check,
|
|
||||||
)
|
|
||||||
|
|
||||||
FILTER = ("operstate", "mtu", "speed")
|
|
||||||
|
|
||||||
|
|
||||||
def check_iface(iface, skiperror, crit_thr):
    """Check /sys/class/net/<iface>/{operstate,mtu,speed} against thresholds.

    :param iface: interface name as it appears under /sys/class/net
    :param skiperror: when True, a missing interface is ignored instead of
        raising WarnError
    :param crit_thr: dict with expected "operstate", "mtu" and "speed"
        string values; mutated here to record what was actually checked
    :raises WarnError: the interface does not exist (and skiperror is False)
    :raises CriticalError: a metric could not be read or does not match
    """
    file_path = "/sys/class/net/{0}/{1}"
    # FIX: renamed `filter` -> `metrics`; the original shadowed the builtin.
    # "speed" is only readable for plain interfaces, so skip it for
    # bridges and the loopback device.
    metrics = ["operstate", "mtu"]
    if not os.path.exists(file_path.format(iface, "bridge")) and iface != "lo":
        metrics.append("speed")

    for metric_key in metrics:
        try:
            with open(file_path.format(iface, metric_key)) as fd:
                metric_value = fd.readline().strip()
        except FileNotFoundError:
            if not skiperror:
                raise WarnError("WARNING: {} iface does not exist".format(iface))
            return
        except OSError as e:
            # Reading "speed" on a link that is down fails with EINVAL;
            # tolerate that when "down" is the expected operstate.
            if (
                metric_key == "speed"
                and "Invalid argument" in str(e)
                and crit_thr["operstate"] == "down"
            ):
                metrics = [m for m in metrics if m != "speed"]
                continue
            else:
                raise CriticalError(
                    "CRITICAL: {} ({} returns "
                    "invalid argument)".format(iface, metric_key)
                )

        if metric_key == "operstate" and metric_value != "up":
            if metric_value != crit_thr["operstate"]:
                raise CriticalError(
                    "CRITICAL: {} link state is {}".format(iface, metric_value)
                )

        if metric_value != crit_thr[metric_key]:
            raise CriticalError(
                "CRITICAL: {}/{} is {} (target: "
                "{})".format(iface, metric_key, metric_value, crit_thr[metric_key])
            )

    # Mark metrics that were skipped (e.g. "speed" on bridges) so the OK
    # summary below does not report threshold values we never compared.
    for metric in crit_thr:
        if metric not in metrics:
            crit_thr[metric] = "n/a"
    crit_thr["iface"] = iface
    print(
        "OK: {iface} matches thresholds: "
        "o:{operstate}, m:{mtu}, s:{speed}".format(**crit_thr)
    )
|
|
||||||
|
|
||||||
|
|
||||||
def parse_args():
    """Define and parse the command-line interface for this check."""
    cli = argparse.ArgumentParser(description="check ifaces status")
    cli.add_argument(
        "--iface", "-i", type=str,
        help="interface to monitor; listed in /sys/class/net/*)")
    cli.add_argument(
        "--skip-unfound-ifaces", "-q", default=False, action="store_true",
        help="ignores unfound ifaces; otherwise, alert will be triggered")
    cli.add_argument(
        "--operstate", "-o", default="up", type=str,
        help="operstate: up, down, unknown (default: up)")
    cli.add_argument(
        "--mtu", "-m", default="1500", type=str,
        help="mtu size (default: 1500)")
    cli.add_argument(
        "--speed", "-s", default="10000", type=str,
        help="link speed in Mbps (default 10000)")
    args = cli.parse_args()

    if args.iface:
        return args

    # Without -i there is nothing to check; list the candidates and bail.
    candidates = map(os.path.basename, glob.glob("/sys/class/net/*"))
    print(
        "UNKNOWN: Please specify one of these "
        "ifaces: {}".format(",".join(candidates)))
    sys.exit(1)
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Entry point: collect options and run the interface check."""
    opts = parse_args()
    thresholds = dict(
        operstate=opts.operstate.lower(),
        mtu=opts.mtu,
        speed=opts.speed,
    )
    try_check(check_iface, opts.iface, opts.skip_unfound_ifaces, thresholds)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
||||||
|
|
@ -1,52 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# Copyright (c) 2014 Canonical, Ltd
|
|
||||||
# Author: Brad Marshall <brad.marshall@canonical.com>
|
|
||||||
|
|
||||||
# Checks if a network namespace is responding by doing an ip a in each one.
|
|
||||||
|
|
||||||
. /usr/lib/nagios/plugins/utils.sh
|
|
||||||
|
|
||||||
# Abort with a CRITICAL status when the previous command failed.
#   $1 - return code of the command just run
#   $2 - message to print on failure
check_ret_value() {
    RET=$1
    if [[ $RET -ne 0 ]];then
        echo "CRIT: $2"
        exit $STATE_CRIT
    fi
}
|
|
||||||
|
|
||||||
# Smoke-test namespace support by creating and deleting a scratch
# namespace; any failure is reported as CRITICAL via check_ret_value.
check_netns_create() {
    RET_VAL=$(ip netns add nrpe-check 2>&1)
    check_ret_value $? "$RET_VAL"
    RET_VAL=$(ip netns delete nrpe-check 2>&1)
    check_ret_value $? "$RET_VAL"
}
|
|
||||||
|
|
||||||
|
|
||||||
# Probe every existing namespace (excluding our own scratch one) with a
# harmless `ip a` and bucket each into a responding or failing list.
netnsok=()
netnscrit=()

for ns in $(ip netns list |awk '!/^nrpe-check$/ {print $1}'); do
    output=$(ip netns exec $ns ip a 2>/dev/null)
    err=$?
    if [ $err -eq 0 ]; then
        netnsok=("${netnsok[@]}" $ns)
    else
        netnscrit=("${netnscrit[@]}" $ns)
    fi
done

if [ ${#netnscrit[@]} -eq 0 ]; then
    if [ ${#netnsok[@]} -eq 0 ]; then
        # No namespaces exist at all: at least verify that namespace
        # creation works before reporting OK.
        check_netns_create
        echo "OK: no namespaces defined"
        exit $STATE_OK
    else
        echo "OK: ${netnsok[@]} are responding"
        exit $STATE_OK
    fi
else
    echo "CRIT: ${netnscrit[@]} aren't responding"
    exit $STATE_CRIT
fi
|
|
||||||
|
|
||||||
|
|
@ -1,80 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
"""Check readonly filesystems and alert."""
|
|
||||||
# -*- coding: us-ascii -*-
|
|
||||||
|
|
||||||
# Copyright (C) 2020 Canonical
|
|
||||||
# All rights reserved
|
|
||||||
#
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
|
|
||||||
from nagios_plugin3 import (
|
|
||||||
CriticalError,
|
|
||||||
UnknownError,
|
|
||||||
try_check,
|
|
||||||
)
|
|
||||||
|
|
||||||
EXCLUDE = {"/snap/", "/sys/fs/cgroup"}
|
|
||||||
|
|
||||||
|
|
||||||
def check_ro_filesystem(excludes=""):
    """Scan /proc/mounts and raise if any filesystem is mounted readonly.

    :param excludes: comma separated mount-point prefixes to skip, merged
        with the built-in EXCLUDE set; each entry may be just a prefix of
        the full mount point
    :raises UnknownError: /proc/mounts (or the exclude list) is unreadable
    :raises CriticalError: at least one non-excluded mount is readonly
    """
    # read /proc/mounts, add each line to a list
    try:
        with open("/proc/mounts") as fd:
            mounts = [mount.strip() for mount in fd.readlines()]
    except Exception as e:
        raise UnknownError("UNKNOWN: unable to read mounts with {}".format(e))

    exclude_mounts = EXCLUDE
    ro_filesystems = []
    if excludes:
        try:
            exclude_mounts = EXCLUDE.union(set(excludes.split(",")))
        except Exception as e:
            msg = "UNKNOWN: unable to read list of mounts to exclude {}".format(e)
            raise UnknownError(msg)
    for mount in mounts:
        # /proc/mounts fields: device, mount point, fstype, options, ...
        split_mount = mount.split()
        # skip excluded mount points; otherwise look for "ro" in the
        # mount options column
        if not any(
            split_mount[1].startswith(exclusion.strip()) for exclusion in exclude_mounts
        ):
            mount_options = split_mount[3].split(",")
            if "ro" in mount_options:
                ro_filesystems.append(split_mount[1])
    if ro_filesystems:
        msg = "CRITICAL: filesystem(s) {} readonly".format(",".join(ro_filesystems))
        raise CriticalError(msg)

    print("OK: no readonly filesystems found")
|
|
||||||
|
|
||||||
|
|
||||||
def parse_args():
    """Build the CLI parser and return the parsed options."""
    cli = argparse.ArgumentParser(description="Check for readonly filesystems")
    cli.add_argument(
        "--exclude", "-e", type=str, default="",
        help="""Comma separated list of mount points to exclude from checks for readonly filesystem.
        Can be just a substring of the whole mount point.""")
    return cli.parse_args()
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Entry point: parse options and run the readonly-filesystem check."""
    options = parse_args()
    try_check(check_ro_filesystem, options.exclude)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
||||||
|
|
@ -1,71 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
"""Read file and return nagios status based on its content."""
|
|
||||||
# --------------------------------------------------------
|
|
||||||
# This file is managed by Juju
|
|
||||||
# --------------------------------------------------------
|
|
||||||
|
|
||||||
#
|
|
||||||
# Copyright 2014 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Author: Jacek Nykis <jacek.nykis@canonical.com>
|
|
||||||
#
|
|
||||||
|
|
||||||
import re
|
|
||||||
|
|
||||||
import nagios_plugin3 as nagios_plugin
|
|
||||||
|
|
||||||
|
|
||||||
def parse_args():
    """Build the command-line parser and return the parsed options."""
    import argparse

    cli = argparse.ArgumentParser(
        description="Read file and return nagios status based on its content",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    cli.add_argument("-f", "--status-file", required=True, help="Status file path")
    cli.add_argument(
        "-c", "--critical-text", default="CRITICAL",
        help="String indicating critical status")
    cli.add_argument(
        "-w", "--warning-text", default="WARNING",
        help="String indicating warning status")
    cli.add_argument(
        "-o", "--ok-text", default="OK", help="String indicating OK status")
    cli.add_argument(
        "-u", "--unknown-text", default="UNKNOWN",
        help="String indicating unknown status")
    return cli.parse_args()
|
|
||||||
|
|
||||||
|
|
||||||
def check_status(args):
    """Raise the matching nagios exception for the first alerting line.

    The status file must have been written within the last 12 hours
    (43200 s); stale files raise via check_file_freshness.  Each line is
    regex-searched against the critical, warning and unknown markers in
    that order of precedence; lines matching none are echoed to stdout.
    """
    nagios_plugin.check_file_freshness(args.status_file, 43200)

    with open(args.status_file, "r") as f:
        content = [line.strip() for line in f.readlines()]

    for line in content:
        # The *-text options are treated as regex patterns, not literals.
        if re.search(args.critical_text, line):
            raise nagios_plugin.CriticalError(line)
        elif re.search(args.warning_text, line):
            raise nagios_plugin.WarnError(line)
        elif re.search(args.unknown_text, line):
            raise nagios_plugin.UnknownError(line)
        else:
            print(line)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
args = parse_args()
|
|
||||||
nagios_plugin.try_check(check_status, args)
|
|
||||||
|
|
@ -1,78 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# This script checks swap pageouts and reports number of kbytes moved
|
|
||||||
# from physical ram to swap space in a given number of seconds
|
|
||||||
#
|
|
||||||
# Usage: "check_swap_activity -i interval -w warning_kbyts -c critical_kbytes
|
|
||||||
#
|
|
||||||
#
|
|
||||||
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
. /usr/lib/nagios/plugins/utils.sh
|
|
||||||
|
|
||||||
|
|
||||||
# Print usage/help text for this check to stdout (heredoc is emitted
# verbatim, so its body is user-facing output).
help() {
    cat << EOH
usage: $0 [ -i ## ] -w ## -c ##

Measures page-outs to swap over a given interval, by default 5 seconds.

-i time in seconds to monitor (defaults to 5 seconds)
-w warning Level in kbytes
-c critical Level in kbytes

EOH
}
|
|
||||||
|
|
||||||
TIMEWORD=seconds        # grammatical unit used in the status messages
WARN_LVL=               # -w threshold in kbytes (mandatory)
CRIT_LVL=               # -c threshold in kbytes (mandatory)
INTERVAL=5              # -i sampling window in seconds
## FETCH ARGUMENTS
while getopts "i:w:c:" OPTION; do
    case "${OPTION}" in
        i)
            INTERVAL=${OPTARG}
            # Use singular "second" in the message for a 1 s interval.
            if [ $INTERVAL -eq 1 ]; then
                TIMEWORD=second
            fi
            ;;
        w)
            WARN_LVL=${OPTARG}
            ;;
        c)
            CRIT_LVL=${OPTARG}
            ;;
        ?)
            help
            exit 3
            ;;
    esac
done

# Both thresholds are mandatory; exit UNKNOWN (3) when either is absent.
if [ -z ${WARN_LVL} ] || [ -z ${CRIT_LVL} ] ; then
    help
    exit 3
fi

## Get swap pageouts over $INTERVAL
# Second vmstat sample reflects activity during the interval; column 8
# is taken as the swap-out figure.
# NOTE(review): vmstat's "so" column is an average rate over the
# interval, while the messages below call it a total — confirm intent.
PAGEOUTS=$(vmstat -w ${INTERVAL} 2 | tail -n 1 | awk '{print $8}')

# Nagios perfdata suffix appended to every status line.
SUMMARY="| swapout_size=${PAGEOUTS}KB;${WARN_LVL};${CRIT_LVL};"
if [ ${PAGEOUTS} -lt ${WARN_LVL} ]; then
    # pageouts are below threshold
    echo "OK - ${PAGEOUTS} kb swapped out in last ${INTERVAL} ${TIMEWORD} $SUMMARY"
    exit $STATE_OK
elif [ ${PAGEOUTS} -ge ${CRIT_LVL} ]; then
    ## SWAP IS IN CRITICAL STATE
    echo "CRITICAL - ${PAGEOUTS} kb swapped out in last ${INTERVAL} ${TIMEWORD} $SUMMARY"
    exit $STATE_CRITICAL
elif [ ${PAGEOUTS} -ge ${WARN_LVL} ] && [ ${PAGEOUTS} -lt ${CRIT_LVL} ]; then
    ## SWAP IS IN WARNING STATE
    echo "WARNING - ${PAGEOUTS} kb swapped out in last ${INTERVAL} ${TIMEWORD} $SUMMARY"
    exit $STATE_WARNING
else
    # Non-numeric PAGEOUTS (vmstat failure) lands here.
    echo "CRITICAL: Failure to process pageout information $SUMMARY"
    exit $STATE_UNKNOWN
fi
|
|
||||||
|
|
@ -1,48 +0,0 @@
|
||||||
#!/usr/bin/python3
"""Check systemd service and alert."""
#
# Copyright 2016 Canonical Ltd
#
# Author: Brad Marshall <brad.marshall@canonical.com>
#
# Based on check_upstart_job and
# https://zignar.net/2014/09/08/getting-started-with-dbus-python-systemd/
#
import sys

import dbus


if len(sys.argv) < 2:
    # Without a service name there is nothing to check; exit UNKNOWN
    # instead of crashing with an IndexError traceback.
    print("UNKNOWN: no service name given")
    sys.exit(3)

service_arg = sys.argv[1]
service_name = "%s.service" % service_arg

try:
    bus = dbus.SystemBus()
    systemd = bus.get_object("org.freedesktop.systemd1", "/org/freedesktop/systemd1")
    manager = dbus.Interface(systemd, dbus_interface="org.freedesktop.systemd1.Manager")
    try:
        # LoadUnit returns the unit's object path even if it is not active.
        service_unit = manager.LoadUnit(service_name)
        service_proxy = bus.get_object("org.freedesktop.systemd1", str(service_unit))
        # (Removed an unused dbus.Interface(service_proxy, ...) binding.)
        # SubState distinguishes "running" from other states such as
        # "exited" or "dead".
        service_res = service_proxy.Get(
            "org.freedesktop.systemd1.Unit",
            "SubState",
            dbus_interface="org.freedesktop.DBus.Properties",
        )

        if service_res == "running":
            print("OK: %s is running" % service_name)
            sys.exit(0)
        else:
            print("CRITICAL: %s is not running" % service_name)
            sys.exit(2)

    except dbus.DBusException:
        print("CRITICAL: unable to find %s in systemd" % service_name)
        sys.exit(2)

except dbus.DBusException:
    print("CRITICAL: unable to connect to system for %s" % service_name)
    sys.exit(2)
|
|
||||||
|
|
@ -1,72 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
|
|
||||||
#
|
|
||||||
# Copyright 2012, 2013 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Author: Paul Collins <paul.collins@canonical.com>
|
|
||||||
#
|
|
||||||
# Based on http://www.eurion.net/python-snippets/snippet/Upstart%20service%20status.html
|
|
||||||
#
|
|
||||||
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import dbus
|
|
||||||
|
|
||||||
|
|
||||||
class Upstart(object):
    """Thin wrapper around Upstart's D-Bus API (com.ubuntu.Upstart)."""

    def __init__(self):
        # Upstart is reached over the system bus under a well-known name.
        self._bus = dbus.SystemBus()
        self._upstart = self._bus.get_object('com.ubuntu.Upstart',
                                             '/com/ubuntu/Upstart')
    def get_job(self, job_name):
        # Resolve a job name (e.g. "ssh") to its D-Bus proxy object.
        path = self._upstart.GetJobByName(job_name,
                                          dbus_interface='com.ubuntu.Upstart0_6')
        return self._bus.get_object('com.ubuntu.Upstart', path)

    def get_properties(self, job):
        # Properties of the job's single, argument-less instance.
        # NOTE(review): the caller treats a DBusException from here as
        # "job has multiple instances" — confirm that assumption.
        path = job.GetInstance([], dbus_interface='com.ubuntu.Upstart0_6.Job')
        instance = self._bus.get_object('com.ubuntu.Upstart', path)
        return instance.GetAll('com.ubuntu.Upstart0_6.Instance',
                               dbus_interface=dbus.PROPERTIES_IFACE)

    def get_job_instances(self, job_name):
        # All instances of *job_name* as D-Bus proxy objects.
        job = self.get_job(job_name)
        paths = job.GetAllInstances([], dbus_interface='com.ubuntu.Upstart0_6.Job')
        return [self._bus.get_object('com.ubuntu.Upstart', path) for path in paths]

    def get_job_instance_properties(self, job):
        # Property dict (contains at least 'name' and 'state') for one
        # instance proxy, per the usage below.
        return job.GetAll('com.ubuntu.Upstart0_6.Instance',
                          dbus_interface=dbus.PROPERTIES_IFACE)
|
|
||||||
|
|
||||||
try:
|
|
||||||
upstart = Upstart()
|
|
||||||
try:
|
|
||||||
job = upstart.get_job(sys.argv[1])
|
|
||||||
props = upstart.get_properties(job)
|
|
||||||
|
|
||||||
if props['state'] == 'running':
|
|
||||||
print 'OK: %s is running' % sys.argv[1]
|
|
||||||
sys.exit(0)
|
|
||||||
else:
|
|
||||||
print 'CRITICAL: %s is not running' % sys.argv[1]
|
|
||||||
sys.exit(2)
|
|
||||||
|
|
||||||
except dbus.DBusException as e:
|
|
||||||
instances = upstart.get_job_instances(sys.argv[1])
|
|
||||||
propses = [upstart.get_job_instance_properties(instance) for instance in instances]
|
|
||||||
states = dict([(props['name'], props['state']) for props in propses])
|
|
||||||
if len(states) != states.values().count('running'):
|
|
||||||
not_running = []
|
|
||||||
for name in states.keys():
|
|
||||||
if states[name] != 'running':
|
|
||||||
not_running.append(name)
|
|
||||||
print 'CRITICAL: %d instances of %s not running: %s' % \
|
|
||||||
(len(not_running), sys.argv[1], not_running.join(', '))
|
|
||||||
sys.exit(2)
|
|
||||||
else:
|
|
||||||
print 'OK: %d instances of %s running' % (len(states), sys.argv[1])
|
|
||||||
|
|
||||||
except dbus.DBusException as e:
|
|
||||||
print 'CRITICAL: failed to get properties of \'%s\' from upstart' % sys.argv[1]
|
|
||||||
sys.exit(2)
|
|
||||||
|
|
||||||
|
|
@ -1,47 +0,0 @@
|
||||||
#!/usr/bin/env python3
"""Check for xfs errors and alert."""
#
# Copyright 2017 Canonical Ltd
#
# Author: Jill Rouleau <jill.rouleau@canonical.com>
#
# Check for xfs errors and alert
#
import re
import subprocess
import sys
from datetime import datetime, timedelta


# error messages commonly seen in dmesg on xfs errors
raw_xfs_errors = [
    "XFS_WANT_CORRUPTED_",
    "xfs_error_report",
    "corruption detected at xfs_",
    "Unmount and run xfs_repair",
]

xfs_regex = [re.compile(i) for i in raw_xfs_errors]

# nagios can't read from kern.log, so we look at dmesg - this does present
# a known limitation if a node is rebooted or dmesg is otherwise cleared.
# FIX: subprocess.getoutput() takes a shell command *string*; passing a
# one-element list only worked by accident of shell=True on POSIX.
log_lines = subprocess.getoutput("dmesg -T").split("\n")

err_results = [line for line in log_lines for rgx in xfs_regex if rgx.search(line)]

# Look for errors within the last N minutes, specified in the check definition
check_delta = int(sys.argv[1])

# dmesg -T formatted timestamps are inside [], so we need to add them
datetime_delta = datetime.now() - timedelta(minutes=check_delta)

# The "%c" timestamp occupies characters 1-24, inside the leading "[...]".
recent_logs = [
    i for i in err_results if datetime.strptime(i[1:25], "%c") >= datetime_delta
]

if recent_logs:
    print("CRITICAL: Recent XFS errors in kern.log." + "\n" + "{}".format(recent_logs))
    sys.exit(2)
else:
    print("OK")
    sys.exit(0)
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
||||||
#------------------------------------------------
|
|
||||||
# This file is juju managed
|
|
||||||
#------------------------------------------------
|
|
||||||
|
|
||||||
uid = nobody
|
|
||||||
gid = nogroup
|
|
||||||
pid file = /var/run/rsyncd.pid
|
|
||||||
syslog facility = daemon
|
|
||||||
socket options = SO_KEEPALIVE
|
|
||||||
timeout = 7200
|
|
||||||
|
|
||||||
&merge /etc/rsync-juju.d
|
|
||||||
&include /etc/rsync-juju.d
|
|
||||||
|
|
@ -1,99 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
# Bootstrap charm-helpers, installing its dependencies if necessary using
|
|
||||||
# only standard libraries.
|
|
||||||
from __future__ import print_function
|
|
||||||
from __future__ import absolute_import
|
|
||||||
|
|
||||||
import functools
|
|
||||||
import inspect
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
|
|
||||||
try:
    import six  # NOQA:F401
except ImportError:
    # Bootstrap: install six system-wide via apt, picking the package
    # that matches the interpreter running this module.
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
    import six  # NOQA:F401

try:
    import yaml  # NOQA:F401
except ImportError:
    # Same bootstrap dance for PyYAML.
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
    import yaml  # NOQA:F401
|
|
||||||
|
|
||||||
|
|
||||||
# Tracks the mangled names of functions that have already emitted their
# deprecation warning, so each one is reported at most once per process.
__deprecated_functions = {}


def deprecate(warning, date=None, log=None):
    """Decorator: warn, once per function, that the wrapped callable is deprecated.

    ``date`` is a semi-ISO8660 ``YYYY-MM`` string naming the year-month in
    which the function is expected to be removed.

    usage:

    @deprecate('use core/fetch/add_source() instead', '2017-04')
    def contributed_add_source_thing(...):
        ...

    The message is emitted only on the first call of each decorated
    function.  ``log`` exists so a charm can route it through hookenv.log;
    when it is None the message goes to stdout.

    :param warning: String to indicate what is to be used instead.
    :param date: Optional string in YYYY-MM format to indicate when the
                 function will definitely (probably) be removed.
    :param log: The log function to call in order to log. If None, logs to
                stdout
    """
    def wrap(func):

        @functools.wraps(func)
        def inner(*args, **kwargs):
            # Build a key unique per definition site so two same-named
            # functions in different modules warn independently.
            try:
                mod = inspect.getmodule(func)
                src_file = inspect.getsourcefile(func)
                src_lines = inspect.getsourcelines(func)
                key = "{}-{}-{}..{}-{}".format(
                    mod.__name__, src_file, src_lines[0], src_lines[-1],
                    func.__name__)
            except (IOError, TypeError):
                # assume it was local, so just use the name of the function
                key = func.__name__
            if key not in __deprecated_functions:
                __deprecated_functions[key] = True
                pieces = [
                    "DEPRECATION WARNING: Function {} is being removed".format(
                        func.__name__)]
                if date:
                    pieces.append("on/around {}".format(date))
                if warning:
                    pieces.append(": {}".format(warning))
                message = " ".join(pieces)
                if log:
                    log(message)
                else:
                    print(message)
            return func(*args, **kwargs)
        return inner
    return wrap
|
|
||||||
|
|
@ -1,57 +0,0 @@
|
||||||
==========
|
|
||||||
Commandant
|
|
||||||
==========
|
|
||||||
|
|
||||||
-----------------------------------------------------
|
|
||||||
Automatic command-line interfaces to Python functions
|
|
||||||
-----------------------------------------------------
|
|
||||||
|
|
||||||
One of the benefits of ``libvirt`` is the uniformity of the interface: the C API (as well as the bindings in other languages) is a set of functions that accept parameters that are nearly identical to the command-line arguments. If you run ``virsh``, you get an interactive command prompt that supports all of the same commands that your shell scripts use as ``virsh`` subcommands.
|
|
||||||
|
|
||||||
Command execution and stdio manipulation is the greatest common factor across all development systems in the POSIX environment. By exposing your functions as commands that manipulate streams of text, you can make life easier for all the Ruby and Erlang and Go programmers in your life.
|
|
||||||
|
|
||||||
Goals
|
|
||||||
=====
|
|
||||||
|
|
||||||
* Single decorator to expose a function as a command.
|
|
||||||
* now two decorators - one "automatic" and one that allows authors to manipulate the arguments for fine-grained control. (MW)
|
|
||||||
* Automatic analysis of function signature through ``inspect.getargspec()`` on python 2 or ``inspect.getfullargspec()`` on python 3
|
|
||||||
* Command argument parser built automatically with ``argparse``
|
|
||||||
* Interactive interpreter loop object made with ``Cmd``
|
|
||||||
* Options to output structured return value data via ``pprint``, ``yaml`` or ``json`` dumps.
|
|
||||||
|
|
||||||
Other Important Features that need writing
|
|
||||||
------------------------------------------
|
|
||||||
|
|
||||||
* Help and Usage documentation can be automatically generated, but it will be important to let users override this behaviour
|
|
||||||
* The decorator should allow specifying further parameters to the parser's add_argument() calls, to specify types or to make arguments behave as boolean flags, etc.
|
|
||||||
- Filename arguments are important, as good practice is for functions to accept file objects as parameters.
|
|
||||||
- choices arguments help to limit bad input before the function is called
|
|
||||||
* Some automatic behaviour could make for better defaults, once the user can override them.
|
|
||||||
- We could automatically detect arguments that default to False or True, and automatically support --no-foo for foo=True.
|
|
||||||
- We could automatically support hyphens as alternates for underscores
|
|
||||||
- Arguments defaulting to sequence types could support the ``append`` action.
|
|
||||||
|
|
||||||
|
|
||||||
-----------------------------------------------------
|
|
||||||
Implementing subcommands
|
|
||||||
-----------------------------------------------------
|
|
||||||
|
|
||||||
(WIP)
|
|
||||||
|
|
||||||
So as to avoid dependencies on the cli module, subcommands should be defined separately from their implementations. The recommendation would be to place definitions into separate modules near the implementations which they expose.
|
|
||||||
|
|
||||||
Some examples::
|
|
||||||
|
|
||||||
from charmhelpers.cli import CommandLine
|
|
||||||
from charmhelpers.payload import execd
|
|
||||||
from charmhelpers.foo import bar
|
|
||||||
|
|
||||||
cli = CommandLine()
|
|
||||||
|
|
||||||
cli.subcommand(execd.execd_run)
|
|
||||||
|
|
||||||
@cli.subcommand_builder("bar", help="Bar baz qux")
|
|
||||||
def barcmd_builder(subparser):
|
|
||||||
subparser.add_argument('argument1', help="yackety")
|
|
||||||
return bar
|
|
||||||
|
|
@ -1,196 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import inspect
|
|
||||||
import argparse
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import six
|
|
||||||
from six.moves import zip
|
|
||||||
|
|
||||||
import charmhelpers.core.unitdata
|
|
||||||
|
|
||||||
|
|
||||||
class OutputFormatter(object):
    """Render a subcommand's return value to a stream in a chosen format.

    Each entry in ``formats`` names a method on this class; those method
    docstrings double as the argparse help text for the matching flag.
    """
    def __init__(self, outfile=sys.stdout):
        # Every name here must correspond to a formatter method below.
        self.formats = ("raw", "json", "py", "yaml", "csv", "tab")
        self.outfile = outfile

    def add_arguments(self, argument_parser):
        """Register mutually-exclusive format-selection options on the parser."""
        group = argument_parser.add_mutually_exclusive_group()
        choices = self.supported_formats
        group.add_argument("--format", metavar='FMT',
                           help="Select output format for returned data, "
                                "where FMT is one of: {}".format(choices),
                           choices=choices, default='raw')
        for name in self.formats:
            handler = getattr(self, name)
            # Short flag is the format's first letter (e.g. -j / --json).
            group.add_argument("-{}".format(name[0]),
                               "--{}".format(name), action='store_const',
                               const=name, dest='format',
                               help=handler.__doc__)

    @property
    def supported_formats(self):
        return self.formats

    def raw(self, output):
        """Output data as raw string (default)"""
        if isinstance(output, (list, tuple)):
            output = '\n'.join(str(item) for item in output)
        self.outfile.write(str(output))

    def py(self, output):
        """Output data as a nicely-formatted python data structure"""
        import pprint
        pprint.pprint(output, stream=self.outfile)

    def json(self, output):
        """Output data in JSON format"""
        import json
        json.dump(output, self.outfile)

    def yaml(self, output):
        """Output data in YAML format"""
        import yaml
        yaml.safe_dump(output, self.outfile)

    def csv(self, output):
        """Output data as excel-compatible CSV"""
        import csv
        csv.writer(self.outfile).writerows(output)

    def tab(self, output):
        """Output data in excel-compatible tab-delimited format"""
        import csv
        csv.writer(self.outfile, dialect=csv.excel_tab).writerows(output)

    def format_output(self, output, fmt='raw'):
        # Dispatch to the formatter method named after the requested format.
        getattr(self, fmt)(output)
|
|
||||||
|
|
||||||
|
|
||||||
class CommandLine(object):
    """Assemble an argparse-based CLI from decorated functions.

    The parser, subparsers and formatter are class attributes so every
    instance accumulates subcommands onto one shared parser (the module
    exposes a single ``cmdline`` instance for registration).
    """
    argument_parser = None   # Shared top-level ArgumentParser.
    subparsers = None        # Shared subparser collection.
    formatter = None         # Shared OutputFormatter for return values.
    exit_code = 0            # Exit status, set by test commands.

    def __init__(self):
        if not self.argument_parser:
            self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
        if not self.formatter:
            self.formatter = OutputFormatter()
            self.formatter.add_arguments(self.argument_parser)
        if not self.subparsers:
            self.subparsers = self.argument_parser.add_subparsers(help='Commands')

    def subcommand(self, command_name=None):
        """
        Decorate a function as a subcommand. Use its arguments as the
        command-line arguments"""
        def wrapper(decorated):
            cmd_name = command_name or decorated.__name__
            subparser = self.subparsers.add_parser(cmd_name,
                                                   description=decorated.__doc__)
            # Derive argparse arguments from the function signature.
            for args, kwargs in describe_arguments(decorated):
                subparser.add_argument(*args, **kwargs)
            subparser.set_defaults(func=decorated)
            return decorated
        return wrapper

    def test_command(self, decorated):
        """
        Subcommand is a boolean test function, so bool return values should be
        converted to a 0/1 exit code.
        """
        decorated._cli_test_command = True
        return decorated

    def no_output(self, decorated):
        """
        Subcommand is not expected to return a value, so don't print a spurious None.
        """
        decorated._cli_no_output = True
        return decorated

    def subcommand_builder(self, command_name, description=None):
        """
        Decorate a function that builds a subcommand. Builders should accept a
        single argument (the subparser instance) and return the function to be
        run as the command."""
        def wrapper(decorated):
            subparser = self.subparsers.add_parser(command_name)
            func = decorated(subparser)
            subparser.set_defaults(func=func)
            subparser.description = description or func.__doc__
            # BUG FIX: the original wrapper returned None, which rebound the
            # decorated builder's module-level name to None.  Return the
            # builder so the name stays usable after decoration.
            return decorated
        return wrapper

    def run(self):
        "Run cli, processing arguments and executing subcommands."
        arguments = self.argument_parser.parse_args()
        # getargspec was removed from py3; pick the spec function per version.
        if six.PY2:
            argspec = inspect.getargspec(arguments.func)
        else:
            argspec = inspect.getfullargspec(arguments.func)
        # Rebuild the positional call from the parsed namespace.
        vargs = []
        for arg in argspec.args:
            vargs.append(getattr(arguments, arg))
        if argspec.varargs:
            vargs.extend(getattr(arguments, argspec.varargs))
        output = arguments.func(*vargs)
        if getattr(arguments.func, '_cli_test_command', False):
            # Boolean test commands map truthiness to a 0/1 exit code
            # and suppress their return value.
            self.exit_code = 0 if output else 1
            output = ''
        if getattr(arguments.func, '_cli_no_output', False):
            output = ''
        self.formatter.format_output(output, arguments.format)
        # Persist any pending unitdata writes before the process exits.
        if charmhelpers.core.unitdata._KV:
            charmhelpers.core.unitdata._KV.flush()
|
|
||||||
|
|
||||||
|
|
||||||
# Module-level singleton; sub-modules import this to register subcommands.
cmdline = CommandLine()
|
|
||||||
|
|
||||||
|
|
||||||
def describe_arguments(func):
    """
    Analyze a function's signature and return a data structure suitable for
    passing in as arguments to an argparse parser's add_argument() method."""

    if six.PY2:
        spec = inspect.getargspec(func)
    else:
        spec = inspect.getfullargspec(func)
    # we should probably raise an exception somewhere if func includes **kwargs
    n_defaults = len(spec.defaults) if spec.defaults else 0
    if n_defaults:
        # Parameters with defaults become optional --flags.
        positionals = spec.args[:-n_defaults]
        for name, default in zip(spec.args[-n_defaults:], spec.defaults):
            yield ('--{}'.format(name),), {'default': default}
    else:
        positionals = list(spec.args)
    for name in positionals:
        yield (name,), {}
    if spec.varargs:
        # *args maps to a zero-or-more positional.
        yield (spec.varargs,), {'nargs': '*'}
|
|
||||||
|
|
@ -1,34 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from . import cmdline
|
|
||||||
from charmhelpers.contrib.benchmark import Benchmark
|
|
||||||
|
|
||||||
|
|
||||||
# Exposed as the `benchmark-start` CLI subcommand.
@cmdline.subcommand(command_name='benchmark-start')
def start():
    Benchmark.start()
|
|
||||||
|
|
||||||
|
|
||||||
# Exposed as the `benchmark-finish` CLI subcommand.
@cmdline.subcommand(command_name='benchmark-finish')
def finish():
    Benchmark.finish()
|
|
||||||
|
|
||||||
|
|
||||||
# Builder for the `benchmark-composite` subcommand.  The returned
# Benchmark.set_composite_score is invoked with the parsed
# (value, units, direction) arguments.
@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
def service(subparser):
    subparser.add_argument("value", help="The composite score.")
    subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
    subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
    return Benchmark.set_composite_score
|
|
||||||
|
|
@ -1,30 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
This module loads sub-modules into the python runtime so they can be
|
|
||||||
discovered via the inspect module. In order to prevent flake8 from (rightfully)
|
|
||||||
telling us these are unused modules, throw a ' # noqa' at the end of each import
|
|
||||||
so that the warning is suppressed.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from . import CommandLine # noqa
|
|
||||||
|
|
||||||
"""
|
|
||||||
Import the sub-modules which have decorated subcommands to register with chlp.
|
|
||||||
"""
|
|
||||||
from . import host # noqa
|
|
||||||
from . import benchmark # noqa
|
|
||||||
from . import unitdata # noqa
|
|
||||||
from . import hookenv # noqa
|
|
||||||
|
|
@ -1,21 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from . import cmdline
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
|
|
||||||
|
|
||||||
# Expose selected hookenv helpers as CLI subcommands.  `._wrapped` is
# presumably the undecorated (uncached) form so describe_arguments() sees
# the real signature -- TODO confirm against hookenv's cached decorator.
cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
cmdline.subcommand('service-name')(hookenv.service_name)
cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from . import cmdline
|
|
||||||
from charmhelpers.core import host
|
|
||||||
|
|
||||||
|
|
||||||
# Thin CLI wrapper; the return value is rendered by the OutputFormatter.
@cmdline.subcommand()
def mounts():
    "List mounts"
    return host.mounts()
|
|
||||||
|
|
||||||
|
|
||||||
# Builder for the `service` subcommand; host.service is invoked with the
# parsed (action, service_name) arguments.
@cmdline.subcommand_builder('service', description="Control system services")
def service(subparser):
    subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
    subparser.add_argument("service_name", help="Name of the service to control")
    return host.service
|
|
||||||
|
|
@ -1,46 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from . import cmdline
|
|
||||||
from charmhelpers.core import unitdata
|
|
||||||
|
|
||||||
|
|
||||||
@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
def unitdata_cmd(subparser):
    """Build the `unitdata` subcommand with get/getrange/set sub-actions."""
    nested = subparser.add_subparsers()

    get_cmd = nested.add_parser('get', help='Retrieve data')
    get_cmd.add_argument('key', help='Key to retrieve the value of')
    get_cmd.set_defaults(action='get', value=None)

    getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data')
    getrange_cmd.add_argument('key', metavar='prefix',
                              help='Prefix of the keys to retrieve')
    getrange_cmd.set_defaults(action='getrange', value=None)

    set_cmd = nested.add_parser('set', help='Store data')
    set_cmd.add_argument('key', help='Key to set')
    set_cmd.add_argument('value', help='Value to store')
    set_cmd.set_defaults(action='set')

    # Dispatcher invoked by CommandLine.run() with the (action, key, value)
    # defaults established by the sub-parsers above.
    def _unitdata_cmd(action, key, value):
        if action == 'get':
            return unitdata.kv().get(key)
        elif action == 'getrange':
            return unitdata.kv().getrange(key)
        elif action == 'set':
            unitdata.kv().set(key, value)
            # Flush immediately: the CLI process exits right after.
            unitdata.kv().flush()
            return ''
    return _unitdata_cmd
|
|
||||||
|
|
@ -1,205 +0,0 @@
|
||||||
# Copyright 2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
'''
|
|
||||||
A Pythonic API to interact with the charm hook environment.
|
|
||||||
|
|
||||||
:author: Stuart Bishop <stuart.bishop@canonical.com>
|
|
||||||
'''
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
|
|
||||||
from collections import OrderedDict
|
|
||||||
if six.PY3:
|
|
||||||
from collections import UserDict # pragma: nocover
|
|
||||||
else:
|
|
||||||
from UserDict import IterableUserDict as UserDict # pragma: nocover
|
|
||||||
|
|
||||||
|
|
||||||
class Relations(OrderedDict):
    '''Mapping relation name -> relation id -> Relation.

    >>> rels = Relations()
    >>> rels['sprog']['sprog:12']['client/6']['widget']
    'remote widget'
    >>> rels['sprog']['sprog:12'].local['widget'] = 'local widget'
    >>> rels['sprog']['sprog:12'].local['widget']
    'local widget'
    >>> rels.peer.local['widget']
    'local widget on the peer relation'
    '''
    def __init__(self):
        super(Relations, self).__init__()
        # Group relation ids under their relation name, in a deterministic
        # order: names alphabetically, ids by their numeric suffix.
        for relation_name in sorted(hookenv.relation_types()):
            by_id = OrderedDict()
            self[relation_name] = by_id
            relation_ids = hookenv.relation_ids(relation_name)
            relation_ids.sort(key=lambda rid: int(rid.split(':', 1)[-1]))
            for relation_id in relation_ids:
                by_id[relation_id] = Relation(relation_id)

    @property
    def peer(self):
        '''The Relation matching the peer relation id, or None if unjoined.'''
        wanted = hookenv.peer_relation_id()
        for candidates in self.values():
            if wanted in candidates:
                return candidates[wanted]
|
|
||||||
|
|
||||||
|
|
||||||
class Relation(OrderedDict):
    '''Mapping of unit -> remote RelationInfo for a relation.

    This is an OrderedDict mapping, ordered numerically by unit number.

    Also provides access to the local RelationInfo, and peer RelationInfo
    instances by the 'local' and 'peers' attributes.

    >>> r = Relation('sprog:12')
    >>> r.keys()
    ['client/9', 'client/10'] # Ordered numerically
    >>> r['client/10']['widget'] # A remote RelationInfo setting
    'remote widget'
    >>> r.local['widget'] # The local RelationInfo setting
    'local widget'
    '''
    relid = None  # The relation id.
    relname = None  # The relation name (also known as relation type).
    service = None  # The remote service name, if known.
    local = None  # The local end's RelationInfo.
    peers = None  # Map of peer -> RelationInfo. None if no peer relation.

    def __init__(self, relid):
        # Populate self with remote units, ordered by unit number so
        # iteration order is stable across hooks.
        remote_units = hookenv.related_units(relid)
        remote_units.sort(key=lambda u: int(u.split('/', 1)[-1]))
        super(Relation, self).__init__((unit, RelationInfo(relid, unit))
                                       for unit in remote_units)

        self.relname = relid.split(':', 1)[0]
        self.relid = relid
        self.local = RelationInfo(relid, hookenv.local_unit())

        # All remote units belong to the same service; take it from the
        # first one (stays None when no remote units have joined yet).
        for relinfo in self.values():
            self.service = relinfo.service
            break

        # If we have peers, and they have joined both the provided peer
        # relation and this relation, we can peek at their data too.
        # This is useful for creating consensus without leadership.
        # NOTE: RelationInfo is deliberately built with *this* relation's
        # id (relid), not peer_relid -- we read the peers' data on this
        # relation, per the comment above.
        peer_relid = hookenv.peer_relation_id()
        if peer_relid and peer_relid != relid:
            peers = hookenv.related_units(peer_relid)
            if peers:
                peers.sort(key=lambda u: int(u.split('/', 1)[-1]))
                self.peers = OrderedDict((peer, RelationInfo(relid, peer))
                                         for peer in peers)
            else:
                self.peers = OrderedDict()
        else:
            self.peers = None

    def __str__(self):
        return '{} ({})'.format(self.relid, self.service)
|
|
||||||
|
|
||||||
|
|
||||||
class RelationInfo(UserDict):
    '''The bag of data at an end of a relation.

    Every unit participating in a relation has a single bag of
    data associated with that relation. This is that bag.

    The bag of data for the local unit may be updated. Remote data
    is immutable and will remain static for the duration of the hook.

    Changes made to the local units relation data only become visible
    to other units after the hook completes successfully. If the hook
    does not complete successfully, the changes are rolled back.

    Unlike standard Python mappings, setting an item to None is the
    same as deleting it.

    >>> relinfo = RelationInfo('db:12') # Default is the local unit.
    >>> relinfo['user'] = 'fred'
    >>> relinfo['user']
    'fred'
    >>> relinfo['user'] = None
    >>> 'fred' in relinfo
    False

    This class wraps hookenv.relation_get and hookenv.relation_set.
    All caching is left up to these two methods to avoid synchronization
    issues. Data is only loaded on demand.
    '''
    relid = None    # The relation id.
    relname = None  # The relation name (also know as the relation type).
    unit = None     # The unit id.
    number = None   # The unit number (integer).
    service = None  # The service name.

    def __init__(self, relid, unit):
        self.relid = relid
        self.relname = relid.split(':', 1)[0]
        self.unit = unit
        service_name, unit_number = unit.split('/', 1)
        self.service = service_name
        self.number = int(unit_number)

    def __str__(self):
        return '{} ({})'.format(self.relid, self.unit)

    @property
    def data(self):
        # Read-through to Juju; no local caching (see class docstring).
        return hookenv.relation_get(rid=self.relid, unit=self.unit)

    def __setitem__(self, key, value):
        if self.unit != hookenv.local_unit():
            # Remote relation data is read-only.
            raise TypeError(
                'Attempting to set {} on remote unit {}'.format(key, self.unit))
        if value is not None and not isinstance(value, six.string_types):
            # We don't do implicit casting. This would cause simple
            # types like integers to be read back as strings in subsequent
            # hooks, and mutable types would require a lot of wrapping
            # to ensure relation-set gets called when they are mutated.
            raise ValueError('Only string values allowed')
        hookenv.relation_set(self.relid, {key: value})

    def __delitem__(self, key):
        # Deleting a key and setting it to null is the same thing in
        # Juju relations.
        self[key] = None
|
|
||||||
|
|
||||||
|
|
||||||
class Leader(UserDict):
    '''Mutable mapping view of the Juju leadership settings.

    Reads go straight through to ``hookenv.leader_get()``; writes call
    ``hookenv.leader_set()`` and are only permitted on the lead unit.
    Setting a key to None deletes it, mirroring Juju semantics.
    '''
    def __init__(self):
        pass  # Don't call superclass initializer, as it will nuke self.data

    @property
    def data(self):
        # Read-through to Juju; no local caching.
        return hookenv.leader_get()

    def __setitem__(self, key, value):
        if not hookenv.is_leader():
            raise TypeError('Not the leader. Cannot change leader settings.')
        if value is not None and not isinstance(value, six.string_types):
            # We don't do implicit casting. This would cause simple
            # types like integers to be read back as strings in subsequent
            # hooks, and mutable types would require a lot of wrapping
            # to ensure leader-set gets called when they are mutated.
            raise ValueError('Only string values allowed')
        hookenv.leader_set({key: value})

    def __delitem__(self, key):
        # Deleting a key and setting it to null is the same thing in
        # Juju leadership settings.
        self[key] = None
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
@ -1,306 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
# Copyright 2013 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
|
||||||
"""
|
|
||||||
The ansible package enables you to easily use the configuration management
|
|
||||||
tool `Ansible`_ to setup and configure your charm. All of your charm
|
|
||||||
configuration options and relation-data are available as regular Ansible
|
|
||||||
variables which can be used in your playbooks and templates.
|
|
||||||
|
|
||||||
.. _Ansible: https://www.ansible.com/
|
|
||||||
|
|
||||||
Usage
|
|
||||||
=====
|
|
||||||
|
|
||||||
Here is an example directory structure for a charm to get you started::
|
|
||||||
|
|
||||||
charm-ansible-example/
|
|
||||||
|-- ansible
|
|
||||||
| |-- playbook.yaml
|
|
||||||
| `-- templates
|
|
||||||
| `-- example.j2
|
|
||||||
|-- config.yaml
|
|
||||||
|-- copyright
|
|
||||||
|-- icon.svg
|
|
||||||
|-- layer.yaml
|
|
||||||
|-- metadata.yaml
|
|
||||||
|-- reactive
|
|
||||||
| `-- example.py
|
|
||||||
|-- README.md
|
|
||||||
|
|
||||||
Running a playbook called ``playbook.yaml`` when the ``install`` hook is run
|
|
||||||
can be as simple as::
|
|
||||||
|
|
||||||
from charmhelpers.contrib import ansible
|
|
||||||
from charms.reactive import hook
|
|
||||||
|
|
||||||
@hook('install')
|
|
||||||
def install():
|
|
||||||
ansible.install_ansible_support()
|
|
||||||
ansible.apply_playbook('ansible/playbook.yaml')
|
|
||||||
|
|
||||||
Here is an example playbook that uses the ``template`` module to template the
|
|
||||||
file ``example.j2`` to the charm host and then uses the ``debug`` module to
|
|
||||||
print out all the host and Juju variables that you can use in your playbooks.
|
|
||||||
Note that you must target ``localhost`` as the playbook is run locally on the
|
|
||||||
charm host::
|
|
||||||
|
|
||||||
---
|
|
||||||
- hosts: localhost
|
|
||||||
tasks:
|
|
||||||
- name: Template a file
|
|
||||||
template:
|
|
||||||
src: templates/example.j2
|
|
||||||
dest: /tmp/example.j2
|
|
||||||
|
|
||||||
- name: Print all variables available to Ansible
|
|
||||||
debug:
|
|
||||||
var: vars
|
|
||||||
|
|
||||||
Read more online about `playbooks`_ and standard Ansible `modules`_.
|
|
||||||
|
|
||||||
.. _playbooks: https://docs.ansible.com/ansible/latest/user_guide/playbooks.html
|
|
||||||
.. _modules: https://docs.ansible.com/ansible/latest/user_guide/modules.html
|
|
||||||
|
|
||||||
A further feature of the Ansible hooks is to provide a light weight "action"
|
|
||||||
scripting tool. This is a decorator that you apply to a function, and that
|
|
||||||
function can now receive cli args, and can pass extra args to the playbook::
|
|
||||||
|
|
||||||
@hooks.action()
|
|
||||||
def some_action(amount, force="False"):
|
|
||||||
"Usage: some-action AMOUNT [force=True]" # <-- shown on error
|
|
||||||
# process the arguments
|
|
||||||
# do some calls
|
|
||||||
# return extra-vars to be passed to ansible-playbook
|
|
||||||
return {
|
|
||||||
'amount': int(amount),
|
|
||||||
'type': force,
|
|
||||||
}
|
|
||||||
|
|
||||||
You can now create a symlink to hooks.py that can be invoked like a hook, but
|
|
||||||
with cli params::
|
|
||||||
|
|
||||||
# link actions/some-action to hooks/hooks.py
|
|
||||||
|
|
||||||
actions/some-action amount=10 force=true
|
|
||||||
|
|
||||||
Install Ansible via pip
|
|
||||||
=======================
|
|
||||||
|
|
||||||
If you want to install a specific version of Ansible via pip instead of
|
|
||||||
``install_ansible_support`` which uses APT, consider using the layer options
|
|
||||||
of `layer-basic`_ to install Ansible in a virtualenv::
|
|
||||||
|
|
||||||
options:
|
|
||||||
basic:
|
|
||||||
python_packages: ['ansible==2.9.0']
|
|
||||||
include_system_packages: true
|
|
||||||
use_venv: true
|
|
||||||
|
|
||||||
.. _layer-basic: https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#layer-configuration
|
|
||||||
|
|
||||||
"""
|
|
||||||
import os
|
|
||||||
import json
|
|
||||||
import stat
|
|
||||||
import subprocess
|
|
||||||
import functools
|
|
||||||
|
|
||||||
import charmhelpers.contrib.templating.contexts
|
|
||||||
import charmhelpers.core.host
|
|
||||||
import charmhelpers.core.hookenv
|
|
||||||
import charmhelpers.fetch
|
|
||||||
|
|
||||||
|
|
||||||
# Root of the charm on disk; Juju exports CHARM_DIR for every hook
# invocation, so the '' fallback only applies outside a hook context.
charm_dir = os.environ.get('CHARM_DIR', '')

# Inventory file consumed by ansible-playbook when run with '-c local'.
ansible_hosts_path = '/etc/ansible/hosts'
# Ansible will automatically include any vars in the following
# file in its inventory when run locally.
ansible_vars_path = '/etc/ansible/host_vars/localhost'
|
|
||||||
|
|
||||||
|
|
||||||
def install_ansible_support(from_ppa=True, ppa_location='ppa:ansible/ansible'):
    """Install Ansible on this unit using APT.

    :param bool from_ppa: when True (the default), register the APT source
        given by ``ppa_location`` (the `PPA`_ linked from the Ansible
        `website`_) and refresh the package index first; when False,
        install whatever version Ubuntu's Universe repositories provide.
    :param str ppa_location: APT source added when ``from_ppa`` is True.

    .. _PPA: https://launchpad.net/~ansible/+archive/ubuntu/ansible
    .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu

    After installing the package, write an inventory entry so playbooks can
    target ``localhost`` over a local connection.
    """
    if from_ppa:
        charmhelpers.fetch.add_source(ppa_location)
        charmhelpers.fetch.apt_update(fatal=True)

    charmhelpers.fetch.apt_install('ansible')

    # Single-host inventory: run everything locally, with a remote tmp
    # directory that root can always write to.
    entry = (
        'localhost ansible_connection=local'
        ' ansible_remote_tmp=/root/.ansible/tmp'
    )
    with open(ansible_hosts_path, 'w+') as inventory:
        inventory.write(entry)
|
|
||||||
|
|
||||||
|
|
||||||
def apply_playbook(playbook, tags=None, extra_vars=None):
    """Run ``ansible-playbook`` locally against the given playbook.

    The current Juju state (config, relation data, ...) is serialised to
    ``ansible_vars_path`` first, so playbooks can use those values as
    ordinary Ansible variables.  Read more about playbook `variables`_
    online.

    .. _variables: https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html

    :param str playbook: path of the playbook, relative to the charm dir.
    :param list tags: optional tag names passed via ``--tags``; only tasks
        carrying one of these tags will run.
    :param extra_vars: optional mapping (or ``"@file.json"`` reference)
        forwarded via ``--extra-vars`` as JSON.
    :raises subprocess.CalledProcessError: if ansible-playbook fails.

    Example::

        apply_playbook(
            playbook='ansible/playbook.yaml',
            tags=['install'],
            extra_vars={'var_a': 'val_a', 'var_b': 'val_b'}
        )
    """
    joined_tags = ",".join(tags or [])

    # Dump Juju state where Ansible's local inventory picks it up; owner
    # read/write only, since it may contain secrets.
    charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
        ansible_vars_path, namespace_separator='__',
        allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR))

    environment = os.environ.copy()
    proxies = charmhelpers.core.hookenv.env_proxy_settings()
    if proxies:
        environment.update(proxies)
    # Stream ansible's log output as it is produced rather than buffering.
    environment['PYTHONUNBUFFERED'] = "1"

    command = [
        'ansible-playbook',
        '-c',
        'local',
        playbook,
    ]
    if joined_tags:
        command.extend(['--tags', '{}'.format(joined_tags)])
    if extra_vars:
        command.extend(['--extra-vars', json.dumps(extra_vars)])
    subprocess.check_call(command, env=environment)
|
|
||||||
|
|
||||||
|
|
||||||
class AnsibleHooks(charmhelpers.core.hookenv.Hooks):
    """Run a playbook with the hook-name as the tag.

    This helper builds on the standard hookenv.Hooks helper,
    but additionally runs the playbook with the hook-name specified
    using --tags (ie. running all the tasks tagged with the hook-name).

    Example::

        hooks = AnsibleHooks(playbook_path='ansible/my_machine_state.yaml')

        # All the tasks within my_machine_state.yaml tagged with 'install'
        # will be run automatically after do_custom_work()
        @hooks.hook()
        def install():
            do_custom_work()

        # For most of your hooks, you won't need to do anything other
        # than run the tagged tasks for the hook:
        @hooks.hook('config-changed', 'start', 'stop')
        def just_use_playbook():
            pass

        # As a convenience, you can avoid the above noop function by specifying
        # the hooks which are handled by ansible-only and they'll be registered
        # for you:
        # hooks = AnsibleHooks(
        #     'ansible/my_machine_state.yaml',
        #     default_hooks=['config-changed', 'start', 'stop'])

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)
    """

    def __init__(self, playbook_path, default_hooks=None):
        """Register any hooks handled by ansible.

        :param str playbook_path: playbook run after every hook/action.
        :param list default_hooks: hook names that need no Python handler;
            a no-op is registered for each so that only the playbook tasks
            tagged with the hook name are run.
        """
        super(AnsibleHooks, self).__init__()

        self._actions = {}
        self.playbook_path = playbook_path

        default_hooks = default_hooks or []

        def noop(*args, **kwargs):
            pass

        for hook in default_hooks:
            self.register(hook, noop)

    def register_action(self, name, function):
        """Register a hook"""
        self._actions[name] = function

    def execute(self, args):
        """Execute the hook followed by the playbook using the hook as tag.

        Registered actions receive the remaining command-line arguments
        and may return a dict of extra vars for the playbook; any other
        hook is dispatched through hookenv.Hooks as usual.
        """
        hook_name = os.path.basename(args[0])
        extra_vars = None
        if hook_name in self._actions:
            extra_vars = self._actions[hook_name](args[1:])
        else:
            super(AnsibleHooks, self).execute(args)

        charmhelpers.contrib.ansible.apply_playbook(
            self.playbook_path, tags=[hook_name], extra_vars=extra_vars)

    def action(self, *action_names):
        """Decorator, registering them as actions"""
        def action_wrapper(decorated):

            @functools.wraps(decorated)
            def wrapper(argv):
                # Split each 'key=value' argument on the FIRST '=' only, so
                # values may themselves contain '=' (a bare split('=')
                # raised ValueError for e.g. token=a=b).
                kwargs = dict(arg.split('=', 1) for arg in argv)
                try:
                    return decorated(**kwargs)
                except TypeError as e:
                    # Surface the action's usage docstring alongside the
                    # bad-arguments error.
                    if decorated.__doc__:
                        e.args += (decorated.__doc__,)
                    raise

            self.register_action(decorated.__name__, wrapper)
            # Also register under the dashed name, so an actions/some-action
            # symlink maps to def some_action().
            if '_' in decorated.__name__:
                self.register_action(
                    decorated.__name__.replace('_', '-'), wrapper)

            return wrapper

        return action_wrapper
|
|
||||||
|
|
@ -1,124 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import time
|
|
||||||
import os
|
|
||||||
from distutils.spawn import find_executable
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
in_relation_hook,
|
|
||||||
relation_ids,
|
|
||||||
relation_set,
|
|
||||||
relation_get,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def action_set(key, val):
    """Publish ``key=val`` as Juju action output via the action-set tool.

    Dict values are flattened recursively using dotted key names
    (``key.subkey=...``).

    :return: True when the action-set executable was found on PATH and the
        value was forwarded; False otherwise.
    """
    if not find_executable('action-set'):
        return False

    if isinstance(val, dict):
        # Flatten nested mappings into dotted key names.
        for subkey, subval in val.items():
            action_set('%s.%s' % (key, subkey), subval)
        return True

    subprocess.check_call(['action-set', '%s=%s' % (key, val)])
    return True
|
|
||||||
|
|
||||||
|
|
||||||
class Benchmark():
    """
    Helper class for the `benchmark` interface.

    :param list actions: Define the actions that are also benchmarks

    From inside the benchmark-relation-changed hook, you would
    Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom'])

    Examples:

        siege = Benchmark(['siege'])
        siege.start()
        [... run siege ...]
        # The higher the score, the better the benchmark
        siege.set_composite_score(16.70, 'trans/sec', 'desc')
        siege.finish()

    """

    BENCHMARK_CONF = '/etc/benchmark.conf'  # Replaced in testing

    # Relation keys that must ALL be present before the config is written.
    required_keys = [
        'hostname',
        'port',
        'graphite_port',
        'graphite_endpoint',
        'api_port'
    ]

    def __init__(self, benchmarks=None):
        # Only meaningful when invoked from a relation hook; otherwise
        # there is no relation data to advertise or read.
        if not in_relation_hook():
            return

        if benchmarks is not None:
            # Advertise the available benchmark actions on every
            # benchmark relation.
            for rid in sorted(relation_ids('benchmark')):
                relation_set(relation_id=rid, relation_settings={
                    'benchmarks': ",".join(benchmarks)
                })

        # Collect the collector's connection details from the relation;
        # all keys must be present or nothing is written.
        config = {}
        for key in self.required_keys:
            val = relation_get(key)
            if val is None:
                # We don't have all of the required keys
                config = {}
                break
            config[key] = val

        if config:
            with open(self.BENCHMARK_CONF, 'w') as f:
                f.writelines(
                    "%s=%s\n" % (key, val) for key, val in config.items())

    @staticmethod
    def start():
        """Record the benchmark start time as action output."""
        action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))

        # If the collectd charm is also installed, tell it to send a
        # snapshot of the current profile data.
        COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
        if os.path.exists(COLLECT_PROFILE_DATA):
            subprocess.check_output([COLLECT_PROFILE_DATA])

    @staticmethod
    def finish():
        """Record the benchmark stop time as action output."""
        action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ'))

    @staticmethod
    def set_composite_score(value, units, direction='asc'):
        """
        Set the composite score for a benchmark run. This is a single number
        representative of the benchmark results. This could be the most
        important metric, or an amalgamation of metric scores.
        """
        return action_set(
            "meta.composite",
            {'value': value, 'units': units, 'direction': direction}
        )
|
|
||||||
|
|
@ -1,4 +0,0 @@
|
||||||
Source lp:charm-tools/trunk
|
|
||||||
|
|
||||||
charm-tools/helpers/python/charmhelpers/__init__.py -> charmhelpers/charmhelpers/contrib/charmhelpers/__init__.py
|
|
||||||
charm-tools/helpers/python/charmhelpers/tests/test_charmhelpers.py -> charmhelpers/tests/contrib/charmhelpers/test_charmhelpers.py
|
|
||||||
|
|
@ -1,203 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import warnings
|
|
||||||
warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) # noqa
|
|
||||||
|
|
||||||
import operator
|
|
||||||
import tempfile
|
|
||||||
import time
|
|
||||||
import yaml
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
import six
|
|
||||||
if six.PY3:
|
|
||||||
from urllib.request import urlopen
|
|
||||||
from urllib.error import (HTTPError, URLError)
|
|
||||||
else:
|
|
||||||
from urllib2 import (urlopen, HTTPError, URLError)
|
|
||||||
|
|
||||||
"""Helper functions for writing Juju charms in Python."""

__metaclass__ = type
# Everything commented out below has been re-implemented elsewhere in
# charmhelpers (see the per-name notes); only the client-side helpers
# remain exported here.
__all__ = [
    # 'get_config',         # core.hookenv.config()
    # 'log',                # core.hookenv.log()
    # 'log_entry',          # core.hookenv.log()
    # 'log_exit',           # core.hookenv.log()
    # 'relation_get',       # core.hookenv.relation_get()
    # 'relation_set',       # core.hookenv.relation_set()
    # 'relation_ids',       # core.hookenv.relation_ids()
    # 'relation_list',      # core.hookenv.relation_units()
    # 'config_get',         # core.hookenv.config()
    # 'unit_get',           # core.hookenv.unit_get()
    # 'open_port',          # core.hookenv.open_port()
    # 'close_port',         # core.hookenv.close_port()
    # 'service_control',    # core.host.service()
    'unit_info',  # client-side, NOT IMPLEMENTED
    'wait_for_machine',  # client-side, NOT IMPLEMENTED
    'wait_for_page_contents',  # client-side, NOT IMPLEMENTED
    'wait_for_relation',  # client-side, NOT IMPLEMENTED
    'wait_for_unit',  # client-side, NOT IMPLEMENTED
]


# Interval, in seconds, between polls in the wait_for_* helpers below.
SLEEP_AMOUNT = 0.1
|
|
||||||
|
|
||||||
|
|
||||||
# We create a juju_status Command here because it makes testing much,
|
|
||||||
# much easier.
|
|
||||||
def juju_status():
    """Return the raw YAML output of ``juju status`` as a string.

    Callers such as unit_info() and get_machine_data() feed the result to
    yaml.safe_load(), so this must return the command's stdout.  The
    previous subprocess.check_call() returned the exit code (0), which is
    not parseable status text.
    """
    return subprocess.check_output(['juju', 'status']).decode('UTF-8')
|
|
||||||
|
|
||||||
# re-implemented as charmhelpers.fetch.configure_sources()
|
|
||||||
# def configure_source(update=False):
|
|
||||||
# source = config_get('source')
|
|
||||||
# if ((source.startswith('ppa:') or
|
|
||||||
# source.startswith('cloud:') or
|
|
||||||
# source.startswith('http:'))):
|
|
||||||
# run('add-apt-repository', source)
|
|
||||||
# if source.startswith("http:"):
|
|
||||||
# run('apt-key', 'import', config_get('key'))
|
|
||||||
# if update:
|
|
||||||
# run('apt-get', 'update')
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def make_charm_config_file(charm_config):
    """Serialise *charm_config* to YAML in a named temporary file.

    The NamedTemporaryFile instance is returned instead of just the name
    because we want to take advantage of garbage collection-triggered
    deletion of the temp file when it goes out of scope in the caller.
    """
    config_file = tempfile.NamedTemporaryFile(mode='w+')
    config_file.write(yaml.dump(charm_config))
    config_file.flush()
    return config_file
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def unit_info(service_name, item_name, data=None, unit=None):
    """Look up *item_name* for one unit of *service_name*.

    :param data: pre-parsed ``juju status`` mapping; fetched live when None.
    :param unit: explicit unit name; defaults to the lowest-numbered unit.
    :return: the requested item, or '' when the service has not yet shown
        up in ``juju status``.
    """
    if data is None:
        data = yaml.safe_load(juju_status())
    service = data['services'].get(service_name)
    if service is None:
        # XXX 2012-02-08 gmb:
        #     Copes with the race between deploying a service and it
        #     appearing in `juju status`; ideally this would fail more
        #     noisily after a while.
        return ''
    units = service['units']
    if unit is None:
        # Sorting ensures that, with no explicit unit, the first unit for
        # the service (the one with the lowest number) is the one whose
        # data gets returned.
        unit = sorted(units.keys())[0]
    return units[unit][item_name]
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def get_machine_data():
    """Return the 'machines' mapping from the current ``juju status``."""
    status = yaml.safe_load(juju_status())
    return status['machines']
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def wait_for_machine(num_machines=1, timeout=300):
    """Wait `timeout` seconds for `num_machines` machines to come up.

    This wait_for... function can be called by other wait_for functions
    whose timeouts might be too short in situations where only a bare
    Juju setup has been bootstrapped.

    :return: A tuple of (num_machines, time_taken).  This is used for
        testing.
    :raises RuntimeError: when the timeout expires first.
    """
    # This is a hack: the easiest way to tell which environment we're in
    # (LXC vs EC2) is the dns-name of the first machine -- localhost means
    # LXC, where there is nothing to wait for.
    if get_machine_data()[0]['dns-name'] == 'localhost':
        return 1, 0
    start_time = time.time()
    while True:
        # Drop the first machine (the Zookeeper): it is not one we need to
        # wait for.  That only works for EC2 environments, which is why we
        # returned early above for LXC.
        machine_data = get_machine_data()
        machines = [
            machine_data[key] for key in list(machine_data.keys())[1:]]
        ready = len(machines) >= num_machines and all(
            machine.get('instance-state') == 'running'
            for machine in machines)
        if ready:
            break
        if time.time() - start_time >= timeout:
            raise RuntimeError('timeout waiting for service to start')
        time.sleep(SLEEP_AMOUNT)
    return num_machines, time.time() - start_time
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def wait_for_unit(service_name, timeout=480):
    """Wait `timeout` seconds for a given service name to come up.

    :raises RuntimeError: on timeout, or when the unit reaches a terminal
        state other than 'started'.
    """
    wait_for_machine(num_machines=1)
    began = time.time()
    state = unit_info(service_name, 'agent-state')
    # Keep polling until the agent reaches a terminal state.
    while state != 'started' and 'error' not in state:
        if time.time() - began >= timeout:
            raise RuntimeError('timeout waiting for service to start')
        time.sleep(SLEEP_AMOUNT)
        state = unit_info(service_name, 'agent-state')
    if state != 'started':
        raise RuntimeError('unit did not start, agent-state: ' + state)
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def wait_for_relation(service_name, relation_name, timeout=120):
    """Wait `timeout` seconds for a given relation to come up.

    :raises RuntimeError: when the timeout expires first.
    """
    deadline = time.time() + timeout
    while True:
        relation = unit_info(service_name, 'relations').get(relation_name)
        if relation is not None and relation['state'] == 'up':
            return
        if time.time() >= deadline:
            raise RuntimeError('timeout waiting for relation to be up')
        time.sleep(SLEEP_AMOUNT)
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
def wait_for_page_contents(url, contents, timeout=120, validate=None):
    """Poll *url* until ``validate(page, contents)`` holds or time runs out.

    The default validator checks substring containment.  HTTP and URL
    errors are treated as "not ready yet" and retried until *timeout*.

    :return: the page body that satisfied the validator.
    :raises RuntimeError: when the timeout expires first.
    """
    validate = operator.contains if validate is None else validate
    deadline = time.time() + timeout
    while True:
        try:
            stream = urlopen(url)
        except (HTTPError, URLError):
            # Server not up (or erroring) yet -- keep polling.
            pass
        else:
            page = stream.read()
            if validate(page, contents):
                return page
        if time.time() >= deadline:
            raise RuntimeError('timeout waiting for contents of ' + url)
        time.sleep(SLEEP_AMOUNT)
|
|
||||||
|
|
@ -1,14 +0,0 @@
|
||||||
Source: lp:charmsupport/trunk
|
|
||||||
|
|
||||||
charmsupport/charmsupport/execd.py -> charm-helpers/charmhelpers/contrib/charmsupport/execd.py
|
|
||||||
charmsupport/charmsupport/hookenv.py -> charm-helpers/charmhelpers/contrib/charmsupport/hookenv.py
|
|
||||||
charmsupport/charmsupport/host.py -> charm-helpers/charmhelpers/contrib/charmsupport/host.py
|
|
||||||
charmsupport/charmsupport/nrpe.py -> charm-helpers/charmhelpers/contrib/charmsupport/nrpe.py
|
|
||||||
charmsupport/charmsupport/volumes.py -> charm-helpers/charmhelpers/contrib/charmsupport/volumes.py
|
|
||||||
|
|
||||||
charmsupport/tests/test_execd.py -> charm-helpers/tests/contrib/charmsupport/test_execd.py
|
|
||||||
charmsupport/tests/test_hookenv.py -> charm-helpers/tests/contrib/charmsupport/test_hookenv.py
|
|
||||||
charmsupport/tests/test_host.py -> charm-helpers/tests/contrib/charmsupport/test_host.py
|
|
||||||
charmsupport/tests/test_nrpe.py -> charm-helpers/tests/contrib/charmsupport/test_nrpe.py
|
|
||||||
|
|
||||||
charmsupport/bin/charmsupport -> charm-helpers/bin/contrib/charmsupport/charmsupport
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
@ -1,522 +0,0 @@
|
||||||
# Copyright 2012-2021 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
"""Compatibility with the nrpe-external-master charm"""
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
|
||||||
|
|
||||||
import glob
|
|
||||||
import grp
|
|
||||||
import os
|
|
||||||
import pwd
|
|
||||||
import re
|
|
||||||
import shlex
|
|
||||||
import shutil
|
|
||||||
import subprocess
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config,
|
|
||||||
hook_name,
|
|
||||||
local_unit,
|
|
||||||
log,
|
|
||||||
relation_get,
|
|
||||||
relation_ids,
|
|
||||||
relation_set,
|
|
||||||
relations_of_type,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.host import service
|
|
||||||
from charmhelpers.core import host
|
|
||||||
|
|
||||||
# This module adds compatibility with the nrpe-external-master and plain nrpe
|
|
||||||
# subordinate charms. To use it in your charm:
|
|
||||||
#
|
|
||||||
# 1. Update metadata.yaml
|
|
||||||
#
|
|
||||||
# provides:
|
|
||||||
# (...)
|
|
||||||
# nrpe-external-master:
|
|
||||||
# interface: nrpe-external-master
|
|
||||||
# scope: container
|
|
||||||
#
|
|
||||||
# and/or
|
|
||||||
#
|
|
||||||
# provides:
|
|
||||||
# (...)
|
|
||||||
# local-monitors:
|
|
||||||
# interface: local-monitors
|
|
||||||
# scope: container
|
|
||||||
|
|
||||||
#
|
|
||||||
# 2. Add the following to config.yaml
|
|
||||||
#
|
|
||||||
# nagios_context:
|
|
||||||
# default: "juju"
|
|
||||||
# type: string
|
|
||||||
# description: |
|
|
||||||
# Used by the nrpe subordinate charms.
|
|
||||||
# A string that will be prepended to instance name to set the host name
|
|
||||||
# in nagios. So for instance the hostname would be something like:
|
|
||||||
# juju-myservice-0
|
|
||||||
# If you're running multiple environments with the same services in them
|
|
||||||
# this allows you to differentiate between them.
|
|
||||||
# nagios_servicegroups:
|
|
||||||
# default: ""
|
|
||||||
# type: string
|
|
||||||
# description: |
|
|
||||||
# A comma-separated list of nagios servicegroups.
|
|
||||||
# If left empty, the nagios_context will be used as the servicegroup
|
|
||||||
#
|
|
||||||
# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
|
|
||||||
#
|
|
||||||
# 4. Update your hooks.py with something like this:
|
|
||||||
#
|
|
||||||
# from charmsupport.nrpe import NRPE
|
|
||||||
# (...)
|
|
||||||
# def update_nrpe_config():
|
|
||||||
# nrpe_compat = NRPE()
|
|
||||||
# nrpe_compat.add_check(
|
|
||||||
# shortname = "myservice",
|
|
||||||
# description = "Check MyService",
|
|
||||||
# check_cmd = "check_http -w 2 -c 10 http://localhost"
|
|
||||||
# )
|
|
||||||
# nrpe_compat.add_check(
|
|
||||||
# "myservice_other",
|
|
||||||
# "Check for widget failures",
|
|
||||||
# check_cmd = "/srv/myapp/scripts/widget_check"
|
|
||||||
# )
|
|
||||||
# nrpe_compat.write()
|
|
||||||
#
|
|
||||||
# def config_changed():
|
|
||||||
# (...)
|
|
||||||
# update_nrpe_config()
|
|
||||||
#
|
|
||||||
# def nrpe_external_master_relation_changed():
|
|
||||||
# update_nrpe_config()
|
|
||||||
#
|
|
||||||
# def local_monitors_relation_changed():
|
|
||||||
# update_nrpe_config()
|
|
||||||
#
|
|
||||||
# 4.a If your charm is a subordinate charm set primary=False
|
|
||||||
#
|
|
||||||
# from charmsupport.nrpe import NRPE
|
|
||||||
# (...)
|
|
||||||
# def update_nrpe_config():
|
|
||||||
# nrpe_compat = NRPE(primary=False)
|
|
||||||
#
|
|
||||||
# 5. ln -s hooks.py nrpe-external-master-relation-changed
|
|
||||||
# ln -s hooks.py local-monitors-relation-changed
|
|
||||||
|
|
||||||
|
|
||||||
class CheckException(Exception):
    """Raised when an NRPE check definition is invalid."""
|
|
||||||
|
|
||||||
|
|
||||||
class Check(object):
|
|
||||||
shortname_re = '[A-Za-z0-9-_.@]+$'
|
|
||||||
service_template = ("""
|
|
||||||
#---------------------------------------------------
|
|
||||||
# This file is Juju managed
|
|
||||||
#---------------------------------------------------
|
|
||||||
define service {{
|
|
||||||
use active-service
|
|
||||||
host_name {nagios_hostname}
|
|
||||||
service_description {nagios_hostname}[{shortname}] """
|
|
||||||
"""{description}
|
|
||||||
check_command check_nrpe!{command}
|
|
||||||
servicegroups {nagios_servicegroup}
|
|
||||||
{service_config_overrides}
|
|
||||||
}}
|
|
||||||
""")
|
|
||||||
|
|
||||||
def __init__(self, shortname, description, check_cmd, max_check_attempts=None):
|
|
||||||
super(Check, self).__init__()
|
|
||||||
# XXX: could be better to calculate this from the service name
|
|
||||||
if not re.match(self.shortname_re, shortname):
|
|
||||||
raise CheckException("shortname must match {}".format(
|
|
||||||
Check.shortname_re))
|
|
||||||
self.shortname = shortname
|
|
||||||
self.command = "check_{}".format(shortname)
|
|
||||||
# Note: a set of invalid characters is defined by the
|
|
||||||
# Nagios server config
|
|
||||||
# The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
|
|
||||||
self.description = description
|
|
||||||
self.check_cmd = self._locate_cmd(check_cmd)
|
|
||||||
self.max_check_attempts = max_check_attempts
|
|
||||||
|
|
||||||
def _get_check_filename(self):
|
|
||||||
return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
|
|
||||||
|
|
||||||
def _get_service_filename(self, hostname):
|
|
||||||
return os.path.join(NRPE.nagios_exportdir,
|
|
||||||
'service__{}_{}.cfg'.format(hostname, self.command))
|
|
||||||
|
|
||||||
def _locate_cmd(self, check_cmd):
|
|
||||||
search_path = (
|
|
||||||
'/usr/lib/nagios/plugins',
|
|
||||||
'/usr/local/lib/nagios/plugins',
|
|
||||||
)
|
|
||||||
parts = shlex.split(check_cmd)
|
|
||||||
for path in search_path:
|
|
||||||
if os.path.exists(os.path.join(path, parts[0])):
|
|
||||||
command = os.path.join(path, parts[0])
|
|
||||||
if len(parts) > 1:
|
|
||||||
command += " " + " ".join(parts[1:])
|
|
||||||
return command
|
|
||||||
log('Check command not found: {}'.format(parts[0]))
|
|
||||||
return ''
|
|
||||||
|
|
||||||
def _remove_service_files(self):
|
|
||||||
if not os.path.exists(NRPE.nagios_exportdir):
|
|
||||||
return
|
|
||||||
for f in os.listdir(NRPE.nagios_exportdir):
|
|
||||||
if f.endswith('_{}.cfg'.format(self.command)):
|
|
||||||
os.remove(os.path.join(NRPE.nagios_exportdir, f))
|
|
||||||
|
|
||||||
def remove(self, hostname):
|
|
||||||
nrpe_check_file = self._get_check_filename()
|
|
||||||
if os.path.exists(nrpe_check_file):
|
|
||||||
os.remove(nrpe_check_file)
|
|
||||||
self._remove_service_files()
|
|
||||||
|
|
||||||
    def write(self, nagios_context, hostname, nagios_servicegroups):
        """Write this check's nrpe command file and, when the nagios export
        dir is available, its exported service definition.

        :param str nagios_context: nagios context (passed through to
            write_service_config)
        :param str hostname: hostname to export the service under
        :param str nagios_servicegroups: servicegroups string; when truthy an
            explanatory header is added to the generated check file
        """
        nrpe_check_file = self._get_check_filename()
        with open(nrpe_check_file, 'w') as nrpe_check_config:
            nrpe_check_config.write("# check {}\n".format(self.shortname))
            if nagios_servicegroups:
                nrpe_check_config.write(
                    "# The following header was added automatically by juju\n")
                nrpe_check_config.write(
                    "# Modifying it will affect nagios monitoring and alerting\n")
                nrpe_check_config.write(
                    "# servicegroups: {}\n".format(nagios_servicegroups))
            nrpe_check_config.write("command[{}]={}\n".format(
                self.command, self.check_cmd))

        # The export dir only exists once nagios is installed/related; log
        # and skip the service definition rather than fail.
        if not os.path.exists(NRPE.nagios_exportdir):
            log('Not writing service config as {} is not accessible'.format(
                NRPE.nagios_exportdir))
        else:
            self.write_service_config(nagios_context, hostname,
                                      nagios_servicegroups)
|
|
||||||
|
|
||||||
    def write_service_config(self, nagios_context, hostname,
                             nagios_servicegroups):
        """Render Check.service_template and write the exported nagios
        service definition file for this check.

        :param str nagios_context: nagios context (unused directly; the
            template draws from the other variables)
        :param str hostname: hostname key used in the exported filename and
            template
        :param str nagios_servicegroups: value for the template's
            nagios_servicegroup variable
        """
        # Drop any stale service files for this check first.
        self._remove_service_files()

        if self.max_check_attempts:
            service_config_overrides = ' max_check_attempts {}'.format(
                self.max_check_attempts
            )  # Note indentation is here rather than in the template to avoid trailing spaces
        else:
            service_config_overrides = ''  # empty string to avoid printing 'None'
        templ_vars = {
            'nagios_hostname': hostname,
            'nagios_servicegroup': nagios_servicegroups,
            'description': self.description,
            'shortname': self.shortname,
            'command': self.command,
            'service_config_overrides': service_config_overrides,
        }
        # service_template is a class attribute defined outside this view.
        nrpe_service_text = Check.service_template.format(**templ_vars)
        nrpe_service_file = self._get_service_filename(hostname)
        with open(nrpe_service_file, 'w') as nrpe_service_config:
            nrpe_service_config.write(str(nrpe_service_text))
|
|
||||||
|
|
||||||
    def run(self):
        """Run this check's command once, locally.

        NOTE(review): check_cmd is a single string that may contain
        arguments (see _locate_cmd); subprocess.call() with a string and
        shell=False treats the whole string as the program name, so any
        arguments are not passed — confirm callers only use argument-free
        commands, or switch to shlex.split(self.check_cmd).
        """
        subprocess.call(self.check_cmd)
|
|
||||||
|
|
||||||
|
|
||||||
class NRPE(object):
    """Manage a unit's collection of NRPE checks.

    Checks registered via add_check()/remove_check() are written to the
    nrpe/nagios config directories and published on the local-monitors and
    nrpe-external-master relations by write().
    """
    nagios_logdir = '/var/log/nagios'
    nagios_exportdir = '/var/lib/nagios/export'
    nrpe_confdir = '/etc/nagios/nrpe.d'
    homedir = '/var/lib/nagios'  # home dir provided by nagios-nrpe-server

    def __init__(self, hostname=None, primary=True):
        """
        :param str hostname: nagios hostname to export services under; when
            None, the relation-provided hostname is used, falling back to
            "<nagios_context>-<unit-name>"
        :param bool primary: primary status advertised on any
            nrpe-external-master relation
        """
        super(NRPE, self).__init__()
        self.config = config()
        self.primary = primary
        self.nagios_context = self.config['nagios_context']
        # Fall back to the nagios context when no servicegroups configured.
        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
            self.nagios_servicegroups = self.config['nagios_servicegroups']
        else:
            self.nagios_servicegroups = self.nagios_context
        self.unit_name = local_unit().replace('/', '-')
        if hostname:
            self.hostname = hostname
        else:
            nagios_hostname = get_nagios_hostname()
            if nagios_hostname:
                self.hostname = nagios_hostname
            else:
                self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
        self.checks = []
        # Iff in an nrpe-external-master relation hook, set primary status
        relation = relation_ids('nrpe-external-master')
        if relation:
            log("Setting charm primary status {}".format(primary))
            for rid in relation:
                relation_set(relation_id=rid, relation_settings={'primary': self.primary})
        # Shortnames queued for removal from relation data on next write().
        self.remove_check_queue = set()

    @classmethod
    def does_nrpe_conf_dir_exist(cls):
        """Return True if the nrpe_confdir directory exists."""
        return os.path.isdir(cls.nrpe_confdir)

    def add_check(self, *args, **kwargs):
        """Register a Check; takes the same arguments as Check().

        Also un-queues the check's shortname from any pending removal so a
        re-added check survives the next write().
        """
        shortname = None
        if kwargs.get('shortname') is None:
            if len(args) > 0:
                shortname = args[0]
        else:
            shortname = kwargs['shortname']

        self.checks.append(Check(*args, **kwargs))
        try:
            self.remove_check_queue.remove(shortname)
        except KeyError:
            pass

    def remove_check(self, *args, **kwargs):
        """Remove a check's config files now and queue it for removal from
        the monitors relation data on the next write().

        :raises ValueError: when no 'shortname' keyword is given
        """
        if kwargs.get('shortname') is None:
            raise ValueError('shortname of check must be specified')

        # Use sensible defaults if they're not specified - these are not
        # actually used during removal, but they're required for constructing
        # the Check object; check_disk is chosen because it's part of the
        # nagios-plugins-basic package.
        if kwargs.get('check_cmd') is None:
            kwargs['check_cmd'] = 'check_disk'
        if kwargs.get('description') is None:
            kwargs['description'] = ''

        check = Check(*args, **kwargs)
        check.remove(self.hostname)
        self.remove_check_queue.add(kwargs['shortname'])

    def write(self):
        """Write out all registered checks and publish/refresh them on the
        local-monitors and nrpe-external-master relations.

        Returns early (without error) when the nagios user is not set up or
        the nrpe conf dir does not exist yet.
        """
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except Exception:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}

        # check that the charm can write to the conf dir. If not, then nagios
        # probably isn't installed, and we can defer.
        if not self.does_nrpe_conf_dir_exist():
            return

        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }
            # If we were passed max_check_attempts, add that to the relation data
            if nrpecheck.max_check_attempts is not None:
                nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts

        # update-status hooks are configured to firing every 5 minutes by
        # default. When nagios-nrpe-server is restarted, the nagios server
        # reports checks failing causing unnecessary alerts. Let's not restart
        # on update-status hooks.
        if not hook_name() == 'update-status':
            service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            reldata = relation_get(unit=local_unit(), rid=rid)
            if 'monitors' in reldata:
                # update the existing set of monitors with the new data
                old_monitors = yaml.safe_load(reldata['monitors'])
                old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
                # remove keys that are in the remove_check_queue
                old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
                                     if k not in self.remove_check_queue}
                # update/add nrpe_monitors
                old_nrpe_monitors.update(nrpe_monitors)
                old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
                # write back to the relation
                relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
            else:
                # write a brand new set of monitors, as no existing ones.
                relation_set(relation_id=rid, monitors=yaml.dump(monitors))

        self.remove_check_queue.clear()
|
|
||||||
|
|
||||||
|
|
||||||
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
    """
    Query the relation with the nrpe subordinate and return the
    nagios_host_context, or None when no relation provides one.

    :param str relation_name: Name of relation nrpe sub joined to
    """
    contexts = (rel['nagios_host_context']
                for rel in relations_of_type(relation_name)
                if 'nagios_host_context' in rel)
    return next(contexts, None)
|
|
||||||
|
|
||||||
|
|
||||||
def get_nagios_hostname(relation_name='nrpe-external-master'):
    """
    Query the relation with the nrpe subordinate and return the
    nagios_hostname, or None when no relation provides one.

    :param str relation_name: Name of relation nrpe sub joined to
    """
    hostnames = (rel['nagios_hostname']
                 for rel in relations_of_type(relation_name)
                 if 'nagios_hostname' in rel)
    return next(hostnames, None)
|
|
||||||
|
|
||||||
|
|
||||||
def get_nagios_unit_name(relation_name='nrpe-external-master'):
    """
    Return the nagios unit name, prefixed with the host context (as
    "context:unit") when the relation supplies one.

    :param str relation_name: Name of relation nrpe sub joined to
    """
    host_context = get_nagios_hostcontext(relation_name)
    unit = local_unit()
    if host_context:
        return "%s:%s" % (host_context, unit)
    return unit
|
|
||||||
|
|
||||||
|
|
||||||
def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
    """
    Add a process/service check for each service in the list, choosing the
    check style from the init system in use (systemd, upstart or sysv).

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    :param bool immediate_check: For sysv init, run the service check immediately
    """
    for svc in services:
        # Don't add a check for these services from neutron-gateway
        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
            # BUG FIX: this was a bare `next` expression, which only
            # references the builtin and does nothing; `continue` actually
            # skips the service as intended.
            continue

        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc

        if host.init_is_systemd(service_name=svc):
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_systemd.py %s' % svc
            )
        elif os.path.exists(upstart_init):
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_upstart_job %s' % svc
            )
        elif os.path.exists(sysv_init):
            # sysv has no native status monitoring, so a cron job records the
            # init script's exit status to a file which nrpe then checks.
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
            croncmd = (
                '/usr/local/lib/nagios/plugins/check_exit_status.pl '
                '-e -s /etc/init.d/%s status' % svc
            )
            cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
            # Context managers guarantee the files are closed even on error
            # (the previous code leaked the handle if write() raised).
            with open(cronpath, 'w') as f:
                f.write(cron_file)
            nrpe.add_check(
                shortname=svc,
                description='service check {%s}' % unit_name,
                check_cmd='check_status_file.py -f %s' % checkpath,
            )
            # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
            # (LP: #1670223).
            if immediate_check and os.path.isdir(nrpe.homedir):
                with open(checkpath, 'w') as f:
                    subprocess.call(
                        croncmd.split(),
                        stdout=f,
                        stderr=subprocess.STDOUT
                    )
                os.chmod(checkpath, 0o644)
|
|
||||||
|
|
||||||
|
|
||||||
def copy_nrpe_checks(nrpe_files_dir=None):
    """
    Copy the charm's check_* scripts into the local nagios plugins dir.

    :param str nrpe_files_dir: directory holding the check_* files; when
        None, charmhelpers/contrib/openstack/files is located under
        CHARM_DIR or CHARM_DIR/hooks.
    :raises RuntimeError: when the charmhelpers files dir cannot be found
    """
    NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
    if nrpe_files_dir is None:
        # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
        for segment in ['.', 'hooks']:
            candidate = os.path.abspath(os.path.join(
                os.getenv('CHARM_DIR'), segment, 'charmhelpers', 'contrib',
                'openstack', 'files'))
            if os.path.isdir(candidate):
                nrpe_files_dir = candidate
                break
        else:
            raise RuntimeError("Couldn't find charmhelpers directory")
    if not os.path.exists(NAGIOS_PLUGINS):
        os.makedirs(NAGIOS_PLUGINS)
    for source in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
        if os.path.isfile(source):
            destination = os.path.join(NAGIOS_PLUGINS,
                                       os.path.basename(source))
            shutil.copy2(source, destination)
|
|
||||||
|
|
||||||
|
|
||||||
def add_haproxy_checks(nrpe, unit_name):
    """
    Add the standard pair of HAProxy NRPE checks (backend servers up and
    queue depth).

    :param NRPE nrpe: NRPE object to add check to
    :param str unit_name: Unit name to use in check description
    """
    haproxy_checks = (
        ('haproxy_servers', 'Check HAProxy {%s}' % unit_name,
         'check_haproxy.sh'),
        ('haproxy_queue', 'Check HAProxy queue depth {%s}' % unit_name,
         'check_haproxy_queue_depth.sh'),
    )
    for shortname, description, check_cmd in haproxy_checks:
        nrpe.add_check(shortname=shortname,
                       description=description,
                       check_cmd=check_cmd)
|
|
||||||
|
|
||||||
|
|
||||||
def remove_deprecated_check(nrpe, deprecated_services):
    """
    Remove the NRPE checks for every deprecated service in the list.

    :param nrpe: NRPE object to remove check from
    :type nrpe: NRPE
    :param deprecated_services: List of deprecated services that are removed
    :type deprecated_services: list
    """
    for service_name in deprecated_services:
        log('Deprecated service: {}'.format(service_name))
        nrpe.remove_check(shortname=service_name)
|
|
||||||
|
|
@ -1,173 +0,0 @@
|
||||||
# Copyright 2014-2021 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
'''
|
|
||||||
Functions for managing volumes in juju units. One volume is supported per unit.
|
|
||||||
Subordinates may have their own storage, provided it is on its own partition.
|
|
||||||
|
|
||||||
Configuration stanzas::
|
|
||||||
|
|
||||||
volume-ephemeral:
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
description: >
|
|
||||||
If false, a volume is mounted as specified in "volume-map"
|
|
||||||
If true, ephemeral storage will be used, meaning that log data
|
|
||||||
will only exist as long as the machine. YOU HAVE BEEN WARNED.
|
|
||||||
volume-map:
|
|
||||||
type: string
|
|
||||||
default: {}
|
|
||||||
description: >
|
|
||||||
YAML map of units to device names, e.g:
|
|
||||||
"{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
|
|
||||||
Service units will raise a configure-error if volume-ephemeral
|
|
||||||
is 'true' and no volume-map value is set. Use 'juju set' to set a
|
|
||||||
value and 'juju resolved' to complete configuration.
|
|
||||||
|
|
||||||
Usage::
|
|
||||||
|
|
||||||
from charmsupport.volumes import configure_volume, VolumeConfigurationError
|
|
||||||
from charmsupport.hookenv import log, ERROR
|
|
||||||
    def pre_mount_hook():
        stop_service('myservice')
|
|
||||||
def post_mount_hook():
|
|
||||||
start_service('myservice')
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
try:
|
|
||||||
configure_volume(before_change=pre_mount_hook,
|
|
||||||
after_change=post_mount_hook)
|
|
||||||
except VolumeConfigurationError:
|
|
||||||
log('Storage could not be configured', ERROR)
|
|
||||||
|
|
||||||
'''
|
|
||||||
|
|
||||||
# XXX: Known limitations
|
|
||||||
# - fstab is neither consulted nor updated
|
|
||||||
|
|
||||||
import os
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
from charmhelpers.core import host
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
|
|
||||||
MOUNT_BASE = '/srv/juju/volumes'
|
|
||||||
|
|
||||||
|
|
||||||
class VolumeConfigurationError(Exception):
    '''Raised when volume configuration data is missing or invalid.'''
|
|
||||||
|
|
||||||
|
|
||||||
def get_config():
    '''Gather and sanity-check volume configuration data.

    Reads 'volume-ephemeral' and 'volume-map' from the charm config and
    returns a dict with keys 'ephemeral', 'device' and 'mountpoint', or
    None when the configuration is invalid.
    '''
    volume_config = {}
    config = hookenv.config()

    errors = False

    # Accept the common truthy spellings from charm config.
    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
        volume_config['ephemeral'] = True
    else:
        volume_config['ephemeral'] = False

    # BUG FIX: volume_map must be defined before the try block, otherwise a
    # YAMLError left it unbound and the checks below raised NameError.
    volume_map = {}
    try:
        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
    except yaml.YAMLError as e:
        hookenv.log("Error parsing YAML volume-map: {}".format(e),
                    hookenv.ERROR)
        errors = True
    if volume_map is None:
        # probably an empty string
        volume_map = {}
    elif not isinstance(volume_map, dict):
        hookenv.log("Volume-map should be a dictionary, not {}".format(
            type(volume_map)))
        errors = True
        # BUG FIX: reset to a dict so the .get() below cannot raise
        # AttributeError; the error is reported via the None return instead.
        volume_map = {}

    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
    if volume_config['device'] and volume_config['ephemeral']:
        # asked for ephemeral storage but also defined a volume ID
        hookenv.log('A volume is defined for this unit, but ephemeral '
                    'storage was requested', hookenv.ERROR)
        errors = True
    elif not volume_config['device'] and not volume_config['ephemeral']:
        # asked for permanent storage but did not define volume ID
        # BUG FIX: the previous message wrongly said ephemeral storage was
        # requested; this branch is the persistent-storage case.
        hookenv.log('Persistent storage was requested, but there is no '
                    'volume defined for this unit.', hookenv.ERROR)
        errors = True

    unit_mount_name = hookenv.local_unit().replace('/', '-')
    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)

    if errors:
        return None
    return volume_config
|
|
||||||
|
|
||||||
|
|
||||||
def mount_volume(config):
    '''Mount the configured device at its mountpoint and persist it.

    Creates the mountpoint directory when missing, remounts when something
    is already mounted there.

    :param config: dict as returned by get_config() (uses 'device' and
        'mountpoint')
    :raises VolumeConfigurationError: when the mountpoint exists but is not
        a directory, or when host.mount() fails
    '''
    if os.path.exists(config['mountpoint']):
        if not os.path.isdir(config['mountpoint']):
            hookenv.log('Not a directory: {}'.format(config['mountpoint']))
            raise VolumeConfigurationError()
    else:
        host.mkdir(config['mountpoint'])
    # Whatever is currently mounted there is replaced by our device.
    if os.path.ismount(config['mountpoint']):
        unmount_volume(config)
    if not host.mount(config['device'], config['mountpoint'], persist=True):
        raise VolumeConfigurationError()
|
|
||||||
|
|
||||||
|
|
||||||
def unmount_volume(config):
    '''Unmount the configured mountpoint if it is currently mounted.

    :raises VolumeConfigurationError: when host.umount() fails
    '''
    mountpoint = config['mountpoint']
    if not os.path.ismount(mountpoint):
        return
    if not host.umount(mountpoint, persist=True):
        raise VolumeConfigurationError()
|
|
||||||
|
|
||||||
|
|
||||||
def managed_mounts():
    '''Return an iterator over mounted volumes living under MOUNT_BASE.'''
    def _is_managed(mount):
        # host.mounts() entries start with the mountpoint path.
        return mount[0].startswith(MOUNT_BASE)
    return filter(_is_managed, host.mounts())
|
|
||||||
|
|
||||||
|
|
||||||
def configure_volume(before_change=lambda: None, after_change=lambda: None):
    '''Set up storage (or don't) according to the charm's volume configuration.
    Returns the mount point or "ephemeral". before_change and after_change
    are optional functions to be called if the volume configuration changes.

    :raises VolumeConfigurationError: when configuration is invalid or a
        mount/unmount operation fails
    '''

    config = get_config()
    if not config:
        hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
        raise VolumeConfigurationError()

    if config['ephemeral']:
        # Ephemeral storage: ensure nothing remains mounted at our
        # mountpoint.
        if os.path.ismount(config['mountpoint']):
            before_change()
            unmount_volume(config)
            after_change()
        return 'ephemeral'
    else:
        # persistent storage
        if os.path.ismount(config['mountpoint']):
            mounts = dict(managed_mounts())
            if mounts.get(config['mountpoint']) != config['device']:
                # A different device is mounted there; swap it for ours.
                before_change()
                unmount_volume(config)
                mount_volume(config)
                after_change()
        else:
            before_change()
            mount_volume(config)
            after_change()
        return config['mountpoint']
|
|
||||||
|
|
@ -1,11 +0,0 @@
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
@ -1,840 +0,0 @@
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
"""Helper for working with a MySQL database"""
|
|
||||||
import collections
|
|
||||||
import copy
|
|
||||||
import json
|
|
||||||
import re
|
|
||||||
import sys
|
|
||||||
import platform
|
|
||||||
import os
|
|
||||||
import glob
|
|
||||||
import six
|
|
||||||
|
|
||||||
# from string import upper
|
|
||||||
|
|
||||||
from charmhelpers.core.host import (
|
|
||||||
CompareHostReleases,
|
|
||||||
lsb_release,
|
|
||||||
mkdir,
|
|
||||||
pwgen,
|
|
||||||
write_file
|
|
||||||
)
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config as config_get,
|
|
||||||
relation_get,
|
|
||||||
related_units,
|
|
||||||
unit_get,
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
ERROR,
|
|
||||||
INFO,
|
|
||||||
WARNING,
|
|
||||||
leader_get,
|
|
||||||
leader_set,
|
|
||||||
is_leader,
|
|
||||||
)
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
apt_install,
|
|
||||||
apt_update,
|
|
||||||
filter_installed_packages,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.network.ip import get_host_ip
|
|
||||||
|
|
||||||
try:
|
|
||||||
import MySQLdb
|
|
||||||
except ImportError:
|
|
||||||
apt_update(fatal=True)
|
|
||||||
if six.PY2:
|
|
||||||
apt_install(filter_installed_packages(['python-mysqldb']), fatal=True)
|
|
||||||
else:
|
|
||||||
apt_install(filter_installed_packages(['python3-mysqldb']), fatal=True)
|
|
||||||
import MySQLdb
|
|
||||||
|
|
||||||
|
|
||||||
class MySQLSetPasswordError(Exception):
    """Raised when updating a MySQL user's password fails."""
|
|
||||||
|
|
||||||
|
|
||||||
class MySQLHelper(object):
|
|
||||||
|
|
||||||
    def __init__(self, rpasswdf_template, upasswdf_template, host='localhost',
                 migrate_passwd_to_leader_storage=True,
                 delete_ondisk_passwd_file=True, user="root", password=None,
                 port=None, connect_timeout=None):
        """
        :param rpasswdf_template: path template for the root password file
        :param upasswdf_template: path template for per-user password files
        :param host: database host to connect to
        :param migrate_passwd_to_leader_storage: when True, migrate on-disk
            passwords into leader storage as passwords are fetched
        :param delete_ondisk_passwd_file: when True, delete the local
            password file after a successful migration
        :param user: database user to connect as
        :param password: password for `user` (may be None)
        :param port: TCP port; only passed to MySQLdb when explicitly set
        :param connect_timeout: seconds before a connect attempt fails
        """
        self.user = user
        self.host = host
        self.password = password
        self.port = port
        # default timeout of 30 seconds.
        self.connect_timeout = connect_timeout or 30

        # Password file path templates
        self.root_passwd_file_template = rpasswdf_template
        self.user_passwd_file_template = upasswdf_template

        self.migrate_passwd_to_leader_storage = migrate_passwd_to_leader_storage
        # If we migrate we have the option to delete local copy of root passwd
        self.delete_ondisk_passwd_file = delete_ondisk_passwd_file
        # Set by connect(); None until a connection is opened.
        self.connection = None
|
|
||||||
|
|
||||||
    def connect(self, user='root', password=None, host=None, port=None,
                connect_timeout=None):
        """Open a database connection and store it on self.connection.

        Arguments fall back to the values captured at construction time.

        :param user: user to connect as (default self.user)
        :param password: password for the user (default self.password)
        :param host: host to connect to (default self.host)
        :param port: TCP port; only sent to MySQLdb when explicitly set
        :param connect_timeout: per-call override of the connect timeout
        :raises: whatever MySQLdb.connect raises (logged, then re-raised)
        """
        _connection_info = {
            "user": user or self.user,
            "passwd": password or self.password,
            "host": host or self.host
        }
        # set the connection timeout; for mysql8 it can hang forever, so some
        # timeout is required.
        timeout = connect_timeout or self.connect_timeout
        if timeout:
            _connection_info["connect_timeout"] = timeout
        # port cannot be None but we also do not want to specify it unless it
        # has been explicit set.
        port = port or self.port
        if port is not None:
            _connection_info["port"] = port

        log("Opening db connection for %s@%s" % (user, host), level=DEBUG)
        try:
            self.connection = MySQLdb.connect(**_connection_info)
        except Exception as e:
            log("Failed to connect to database due to '{}'".format(str(e)),
                level=ERROR)
            raise
|
|
||||||
|
|
||||||
def database_exists(self, db_name):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute("SHOW DATABASES")
|
|
||||||
databases = [i[0] for i in cursor.fetchall()]
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
return db_name in databases
|
|
||||||
|
|
||||||
def create_database(self, db_name):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute("CREATE DATABASE `{}` CHARACTER SET UTF8"
|
|
||||||
.format(db_name))
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def grant_exists(self, db_name, db_user, remote_ip):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
|
|
||||||
"TO '{}'@'{}'".format(db_name, db_user, remote_ip)
|
|
||||||
try:
|
|
||||||
cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
|
|
||||||
remote_ip))
|
|
||||||
grants = [i[0] for i in cursor.fetchall()]
|
|
||||||
except MySQLdb.OperationalError:
|
|
||||||
return False
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
# TODO: review for different grants
|
|
||||||
return priv_string in grants
|
|
||||||
|
|
||||||
def create_grant(self, db_name, db_user, remote_ip, password):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
# TODO: review for different grants
|
|
||||||
cursor.execute("GRANT ALL PRIVILEGES ON `{}`.* TO '{}'@'{}' "
|
|
||||||
"IDENTIFIED BY '{}'".format(db_name,
|
|
||||||
db_user,
|
|
||||||
remote_ip,
|
|
||||||
password))
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def create_admin_grant(self, db_user, remote_ip, password):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' "
|
|
||||||
"IDENTIFIED BY '{}'".format(db_user,
|
|
||||||
remote_ip,
|
|
||||||
password))
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def cleanup_grant(self, db_user, remote_ip):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute("DROP FROM mysql.user WHERE user='{}' "
|
|
||||||
"AND HOST='{}'".format(db_user,
|
|
||||||
remote_ip))
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def flush_priviledges(self):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute("FLUSH PRIVILEGES")
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def execute(self, sql):
|
|
||||||
"""Execute arbitrary SQL against the database."""
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute(sql)
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def select(self, sql):
|
|
||||||
"""
|
|
||||||
Execute arbitrary SQL select query against the database
|
|
||||||
and return the results.
|
|
||||||
|
|
||||||
:param sql: SQL select query to execute
|
|
||||||
:type sql: string
|
|
||||||
:returns: SQL select query result
|
|
||||||
:rtype: list of lists
|
|
||||||
:raises: MySQLdb.Error
|
|
||||||
"""
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute(sql)
|
|
||||||
results = [list(i) for i in cursor.fetchall()]
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
return results
|
|
||||||
|
|
||||||
    def migrate_passwords_to_leader_storage(self, excludes=None):
        """Migrate any passwords storage on disk to leader storage.

        No-op on non-leader units. Every '*.passwd' file in the directory of
        the root password file is stored under leader settings keyed by its
        basename; the on-disk copy is then removed when
        self.delete_ondisk_passwd_file is set.

        :param excludes: iterable of file paths to skip migrating
        """
        if not is_leader():
            log("Skipping password migration as not the lead unit",
                level=DEBUG)
            return
        dirname = os.path.dirname(self.root_passwd_file_template)
        path = os.path.join(dirname, '*.passwd')
        for f in glob.glob(path):
            if excludes and f in excludes:
                log("Excluding %s from leader storage migration" % (f),
                    level=DEBUG)
                continue

            key = os.path.basename(f)
            with open(f, 'r') as passwd:
                _value = passwd.read().strip()

            try:
                leader_set(settings={key: _value})

                # Only delete the local copy once it is safely stored.
                if self.delete_ondisk_passwd_file:
                    os.unlink(f)
            except ValueError:
                # NOTE cluster relation not yet ready - skip for now
                pass
|
|
||||||
|
|
||||||
    def get_mysql_password_on_disk(self, username=None, password=None):
        """Retrieve, generate or store a mysql password for the provided
        username on disk.

        :param username: user the password belongs to; None means root
        :param password: password to store when a new file is generated;
            when None a random 32-character password is generated
        :returns: the existing or newly stored password
        """
        if username:
            template = self.user_passwd_file_template
            passwd_file = template.format(username)
        else:
            passwd_file = self.root_passwd_file_template

        _password = None
        if os.path.exists(passwd_file):
            log("Using existing password file '%s'" % passwd_file, level=DEBUG)
            with open(passwd_file, 'r') as passwd:
                _password = passwd.read().strip()
        else:
            log("Generating new password file '%s'" % passwd_file, level=DEBUG)
            if not os.path.isdir(os.path.dirname(passwd_file)):
                # NOTE: need to ensure this is not mysql root dir (which needs
                # to be mysql readable)
                mkdir(os.path.dirname(passwd_file), owner='root', group='root',
                      perms=0o770)
                # Force permissions - for some reason the chmod in makedirs
                # fails
                os.chmod(os.path.dirname(passwd_file), 0o770)

            _password = password or pwgen(length=32)
            write_file(passwd_file, _password, owner='root', group='root',
                       perms=0o660)

        return _password
|
|
||||||
|
|
||||||
def passwd_keys(self, username):
|
|
||||||
"""Generator to return keys used to store passwords in peer store.
|
|
||||||
|
|
||||||
NOTE: we support both legacy and new format to support mysql
|
|
||||||
charm prior to refactor. This is necessary to avoid LP 1451890.
|
|
||||||
"""
|
|
||||||
keys = []
|
|
||||||
if username == 'mysql':
|
|
||||||
log("Bad username '%s'" % (username), level=WARNING)
|
|
||||||
|
|
||||||
if username:
|
|
||||||
# IMPORTANT: *newer* format must be returned first
|
|
||||||
keys.append('mysql-%s.passwd' % (username))
|
|
||||||
keys.append('%s.passwd' % (username))
|
|
||||||
else:
|
|
||||||
keys.append('mysql.passwd')
|
|
||||||
|
|
||||||
for key in keys:
|
|
||||||
yield key
|
|
||||||
|
|
||||||
def get_mysql_password(self, username=None, password=None):
|
|
||||||
"""Retrieve, generate or store a mysql password for the provided
|
|
||||||
username using peer relation cluster."""
|
|
||||||
excludes = []
|
|
||||||
|
|
||||||
# First check peer relation.
|
|
||||||
try:
|
|
||||||
for key in self.passwd_keys(username):
|
|
||||||
_password = leader_get(key)
|
|
||||||
if _password:
|
|
||||||
break
|
|
||||||
|
|
||||||
# If root password available don't update peer relation from local
|
|
||||||
if _password and not username:
|
|
||||||
excludes.append(self.root_passwd_file_template)
|
|
||||||
|
|
||||||
except ValueError:
|
|
||||||
# cluster relation is not yet started; use on-disk
|
|
||||||
_password = None
|
|
||||||
|
|
||||||
# If none available, generate new one
|
|
||||||
if not _password:
|
|
||||||
_password = self.get_mysql_password_on_disk(username, password)
|
|
||||||
|
|
||||||
# Put on wire if required
|
|
||||||
if self.migrate_passwd_to_leader_storage:
|
|
||||||
self.migrate_passwords_to_leader_storage(excludes=excludes)
|
|
||||||
|
|
||||||
return _password
|
|
||||||
|
|
||||||
def get_mysql_root_password(self, password=None):
|
|
||||||
"""Retrieve or generate mysql root password for service units."""
|
|
||||||
return self.get_mysql_password(username=None, password=password)
|
|
||||||
|
|
||||||
    def set_mysql_password(self, username, password, current_password=None):
        """Update a mysql password for the provided username changing the
        leader settings

        To update root's password pass `None` in the username

        :param username: Username to change password of
        :type username: str
        :param password: New password for user.
        :type password: str
        :param current_password: Existing password for user.
        :type current_password: str
        :raises MySQLSetPasswordError: If the connection with the current or
            new credentials fails, or the UPDATE statement fails.
        """

        if username is None:
            username = 'root'

        # get root password via leader-get, it may be that in the past (when
        # changes to root-password were not supported) the user changed the
        # password, so leader-get is more reliable source than
        # config.previous('root-password').
        rel_username = None if username == 'root' else username
        if not current_password:
            current_password = self.get_mysql_password(rel_username)

        # password that needs to be set
        new_passwd = password

        # update password for all users (e.g. root@localhost, root@::1, etc)
        try:
            self.connect(user=username, password=current_password)
            cursor = self.connection.cursor()
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError(('Cannot connect using password in '
                                         'leader settings (%s)') % ex, ex)

        try:
            # NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account
            # fails when using SET PASSWORD so using UPDATE against the
            # mysql.user table is needed, but changes to this table are not
            # replicated across the cluster, so this update needs to run in
            # all the nodes. More info at
            # http://galeracluster.com/documentation-webpages/userchanges.html
            release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
            if release < 'bionic':
                SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = "
                                     "PASSWORD( %s ) WHERE user = %s;")
            else:
                # PXC 5.7 (introduced in Bionic) uses authentication_string
                SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET "
                                     "authentication_string = "
                                     "PASSWORD( %s ) WHERE user = %s;")
            # Parameterized: new_passwd and username are bound by the driver.
            cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username))
            cursor.execute('FLUSH PRIVILEGES;')
            self.connection.commit()
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError('Cannot update password: %s' % str(ex),
                                        ex)
        finally:
            cursor.close()

        # check the password was changed
        try:
            self.connect(user=username, password=new_passwd)
            self.execute('select 1;')
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError(('Cannot connect using new password: '
                                         '%s') % str(ex), ex)

        if not is_leader():
            log('Only the leader can set a new password in the relation',
                level=DEBUG)
            return

        # Propagate the new password to every key under which it is stored
        # (legacy and new formats) in leader storage.
        for key in self.passwd_keys(rel_username):
            _password = leader_get(key)
            if _password:
                log('Updating password for %s (%s)' % (key, rel_username),
                    level=DEBUG)
                leader_set(settings={key: new_passwd})
|
|
||||||
|
|
||||||
def set_mysql_root_password(self, password, current_password=None):
|
|
||||||
"""Update mysql root password changing the leader settings
|
|
||||||
|
|
||||||
:param password: New password for user.
|
|
||||||
:type password: str
|
|
||||||
:param current_password: Existing password for user.
|
|
||||||
:type current_password: str
|
|
||||||
"""
|
|
||||||
self.set_mysql_password(
|
|
||||||
'root',
|
|
||||||
password,
|
|
||||||
current_password=current_password)
|
|
||||||
|
|
||||||
def normalize_address(self, hostname):
|
|
||||||
"""Ensure that address returned is an IP address (i.e. not fqdn)"""
|
|
||||||
if config_get('prefer-ipv6'):
|
|
||||||
# TODO: add support for ipv6 dns
|
|
||||||
return hostname
|
|
||||||
|
|
||||||
if hostname != unit_get('private-address'):
|
|
||||||
return get_host_ip(hostname, fallback=hostname)
|
|
||||||
|
|
||||||
# Otherwise assume localhost
|
|
||||||
return '127.0.0.1'
|
|
||||||
|
|
||||||
    def get_allowed_units(self, database, username, relation_id=None, prefix=None):
        """Get list of units with access grants for database with username.

        This is typically used to provide shared-db relations with a list of
        which units have been granted access to the given database.

        :param database: Database name to check grants for.
        :param username: Database username the grants belong to.
        :param relation_id: Relation id to inspect for related units.
        :param prefix: Relation-setting prefix; defaults to the database name.
        :returns: Set of unit names that have a matching grant.
        """
        if not self.connection:
            self.connect(password=self.get_mysql_root_password())
        allowed_units = set()
        if not prefix:
            prefix = database
        for unit in related_units(relation_id):
            settings = relation_get(rid=relation_id, unit=unit)
            # First check for setting with prefix, then without
            for attr in ["%s_hostname" % (prefix), 'hostname']:
                hosts = settings.get(attr, None)
                if hosts:
                    break

            if hosts:
                # hostname can be json-encoded list of hostnames
                try:
                    hosts = json.loads(hosts)
                except ValueError:
                    # Plain string hostname, not JSON.
                    hosts = [hosts]
            else:
                # Fall back to the unit's private address.
                hosts = [settings['private-address']]

            if hosts:
                for host in hosts:
                    host = self.normalize_address(host)
                    if self.grant_exists(database, username, host):
                        log("Grant exists for host '%s' on db '%s'" %
                            (host, database), level=DEBUG)
                        if unit not in allowed_units:
                            allowed_units.add(unit)
                    else:
                        log("Grant does NOT exist for host '%s' on db '%s'" %
                            (host, database), level=DEBUG)
            else:
                # Only reachable when the relation value JSON-decoded to an
                # empty/falsy value (e.g. '[]').
                log("No hosts found for grant check", level=INFO)

        return allowed_units
|
|
||||||
|
|
||||||
    def configure_db(self, hostname, database, username, admin=False):
        """Configure access to database for username from hostname.

        Creates the database if needed and, when no grant exists yet for the
        normalized client address, creates a (admin) grant for the user.

        :param hostname: Client hostname/address requesting access.
        :param database: Database name to grant access to.
        :param username: Database username to grant access for.
        :param admin: When True, create an admin grant instead of a
            database-scoped grant.
        :returns: The password for `username`.
        """
        if not self.connection:
            self.connect(password=self.get_mysql_root_password())
        if not self.database_exists(database):
            self.create_database(database)

        remote_ip = self.normalize_address(hostname)
        password = self.get_mysql_password(username)
        if not self.grant_exists(database, username, remote_ip):
            if not admin:
                self.create_grant(database, username, remote_ip, password)
            else:
                self.create_admin_grant(username, remote_ip, password)
            # NOTE: 'priviledges' (sic) matches the helper's actual name
            # elsewhere in this library.
            self.flush_priviledges()

        return password
|
|
||||||
|
|
||||||
|
|
||||||
# `_singleton_config_helper` stores the instance of the helper class that is
# being used during a hook invocation.
_singleton_config_helper = None


def get_mysql_config_helper():
    """Return the hook-wide MySQLConfigHelper singleton.

    Lazily instantiates MySQLConfigHelper on first call and returns the
    same instance for the remainder of the hook invocation.

    :returns: The shared helper instance.
    :rtype: MySQLConfigHelper
    """
    global _singleton_config_helper
    if _singleton_config_helper is None:
        _singleton_config_helper = MySQLConfigHelper()
    return _singleton_config_helper
|
|
||||||
|
|
||||||
|
|
||||||
class MySQLConfigHelper(object):
    """Base configuration helper for MySQL.

    Derives InnoDB and memory-related configuration values from charm
    config options and the state of the machine the unit runs on.
    """

    # Going for the biggest page size to avoid wasted bytes.
    # InnoDB page size is 16MB

    DEFAULT_PAGE_SIZE = 16 * 1024 * 1024
    DEFAULT_INNODB_BUFFER_FACTOR = 0.50
    DEFAULT_INNODB_BUFFER_SIZE_MAX = 512 * 1024 * 1024

    # Validation and lookups for InnoDB configuration
    INNODB_VALID_BUFFERING_VALUES = [
        'none',
        'inserts',
        'deletes',
        'changes',
        'purges',
        'all'
    ]
    INNODB_FLUSH_CONFIG_VALUES = {
        'fast': 2,
        'safest': 1,
        'unsafe': 0,
    }

    def human_to_bytes(self, human):
        """Convert human readable configuration options to bytes.

        Supported forms: a plain number of bytes ('1024'), K/M/G/T
        suffixes ('512M') and a percentage of total RAM ('25%'), the
        latter rounded down to a whole number of InnoDB pages.

        :param human: Value to convert, e.g. '512M'.
        :type human: str
        :returns: Number of bytes.
        :rtype: int
        :raises ValueError: If the modifier is not one of K, M, G, T or %.
        """
        num_re = re.compile('^[0-9]+$')
        if num_re.match(human):
            # BUGFIX: previously the raw *string* was returned here, which
            # broke numeric comparisons in callers (e.g. the buffer pool
            # sanity check below); coerce to int so the return type is
            # consistent with the other branches.
            return int(human)

        factors = {
            'K': 1024,
            'M': 1048576,
            'G': 1073741824,
            'T': 1099511627776
        }
        modifier = human[-1]
        if modifier in factors:
            return int(human[:-1]) * factors[modifier]

        if modifier == '%':
            total_ram = self.human_to_bytes(self.get_mem_total())
            # 32bit systems cannot address all of a large RAM bank; clamp
            # to the platform limit.
            if self.is_32bit_system() and total_ram > self.sys_mem_limit():
                total_ram = self.sys_mem_limit()
            factor = int(human[:-1]) * 0.01
            pctram = total_ram * factor
            # Round down to a whole number of InnoDB pages.
            return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE))

        raise ValueError("Can only convert K,M,G,T or %")

    def is_32bit_system(self):
        """Determine whether system is 32 or 64 bit.

        :returns: True when this Python cannot address beyond 2**32.
        :rtype: bool
        """
        try:
            return sys.maxsize < 2 ** 32
        except OverflowError:
            return False

    def sys_mem_limit(self):
        """Determine the default memory limit for the current service unit.

        :returns: Addressable memory limit in bytes for 32bit platforms.
        :rtype: int
        """
        if platform.machine() in ['armv7l']:
            _mem_limit = self.human_to_bytes('2700M')  # experimentally determined
        else:
            # Limit for x86 based 32bit systems
            _mem_limit = self.human_to_bytes('4G')

        return _mem_limit

    def get_mem_total(self):
        """Calculate the total memory in the current service unit.

        :returns: Total memory as a human-readable string, e.g. '16301352K'
            (value plus upper-cased unit letter from /proc/meminfo), or
            None when MemTotal is not present.
        :rtype: Union[None, str]
        """
        with open('/proc/meminfo') as meminfo_file:
            for line in meminfo_file:
                key, mem = line.split(':', 2)
                if key == 'MemTotal':
                    mtot, modifier = mem.strip().split(' ')
                    return '%s%s' % (mtot, modifier[0].upper())

    def get_innodb_flush_log_at_trx_commit(self):
        """Get value for innodb_flush_log_at_trx_commit.

        Use the innodb-flush-log-at-trx-commit or the tunning-level setting
        translated by INNODB_FLUSH_CONFIG_VALUES to get the
        innodb_flush_log_at_trx_commit value.

        :returns: Numeric value for innodb_flush_log_at_trx_commit
        :rtype: Union[None, int]
        """
        _iflatc = config_get('innodb-flush-log-at-trx-commit')
        _tuning_level = config_get('tuning-level')
        if _iflatc:
            return _iflatc
        elif _tuning_level:
            # Unknown tuning levels default to the 'safest' value (1).
            return self.INNODB_FLUSH_CONFIG_VALUES.get(_tuning_level, 1)

    def get_innodb_change_buffering(self):
        """Get value for innodb_change_buffering.

        Use the innodb-change-buffering validated against
        INNODB_VALID_BUFFERING_VALUES to get the innodb_change_buffering
        value.

        :returns: String value for innodb_change_buffering.
        :rtype: Union[None, str]
        """
        _icb = config_get('innodb-change-buffering')
        if _icb and _icb in self.INNODB_VALID_BUFFERING_VALUES:
            return _icb

    def get_innodb_buffer_pool_size(self):
        """Get value for innodb_buffer_pool_size.

        Return the number value of innodb-buffer-pool-size or dataset-size.
        If neither is set, calculate a sane default based on total memory.

        :returns: Numeric value for innodb_buffer_pool_size.
        :rtype: int
        """
        total_memory = self.human_to_bytes(self.get_mem_total())

        dataset_bytes = config_get('dataset-size')
        innodb_buffer_pool_size = config_get('innodb-buffer-pool-size')

        if innodb_buffer_pool_size:
            innodb_buffer_pool_size = self.human_to_bytes(
                innodb_buffer_pool_size)
        elif dataset_bytes:
            # BUGFIX: added the missing space between 'use' and the option
            # name in the deprecation message.
            log("Option 'dataset-size' has been deprecated, please use "
                "innodb_buffer_pool_size option instead", level="WARN")
            innodb_buffer_pool_size = self.human_to_bytes(
                dataset_bytes)
        else:
            # NOTE(jamespage): pick the smallest of 50% of RAM or 512MB
            #                  to ensure that deployments in containers
            #                  without constraints don't try to consume
            #                  silly amounts of memory.
            innodb_buffer_pool_size = min(
                int(total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR),
                self.DEFAULT_INNODB_BUFFER_SIZE_MAX
            )

        if innodb_buffer_pool_size > total_memory:
            log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format(
                innodb_buffer_pool_size,
                total_memory), level='WARN')

        return innodb_buffer_pool_size
|
|
||||||
|
|
||||||
|
|
||||||
class PerconaClusterHelper(MySQLConfigHelper):
    """Percona-cluster specific configuration helper."""

    def parse_config(self):
        """Parse charm configuration and calculate values for config files.

        :returns: Mapping of mysql option name to value.
        :rtype: dict
        """
        config = config_get()
        mysql_config = {}
        if 'max-connections' in config:
            mysql_config['max_connections'] = config['max-connections']

        if 'wait-timeout' in config:
            mysql_config['wait_timeout'] = config['wait-timeout']

        # Call each derived getter exactly once (they hit config_get) and
        # only set the key when a value was produced.
        innodb_flush = self.get_innodb_flush_log_at_trx_commit()
        if innodb_flush is not None:
            mysql_config['innodb_flush_log_at_trx_commit'] = innodb_flush

        innodb_change_buffering = self.get_innodb_change_buffering()
        if innodb_change_buffering is not None:
            # BUGFIX: previously the *raw* config value was stored here even
            # though the check above used the validated getter; use the
            # validated value so an invalid setting cannot leak through.
            mysql_config['innodb_change_buffering'] = innodb_change_buffering

        if 'innodb-io-capacity' in config:
            mysql_config['innodb_io_capacity'] = config['innodb-io-capacity']

        # Set a sane default key_buffer size
        mysql_config['key_buffer'] = self.human_to_bytes('32M')
        mysql_config['innodb_buffer_pool_size'] = self.get_innodb_buffer_pool_size()
        return mysql_config
|
|
||||||
|
|
||||||
|
|
||||||
class MySQL8Helper(MySQLHelper):
    """MySQL 8 specific helper.

    Overrides grant handling for MySQL 8 semantics (users must be created
    before grants) and adds MySQL Router specific grants.

    NOTE(review): user/host/db names are interpolated directly into SQL
    below. MySQL cannot bind identifiers as statement parameters in
    GRANT/CREATE USER, but these values come from relation data — verify
    they are trusted before use.
    """

    def grant_exists(self, db_name, db_user, remote_ip):
        """Return True if an ALL PRIVILEGES grant on db_name exists for
        db_user@remote_ip, False otherwise (including on query failure)."""
        cursor = self.connection.cursor()
        priv_string = ("GRANT ALL PRIVILEGES ON {}.* "
                       "TO {}@{}".format(db_name, db_user, remote_ip))
        try:
            cursor.execute("SHOW GRANTS FOR '{}'@'{}'".format(db_user,
                                                              remote_ip))
            grants = [i[0] for i in cursor.fetchall()]
        except MySQLdb.OperationalError:
            # e.g. the user does not exist yet.
            return False
        finally:
            cursor.close()

        # Different versions of MySQL use ' or `. Ignore these in the check.
        return priv_string in [
            i.replace("'", "").replace("`", "") for i in grants]

    def create_grant(self, db_name, db_user, remote_ip, password):
        """Grant ALL PRIVILEGES on db_name to db_user@remote_ip, creating
        the user first (required by MySQL 8). No-op if the grant exists."""
        if self.grant_exists(db_name, db_user, remote_ip):
            return

        # Make sure the user exists
        # MySQL8 must create the user before the grant
        self.create_user(db_user, remote_ip, password)

        cursor = self.connection.cursor()
        try:
            cursor.execute("GRANT ALL PRIVILEGES ON `{}`.* TO '{}'@'{}'"
                           .format(db_name, db_user, remote_ip))
        finally:
            cursor.close()

    def create_user(self, db_user, remote_ip, password):
        """Create db_user@remote_ip identified by password.

        An OperationalError (e.g. user already exists) is logged and
        swallowed so creation is effectively idempotent."""

        SQL_USER_CREATE = (
            "CREATE USER '{db_user}'@'{remote_ip}' "
            "IDENTIFIED BY '{password}'")

        cursor = self.connection.cursor()
        try:
            cursor.execute(SQL_USER_CREATE.format(
                db_user=db_user,
                remote_ip=remote_ip,
                password=password)
            )
        except MySQLdb._exceptions.OperationalError:
            log("DB user {} already exists.".format(db_user),
                "WARNING")
        finally:
            cursor.close()

    def create_router_grant(self, db_user, remote_ip, password):
        """Create db_user@remote_ip and apply the set of grants MySQL
        Router needs (metadata schema, mysql.user, group replication
        performance_schema tables)."""

        # Make sure the user exists
        # MySQL8 must create the user before the grant
        self.create_user(db_user, remote_ip, password)

        # Mysql-Router specific grants
        cursor = self.connection.cursor()
        try:
            cursor.execute("GRANT CREATE USER ON *.* TO '{}'@'{}' WITH GRANT "
                           "OPTION".format(db_user, remote_ip))
            cursor.execute("GRANT SELECT, INSERT, UPDATE, DELETE, EXECUTE ON "
                           "mysql_innodb_cluster_metadata.* TO '{}'@'{}'"
                           .format(db_user, remote_ip))
            cursor.execute("GRANT SELECT ON mysql.user TO '{}'@'{}'"
                           .format(db_user, remote_ip))
            cursor.execute("GRANT SELECT ON "
                           "performance_schema.replication_group_members "
                           "TO '{}'@'{}'".format(db_user, remote_ip))
            cursor.execute("GRANT SELECT ON "
                           "performance_schema.replication_group_member_stats "
                           "TO '{}'@'{}'".format(db_user, remote_ip))
            cursor.execute("GRANT SELECT ON "
                           "performance_schema.global_variables "
                           "TO '{}'@'{}'".format(db_user, remote_ip))
        finally:
            cursor.close()

    def configure_router(self, hostname, username):
        """Create the MySQL Router user for hostname and apply its grants.

        :param hostname: Client hostname/address of the router unit.
        :param username: Router database username.
        :returns: The password for `username`.
        """

        if self.connection is None:
            self.connect(password=self.get_mysql_root_password())

        remote_ip = self.normalize_address(hostname)
        password = self.get_mysql_password(username)
        self.create_user(username, remote_ip, password)
        self.create_router_grant(username, remote_ip, password)

        return password
|
|
||||||
|
|
||||||
|
|
||||||
def get_prefix(requested, keys=None):
    """Return existing prefix or None.

    :param requested: Request string. i.e. novacell0_username
    :type requested: str
    :param keys: Keys to determine prefix. Defaults set in function.
    :type keys: List of str keys
    :returns: String prefix i.e. novacell0
    :rtype: Union[None, str]
    """
    if keys is None:
        # Shared-DB default keys
        keys = ["_database", "_username", "_hostname"]
    # First matching suffix wins; None when nothing matches.
    return next(
        (requested[:-len(suffix)]
         for suffix in keys
         if requested.endswith(suffix)),
        None)
|
|
||||||
|
|
||||||
|
|
||||||
def get_db_data(relation_data, unprefixed):
    """Organize database requests into a collections.OrderedDict

    :param relation_data: shared-db relation data
    :type relation_data: dict
    :param unprefixed: Prefix to use for requests without a prefix. This should
                       be unique for each side of the relation to avoid
                       conflicts.
    :type unprefixed: str
    :returns: Order dict of databases and users
    :rtype: collections.OrderedDict
    """
    # Deep copy to avoid unintentionally changing relation data
    settings = copy.deepcopy(relation_data)
    databases = collections.OrderedDict()

    # Clear non-db related elements
    for non_db_key in ("egress-subnets", "ingress-address", "private-address"):
        settings.pop(non_db_key, None)

    # Rewrite an unprefixed database/username/hostname triple into the
    # prefixed form using the caller-supplied prefix.
    singleset = {"database", "username", "hostname"}
    if singleset.issubset(settings):
        for suffix in ("hostname", "database", "username"):
            settings["{}_{}".format(unprefixed, suffix)] = settings.pop(suffix)

    # Group remaining keys by their prefix (text before the first '_').
    for key, value in settings.items():
        db_prefix, _, remainder = key.partition("_")
        if db_prefix not in databases:
            databases[db_prefix] = collections.OrderedDict()
        databases[db_prefix][remainder] = value

    return databases
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
@ -1,90 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
#
|
|
||||||
# Copyright 2012 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# This file is sourced from lp:openstack-charm-helpers
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# James Page <james.page@ubuntu.com>
|
|
||||||
# Adam Gandelman <adamg@ubuntu.com>
|
|
||||||
#
|
|
||||||
|
|
||||||
import os
|
|
||||||
|
|
||||||
from charmhelpers.core import host
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config as config_get,
|
|
||||||
relation_get,
|
|
||||||
relation_ids,
|
|
||||||
related_units as relation_list,
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
)
|
|
||||||
|
|
||||||
# This file contains the CA cert from the charms ssl_ca configuration
# option, in future the file name should be updated reflect that.
# Used as the certificate name when installing via install_ca_cert().
CONFIG_CA_CERT_FILE = 'keystone_juju_ca_cert'
|
|
||||||
|
|
||||||
|
|
||||||
def get_cert(cn=None):
    """Return an (ssl_cert, ssl_key) pair.

    Reads the ssl_cert/ssl_key config options first; when either is unset,
    falls back to scanning identity-service relation units for the
    corresponding attributes (suffixed with the CN when one is given).

    :param cn: Optional common name used to select per-CN relation keys.
    :returns: Tuple of (cert, key); either element may be None.
    """
    # TODO: deal with multiple https endpoints via charm config
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if not (cert and key):
        log("Inspecting identity-service relations for SSL certificate.",
            level=INFO)
        # Discard any partial config pair and look on the relation instead.
        cert = key = None
        if cn:
            ssl_cert_attr = 'ssl_cert_{}'.format(cn)
            ssl_key_attr = 'ssl_key_{}'.format(cn)
        else:
            ssl_cert_attr = 'ssl_cert'
            ssl_key_attr = 'ssl_key'
        for r_id in relation_ids('identity-service'):
            for unit in relation_list(r_id):
                # First unit providing each value wins.
                if not cert:
                    cert = relation_get(ssl_cert_attr,
                                        rid=r_id, unit=unit)
                if not key:
                    key = relation_get(ssl_key_attr,
                                       rid=r_id, unit=unit)
    return (cert, key)
|
|
||||||
|
|
||||||
|
|
||||||
def get_ca_cert():
    """Return the CA certificate, or None.

    Reads the ssl_ca config option; when unset, falls back to the
    'ca_cert' attribute of identity-service and identity-credentials
    relation units (first unit providing a value wins).
    """
    ca_cert = config_get('ssl_ca')
    if ca_cert is None:
        log("Inspecting identity-service relations for CA SSL certificate.",
            level=INFO)
        for r_id in (relation_ids('identity-service') +
                     relation_ids('identity-credentials')):
            for unit in relation_list(r_id):
                if ca_cert is None:
                    ca_cert = relation_get('ca_cert',
                                           rid=r_id, unit=unit)
    return ca_cert
|
|
||||||
|
|
||||||
|
|
||||||
def retrieve_ca_cert(cert_file):
    """Return the raw bytes of cert_file, or None when it does not exist.

    :param cert_file: Path to the certificate file.
    :type cert_file: str
    :returns: File contents as bytes, or None.
    :rtype: Union[None, bytes]
    """
    if not os.path.isfile(cert_file):
        return None
    with open(cert_file, 'rb') as crt:
        return crt.read()
|
|
||||||
|
|
||||||
|
|
||||||
def install_ca_cert(ca_cert):
    """Install ca_cert system-wide under the charm's well-known name.

    Delegates to charmhelpers.core.host.install_ca_cert() using
    CONFIG_CA_CERT_FILE as the certificate name.
    """
    host.install_ca_cert(ca_cert, CONFIG_CA_CERT_FILE)
|
|
||||||
|
|
@ -1,451 +0,0 @@
|
||||||
# Copyright 2014-2021 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
#
|
|
||||||
# Copyright 2012 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# James Page <james.page@ubuntu.com>
|
|
||||||
# Adam Gandelman <adamg@ubuntu.com>
|
|
||||||
#
|
|
||||||
|
|
||||||
"""
|
|
||||||
Helpers for clustering and determining "cluster leadership" and other
|
|
||||||
clustering-related helpers.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import functools
|
|
||||||
import subprocess
|
|
||||||
import os
|
|
||||||
import time
|
|
||||||
|
|
||||||
from socket import gethostname as get_unit_hostname
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
relation_ids,
|
|
||||||
related_units as relation_list,
|
|
||||||
relation_get,
|
|
||||||
config as config_get,
|
|
||||||
INFO,
|
|
||||||
DEBUG,
|
|
||||||
WARNING,
|
|
||||||
unit_get,
|
|
||||||
is_leader as juju_is_leader,
|
|
||||||
status_set,
|
|
||||||
)
|
|
||||||
from charmhelpers.core.host import (
|
|
||||||
modulo_distribution,
|
|
||||||
)
|
|
||||||
from charmhelpers.core.decorators import (
|
|
||||||
retry_on_exception,
|
|
||||||
)
|
|
||||||
from charmhelpers.core.strutils import (
|
|
||||||
bool_from_string,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Sentinel resource name meaning "the pacemaker Designated Controller".
DC_RESOURCE_NAME = 'DC'


class HAIncompleteConfig(Exception):
    """Raised when required HA configuration data is missing."""
    pass


class HAIncorrectConfig(Exception):
    """Raised when HA configuration data is present but invalid."""
    pass


class CRMResourceNotFound(Exception):
    """Raised when a queried CRM resource is not running anywhere."""
    pass


class CRMDCNotFound(Exception):
    """Raised when the pacemaker Designated Controller cannot be found."""
    pass
|
|
||||||
|
|
||||||
|
|
||||||
def is_elected_leader(resource):
    """
    Returns True if the charm executing this is the elected cluster leader.

    It relies on two mechanisms to determine leadership:
    1. If juju is sufficiently new and leadership election is supported,
    the is_leader command will be used.
    2. If the charm is part of a corosync cluster, call corosync to
    determine leadership.
    3. If the charm is not part of a corosync cluster, the leader is
    determined as being "the alive unit with the lowest unit number". In
    other words, the oldest surviving unit.
    """
    try:
        # Preferred: native juju leadership election.
        return juju_is_leader()
    except NotImplementedError:
        log('Juju leadership election feature not enabled'
            ', using fallback support',
            level=WARNING)

    if is_clustered():
        # Corosync/pacemaker cluster: defer to the CRM resource owner.
        if not is_crm_leader(resource):
            log('Deferring action to CRM leader.', level=INFO)
            return False
    else:
        # No cluster: oldest peer (lowest unit number) is the leader.
        peers = peer_units()
        if peers and not oldest_peer(peers):
            log('Deferring action to oldest service unit.', level=INFO)
            return False
    return True
|
|
||||||
|
|
||||||
|
|
||||||
def is_clustered():
    """Return True when any unit on an 'ha' relation reports 'clustered'."""
    for rel_id in (relation_ids('ha') or []):
        for member in (relation_list(rel_id) or []):
            if relation_get('clustered', rid=rel_id, unit=member):
                return True
    return False
|
|
||||||
|
|
||||||
|
|
||||||
def is_crm_dc():
    """
    Determine leadership by querying the pacemaker Designated Controller

    :returns: True when this host is the DC.
    :rtype: bool
    :raises CRMDCNotFound: When `crm status` fails or reports 'Current DC:
        NONE'.
    """
    cmd = ['crm', 'status']
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        # py2/py3: check_output may return bytes; normalize to text.
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError as ex:
        raise CRMDCNotFound(str(ex))

    current_dc = ''
    for line in status.split('\n'):
        if line.startswith('Current DC'):
            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
            current_dc = line.split(':')[1].split()[0]
    if current_dc == get_unit_hostname():
        return True
    elif current_dc == 'NONE':
        raise CRMDCNotFound('Current DC: NONE')

    return False
|
|
||||||
|
|
||||||
|
|
||||||
@retry_on_exception(5, base_delay=2,
                    exc_type=(CRMResourceNotFound, CRMDCNotFound))
def is_crm_leader(resource, retry=False):
    """
    Returns True if the charm calling this is the elected corosync leader,
    as returned by calling the external "crm" command.

    We allow this operation to be retried to avoid the possibility of getting a
    false negative. See LP #1396246 for more info.

    :param resource: CRM resource name, or DC_RESOURCE_NAME for the DC.
    :param retry: Unused; retained for backwards compatibility (retries are
        handled by the decorator above).
    """
    if resource == DC_RESOURCE_NAME:
        return is_crm_dc()
    cmd = ['crm', 'resource', 'show', resource]
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        # py2/py3: check_output may return bytes; normalize to text.
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError:
        status = None

    if status and get_unit_hostname() in status:
        return True

    if status and "resource %s is NOT running" % (resource) in status:
        # Raise so the retry decorator re-queries before concluding False.
        raise CRMResourceNotFound("CRM resource %s not found" % (resource))

    return False
|
|
||||||
|
|
||||||
|
|
||||||
def is_leader(resource):
|
|
||||||
log("is_leader is deprecated. Please consider using is_crm_leader "
|
|
||||||
"instead.", level=WARNING)
|
|
||||||
return is_crm_leader(resource)
|
|
||||||
|
|
||||||
|
|
||||||
def peer_units(peer_relation="cluster"):
|
|
||||||
peers = []
|
|
||||||
for r_id in (relation_ids(peer_relation) or []):
|
|
||||||
for unit in (relation_list(r_id) or []):
|
|
||||||
peers.append(unit)
|
|
||||||
return peers
|
|
||||||
|
|
||||||
|
|
||||||
def peer_ips(peer_relation='cluster', addr_key='private-address'):
|
|
||||||
'''Return a dict of peers and their private-address'''
|
|
||||||
peers = {}
|
|
||||||
for r_id in relation_ids(peer_relation):
|
|
||||||
for unit in relation_list(r_id):
|
|
||||||
peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
|
|
||||||
return peers
|
|
||||||
|
|
||||||
|
|
||||||
def oldest_peer(peers):
|
|
||||||
"""Determines who the oldest peer is by comparing unit numbers."""
|
|
||||||
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
|
|
||||||
for peer in peers:
|
|
||||||
remote_unit_no = int(peer.split('/')[1])
|
|
||||||
if remote_unit_no < local_unit_no:
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def eligible_leader(resource):
|
|
||||||
log("eligible_leader is deprecated. Please consider using "
|
|
||||||
"is_elected_leader instead.", level=WARNING)
|
|
||||||
return is_elected_leader(resource)
|
|
||||||
|
|
||||||
|
|
||||||
def https():
|
|
||||||
'''
|
|
||||||
Determines whether enough data has been provided in configuration
|
|
||||||
or relation data to configure HTTPS
|
|
||||||
.
|
|
||||||
returns: boolean
|
|
||||||
'''
|
|
||||||
use_https = config_get('use-https')
|
|
||||||
if use_https and bool_from_string(use_https):
|
|
||||||
return True
|
|
||||||
if config_get('ssl_cert') and config_get('ssl_key'):
|
|
||||||
return True
|
|
||||||
for r_id in relation_ids('certificates'):
|
|
||||||
for unit in relation_list(r_id):
|
|
||||||
ca = relation_get('ca', rid=r_id, unit=unit)
|
|
||||||
if ca:
|
|
||||||
return True
|
|
||||||
for r_id in relation_ids('identity-service'):
|
|
||||||
for unit in relation_list(r_id):
|
|
||||||
# TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
|
|
||||||
rel_state = [
|
|
||||||
relation_get('https_keystone', rid=r_id, unit=unit),
|
|
||||||
relation_get('ca_cert', rid=r_id, unit=unit),
|
|
||||||
]
|
|
||||||
# NOTE: works around (LP: #1203241)
|
|
||||||
if (None not in rel_state) and ('' not in rel_state):
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def determine_api_port(public_port, singlenode_mode=False):
|
|
||||||
'''
|
|
||||||
Determine correct API server listening port based on
|
|
||||||
existence of HTTPS reverse proxy and/or haproxy.
|
|
||||||
|
|
||||||
public_port: int: standard public port for given service
|
|
||||||
|
|
||||||
singlenode_mode: boolean: Shuffle ports when only a single unit is present
|
|
||||||
|
|
||||||
returns: int: the correct listening port for the API service
|
|
||||||
'''
|
|
||||||
i = 0
|
|
||||||
if singlenode_mode:
|
|
||||||
i += 1
|
|
||||||
elif len(peer_units()) > 0 or is_clustered():
|
|
||||||
i += 1
|
|
||||||
if https():
|
|
||||||
i += 1
|
|
||||||
return public_port - (i * 10)
|
|
||||||
|
|
||||||
|
|
||||||
def determine_apache_port(public_port, singlenode_mode=False):
|
|
||||||
'''
|
|
||||||
Description: Determine correct apache listening port based on public IP +
|
|
||||||
state of the cluster.
|
|
||||||
|
|
||||||
public_port: int: standard public port for given service
|
|
||||||
|
|
||||||
singlenode_mode: boolean: Shuffle ports when only a single unit is present
|
|
||||||
|
|
||||||
returns: int: the correct listening port for the HAProxy service
|
|
||||||
'''
|
|
||||||
i = 0
|
|
||||||
if singlenode_mode:
|
|
||||||
i += 1
|
|
||||||
elif len(peer_units()) > 0 or is_clustered():
|
|
||||||
i += 1
|
|
||||||
return public_port - (i * 10)
|
|
||||||
|
|
||||||
|
|
||||||
determine_apache_port_single = functools.partial(
|
|
||||||
determine_apache_port, singlenode_mode=True)
|
|
||||||
|
|
||||||
|
|
||||||
def get_hacluster_config(exclude_keys=None):
|
|
||||||
'''
|
|
||||||
Obtains all relevant configuration from charm configuration required
|
|
||||||
for initiating a relation to hacluster:
|
|
||||||
|
|
||||||
ha-bindiface, ha-mcastport, vip, os-internal-hostname,
|
|
||||||
os-admin-hostname, os-public-hostname, os-access-hostname
|
|
||||||
|
|
||||||
param: exclude_keys: list of setting key(s) to be excluded.
|
|
||||||
returns: dict: A dict containing settings keyed by setting name.
|
|
||||||
raises: HAIncompleteConfig if settings are missing or incorrect.
|
|
||||||
'''
|
|
||||||
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
|
|
||||||
'os-admin-hostname', 'os-public-hostname', 'os-access-hostname']
|
|
||||||
conf = {}
|
|
||||||
for setting in settings:
|
|
||||||
if exclude_keys and setting in exclude_keys:
|
|
||||||
continue
|
|
||||||
|
|
||||||
conf[setting] = config_get(setting)
|
|
||||||
|
|
||||||
if not valid_hacluster_config():
|
|
||||||
raise HAIncorrectConfig('Insufficient or incorrect config data to '
|
|
||||||
'configure hacluster.')
|
|
||||||
return conf
|
|
||||||
|
|
||||||
|
|
||||||
def valid_hacluster_config():
|
|
||||||
'''
|
|
||||||
Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
|
|
||||||
must be set.
|
|
||||||
|
|
||||||
Note: ha-bindiface and ha-macastport both have defaults and will always
|
|
||||||
be set. We only care that either vip or dns-ha is set.
|
|
||||||
|
|
||||||
:returns: boolean: valid config returns true.
|
|
||||||
raises: HAIncompatibileConfig if settings conflict.
|
|
||||||
raises: HAIncompleteConfig if settings are missing.
|
|
||||||
'''
|
|
||||||
vip = config_get('vip')
|
|
||||||
dns = config_get('dns-ha')
|
|
||||||
if not(bool(vip) ^ bool(dns)):
|
|
||||||
msg = ('HA: Either vip or dns-ha must be set but not both in order to '
|
|
||||||
'use high availability')
|
|
||||||
status_set('blocked', msg)
|
|
||||||
raise HAIncorrectConfig(msg)
|
|
||||||
|
|
||||||
# If dns-ha then one of os-*-hostname must be set
|
|
||||||
if dns:
|
|
||||||
dns_settings = ['os-internal-hostname', 'os-admin-hostname',
|
|
||||||
'os-public-hostname', 'os-access-hostname']
|
|
||||||
# At this point it is unknown if one or all of the possible
|
|
||||||
# network spaces are in HA. Validate at least one is set which is
|
|
||||||
# the minimum required.
|
|
||||||
for setting in dns_settings:
|
|
||||||
if config_get(setting):
|
|
||||||
log('DNS HA: At least one hostname is set {}: {}'
|
|
||||||
''.format(setting, config_get(setting)),
|
|
||||||
level=DEBUG)
|
|
||||||
return True
|
|
||||||
|
|
||||||
msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
|
|
||||||
'DNS HA')
|
|
||||||
status_set('blocked', msg)
|
|
||||||
raise HAIncompleteConfig(msg)
|
|
||||||
|
|
||||||
log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def canonical_url(configs, vip_setting='vip'):
|
|
||||||
'''
|
|
||||||
Returns the correct HTTP URL to this host given the state of HTTPS
|
|
||||||
configuration and hacluster.
|
|
||||||
|
|
||||||
:configs : OSTemplateRenderer: A config tempating object to inspect for
|
|
||||||
a complete https context.
|
|
||||||
|
|
||||||
:vip_setting: str: Setting in charm config that specifies
|
|
||||||
VIP address.
|
|
||||||
'''
|
|
||||||
scheme = 'http'
|
|
||||||
if 'https' in configs.complete_contexts():
|
|
||||||
scheme = 'https'
|
|
||||||
if is_clustered():
|
|
||||||
addr = config_get(vip_setting)
|
|
||||||
else:
|
|
||||||
addr = unit_get('private-address')
|
|
||||||
return '%s://%s' % (scheme, addr)
|
|
||||||
|
|
||||||
|
|
||||||
def distributed_wait(modulo=None, wait=None, operation_name='operation'):
|
|
||||||
''' Distribute operations by waiting based on modulo_distribution
|
|
||||||
|
|
||||||
If modulo and or wait are not set, check config_get for those values.
|
|
||||||
If config values are not set, default to modulo=3 and wait=30.
|
|
||||||
|
|
||||||
:param modulo: int The modulo number creates the group distribution
|
|
||||||
:param wait: int The constant time wait value
|
|
||||||
:param operation_name: string Operation name for status message
|
|
||||||
i.e. 'restart'
|
|
||||||
:side effect: Calls config_get()
|
|
||||||
:side effect: Calls log()
|
|
||||||
:side effect: Calls status_set()
|
|
||||||
:side effect: Calls time.sleep()
|
|
||||||
'''
|
|
||||||
if modulo is None:
|
|
||||||
modulo = config_get('modulo-nodes') or 3
|
|
||||||
if wait is None:
|
|
||||||
wait = config_get('known-wait') or 30
|
|
||||||
if juju_is_leader():
|
|
||||||
# The leader should never wait
|
|
||||||
calculated_wait = 0
|
|
||||||
else:
|
|
||||||
# non_zero_wait=True guarantees the non-leader who gets modulo 0
|
|
||||||
# will still wait
|
|
||||||
calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
|
|
||||||
non_zero_wait=True)
|
|
||||||
msg = "Waiting {} seconds for {} ...".format(calculated_wait,
|
|
||||||
operation_name)
|
|
||||||
log(msg, DEBUG)
|
|
||||||
status_set('maintenance', msg)
|
|
||||||
time.sleep(calculated_wait)
|
|
||||||
|
|
||||||
|
|
||||||
def get_managed_services_and_ports(services, external_ports,
|
|
||||||
external_services=None,
|
|
||||||
port_conv_f=determine_apache_port_single):
|
|
||||||
"""Get the services and ports managed by this charm.
|
|
||||||
|
|
||||||
Return only the services and corresponding ports that are managed by this
|
|
||||||
charm. This excludes haproxy when there is a relation with hacluster. This
|
|
||||||
is because this charm passes responsibility for stopping and starting
|
|
||||||
haproxy to hacluster.
|
|
||||||
|
|
||||||
Similarly, if a relation with hacluster exists then the ports returned by
|
|
||||||
this method correspond to those managed by the apache server rather than
|
|
||||||
haproxy.
|
|
||||||
|
|
||||||
:param services: List of services.
|
|
||||||
:type services: List[str]
|
|
||||||
:param external_ports: List of ports managed by external services.
|
|
||||||
:type external_ports: List[int]
|
|
||||||
:param external_services: List of services to be removed if ha relation is
|
|
||||||
present.
|
|
||||||
:type external_services: List[str]
|
|
||||||
:param port_conv_f: Function to apply to ports to calculate the ports
|
|
||||||
managed by services controlled by this charm.
|
|
||||||
:type port_convert_func: f()
|
|
||||||
:returns: A tuple containing a list of services first followed by a list of
|
|
||||||
ports.
|
|
||||||
:rtype: Tuple[List[str], List[int]]
|
|
||||||
"""
|
|
||||||
if external_services is None:
|
|
||||||
external_services = ['haproxy']
|
|
||||||
if relation_ids('ha'):
|
|
||||||
for svc in external_services:
|
|
||||||
try:
|
|
||||||
services.remove(svc)
|
|
||||||
except ValueError:
|
|
||||||
pass
|
|
||||||
external_ports = [port_conv_f(p) for p in external_ports]
|
|
||||||
return services, external_ports
|
|
||||||
|
|
@ -1,38 +0,0 @@
|
||||||
# Juju charm-helpers hardening library
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
This library provides multiple implementations of system and application
|
|
||||||
hardening that conform to the standards of http://hardening.io/.
|
|
||||||
|
|
||||||
Current implementations include:
|
|
||||||
|
|
||||||
* OS
|
|
||||||
* SSH
|
|
||||||
* MySQL
|
|
||||||
* Apache
|
|
||||||
|
|
||||||
## Requirements
|
|
||||||
|
|
||||||
* Juju Charms
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
1. Synchronise this library into your charm and add the harden() decorator
|
|
||||||
(from contrib.hardening.harden) to any functions or methods you want to use
|
|
||||||
to trigger hardening of your application/system.
|
|
||||||
|
|
||||||
2. Add a config option called 'harden' to your charm config.yaml and set it to
|
|
||||||
a space-delimited list of hardening modules you want to run e.g. "os ssh"
|
|
||||||
|
|
||||||
3. Override any config defaults (contrib.hardening.defaults) by adding a file
|
|
||||||
called hardening.yaml to your charm root containing the name(s) of the
|
|
||||||
modules whose settings you want override at root level and then any settings
|
|
||||||
with overrides e.g.
|
|
||||||
|
|
||||||
os:
|
|
||||||
general:
|
|
||||||
desktop_enable: True
|
|
||||||
|
|
||||||
4. Now just run your charm as usual and hardening will be applied each time the
|
|
||||||
hook runs.
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
@ -1,17 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from os import path
|
|
||||||
|
|
||||||
TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.apache.checks import config
|
|
||||||
|
|
||||||
|
|
||||||
def run_apache_checks():
|
|
||||||
log("Starting Apache hardening checks.", level=DEBUG)
|
|
||||||
checks = config.get_audits()
|
|
||||||
for check in checks:
|
|
||||||
log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
|
|
||||||
check.ensure_compliance()
|
|
||||||
|
|
||||||
log("Apache hardening checks complete.", level=DEBUG)
|
|
||||||
|
|
@ -1,104 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import six
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import (
|
|
||||||
FilePermissionAudit,
|
|
||||||
DirectoryPermissionAudit,
|
|
||||||
NoReadWriteForOther,
|
|
||||||
TemplatedFile,
|
|
||||||
DeletedFile
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit
|
|
||||||
from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get Apache hardening config audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0:
|
|
||||||
log("Apache server does not appear to be installed on this node - "
|
|
||||||
"skipping apache hardening", level=INFO)
|
|
||||||
return []
|
|
||||||
|
|
||||||
context = ApacheConfContext()
|
|
||||||
settings = utils.get_settings('apache')
|
|
||||||
audits = [
|
|
||||||
FilePermissionAudit(paths=os.path.join(
|
|
||||||
settings['common']['apache_dir'], 'apache2.conf'),
|
|
||||||
user='root', group='root', mode=0o0640),
|
|
||||||
|
|
||||||
TemplatedFile(os.path.join(settings['common']['apache_dir'],
|
|
||||||
'mods-available/alias.conf'),
|
|
||||||
context,
|
|
||||||
TEMPLATES_DIR,
|
|
||||||
mode=0o0640,
|
|
||||||
user='root',
|
|
||||||
service_actions=[{'service': 'apache2',
|
|
||||||
'actions': ['restart']}]),
|
|
||||||
|
|
||||||
TemplatedFile(os.path.join(settings['common']['apache_dir'],
|
|
||||||
'conf-enabled/99-hardening.conf'),
|
|
||||||
context,
|
|
||||||
TEMPLATES_DIR,
|
|
||||||
mode=0o0640,
|
|
||||||
user='root',
|
|
||||||
service_actions=[{'service': 'apache2',
|
|
||||||
'actions': ['restart']}]),
|
|
||||||
|
|
||||||
DirectoryPermissionAudit(settings['common']['apache_dir'],
|
|
||||||
user='root',
|
|
||||||
group='root',
|
|
||||||
mode=0o0750),
|
|
||||||
|
|
||||||
DisabledModuleAudit(settings['hardening']['modules_to_disable']),
|
|
||||||
|
|
||||||
NoReadWriteForOther(settings['common']['apache_dir']),
|
|
||||||
|
|
||||||
DeletedFile(['/var/www/html/index.html'])
|
|
||||||
]
|
|
||||||
|
|
||||||
return audits
|
|
||||||
|
|
||||||
|
|
||||||
class ApacheConfContext(object):
|
|
||||||
"""Defines the set of key/value pairs to set in a apache config file.
|
|
||||||
|
|
||||||
This context, when called, will return a dictionary containing the
|
|
||||||
key/value pairs of setting to specify in the
|
|
||||||
/etc/apache/conf-enabled/hardening.conf file.
|
|
||||||
"""
|
|
||||||
def __call__(self):
|
|
||||||
settings = utils.get_settings('apache')
|
|
||||||
ctxt = settings['hardening']
|
|
||||||
|
|
||||||
out = subprocess.check_output(['apache2', '-v'])
|
|
||||||
if six.PY3:
|
|
||||||
out = out.decode('utf-8')
|
|
||||||
ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
|
|
||||||
out).group(1)
|
|
||||||
ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
|
|
||||||
return ctxt
|
|
||||||
|
|
@ -1,32 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
|
|
||||||
<Location / >
|
|
||||||
<LimitExcept {{ allowed_http_methods }} >
|
|
||||||
# http://httpd.apache.org/docs/2.4/upgrading.html
|
|
||||||
{% if apache_version > '2.2' -%}
|
|
||||||
Require all granted
|
|
||||||
{% else -%}
|
|
||||||
Order Allow,Deny
|
|
||||||
Deny from all
|
|
||||||
{% endif %}
|
|
||||||
</LimitExcept>
|
|
||||||
</Location>
|
|
||||||
|
|
||||||
<Directory />
|
|
||||||
Options -Indexes -FollowSymLinks
|
|
||||||
AllowOverride None
|
|
||||||
</Directory>
|
|
||||||
|
|
||||||
<Directory /var/www/>
|
|
||||||
Options -Indexes -FollowSymLinks
|
|
||||||
AllowOverride None
|
|
||||||
</Directory>
|
|
||||||
|
|
||||||
TraceEnable {{ traceenable }}
|
|
||||||
ServerTokens {{ servertokens }}
|
|
||||||
|
|
||||||
SSLHonorCipherOrder {{ honor_cipher_order }}
|
|
||||||
SSLCipherSuite {{ cipher_suite }}
|
|
||||||
|
|
@ -1,31 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
<IfModule alias_module>
|
|
||||||
#
|
|
||||||
# Aliases: Add here as many aliases as you need (with no limit). The format is
|
|
||||||
# Alias fakename realname
|
|
||||||
#
|
|
||||||
# Note that if you include a trailing / on fakename then the server will
|
|
||||||
# require it to be present in the URL. So "/icons" isn't aliased in this
|
|
||||||
# example, only "/icons/". If the fakename is slash-terminated, then the
|
|
||||||
# realname must also be slash terminated, and if the fakename omits the
|
|
||||||
# trailing slash, the realname must also omit it.
|
|
||||||
#
|
|
||||||
# We include the /icons/ alias for FancyIndexed directory listings. If
|
|
||||||
# you do not use FancyIndexing, you may comment this out.
|
|
||||||
#
|
|
||||||
Alias /icons/ "{{ apache_icondir }}/"
|
|
||||||
|
|
||||||
<Directory "{{ apache_icondir }}">
|
|
||||||
Options -Indexes -MultiViews -FollowSymLinks
|
|
||||||
AllowOverride None
|
|
||||||
{% if apache_version == '2.4' -%}
|
|
||||||
Require all granted
|
|
||||||
{% else -%}
|
|
||||||
Order allow,deny
|
|
||||||
Allow from all
|
|
||||||
{% endif %}
|
|
||||||
</Directory>
|
|
||||||
</IfModule>
|
|
||||||
|
|
@ -1,54 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
|
|
||||||
class BaseAudit(object): # NO-QA
|
|
||||||
"""Base class for hardening checks.
|
|
||||||
|
|
||||||
The lifecycle of a hardening check is to first check to see if the system
|
|
||||||
is in compliance for the specified check. If it is not in compliance, the
|
|
||||||
check method will return a value which will be supplied to the.
|
|
||||||
"""
|
|
||||||
def __init__(self, *args, **kwargs):
|
|
||||||
self.unless = kwargs.get('unless', None)
|
|
||||||
super(BaseAudit, self).__init__()
|
|
||||||
|
|
||||||
def ensure_compliance(self):
|
|
||||||
"""Checks to see if the current hardening check is in compliance or
|
|
||||||
not.
|
|
||||||
|
|
||||||
If the check that is performed is not in compliance, then an exception
|
|
||||||
should be raised.
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
def _take_action(self):
|
|
||||||
"""Determines whether to perform the action or not.
|
|
||||||
|
|
||||||
Checks whether or not an action should be taken. This is determined by
|
|
||||||
the truthy value for the unless parameter. If unless is a callback
|
|
||||||
method, it will be invoked with no parameters in order to determine
|
|
||||||
whether or not the action should be taken. Otherwise, the truthy value
|
|
||||||
of the unless attribute will determine if the action should be
|
|
||||||
performed.
|
|
||||||
"""
|
|
||||||
# Do the action if there isn't an unless override.
|
|
||||||
if self.unless is None:
|
|
||||||
return True
|
|
||||||
|
|
||||||
# Invoke the callback if there is one.
|
|
||||||
if hasattr(self.unless, '__call__'):
|
|
||||||
return not self.unless()
|
|
||||||
|
|
||||||
return not self.unless
|
|
||||||
|
|
@ -1,105 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
ERROR,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.audits import BaseAudit
|
|
||||||
|
|
||||||
|
|
||||||
class DisabledModuleAudit(BaseAudit):
|
|
||||||
"""Audits Apache2 modules.
|
|
||||||
|
|
||||||
Determines if the apache2 modules are enabled. If the modules are enabled
|
|
||||||
then they are removed in the ensure_compliance.
|
|
||||||
"""
|
|
||||||
def __init__(self, modules):
|
|
||||||
if modules is None:
|
|
||||||
self.modules = []
|
|
||||||
elif isinstance(modules, six.string_types):
|
|
||||||
self.modules = [modules]
|
|
||||||
else:
|
|
||||||
self.modules = modules
|
|
||||||
|
|
||||||
def ensure_compliance(self):
|
|
||||||
"""Ensures that the modules are not loaded."""
|
|
||||||
if not self.modules:
|
|
||||||
return
|
|
||||||
|
|
||||||
try:
|
|
||||||
loaded_modules = self._get_loaded_modules()
|
|
||||||
non_compliant_modules = []
|
|
||||||
for module in self.modules:
|
|
||||||
if module in loaded_modules:
|
|
||||||
log("Module '%s' is enabled but should not be." %
|
|
||||||
(module), level=INFO)
|
|
||||||
non_compliant_modules.append(module)
|
|
||||||
|
|
||||||
if len(non_compliant_modules) == 0:
|
|
||||||
return
|
|
||||||
|
|
||||||
for module in non_compliant_modules:
|
|
||||||
self._disable_module(module)
|
|
||||||
self._restart_apache()
|
|
||||||
except subprocess.CalledProcessError as e:
|
|
||||||
log('Error occurred auditing apache module compliance. '
|
|
||||||
'This may have been already reported. '
|
|
||||||
'Output is: %s' % e.output, level=ERROR)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _get_loaded_modules():
|
|
||||||
"""Returns the modules which are enabled in Apache."""
|
|
||||||
output = subprocess.check_output(['apache2ctl', '-M'])
|
|
||||||
if six.PY3:
|
|
||||||
output = output.decode('utf-8')
|
|
||||||
modules = []
|
|
||||||
for line in output.splitlines():
|
|
||||||
# Each line of the enabled module output looks like:
|
|
||||||
# module_name (static|shared)
|
|
||||||
# Plus a header line at the top of the output which is stripped
|
|
||||||
# out by the regex.
|
|
||||||
matcher = re.search(r'^ (\S*)_module (\S*)', line)
|
|
||||||
if matcher:
|
|
||||||
modules.append(matcher.group(1))
|
|
||||||
return modules
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _disable_module(module):
|
|
||||||
"""Disables the specified module in Apache."""
|
|
||||||
try:
|
|
||||||
subprocess.check_call(['a2dismod', module])
|
|
||||||
except subprocess.CalledProcessError as e:
|
|
||||||
# Note: catch error here to allow the attempt of disabling
|
|
||||||
# multiple modules in one go rather than failing after the
|
|
||||||
# first module fails.
|
|
||||||
log('Error occurred disabling module %s. '
|
|
||||||
'Output is: %s' % (module, e.output), level=ERROR)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _restart_apache():
|
|
||||||
"""Restarts the apache process"""
|
|
||||||
subprocess.check_output(['service', 'apache2', 'restart'])
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def is_ssl_enabled():
|
|
||||||
"""Check if SSL module is enabled or not"""
|
|
||||||
return 'ssl' in DisabledModuleAudit._get_loaded_modules()
|
|
||||||
|
|
@ -1,104 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from __future__ import absolute_import # required for external apt import
|
|
||||||
from six import string_types
|
|
||||||
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
apt_cache,
|
|
||||||
apt_purge
|
|
||||||
)
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
WARNING,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.audits import BaseAudit
|
|
||||||
from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg
|
|
||||||
|
|
||||||
|
|
||||||
class AptConfig(BaseAudit):
|
|
||||||
|
|
||||||
def __init__(self, config, **kwargs):
|
|
||||||
self.config = config
|
|
||||||
|
|
||||||
def verify_config(self):
|
|
||||||
apt_pkg.init()
|
|
||||||
for cfg in self.config:
|
|
||||||
value = apt_pkg.config.get(cfg['key'], cfg.get('default', ''))
|
|
||||||
if value and value != cfg['expected']:
|
|
||||||
log("APT config '%s' has unexpected value '%s' "
|
|
||||||
"(expected='%s')" %
|
|
||||||
(cfg['key'], value, cfg['expected']), level=WARNING)
|
|
||||||
|
|
||||||
def ensure_compliance(self):
|
|
||||||
self.verify_config()
|
|
||||||
|
|
||||||
|
|
||||||
class RestrictedPackages(BaseAudit):
    """Class used to audit restricted packages on the system.

    Any package in the restricted list that is found installed is purged;
    virtual packages are handled by purging whatever provides them.
    """

    def __init__(self, pkgs, **kwargs):
        """
        :param pkgs: a whitespace-separated string of package names, or any
                     iterable of package names.
        """
        super(RestrictedPackages, self).__init__(**kwargs)
        # Normalise to a list: a plain string (or any non-iterable) is split
        # on whitespace.
        if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
            self.pkgs = pkgs.split()
        else:
            self.pkgs = pkgs

    def ensure_compliance(self):
        """Purge any restricted package found installed on the system."""
        cache = apt_cache()

        for p in self.pkgs:
            # Unknown to the apt cache - nothing to audit.
            if p not in cache:
                continue

            pkg = cache[p]
            if not self.is_virtual_package(pkg):
                if not pkg.current_ver:
                    # Known to the cache but not installed - nothing to do.
                    log("Package '%s' is not installed." % pkg.name,
                        level=DEBUG)
                    continue
                else:
                    log("Restricted package '%s' is installed" % pkg.name,
                        level=WARNING)
                    self.delete_package(cache, pkg)
            else:
                # Virtual packages have no installable version of their own;
                # delegate to delete_package, which purges the providers.
                log("Checking restricted virtual package '%s' provides" %
                    pkg.name, level=DEBUG)
                self.delete_package(cache, pkg)

    def delete_package(self, cache, pkg):
        """Deletes the package from the system.

        Deletes the package from the system, properly handling virtual
        packages by recursing into each package that provides them.

        :param cache: the apt cache
        :param pkg: the package to remove
        """
        if self.is_virtual_package(pkg):
            log("Package '%s' appears to be virtual - purging provides" %
                pkg.name, level=DEBUG)
            # Each provides_list entry is a tuple whose third element links
            # back to the providing package version; purge its parent package.
            for _p in pkg.provides_list:
                self.delete_package(cache, _p[2].parent_pkg)
        elif not pkg.current_ver:
            log("Package '%s' not installed" % pkg.name, level=DEBUG)
            return
        else:
            log("Purging package '%s'" % pkg.name, level=DEBUG)
            apt_purge(pkg.name)

    def is_virtual_package(self, pkg):
        # A virtual package provides other packages but has no versions of
        # its own (dict-style access per the ubuntu_apt_pkg package objects).
        return (pkg.get('has_provides', False) and
                not pkg.get('has_versions', False))
|
|
||||||
|
|
@ -1,550 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import grp
|
|
||||||
import os
|
|
||||||
import pwd
|
|
||||||
import re
|
|
||||||
|
|
||||||
from subprocess import (
|
|
||||||
CalledProcessError,
|
|
||||||
check_output,
|
|
||||||
check_call,
|
|
||||||
)
|
|
||||||
from traceback import format_exc
|
|
||||||
from six import string_types
|
|
||||||
from stat import (
|
|
||||||
S_ISGID,
|
|
||||||
S_ISUID
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
INFO,
|
|
||||||
WARNING,
|
|
||||||
ERROR,
|
|
||||||
)
|
|
||||||
from charmhelpers.core import unitdata
|
|
||||||
from charmhelpers.core.host import file_hash
|
|
||||||
from charmhelpers.contrib.hardening.audits import BaseAudit
|
|
||||||
from charmhelpers.contrib.hardening.templating import (
|
|
||||||
get_template_path,
|
|
||||||
render_and_write,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
class BaseFileAudit(BaseAudit):
    """Base class for file audits.

    Provides api stubs for compliance check flow that must be used by any class
    that implemented this one.

    Subclasses implement :meth:`is_compliant` and :meth:`comply`; the
    :meth:`ensure_compliance` driver iterates the registered paths and applies
    corrective action where needed.
    """

    def __init__(self, paths, always_comply=False, *args, **kwargs):
        """
        :param paths: string path of list of paths of files we want to apply
                      compliance checks are criteria to.
        :param always_comply: if true compliance criteria is always applied
                              else compliance is skipped for non-existent
                              paths.
        """
        super(BaseFileAudit, self).__init__(*args, **kwargs)
        self.always_comply = always_comply
        # Normalise to a list: a plain string (or any non-iterable) counts as
        # a single path.
        if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
            self.paths = [paths]
        else:
            self.paths = paths

    def ensure_compliance(self):
        """Ensure that the all registered files comply to registered criteria.
        """
        for p in self.paths:
            if os.path.exists(p):
                if self.is_compliant(p):
                    continue

                log('File %s is not in compliance.' % p, level=INFO)
            else:
                if not self.always_comply:
                    log("Non-existent path '%s' - skipping compliance check"
                        % (p), level=INFO)
                    continue

            # _take_action() is presumably provided by BaseAudit (not visible
            # here); it gates whether corrective action actually runs.
            if self._take_action():
                log("Applying compliance criteria to '%s'" % (p), level=INFO)
                self.comply(p)

    def is_compliant(self, path):
        """Audits the path to see if it is compliance.

        Must be overridden by subclasses; returns a boolean.

        :param path: the path to the file that should be checked.
        """
        raise NotImplementedError

    def comply(self, path):
        """Enforces the compliance of a path.

        Must be overridden by subclasses.

        :param path: the path to the file that should be enforced.
        """
        raise NotImplementedError

    @classmethod
    def _get_stat(cls, path):
        """Returns the Posix st_stat information for the specified file path.

        :param path: the path to get the st_stat information for.
        :returns: an st_stat object for the path or None if the path doesn't
                  exist.
        """
        # NOTE(review): despite the docstring, os.stat raises OSError for a
        # missing path rather than returning None; callers only invoke this
        # for paths already known to exist.
        return os.stat(path)
|
|
||||||
|
|
||||||
|
|
||||||
class FilePermissionAudit(BaseFileAudit):
    """Implements an audit for file permissions and ownership for a user.

    This class implements functionality that ensures that a specific user/group
    will own the file(s) specified and that the permissions specified are
    applied properly to the file.
    """

    def __init__(self, paths, user, group=None, mode=0o600, **kwargs):
        """
        :param paths: path(s) whose ownership/permissions are audited
        :param user: name of the expected owning user
        :param group: name of the expected owning group; defaults to the
                      user's primary group when not given
        :param mode: expected permission bits (default 0o600)
        """
        self.user = user
        self.group = group
        self.mode = mode
        # BUG FIX: user/group/mode used to be forwarded positionally into
        # BaseFileAudit's (paths, always_comply=False, *args) signature,
        # silently forcing always_comply to a truthy value (the pwd entry).
        # Only paths and keyword arguments belong to the base initialiser.
        super(FilePermissionAudit, self).__init__(paths, **kwargs)

    @property
    def user(self):
        return self._user

    @user.setter
    def user(self, name):
        # Resolve the name to a pwd struct; an unknown user is logged and
        # stored as None (is_compliant would then fail loudly on attribute
        # access - NOTE(review): confirm desired behaviour for bad config).
        try:
            user = pwd.getpwnam(name)
        except KeyError:
            log('Unknown user %s' % name, level=ERROR)
            user = None
        self._user = user

    @property
    def group(self):
        return self._group

    @group.setter
    def group(self, name):
        try:
            group = None
            if name:
                group = grp.getgrnam(name)
            else:
                # No explicit group: fall back to the user's primary group.
                group = grp.getgrgid(self.user.pw_gid)
        except KeyError:
            log('Unknown group %s' % name, level=ERROR)
        self._group = group

    def is_compliant(self, path):
        """Checks if the path is in compliance.

        Used to determine if the path specified meets the necessary
        requirements to be in compliance with the check itself.

        :param path: the file path to check
        :returns: True if the path is compliant, False otherwise.
        """
        stat = self._get_stat(path)
        user = self.user
        group = self.group

        compliant = True
        if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid:
            log('File %s is not owned by %s:%s.' % (path, user.pw_name,
                                                    group.gr_name),
                level=INFO)
            compliant = False

        # POSIX refers to the st_mode bits as corresponding to both the
        # file type and file permission bits, where the least significant 12
        # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the
        # file permission bits (8-0)
        perms = stat.st_mode & 0o7777
        if perms != self.mode:
            log('File %s has incorrect permissions, currently set to %s' %
                (path, oct(stat.st_mode & 0o7777)), level=INFO)
            compliant = False

        return compliant

    def comply(self, path):
        """Issues a chown and chmod to the file paths specified."""
        utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name,
                                 self.mode)
|
|
||||||
|
|
||||||
|
|
||||||
class DirectoryPermissionAudit(FilePermissionAudit):
    """Performs a permission check for the specified directory path."""

    def __init__(self, paths, user, group=None, mode=0o600,
                 recursive=True, **kwargs):
        """
        :param paths: directory path(s) to audit
        :param user: name of the expected owning user
        :param group: name of the expected owning group
        :param mode: expected permission bits (default 0o600)
        :param recursive: when True (default), audit the directory tree
                          rather than just the given path.
        """
        super(DirectoryPermissionAudit, self).__init__(paths, user, group,
                                                       mode, **kwargs)
        self.recursive = recursive

    def is_compliant(self, path):
        """Checks if the directory is compliant.

        Used to determine if the path specified and all of its children
        directories are in compliance with the check itself.

        :param path: the directory path to check
        :returns: True if the directory tree is compliant, otherwise False.
        :raises ValueError: if path is not a directory.
        """
        if not os.path.isdir(path):
            log('Path specified %s is not a directory.' % path, level=ERROR)
            raise ValueError("%s is not a directory." % path)

        if not self.recursive:
            return super(DirectoryPermissionAudit, self).is_compliant(path)

        compliant = True
        # Only leaf directories (those without subdirectories) are checked;
        # intermediate directories are skipped.
        for root, dirs, _ in os.walk(path):
            if len(dirs) > 0:
                continue

            if not super(DirectoryPermissionAudit, self).is_compliant(root):
                compliant = False
                continue

        return compliant

    def comply(self, path):
        # BUG FIX: compliance was previously applied only to directories
        # that *have* subdirectories (``if len(dirs) > 0: comply(root)``),
        # while is_compliant() inspects only the leaf directories - so the
        # very directories reported as non-compliant were never corrected.
        # Mirror is_compliant() and fix the leaf directories instead.
        for root, dirs, _ in os.walk(path):
            if len(dirs) > 0:
                continue
            super(DirectoryPermissionAudit, self).comply(root)
|
|
||||||
|
|
||||||
|
|
||||||
class ReadOnly(BaseFileAudit):
    """Audits that files and folders are read only."""

    def __init__(self, paths, *args, **kwargs):
        super(ReadOnly, self).__init__(paths=paths, *args, **kwargs)

    def is_compliant(self, path):
        """Return True when nothing under path is group/world writable.

        Uses find(1) to locate regular files with write permission for
        group or other; any hit means the path is non-compliant.
        """
        find_cmd = ['find', path, '-perm', '-go+w', '-type', 'f']
        try:
            writable_files = check_output(find_cmd).strip()
        except CalledProcessError as e:
            log('Error occurred checking finding writable files for %s. '
                'Error information is: command %s failed with returncode '
                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
                                           format_exc(e)), level=ERROR)
            return False
        # No output from find means no overly-writable files were found.
        return not writable_files

    def comply(self, path):
        """Recursively strip group/other write permission from path."""
        try:
            check_output(['chmod', 'go-w', '-R', path])
        except CalledProcessError as e:
            log('Error occurred removing writeable permissions for %s. '
                'Error information is: command %s failed with returncode '
                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
                                           format_exc(e)), level=ERROR)
|
|
||||||
|
|
||||||
|
|
||||||
class NoReadWriteForOther(BaseFileAudit):
    """Ensures that the files found under the base path are not readable or
    writable by anyone other than the owner or the group.
    """

    def __init__(self, paths):
        super(NoReadWriteForOther, self).__init__(paths)

    def is_compliant(self, path):
        """Return True when no file under path is readable/writable by other.

        :param path: base path to scan with find(1).
        :returns: boolean compliance result; False when the scan itself
                  fails.
        """
        try:
            cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o',
                   '-perm', '-o+w', '-type', 'f']
            output = check_output(cmd).strip()

            # The find above here will find any files which have read or
            # write permissions for other, meaning there is too broad of access
            # to read/write the file. As such, the path is compliant if there's
            # no output.
            if output:
                return False

            return True
        except CalledProcessError as e:
            log('Error occurred while finding files which are readable or '
                'writable to the world in %s. '
                'Command output is: %s.' % (path, e.output), level=ERROR)
            # BUG FIX: a failed scan previously fell through and implicitly
            # returned None; treat it as non-compliant explicitly.
            return False

    def comply(self, path):
        """Recursively remove read/write permission for 'other' under path."""
        try:
            check_output(['chmod', '-R', 'o-rw', path])
        except CalledProcessError as e:
            # BUG FIX: log at ERROR for consistency with the other audits
            # (the level keyword was previously omitted).
            log('Error occurred attempting to change modes of files under '
                'path %s. Output of command is: %s' % (path, e.output),
                level=ERROR)
|
|
||||||
|
|
||||||
|
|
||||||
class NoSUIDSGIDAudit(BaseFileAudit):
    """Audits that specified files do not have SUID/SGID bits set."""

    def __init__(self, paths, *args, **kwargs):
        super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs)

    def is_compliant(self, path):
        """Return True when neither the setuid nor the setgid bit is set."""
        mode = self._get_stat(path).st_mode
        return not (mode & (S_ISGID | S_ISUID))

    def comply(self, path):
        """Strip any suid/sgid bits from path via chmod."""
        try:
            log('Removing suid/sgid from %s.' % path, level=DEBUG)
            check_output(['chmod', '-s', path])
        except CalledProcessError as e:
            log('Error occurred removing suid/sgid from %s.'
                'Error information is: command %s failed with returncode '
                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
                                           format_exc(e)), level=ERROR)
|
|
||||||
|
|
||||||
|
|
||||||
class TemplatedFile(BaseFileAudit):
    """The TemplatedFileAudit audits the contents of a templated file.

    This audit renders a file from a template, sets the appropriate file
    permissions, then generates a hashsum with which to check the content
    changed.
    """
    def __init__(self, path, context, template_dir, mode, user='root',
                 group='root', service_actions=None, **kwargs):
        """
        :param path: path of the rendered target file
        :param context: callable returning the template rendering context
        :param template_dir: directory containing the template for path
        :param mode: permission bits to enforce on the rendered file
        :param user: owning user name (default 'root')
        :param group: owning group name (default 'root')
        :param service_actions: optional list of dicts of the form
            ``{'service': <name>, 'actions': [<action>, ...]}`` executed
            after the file is (re)rendered.
        """
        self.context = context
        self.user = user
        self.group = group
        self.mode = mode
        self.template_dir = template_dir
        self.service_actions = service_actions
        # always_comply=True so the file is rendered even when it does not
        # exist yet (BaseFileAudit would otherwise skip missing paths).
        super(TemplatedFile, self).__init__(paths=path, always_comply=True,
                                            **kwargs)

    def is_compliant(self, path):
        """Determines if the templated file is compliant.

        A templated file is only compliant if it has not changed (as
        determined by its sha256 hashsum) AND its file permissions are set
        appropriately.

        :param path: the path to check compliance.
        """
        same_templates = self.templates_match(path)
        same_content = self.contents_match(path)
        same_permissions = self.permissions_match(path)

        if same_content and same_permissions and same_templates:
            return True

        return False

    def run_service_actions(self):
        """Run any actions on services requested."""
        if not self.service_actions:
            return

        for svc_action in self.service_actions:
            name = svc_action['service']
            actions = svc_action['actions']
            log("Running service '%s' actions '%s'" % (name, actions),
                level=DEBUG)
            for action in actions:
                cmd = ['service', name, action]
                try:
                    check_call(cmd)
                except CalledProcessError as exc:
                    # Best-effort: a failed service action is logged but does
                    # not abort the compliance run.
                    log("Service name='%s' action='%s' failed - %s" %
                        (name, action, exc), level=WARNING)

    def comply(self, path):
        """Ensures the contents and the permissions of the file.

        :param path: the path to correct
        """
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        self.pre_write()
        render_and_write(self.template_dir, path, self.context())
        utils.ensure_permissions(path, self.user, self.group, self.mode)
        self.run_service_actions()
        self.save_checksum(path)
        self.post_write()

    def pre_write(self):
        """Invoked prior to writing the template."""
        pass

    def post_write(self):
        """Invoked after writing the template."""
        pass

    def templates_match(self, path):
        """Determines if the template files are the same.

        The template file equality is determined by the hashsum of the
        template files themselves. If there is no hashsum, then the content
        cannot be sure to be the same so treat it as if they changed.
        Otherwise, return whether or not the hashsums are the same.

        :param path: the path to check
        :returns: boolean
        """
        template_path = get_template_path(self.template_dir, path)
        key = 'hardening:template:%s' % template_path
        template_checksum = file_hash(template_path)
        kv = unitdata.kv()
        stored_tmplt_checksum = kv.get(key)
        if not stored_tmplt_checksum:
            # First sighting of this template: record its checksum.
            kv.set(key, template_checksum)
            kv.flush()
            log('Saved template checksum for %s.' % template_path,
                level=DEBUG)
            # Since we don't have a template checksum, then assume it doesn't
            # match and return that the template is different.
            return False
        elif stored_tmplt_checksum != template_checksum:
            # Template changed on disk: update the stored checksum.
            kv.set(key, template_checksum)
            kv.flush()
            log('Updated template checksum for %s.' % template_path,
                level=DEBUG)
            return False

        # Here the template hasn't changed based upon the calculated
        # checksum of the template and what was previously stored.
        return True

    def contents_match(self, path):
        """Determines if the file content is the same.

        This is determined by comparing hashsum of the file contents and
        the saved hashsum. If there is no hashsum, then the content cannot
        be sure to be the same so treat them as if they are not the same.
        Otherwise, return True if the hashsums are the same, False if they
        are not the same.

        :param path: the file to check.
        """
        checksum = file_hash(path)

        kv = unitdata.kv()
        stored_checksum = kv.get('hardening:%s' % path)
        if not stored_checksum:
            # If the checksum hasn't been generated, return False to ensure
            # the file is written and the checksum stored.
            log('Checksum for %s has not been calculated.' % path, level=DEBUG)
            return False
        elif stored_checksum != checksum:
            log('Checksum mismatch for %s.' % path, level=DEBUG)
            return False

        return True

    def permissions_match(self, path):
        """Determines if the file owner and permissions match.

        Delegates the ownership/mode check to a FilePermissionAudit.

        :param path: the path to check.
        """
        audit = FilePermissionAudit(path, self.user, self.group, self.mode)
        return audit.is_compliant(path)

    def save_checksum(self, path):
        """Calculates and saves the checksum for the path specified.

        :param path: the path of the file to save the checksum.
        """
        checksum = file_hash(path)
        kv = unitdata.kv()
        kv.set('hardening:%s' % path, checksum)
        kv.flush()
|
|
||||||
|
|
||||||
|
|
||||||
class DeletedFile(BaseFileAudit):
    """Audit to ensure that a file is deleted."""
    def __init__(self, paths):
        super(DeletedFile, self).__init__(paths)

    def is_compliant(self, path):
        # A path is compliant when it no longer exists on disk.
        return not os.path.exists(path)

    def comply(self, path):
        # Remove the offending file; os.remove raises OSError on failure.
        os.remove(path)
|
|
||||||
|
|
||||||
|
|
||||||
class FileContentAudit(BaseFileAudit):
    """Audit the contents of a file."""
    def __init__(self, paths, cases, **kwargs):
        """
        :param paths: path(s) of files whose contents are audited
        :param cases: dict with optional 'pass' and 'fail' keys, each a list
                      of regex patterns applied with re.MULTILINE.
        """
        # Cases we expect to pass
        self.pass_cases = cases.get('pass', [])
        # Cases we expect to fail
        self.fail_cases = cases.get('fail', [])
        super(FileContentAudit, self).__init__(paths, **kwargs)

    def is_compliant(self, path):
        """
        Given a set of content matching cases i.e. tuple(regex, bool) where
        bool value denotes whether or not regex is expected to match, check that
        all cases match as expected with the contents of the file. Cases can be
        expected to pass or fail.

        :param path: Path of file to check.
        :returns: Boolean value representing whether or not all cases are
                  found to be compliant.
        """
        log("Auditing contents of file '%s'" % (path), level=DEBUG)
        with open(path, 'r') as fd:
            contents = fd.read()

        matches = 0
        for pattern in self.pass_cases:
            key = re.compile(pattern, flags=re.MULTILINE)
            results = re.search(key, contents)
            if results:
                matches += 1
            else:
                log("Pattern '%s' was expected to pass but instead it failed"
                    % (pattern), level=WARNING)

        for pattern in self.fail_cases:
            key = re.compile(pattern, flags=re.MULTILINE)
            results = re.search(key, contents)
            if not results:
                matches += 1
            else:
                log("Pattern '%s' was expected to fail but instead it passed"
                    % (pattern), level=WARNING)

        # Compliant only when every pass-case matched and no fail-case did.
        total = len(self.pass_cases) + len(self.fail_cases)
        log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
        return matches == total

    def comply(self, *args, **kwargs):
        """NOOP since we just issue warnings. This is to avoid the
        NotImplementedError.
        """
        log("Not applying any compliance criteria, only checks.", level=INFO)
|
|
||||||
|
|
@ -1,16 +0,0 @@
|
||||||
# NOTE: this file contains the default configuration for the 'apache' hardening
|
|
||||||
# code. If you want to override any settings you must add them to a file
|
|
||||||
# called hardening.yaml in the root directory of your charm using the
|
|
||||||
# name 'apache' as the root key followed by any of the following with new
|
|
||||||
# values.
|
|
||||||
|
|
||||||
common:
|
|
||||||
apache_dir: '/etc/apache2'
|
|
||||||
|
|
||||||
hardening:
|
|
||||||
traceenable: 'off'
|
|
||||||
allowed_http_methods: "GET POST"
|
|
||||||
modules_to_disable: [ cgi, cgid ]
|
|
||||||
servertokens: 'Prod'
|
|
||||||
honor_cipher_order: 'on'
|
|
||||||
cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES'
|
|
||||||
|
|
@ -1,12 +0,0 @@
|
||||||
# NOTE: this schema must contain all valid keys from its associated defaults
|
|
||||||
# file. It is used to validate user-provided overrides.
|
|
||||||
common:
|
|
||||||
apache_dir:
|
|
||||||
traceenable:
|
|
||||||
|
|
||||||
hardening:
|
|
||||||
allowed_http_methods:
|
|
||||||
modules_to_disable:
|
|
||||||
servertokens:
|
|
||||||
honor_cipher_order:
|
|
||||||
cipher_suite:
|
|
||||||
|
|
@ -1,38 +0,0 @@
|
||||||
# NOTE: this file contains the default configuration for the 'mysql' hardening
|
|
||||||
# code. If you want to override any settings you must add them to a file
|
|
||||||
# called hardening.yaml in the root directory of your charm using the
|
|
||||||
# name 'mysql' as the root key followed by any of the following with new
|
|
||||||
# values.
|
|
||||||
|
|
||||||
hardening:
|
|
||||||
mysql-conf: /etc/mysql/my.cnf
|
|
||||||
hardening-conf: /etc/mysql/conf.d/hardening.cnf
|
|
||||||
|
|
||||||
security:
|
|
||||||
# @see http://www.symantec.com/connect/articles/securing-mysql-step-step
|
|
||||||
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot
|
|
||||||
chroot: None
|
|
||||||
|
|
||||||
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create
|
|
||||||
safe-user-create: 1
|
|
||||||
|
|
||||||
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth
|
|
||||||
secure-auth: 1
|
|
||||||
|
|
||||||
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links
|
|
||||||
skip-symbolic-links: 1
|
|
||||||
|
|
||||||
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database
|
|
||||||
skip-show-database: True
|
|
||||||
|
|
||||||
# @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile
|
|
||||||
local-infile: 0
|
|
||||||
|
|
||||||
# @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs
|
|
||||||
allow-suspicious-udfs: 0
|
|
||||||
|
|
||||||
# @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges
|
|
||||||
automatic-sp-privileges: 0
|
|
||||||
|
|
||||||
# @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv
|
|
||||||
secure-file-priv: /tmp
|
|
||||||
|
|
@ -1,15 +0,0 @@
|
||||||
# NOTE: this schema must contain all valid keys from its associated defaults
|
|
||||||
# file. It is used to validate user-provided overrides.
|
|
||||||
hardening:
|
|
||||||
mysql-conf:
|
|
||||||
hardening-conf:
|
|
||||||
security:
|
|
||||||
chroot:
|
|
||||||
safe-user-create:
|
|
||||||
secure-auth:
|
|
||||||
skip-symbolic-links:
|
|
||||||
skip-show-database:
|
|
||||||
local-infile:
|
|
||||||
allow-suspicious-udfs:
|
|
||||||
automatic-sp-privileges:
|
|
||||||
secure-file-priv:
|
|
||||||
|
|
@ -1,68 +0,0 @@
|
||||||
# NOTE: this file contains the default configuration for the 'os' hardening
|
|
||||||
# code. If you want to override any settings you must add them to a file
|
|
||||||
# called hardening.yaml in the root directory of your charm using the
|
|
||||||
# name 'os' as the root key followed by any of the following with new
|
|
||||||
# values.
|
|
||||||
|
|
||||||
general:
|
|
||||||
desktop_enable: False # (type:boolean)
|
|
||||||
|
|
||||||
environment:
|
|
||||||
extra_user_paths: []
|
|
||||||
umask: 027
|
|
||||||
root_path: /
|
|
||||||
|
|
||||||
auth:
|
|
||||||
pw_max_age: 60
|
|
||||||
# discourage password cycling
|
|
||||||
pw_min_age: 7
|
|
||||||
retries: 5
|
|
||||||
lockout_time: 600
|
|
||||||
timeout: 60
|
|
||||||
allow_homeless: False # (type:boolean)
|
|
||||||
pam_passwdqc_enable: True # (type:boolean)
|
|
||||||
pam_passwdqc_options: 'min=disabled,disabled,16,12,8'
|
|
||||||
root_ttys:
|
|
||||||
console
|
|
||||||
tty1
|
|
||||||
tty2
|
|
||||||
tty3
|
|
||||||
tty4
|
|
||||||
tty5
|
|
||||||
tty6
|
|
||||||
uid_min: 1000
|
|
||||||
gid_min: 1000
|
|
||||||
sys_uid_min: 100
|
|
||||||
sys_uid_max: 999
|
|
||||||
sys_gid_min: 100
|
|
||||||
sys_gid_max: 999
|
|
||||||
chfn_restrict:
|
|
||||||
|
|
||||||
security:
|
|
||||||
users_allow: []
|
|
||||||
suid_sgid_enforce: True # (type:boolean)
|
|
||||||
# user-defined blacklist and whitelist
|
|
||||||
suid_sgid_blacklist: []
|
|
||||||
suid_sgid_whitelist: []
|
|
||||||
# if this is True, remove any suid/sgid bits from files that were not in the whitelist
|
|
||||||
suid_sgid_dry_run_on_unknown: False # (type:boolean)
|
|
||||||
suid_sgid_remove_from_unknown: False # (type:boolean)
|
|
||||||
# remove packages with known issues
|
|
||||||
packages_clean: True # (type:boolean)
|
|
||||||
packages_list:
|
|
||||||
xinetd
|
|
||||||
inetd
|
|
||||||
ypserv
|
|
||||||
telnet-server
|
|
||||||
rsh-server
|
|
||||||
rsync
|
|
||||||
kernel_enable_module_loading: True # (type:boolean)
|
|
||||||
kernel_enable_core_dump: False # (type:boolean)
|
|
||||||
ssh_tmout: 300
|
|
||||||
|
|
||||||
sysctl:
|
|
||||||
kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128
|
|
||||||
kernel_enable_sysrq: False # (type:boolean)
|
|
||||||
forwarding: False # (type:boolean)
|
|
||||||
ipv6_enable: False # (type:boolean)
|
|
||||||
arp_restricted: True # (type:boolean)
|
|
||||||
|
|
@ -1,43 +0,0 @@
|
||||||
# NOTE: this schema must contain all valid keys from its associated defaults
|
|
||||||
# file. It is used to validate user-provided overrides.
|
|
||||||
general:
|
|
||||||
desktop_enable:
|
|
||||||
environment:
|
|
||||||
extra_user_paths:
|
|
||||||
umask:
|
|
||||||
root_path:
|
|
||||||
auth:
|
|
||||||
pw_max_age:
|
|
||||||
pw_min_age:
|
|
||||||
retries:
|
|
||||||
lockout_time:
|
|
||||||
timeout:
|
|
||||||
allow_homeless:
|
|
||||||
pam_passwdqc_enable:
|
|
||||||
pam_passwdqc_options:
|
|
||||||
root_ttys:
|
|
||||||
uid_min:
|
|
||||||
gid_min:
|
|
||||||
sys_uid_min:
|
|
||||||
sys_uid_max:
|
|
||||||
sys_gid_min:
|
|
||||||
sys_gid_max:
|
|
||||||
chfn_restrict:
|
|
||||||
security:
|
|
||||||
users_allow:
|
|
||||||
suid_sgid_enforce:
|
|
||||||
suid_sgid_blacklist:
|
|
||||||
suid_sgid_whitelist:
|
|
||||||
suid_sgid_dry_run_on_unknown:
|
|
||||||
suid_sgid_remove_from_unknown:
|
|
||||||
packages_clean:
|
|
||||||
packages_list:
|
|
||||||
kernel_enable_module_loading:
|
|
||||||
kernel_enable_core_dump:
|
|
||||||
ssh_tmout:
|
|
||||||
sysctl:
|
|
||||||
kernel_secure_sysrq:
|
|
||||||
kernel_enable_sysrq:
|
|
||||||
forwarding:
|
|
||||||
ipv6_enable:
|
|
||||||
arp_restricted:
|
|
||||||
|
|
@ -1,49 +0,0 @@
|
||||||
# NOTE: this file contains the default configuration for the 'ssh' hardening
|
|
||||||
# code. If you want to override any settings you must add them to a file
|
|
||||||
# called hardening.yaml in the root directory of your charm using the
|
|
||||||
# name 'ssh' as the root key followed by any of the following with new
|
|
||||||
# values.
|
|
||||||
|
|
||||||
common:
|
|
||||||
service_name: 'ssh'
|
|
||||||
network_ipv6_enable: False # (type:boolean)
|
|
||||||
ports: [22]
|
|
||||||
remote_hosts: []
|
|
||||||
|
|
||||||
client:
|
|
||||||
package: 'openssh-client'
|
|
||||||
cbc_required: False # (type:boolean)
|
|
||||||
weak_hmac: False # (type:boolean)
|
|
||||||
weak_kex: False # (type:boolean)
|
|
||||||
roaming: False
|
|
||||||
password_authentication: 'no'
|
|
||||||
|
|
||||||
server:
|
|
||||||
host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key',
|
|
||||||
'/etc/ssh/ssh_host_ecdsa_key']
|
|
||||||
cbc_required: False # (type:boolean)
|
|
||||||
weak_hmac: False # (type:boolean)
|
|
||||||
weak_kex: False # (type:boolean)
|
|
||||||
allow_root_with_key: False # (type:boolean)
|
|
||||||
allow_tcp_forwarding: 'no'
|
|
||||||
allow_agent_forwarding: 'no'
|
|
||||||
allow_x11_forwarding: 'no'
|
|
||||||
use_privilege_separation: 'sandbox'
|
|
||||||
listen_to: ['0.0.0.0']
|
|
||||||
use_pam: 'no'
|
|
||||||
package: 'openssh-server'
|
|
||||||
password_authentication: 'no'
|
|
||||||
alive_interval: '600'
|
|
||||||
alive_count: '3'
|
|
||||||
sftp_enable: False # (type:boolean)
|
|
||||||
sftp_group: 'sftponly'
|
|
||||||
sftp_chroot: '/home/%u'
|
|
||||||
deny_users: []
|
|
||||||
allow_users: []
|
|
||||||
deny_groups: []
|
|
||||||
allow_groups: []
|
|
||||||
print_motd: 'no'
|
|
||||||
print_last_log: 'no'
|
|
||||||
use_dns: 'no'
|
|
||||||
max_auth_tries: 2
|
|
||||||
max_sessions: 10
|
|
||||||
|
|
@ -1,42 +0,0 @@
|
||||||
# NOTE: this schema must contain all valid keys from it's associated defaults
|
|
||||||
# file. It is used to validate user-provided overrides.
|
|
||||||
common:
|
|
||||||
service_name:
|
|
||||||
network_ipv6_enable:
|
|
||||||
ports:
|
|
||||||
remote_hosts:
|
|
||||||
client:
|
|
||||||
package:
|
|
||||||
cbc_required:
|
|
||||||
weak_hmac:
|
|
||||||
weak_kex:
|
|
||||||
roaming:
|
|
||||||
password_authentication:
|
|
||||||
server:
|
|
||||||
host_key_files:
|
|
||||||
cbc_required:
|
|
||||||
weak_hmac:
|
|
||||||
weak_kex:
|
|
||||||
allow_root_with_key:
|
|
||||||
allow_tcp_forwarding:
|
|
||||||
allow_agent_forwarding:
|
|
||||||
allow_x11_forwarding:
|
|
||||||
use_privilege_separation:
|
|
||||||
listen_to:
|
|
||||||
use_pam:
|
|
||||||
package:
|
|
||||||
password_authentication:
|
|
||||||
alive_interval:
|
|
||||||
alive_count:
|
|
||||||
sftp_enable:
|
|
||||||
sftp_group:
|
|
||||||
sftp_chroot:
|
|
||||||
deny_users:
|
|
||||||
allow_users:
|
|
||||||
deny_groups:
|
|
||||||
allow_groups:
|
|
||||||
print_motd:
|
|
||||||
print_last_log:
|
|
||||||
use_dns:
|
|
||||||
max_auth_tries:
|
|
||||||
max_sessions:
|
|
||||||
|
|
@ -1,96 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from collections import OrderedDict
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config,
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
WARNING,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.host.checks import run_os_checks
|
|
||||||
from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks
|
|
||||||
from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks
|
|
||||||
from charmhelpers.contrib.hardening.apache.checks import run_apache_checks
|
|
||||||
|
|
||||||
_DISABLE_HARDENING_FOR_UNIT_TEST = False
|
|
||||||
|
|
||||||
|
|
||||||
def harden(overrides=None):
|
|
||||||
"""Hardening decorator.
|
|
||||||
|
|
||||||
This is the main entry point for running the hardening stack. In order to
|
|
||||||
run modules of the stack you must add this decorator to charm hook(s) and
|
|
||||||
ensure that your charm config.yaml contains the 'harden' option set to
|
|
||||||
one or more of the supported modules. Setting these will cause the
|
|
||||||
corresponding hardening code to be run when the hook fires.
|
|
||||||
|
|
||||||
This decorator can and should be applied to more than one hook or function
|
|
||||||
such that hardening modules are called multiple times. This is because
|
|
||||||
subsequent calls will perform auditing checks that will report any changes
|
|
||||||
to resources hardened by the first run (and possibly perform compliance
|
|
||||||
actions as a result of any detected infractions).
|
|
||||||
|
|
||||||
:param overrides: Optional list of stack modules used to override those
|
|
||||||
provided with 'harden' config.
|
|
||||||
:returns: Returns value returned by decorated function once executed.
|
|
||||||
"""
|
|
||||||
if overrides is None:
|
|
||||||
overrides = []
|
|
||||||
|
|
||||||
def _harden_inner1(f):
|
|
||||||
# As this has to be py2.7 compat, we can't use nonlocal. Use a trick
|
|
||||||
# to capture the dictionary that can then be updated.
|
|
||||||
_logged = {'done': False}
|
|
||||||
|
|
||||||
def _harden_inner2(*args, **kwargs):
|
|
||||||
# knock out hardening via a config var; normally it won't get
|
|
||||||
# disabled.
|
|
||||||
if _DISABLE_HARDENING_FOR_UNIT_TEST:
|
|
||||||
return f(*args, **kwargs)
|
|
||||||
if not _logged['done']:
|
|
||||||
log("Hardening function '%s'" % (f.__name__), level=DEBUG)
|
|
||||||
_logged['done'] = True
|
|
||||||
RUN_CATALOG = OrderedDict([('os', run_os_checks),
|
|
||||||
('ssh', run_ssh_checks),
|
|
||||||
('mysql', run_mysql_checks),
|
|
||||||
('apache', run_apache_checks)])
|
|
||||||
|
|
||||||
enabled = overrides[:] or (config("harden") or "").split()
|
|
||||||
if enabled:
|
|
||||||
modules_to_run = []
|
|
||||||
# modules will always be performed in the following order
|
|
||||||
for module, func in six.iteritems(RUN_CATALOG):
|
|
||||||
if module in enabled:
|
|
||||||
enabled.remove(module)
|
|
||||||
modules_to_run.append(func)
|
|
||||||
|
|
||||||
if enabled:
|
|
||||||
log("Unknown hardening modules '%s' - ignoring" %
|
|
||||||
(', '.join(enabled)), level=WARNING)
|
|
||||||
|
|
||||||
for hardener in modules_to_run:
|
|
||||||
log("Executing hardening module '%s'" %
|
|
||||||
(hardener.__name__), level=DEBUG)
|
|
||||||
hardener()
|
|
||||||
else:
|
|
||||||
log("No hardening applied to '%s'" % (f.__name__), level=DEBUG)
|
|
||||||
|
|
||||||
return f(*args, **kwargs)
|
|
||||||
return _harden_inner2
|
|
||||||
|
|
||||||
return _harden_inner1
|
|
||||||
|
|
@ -1,17 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from os import path
|
|
||||||
|
|
||||||
TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
|
|
||||||
|
|
@ -1,48 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.host.checks import (
|
|
||||||
apt,
|
|
||||||
limits,
|
|
||||||
login,
|
|
||||||
minimize_access,
|
|
||||||
pam,
|
|
||||||
profile,
|
|
||||||
securetty,
|
|
||||||
suid_sgid,
|
|
||||||
sysctl
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def run_os_checks():
|
|
||||||
log("Starting OS hardening checks.", level=DEBUG)
|
|
||||||
checks = apt.get_audits()
|
|
||||||
checks.extend(limits.get_audits())
|
|
||||||
checks.extend(login.get_audits())
|
|
||||||
checks.extend(minimize_access.get_audits())
|
|
||||||
checks.extend(pam.get_audits())
|
|
||||||
checks.extend(profile.get_audits())
|
|
||||||
checks.extend(securetty.get_audits())
|
|
||||||
checks.extend(suid_sgid.get_audits())
|
|
||||||
checks.extend(sysctl.get_audits())
|
|
||||||
|
|
||||||
for check in checks:
|
|
||||||
log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
|
|
||||||
check.ensure_compliance()
|
|
||||||
|
|
||||||
log("OS hardening checks complete.", level=DEBUG)
|
|
||||||
|
|
@ -1,37 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.utils import get_settings
|
|
||||||
from charmhelpers.contrib.hardening.audits.apt import (
|
|
||||||
AptConfig,
|
|
||||||
RestrictedPackages,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening apt audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated',
|
|
||||||
'expected': 'false'}])]
|
|
||||||
|
|
||||||
settings = get_settings('os')
|
|
||||||
clean_packages = settings['security']['packages_clean']
|
|
||||||
if clean_packages:
|
|
||||||
security_packages = settings['security']['packages_list']
|
|
||||||
if security_packages:
|
|
||||||
audits.append(RestrictedPackages(security_packages))
|
|
||||||
|
|
||||||
return audits
|
|
||||||
|
|
@ -1,53 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import (
|
|
||||||
DirectoryPermissionAudit,
|
|
||||||
TemplatedFile,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening security limits audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = []
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
|
|
||||||
# Ensure that the /etc/security/limits.d directory is only writable
|
|
||||||
# by the root user, but others can execute and read.
|
|
||||||
audits.append(DirectoryPermissionAudit('/etc/security/limits.d',
|
|
||||||
user='root', group='root',
|
|
||||||
mode=0o755))
|
|
||||||
|
|
||||||
# If core dumps are not enabled, then don't allow core dumps to be
|
|
||||||
# created as they may contain sensitive information.
|
|
||||||
if not settings['security']['kernel_enable_core_dump']:
|
|
||||||
audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf',
|
|
||||||
SecurityLimitsContext(),
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
user='root', group='root', mode=0o0440))
|
|
||||||
return audits
|
|
||||||
|
|
||||||
|
|
||||||
class SecurityLimitsContext(object):
|
|
||||||
|
|
||||||
def __call__(self):
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
ctxt = {'disable_core_dump':
|
|
||||||
not settings['security']['kernel_enable_core_dump']}
|
|
||||||
return ctxt
|
|
||||||
|
|
@ -1,65 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from six import string_types
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import TemplatedFile
|
|
||||||
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening login.defs audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = [TemplatedFile('/etc/login.defs', LoginContext(),
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
user='root', group='root', mode=0o0444)]
|
|
||||||
return audits
|
|
||||||
|
|
||||||
|
|
||||||
class LoginContext(object):
|
|
||||||
|
|
||||||
def __call__(self):
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
|
|
||||||
# Octal numbers in yaml end up being turned into decimal,
|
|
||||||
# so check if the umask is entered as a string (e.g. '027')
|
|
||||||
# or as an octal umask as we know it (e.g. 002). If its not
|
|
||||||
# a string assume it to be octal and turn it into an octal
|
|
||||||
# string.
|
|
||||||
umask = settings['environment']['umask']
|
|
||||||
if not isinstance(umask, string_types):
|
|
||||||
umask = '%s' % oct(umask)
|
|
||||||
|
|
||||||
ctxt = {
|
|
||||||
'additional_user_paths':
|
|
||||||
settings['environment']['extra_user_paths'],
|
|
||||||
'umask': umask,
|
|
||||||
'pwd_max_age': settings['auth']['pw_max_age'],
|
|
||||||
'pwd_min_age': settings['auth']['pw_min_age'],
|
|
||||||
'uid_min': settings['auth']['uid_min'],
|
|
||||||
'sys_uid_min': settings['auth']['sys_uid_min'],
|
|
||||||
'sys_uid_max': settings['auth']['sys_uid_max'],
|
|
||||||
'gid_min': settings['auth']['gid_min'],
|
|
||||||
'sys_gid_min': settings['auth']['sys_gid_min'],
|
|
||||||
'sys_gid_max': settings['auth']['sys_gid_max'],
|
|
||||||
'login_retries': settings['auth']['retries'],
|
|
||||||
'login_timeout': settings['auth']['timeout'],
|
|
||||||
'chfn_restrict': settings['auth']['chfn_restrict'],
|
|
||||||
'allow_login_without_home': settings['auth']['allow_homeless']
|
|
||||||
}
|
|
||||||
|
|
||||||
return ctxt
|
|
||||||
|
|
@ -1,50 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import (
|
|
||||||
FilePermissionAudit,
|
|
||||||
ReadOnly,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening access audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = []
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
|
|
||||||
# Remove write permissions from $PATH folders for all regular users.
|
|
||||||
# This prevents changing system-wide commands from normal users.
|
|
||||||
path_folders = {'/usr/local/sbin',
|
|
||||||
'/usr/local/bin',
|
|
||||||
'/usr/sbin',
|
|
||||||
'/usr/bin',
|
|
||||||
'/bin'}
|
|
||||||
extra_user_paths = settings['environment']['extra_user_paths']
|
|
||||||
path_folders.update(extra_user_paths)
|
|
||||||
audits.append(ReadOnly(path_folders))
|
|
||||||
|
|
||||||
# Only allow the root user to have access to the shadow file.
|
|
||||||
audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600))
|
|
||||||
|
|
||||||
if 'change_user' not in settings['security']['users_allow']:
|
|
||||||
# su should only be accessible to user and group root, unless it is
|
|
||||||
# expressly defined to allow users to change to root via the
|
|
||||||
# security_users_allow config option.
|
|
||||||
audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750))
|
|
||||||
|
|
||||||
return audits
|
|
||||||
|
|
@ -1,132 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from subprocess import (
|
|
||||||
check_output,
|
|
||||||
CalledProcessError,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
ERROR,
|
|
||||||
)
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
apt_install,
|
|
||||||
apt_purge,
|
|
||||||
apt_update,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import (
|
|
||||||
TemplatedFile,
|
|
||||||
DeletedFile,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening PAM authentication audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = []
|
|
||||||
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
|
|
||||||
if settings['auth']['pam_passwdqc_enable']:
|
|
||||||
audits.append(PasswdqcPAM('/etc/passwdqc.conf'))
|
|
||||||
|
|
||||||
if settings['auth']['retries']:
|
|
||||||
audits.append(Tally2PAM('/usr/share/pam-configs/tally2'))
|
|
||||||
else:
|
|
||||||
audits.append(DeletedFile('/usr/share/pam-configs/tally2'))
|
|
||||||
|
|
||||||
return audits
|
|
||||||
|
|
||||||
|
|
||||||
class PasswdqcPAMContext(object):
|
|
||||||
|
|
||||||
def __call__(self):
|
|
||||||
ctxt = {}
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
|
|
||||||
ctxt['auth_pam_passwdqc_options'] = \
|
|
||||||
settings['auth']['pam_passwdqc_options']
|
|
||||||
|
|
||||||
return ctxt
|
|
||||||
|
|
||||||
|
|
||||||
class PasswdqcPAM(TemplatedFile):
|
|
||||||
"""The PAM Audit verifies the linux PAM settings."""
|
|
||||||
def __init__(self, path):
|
|
||||||
super(PasswdqcPAM, self).__init__(path=path,
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
context=PasswdqcPAMContext(),
|
|
||||||
user='root',
|
|
||||||
group='root',
|
|
||||||
mode=0o0640)
|
|
||||||
|
|
||||||
def pre_write(self):
|
|
||||||
# Always remove?
|
|
||||||
for pkg in ['libpam-ccreds', 'libpam-cracklib']:
|
|
||||||
log("Purging package '%s'" % pkg, level=DEBUG),
|
|
||||||
apt_purge(pkg)
|
|
||||||
|
|
||||||
apt_update(fatal=True)
|
|
||||||
for pkg in ['libpam-passwdqc']:
|
|
||||||
log("Installing package '%s'" % pkg, level=DEBUG),
|
|
||||||
apt_install(pkg)
|
|
||||||
|
|
||||||
def post_write(self):
|
|
||||||
"""Updates the PAM configuration after the file has been written"""
|
|
||||||
try:
|
|
||||||
check_output(['pam-auth-update', '--package'])
|
|
||||||
except CalledProcessError as e:
|
|
||||||
log('Error calling pam-auth-update: %s' % e, level=ERROR)
|
|
||||||
|
|
||||||
|
|
||||||
class Tally2PAMContext(object):
|
|
||||||
|
|
||||||
def __call__(self):
|
|
||||||
ctxt = {}
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
|
|
||||||
ctxt['auth_lockout_time'] = settings['auth']['lockout_time']
|
|
||||||
ctxt['auth_retries'] = settings['auth']['retries']
|
|
||||||
|
|
||||||
return ctxt
|
|
||||||
|
|
||||||
|
|
||||||
class Tally2PAM(TemplatedFile):
|
|
||||||
"""The PAM Audit verifies the linux PAM settings."""
|
|
||||||
def __init__(self, path):
|
|
||||||
super(Tally2PAM, self).__init__(path=path,
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
context=Tally2PAMContext(),
|
|
||||||
user='root',
|
|
||||||
group='root',
|
|
||||||
mode=0o0640)
|
|
||||||
|
|
||||||
def pre_write(self):
|
|
||||||
# Always remove?
|
|
||||||
apt_purge('libpam-ccreds')
|
|
||||||
apt_update(fatal=True)
|
|
||||||
apt_install('libpam-modules')
|
|
||||||
|
|
||||||
def post_write(self):
|
|
||||||
"""Updates the PAM configuration after the file has been written"""
|
|
||||||
try:
|
|
||||||
check_output(['pam-auth-update', '--package'])
|
|
||||||
except CalledProcessError as e:
|
|
||||||
log('Error calling pam-auth-update: %s' % e, level=ERROR)
|
|
||||||
|
|
@ -1,49 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import TemplatedFile
|
|
||||||
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening profile audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = []
|
|
||||||
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
# If core dumps are not enabled, then don't allow core dumps to be
|
|
||||||
# created as they may contain sensitive information.
|
|
||||||
if not settings['security']['kernel_enable_core_dump']:
|
|
||||||
audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh',
|
|
||||||
ProfileContext(),
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
mode=0o0755, user='root', group='root'))
|
|
||||||
if settings['security']['ssh_tmout']:
|
|
||||||
audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh',
|
|
||||||
ProfileContext(),
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
mode=0o0644, user='root', group='root'))
|
|
||||||
return audits
|
|
||||||
|
|
||||||
|
|
||||||
class ProfileContext(object):
|
|
||||||
|
|
||||||
def __call__(self):
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
ctxt = {'ssh_tmout':
|
|
||||||
settings['security']['ssh_tmout']}
|
|
||||||
return ctxt
|
|
||||||
|
|
@ -1,37 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import TemplatedFile
|
|
||||||
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening Secure TTY audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = []
|
|
||||||
audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(),
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
mode=0o0400, user='root', group='root'))
|
|
||||||
return audits
|
|
||||||
|
|
||||||
|
|
||||||
class SecureTTYContext(object):
|
|
||||||
|
|
||||||
def __call__(self):
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
ctxt = {'ttys': settings['auth']['root_ttys']}
|
|
||||||
return ctxt
|
|
||||||
|
|
@ -1,129 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',
|
|
||||||
'/usr/libexec/openssh/ssh-keysign',
|
|
||||||
'/usr/lib/openssh/ssh-keysign',
|
|
||||||
'/sbin/netreport',
|
|
||||||
'/usr/sbin/usernetctl',
|
|
||||||
'/usr/sbin/userisdnctl',
|
|
||||||
'/usr/sbin/pppd',
|
|
||||||
'/usr/bin/lockfile',
|
|
||||||
'/usr/bin/mail-lock',
|
|
||||||
'/usr/bin/mail-unlock',
|
|
||||||
'/usr/bin/mail-touchlock',
|
|
||||||
'/usr/bin/dotlockfile',
|
|
||||||
'/usr/bin/arping',
|
|
||||||
'/usr/sbin/uuidd',
|
|
||||||
'/usr/bin/mtr',
|
|
||||||
'/usr/lib/evolution/camel-lock-helper-1.2',
|
|
||||||
'/usr/lib/pt_chown',
|
|
||||||
'/usr/lib/eject/dmcrypt-get-device',
|
|
||||||
'/usr/lib/mc/cons.saver']
|
|
||||||
|
|
||||||
WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount',
|
|
||||||
'/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at',
|
|
||||||
'/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp',
|
|
||||||
'/usr/bin/passwd', '/usr/bin/ssh-agent',
|
|
||||||
'/usr/libexec/utempter/utempter', '/usr/sbin/lockdev',
|
|
||||||
'/usr/sbin/sendmail.sendmail', '/usr/bin/expiry',
|
|
||||||
'/bin/ping6', '/usr/bin/traceroute6.iputils',
|
|
||||||
'/sbin/mount.nfs', '/sbin/umount.nfs',
|
|
||||||
'/sbin/mount.nfs4', '/sbin/umount.nfs4',
|
|
||||||
'/usr/bin/crontab',
|
|
||||||
'/usr/bin/wall', '/usr/bin/write',
|
|
||||||
'/usr/bin/screen',
|
|
||||||
'/usr/bin/mlocate',
|
|
||||||
'/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh',
|
|
||||||
'/bin/fusermount',
|
|
||||||
'/usr/bin/pkexec',
|
|
||||||
'/usr/bin/sudo', '/usr/bin/sudoedit',
|
|
||||||
'/usr/sbin/postdrop', '/usr/sbin/postqueue',
|
|
||||||
'/usr/sbin/suexec',
|
|
||||||
'/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth',
|
|
||||||
'/usr/kerberos/bin/ksu',
|
|
||||||
'/usr/sbin/ccreds_validate',
|
|
||||||
'/usr/bin/Xorg',
|
|
||||||
'/usr/bin/X',
|
|
||||||
'/usr/lib/dbus-1.0/dbus-daemon-launch-helper',
|
|
||||||
'/usr/lib/vte/gnome-pty-helper',
|
|
||||||
'/usr/lib/libvte9/gnome-pty-helper',
|
|
||||||
'/usr/lib/libvte-2.90-9/gnome-pty-helper']
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening suid/sgid audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
checks = []
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
if not settings['security']['suid_sgid_enforce']:
|
|
||||||
log("Skipping suid/sgid hardening", level=INFO)
|
|
||||||
return checks
|
|
||||||
|
|
||||||
# Build the blacklist and whitelist of files for suid/sgid checks.
|
|
||||||
# There are a total of 4 lists:
|
|
||||||
# 1. the system blacklist
|
|
||||||
# 2. the system whitelist
|
|
||||||
# 3. the user blacklist
|
|
||||||
# 4. the user whitelist
|
|
||||||
#
|
|
||||||
# The blacklist is the set of paths which should NOT have the suid/sgid bit
|
|
||||||
# set and the whitelist is the set of paths which MAY have the suid/sgid
|
|
||||||
# bit setl. The user whitelist/blacklist effectively override the system
|
|
||||||
# whitelist/blacklist.
|
|
||||||
u_b = settings['security']['suid_sgid_blacklist']
|
|
||||||
u_w = settings['security']['suid_sgid_whitelist']
|
|
||||||
|
|
||||||
blacklist = set(BLACKLIST) - set(u_w + u_b)
|
|
||||||
whitelist = set(WHITELIST) - set(u_b + u_w)
|
|
||||||
|
|
||||||
checks.append(NoSUIDSGIDAudit(blacklist))
|
|
||||||
|
|
||||||
dry_run = settings['security']['suid_sgid_dry_run_on_unknown']
|
|
||||||
|
|
||||||
if settings['security']['suid_sgid_remove_from_unknown'] or dry_run:
|
|
||||||
# If the policy is a dry_run (e.g. complain only) or remove unknown
|
|
||||||
# suid/sgid bits then find all of the paths which have the suid/sgid
|
|
||||||
# bit set and then remove the whitelisted paths.
|
|
||||||
root_path = settings['environment']['root_path']
|
|
||||||
unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist)
|
|
||||||
checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run))
|
|
||||||
|
|
||||||
return checks
|
|
||||||
|
|
||||||
|
|
||||||
def find_paths_with_suid_sgid(root_path):
|
|
||||||
"""Finds all paths/files which have an suid/sgid bit enabled.
|
|
||||||
|
|
||||||
Starting with the root_path, this will recursively find all paths which
|
|
||||||
have an suid or sgid bit set.
|
|
||||||
"""
|
|
||||||
cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000',
|
|
||||||
'-type', 'f', '!', '-path', '/proc/*', '-print']
|
|
||||||
|
|
||||||
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
|
||||||
out, _ = p.communicate()
|
|
||||||
return set(out.split('\n'))
|
|
||||||
|
|
@ -1,209 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import platform
|
|
||||||
import re
|
|
||||||
import six
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
WARNING,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import (
|
|
||||||
FilePermissionAudit,
|
|
||||||
TemplatedFile,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
|
|
||||||
|
|
||||||
|
|
||||||
SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s
|
|
||||||
net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s
|
|
||||||
net.ipv4.conf.all.rp_filter=1
|
|
||||||
net.ipv4.conf.default.rp_filter=1
|
|
||||||
net.ipv4.icmp_echo_ignore_broadcasts=1
|
|
||||||
net.ipv4.icmp_ignore_bogus_error_responses=1
|
|
||||||
net.ipv4.icmp_ratelimit=100
|
|
||||||
net.ipv4.icmp_ratemask=88089
|
|
||||||
net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s
|
|
||||||
net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s
|
|
||||||
net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s
|
|
||||||
net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s
|
|
||||||
net.ipv4.tcp_rfc1337=1
|
|
||||||
net.ipv4.tcp_syncookies=1
|
|
||||||
net.ipv4.conf.all.shared_media=1
|
|
||||||
net.ipv4.conf.default.shared_media=1
|
|
||||||
net.ipv4.conf.all.accept_source_route=0
|
|
||||||
net.ipv4.conf.default.accept_source_route=0
|
|
||||||
net.ipv4.conf.all.accept_redirects=0
|
|
||||||
net.ipv4.conf.default.accept_redirects=0
|
|
||||||
net.ipv6.conf.all.accept_redirects=0
|
|
||||||
net.ipv6.conf.default.accept_redirects=0
|
|
||||||
net.ipv4.conf.all.secure_redirects=0
|
|
||||||
net.ipv4.conf.default.secure_redirects=0
|
|
||||||
net.ipv4.conf.all.send_redirects=0
|
|
||||||
net.ipv4.conf.default.send_redirects=0
|
|
||||||
net.ipv4.conf.all.log_martians=0
|
|
||||||
net.ipv6.conf.default.router_solicitations=0
|
|
||||||
net.ipv6.conf.default.accept_ra_rtr_pref=0
|
|
||||||
net.ipv6.conf.default.accept_ra_pinfo=0
|
|
||||||
net.ipv6.conf.default.accept_ra_defrtr=0
|
|
||||||
net.ipv6.conf.default.autoconf=0
|
|
||||||
net.ipv6.conf.default.dad_transmits=0
|
|
||||||
net.ipv6.conf.default.max_addresses=1
|
|
||||||
net.ipv6.conf.all.accept_ra=0
|
|
||||||
net.ipv6.conf.default.accept_ra=0
|
|
||||||
kernel.modules_disabled=%(kernel_modules_disabled)s
|
|
||||||
kernel.sysrq=%(kernel_sysrq)s
|
|
||||||
fs.suid_dumpable=%(fs_suid_dumpable)s
|
|
||||||
kernel.randomize_va_space=2
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
    """Get OS hardening sysctl audits.

    :returns: list of audits
    """
    settings = utils.get_settings('os')

    # Always manage the sysctl configuration itself, and make sure
    # /etc/sysctl.conf is root-owned and read-only.
    audits = [
        SysctlConf(),
        FilePermissionAudit('/etc/sysctl.conf',
                            user='root',
                            group='root', mode=0o0440),
    ]

    # When module loading is disabled, the initramfs modules file must be
    # templated (and the initramfs rebuilt) so the required modules are
    # available at boot.
    if not settings['security']['kernel_enable_module_loading']:
        audits.append(ModulesTemplate())

    return audits
|
|
||||||
|
|
||||||
|
|
||||||
class ModulesContext(object):
    """Template context for the initramfs modules template."""

    def __call__(self):
        settings = utils.get_settings('os')
        with open('/proc/cpuinfo', 'r') as fd:
            cpuinfo = fd.readlines()

        # Default the vendor so we never hit a NameError below: not every
        # architecture exposes a vendor_id field in /proc/cpuinfo (e.g. ARM),
        # and the original code left 'vendor' unbound in that case.
        vendor = 'unknown'
        for line in cpuinfo:
            match = re.search(r"^vendor_id\s+:\s+(.+)", line)
            if match:
                vendor = match.group(1)

        # Normalise the well-known x86 vendor strings to the short names the
        # template tests against; anything else is passed through as-is.
        if vendor == "GenuineIntel":
            vendor = "intel"
        elif vendor == "AuthenticAMD":
            vendor = "amd"

        ctxt = {'arch': platform.processor(),
                'cpuVendor': vendor,
                'desktop_enable': settings['general']['desktop_enable']}

        return ctxt
|
|
||||||
|
|
||||||
|
|
||||||
class ModulesTemplate(TemplatedFile):
    """Templated audit for /etc/initramfs-tools/modules.

    NOTE(fix): the original declared ``object`` as the base class while still
    forwarding TemplatedFile-style arguments (including the misspelled
    ``templates_dir``) to ``super().__init__()``, which raises TypeError at
    instantiation. It must derive from TemplatedFile, exactly like the
    sibling SysctlConf class does.
    """

    def __init__(self):
        super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
                                              ModulesContext(),
                                              template_dir=TEMPLATES_DIR,
                                              user='root', group='root',
                                              mode=0o0440)

    def post_write(self):
        # The modules file is consumed when the initramfs is built, so
        # rebuild it whenever the template is (re)written.
        subprocess.check_call(['update-initramfs', '-u'])
|
|
||||||
|
|
||||||
|
|
||||||
class SysCtlHardeningContext(object):
    """Template context mapping sysctl keys to their hardened values."""

    def __call__(self):
        settings = utils.get_settings('os')
        ctxt = {'sysctl': {}}

        log("Applying sysctl settings", level=INFO)
        # Safe defaults for every %-placeholder in SYSCTL_DEFAULTS; the
        # conditionals below relax them according to the operator's settings.
        extras = {'net_ipv4_ip_forward': 0,
                  'net_ipv6_conf_all_forwarding': 0,
                  'net_ipv6_conf_all_disable_ipv6': 1,
                  'net_ipv4_tcp_timestamps': 0,
                  'net_ipv4_conf_all_arp_ignore': 0,
                  'net_ipv4_conf_all_arp_announce': 0,
                  'kernel_sysrq': 0,
                  'fs_suid_dumpable': 0,
                  'kernel_modules_disabled': 1}

        if settings['sysctl']['ipv6_enable']:
            extras['net_ipv6_conf_all_disable_ipv6'] = 0

        if settings['sysctl']['forwarding']:
            extras['net_ipv4_ip_forward'] = 1
            extras['net_ipv6_conf_all_forwarding'] = 1

        if settings['sysctl']['arp_restricted']:
            extras['net_ipv4_conf_all_arp_ignore'] = 1
            extras['net_ipv4_conf_all_arp_announce'] = 2

        if settings['security']['kernel_enable_module_loading']:
            extras['kernel_modules_disabled'] = 0

        if settings['sysctl']['kernel_enable_sysrq']:
            sysrq_val = settings['sysctl']['kernel_secure_sysrq']
            extras['kernel_sysrq'] = sysrq_val

        if settings['security']['kernel_enable_core_dump']:
            extras['fs_suid_dumpable'] = 1

        # Merge the computed values into settings so they can be
        # interpolated into the SYSCTL_DEFAULTS %-template below.
        settings.update(extras)
        for d in (SYSCTL_DEFAULTS % settings).split():
            # Each whitespace-separated token is a "key=value" entry.
            d = d.strip().partition('=')
            key = d[0].strip()
            # Skip keys this kernel does not expose under /proc/sys so the
            # rendered file does not contain settings sysctl cannot apply.
            path = os.path.join('/proc/sys', key.replace('.', '/'))
            if not os.path.exists(path):
                log("Skipping '%s' since '%s' does not exist" % (key, path),
                    level=WARNING)
                continue

            # An empty value after '=' is stored as None.
            ctxt['sysctl'][key] = d[2] or None

        # Translate for python3
        return {'sysctl_settings':
                [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
|
|
||||||
|
|
||||||
|
|
||||||
class SysctlConf(TemplatedFile):
    """An audit check for sysctl settings."""

    def __init__(self):
        self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
        super(SysctlConf, self).__init__(
            self.conffile,
            SysCtlHardeningContext(),
            template_dir=TEMPLATES_DIR,
            user='root',
            group='root',
            mode=0o0440)

    def post_write(self):
        # Load the freshly written settings immediately rather than waiting
        # for the next boot.
        try:
            subprocess.check_call(['sysctl', '-p', self.conffile])
        except subprocess.CalledProcessError as e:
            # NOTE: on some systems if sysctl cannot apply all settings it
            # will return non-zero as well.
            msg = ("sysctl command returned an error (maybe some "
                   "keys could not be set) - %s" % (e))
            log(msg, level=WARNING)
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
{% if disable_core_dump -%}
|
|
||||||
# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information.
|
|
||||||
* hard core 0
|
|
||||||
{% endif %}
|
|
||||||
|
|
@ -1,5 +0,0 @@
|
||||||
TMOUT={{ tmout }}
|
|
||||||
readonly TMOUT
|
|
||||||
export TMOUT
|
|
||||||
|
|
||||||
readonly HISTFILE
|
|
||||||
|
|
@ -1,7 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
{% for key, value in sysctl_settings -%}
|
|
||||||
{{ key }}={{ value }}
|
|
||||||
{% endfor -%}
|
|
||||||
|
|
@ -1,349 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
#
|
|
||||||
# /etc/login.defs - Configuration control definitions for the login package.
|
|
||||||
#
|
|
||||||
# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH.
|
|
||||||
# If unspecified, some arbitrary (and possibly incorrect) value will
|
|
||||||
# be assumed. All other items are optional - if not specified then
|
|
||||||
# the described action or option will be inhibited.
|
|
||||||
#
|
|
||||||
# Comment lines (lines beginning with "#") and blank lines are ignored.
|
|
||||||
#
|
|
||||||
# Modified for Linux. --marekm
|
|
||||||
|
|
||||||
# REQUIRED for useradd/userdel/usermod
|
|
||||||
# Directory where mailboxes reside, _or_ name of file, relative to the
|
|
||||||
# home directory. If you _do_ define MAIL_DIR and MAIL_FILE,
|
|
||||||
# MAIL_DIR takes precedence.
|
|
||||||
#
|
|
||||||
# Essentially:
|
|
||||||
# - MAIL_DIR defines the location of users mail spool files
|
|
||||||
# (for mbox use) by appending the username to MAIL_DIR as defined
|
|
||||||
# below.
|
|
||||||
# - MAIL_FILE defines the location of the users mail spool files as the
|
|
||||||
# fully-qualified filename obtained by prepending the user home
|
|
||||||
# directory before $MAIL_FILE
|
|
||||||
#
|
|
||||||
# NOTE: This is no more used for setting up users MAIL environment variable
|
|
||||||
# which is, starting from shadow 4.0.12-1 in Debian, entirely the
|
|
||||||
# job of the pam_mail PAM modules
|
|
||||||
# See default PAM configuration files provided for
|
|
||||||
# login, su, etc.
|
|
||||||
#
|
|
||||||
# This is a temporary situation: setting these variables will soon
|
|
||||||
# move to /etc/default/useradd and the variables will then be
|
|
||||||
# no more supported
|
|
||||||
MAIL_DIR /var/mail
|
|
||||||
#MAIL_FILE .mail
|
|
||||||
|
|
||||||
#
|
|
||||||
# Enable logging and display of /var/log/faillog login failure info.
|
|
||||||
# This option conflicts with the pam_tally PAM module.
|
|
||||||
#
|
|
||||||
FAILLOG_ENAB yes
|
|
||||||
|
|
||||||
#
|
|
||||||
# Enable display of unknown usernames when login failures are recorded.
|
|
||||||
#
|
|
||||||
# WARNING: Unknown usernames may become world readable.
|
|
||||||
# See #290803 and #298773 for details about how this could become a security
|
|
||||||
# concern
|
|
||||||
LOG_UNKFAIL_ENAB no
|
|
||||||
|
|
||||||
#
|
|
||||||
# Enable logging of successful logins
|
|
||||||
#
|
|
||||||
LOG_OK_LOGINS yes
|
|
||||||
|
|
||||||
#
|
|
||||||
# Enable "syslog" logging of su activity - in addition to sulog file logging.
|
|
||||||
# SYSLOG_SG_ENAB does the same for newgrp and sg.
|
|
||||||
#
|
|
||||||
SYSLOG_SU_ENAB yes
|
|
||||||
SYSLOG_SG_ENAB yes
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, all su activity is logged to this file.
|
|
||||||
#
|
|
||||||
#SULOG_FILE /var/log/sulog
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, file which maps tty line to TERM environment parameter.
|
|
||||||
# Each line of the file is in a format something like "vt100 tty01".
|
|
||||||
#
|
|
||||||
#TTYTYPE_FILE /etc/ttytype
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, login failures will be logged here in a utmp format
|
|
||||||
# last, when invoked as lastb, will read /var/log/btmp, so...
|
|
||||||
#
|
|
||||||
FTMP_FILE /var/log/btmp
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, the command name to display when running "su -". For
|
|
||||||
# example, if this is defined as "su" then a "ps" will display the
|
|
||||||
# command is "-su". If not defined, then "ps" would display the
|
|
||||||
# name of the shell actually being run, e.g. something like "-sh".
|
|
||||||
#
|
|
||||||
SU_NAME su
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, file which inhibits all the usual chatter during the login
|
|
||||||
# sequence. If a full pathname, then hushed mode will be enabled if the
|
|
||||||
# user's name or shell are found in the file. If not a full pathname, then
|
|
||||||
# hushed mode will be enabled if the file exists in the user's home directory.
|
|
||||||
#
|
|
||||||
HUSHLOGIN_FILE .hushlogin
|
|
||||||
#HUSHLOGIN_FILE /etc/hushlogins
|
|
||||||
|
|
||||||
#
|
|
||||||
# *REQUIRED* The default PATH settings, for superuser and normal users.
|
|
||||||
#
|
|
||||||
# (they are minimal, add the rest in the shell startup files)
|
|
||||||
ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
|
||||||
ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Terminal permissions
|
|
||||||
#
|
|
||||||
# TTYGROUP Login tty will be assigned this group ownership.
|
|
||||||
# TTYPERM Login tty will be set to this permission.
|
|
||||||
#
|
|
||||||
# If you have a "write" program which is "setgid" to a special group
|
|
||||||
# which owns the terminals, define TTYGROUP to the group number and
|
|
||||||
# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign
|
|
||||||
# TTYPERM to either 622 or 600.
|
|
||||||
#
|
|
||||||
# In Debian /usr/bin/bsd-write or similar programs are setgid tty
|
|
||||||
# However, the default and recommended value for TTYPERM is still 0600
|
|
||||||
# to not allow anyone to write to anyone else console or terminal
|
|
||||||
|
|
||||||
# Users can still allow other people to write them by issuing
|
|
||||||
# the "mesg y" command.
|
|
||||||
|
|
||||||
TTYGROUP tty
|
|
||||||
TTYPERM 0600
|
|
||||||
|
|
||||||
#
|
|
||||||
# Login configuration initializations:
|
|
||||||
#
|
|
||||||
# ERASECHAR Terminal ERASE character ('\010' = backspace).
|
|
||||||
# KILLCHAR Terminal KILL character ('\025' = CTRL/U).
|
|
||||||
# UMASK Default "umask" value.
|
|
||||||
#
|
|
||||||
# The ERASECHAR and KILLCHAR are used only on System V machines.
|
|
||||||
#
|
|
||||||
# UMASK is the default umask value for pam_umask and is used by
|
|
||||||
# useradd and newusers to set the mode of the new home directories.
|
|
||||||
# 022 is the "historical" value in Debian for UMASK
|
|
||||||
# 027, or even 077, could be considered better for privacy
|
|
||||||
# There is no One True Answer here : each sysadmin must make up his/her
|
|
||||||
# mind.
|
|
||||||
#
|
|
||||||
# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
|
|
||||||
# for private user groups, i. e. the uid is the same as gid, and username is
|
|
||||||
# the same as the primary group name: for these, the user permissions will be
|
|
||||||
# used as group permissions, e. g. 022 will become 002.
|
|
||||||
#
|
|
||||||
# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
|
|
||||||
#
|
|
||||||
ERASECHAR 0177
|
|
||||||
KILLCHAR 025
|
|
||||||
UMASK {{ umask }}
|
|
||||||
|
|
||||||
# Enable setting of the umask group bits to be the same as owner bits (examples: `022` -> `002`, `077` -> `007`) for non-root users, if the uid is the same as gid, and username is the same as the primary group name.
|
|
||||||
# If set to yes, userdel will remove the user's group if it contains no more members, and useradd will create by default a group with the name of the user.
|
|
||||||
USERGROUPS_ENAB yes
|
|
||||||
|
|
||||||
#
|
|
||||||
# Password aging controls:
|
|
||||||
#
|
|
||||||
# PASS_MAX_DAYS Maximum number of days a password may be used.
|
|
||||||
# PASS_MIN_DAYS Minimum number of days allowed between password changes.
|
|
||||||
# PASS_WARN_AGE Number of days warning given before a password expires.
|
|
||||||
#
|
|
||||||
PASS_MAX_DAYS {{ pwd_max_age }}
|
|
||||||
PASS_MIN_DAYS {{ pwd_min_age }}
|
|
||||||
PASS_WARN_AGE 7
|
|
||||||
|
|
||||||
#
|
|
||||||
# Min/max values for automatic uid selection in useradd
|
|
||||||
#
|
|
||||||
UID_MIN {{ uid_min }}
|
|
||||||
UID_MAX 60000
|
|
||||||
# System accounts
|
|
||||||
SYS_UID_MIN {{ sys_uid_min }}
|
|
||||||
SYS_UID_MAX {{ sys_uid_max }}
|
|
||||||
|
|
||||||
# Min/max values for automatic gid selection in groupadd
|
|
||||||
GID_MIN {{ gid_min }}
|
|
||||||
GID_MAX 60000
|
|
||||||
# System accounts
|
|
||||||
SYS_GID_MIN {{ sys_gid_min }}
|
|
||||||
SYS_GID_MAX {{ sys_gid_max }}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Max number of login retries if password is bad. This will most likely be
|
|
||||||
# overridden by PAM, since the default pam_unix module has its own built
|
|
||||||
# in of 3 retries. However, this is a safe fallback in case you are using
|
|
||||||
# an authentication module that does not enforce PAM_MAXTRIES.
|
|
||||||
#
|
|
||||||
LOGIN_RETRIES {{ login_retries }}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Max time in seconds for login
|
|
||||||
#
|
|
||||||
LOGIN_TIMEOUT {{ login_timeout }}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Which fields may be changed by regular users using chfn - use
|
|
||||||
# any combination of letters "frwh" (full name, room number, work
|
|
||||||
# phone, home phone). If not defined, no changes are allowed.
|
|
||||||
# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
|
|
||||||
#
|
|
||||||
{% if chfn_restrict %}
|
|
||||||
CHFN_RESTRICT {{ chfn_restrict }}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Should login be allowed if we can't cd to the home directory?
|
|
||||||
# Default is no.
|
|
||||||
#
|
|
||||||
DEFAULT_HOME {% if allow_login_without_home %} yes {% else %} no {% endif %}
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, this command is run when removing a user.
|
|
||||||
# It should remove any at/cron/print jobs etc. owned by
|
|
||||||
# the user to be removed (passed as the first argument).
|
|
||||||
#
|
|
||||||
#USERDEL_CMD /usr/sbin/userdel_local
|
|
||||||
|
|
||||||
#
|
|
||||||
# Enable setting of the umask group bits to be the same as owner bits
|
|
||||||
# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is
|
|
||||||
# the same as gid, and username is the same as the primary group name.
|
|
||||||
#
|
|
||||||
# If set to yes, userdel will remove the user's group if it contains no
|
|
||||||
# more members, and useradd will create by default a group with the name
|
|
||||||
# of the user.
|
|
||||||
#
|
|
||||||
USERGROUPS_ENAB yes
|
|
||||||
|
|
||||||
#
|
|
||||||
# Instead of the real user shell, the program specified by this parameter
|
|
||||||
# will be launched, although its visible name (argv[0]) will be the shell's.
|
|
||||||
# The program may do whatever it wants (logging, additional authentication,
|
|
||||||
# banner, ...) before running the actual shell.
|
|
||||||
#
|
|
||||||
# FAKE_SHELL /bin/fakeshell
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, either full pathname of a file containing device names or
|
|
||||||
# a ":" delimited list of device names. Root logins will be allowed only
|
|
||||||
# upon these devices.
|
|
||||||
#
|
|
||||||
# This variable is used by login and su.
|
|
||||||
#
|
|
||||||
#CONSOLE /etc/consoles
|
|
||||||
#CONSOLE console:tty01:tty02:tty03:tty04
|
|
||||||
|
|
||||||
#
|
|
||||||
# List of groups to add to the user's supplementary group set
|
|
||||||
# when logging in on the console (as determined by the CONSOLE
|
|
||||||
# setting). Default is none.
|
|
||||||
#
|
|
||||||
# Use with caution - it is possible for users to gain permanent
|
|
||||||
# access to these groups, even when not logged in on the console.
|
|
||||||
# How to do it is left as an exercise for the reader...
|
|
||||||
#
|
|
||||||
# This variable is used by login and su.
|
|
||||||
#
|
|
||||||
#CONSOLE_GROUPS floppy:audio:cdrom
|
|
||||||
|
|
||||||
#
|
|
||||||
# If set to "yes", new passwords will be encrypted using the MD5-based
|
|
||||||
# algorithm compatible with the one used by recent releases of FreeBSD.
|
|
||||||
# It supports passwords of unlimited length and longer salt strings.
|
|
||||||
# Set to "no" if you need to copy encrypted passwords to other systems
|
|
||||||
# which don't understand the new algorithm. Default is "no".
|
|
||||||
#
|
|
||||||
# This variable is deprecated. You should use ENCRYPT_METHOD.
|
|
||||||
#
|
|
||||||
MD5_CRYPT_ENAB no
|
|
||||||
|
|
||||||
#
|
|
||||||
# If set to MD5 , MD5-based algorithm will be used for encrypting password
|
|
||||||
# If set to SHA256, SHA256-based algorithm will be used for encrypting password
|
|
||||||
# If set to SHA512, SHA512-based algorithm will be used for encrypting password
|
|
||||||
# If set to DES, DES-based algorithm will be used for encrypting password (default)
|
|
||||||
# Overrides the MD5_CRYPT_ENAB option
|
|
||||||
#
|
|
||||||
# Note: It is recommended to use a value consistent with
|
|
||||||
# the PAM modules configuration.
|
|
||||||
#
|
|
||||||
ENCRYPT_METHOD SHA512
|
|
||||||
|
|
||||||
#
|
|
||||||
# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
|
|
||||||
#
|
|
||||||
# Define the number of SHA rounds.
|
|
||||||
# With a lot of rounds, it is more difficult to brute force the password.
|
|
||||||
# But note also that more CPU resources will be needed to authenticate
|
|
||||||
# users.
|
|
||||||
#
|
|
||||||
# If not specified, the libc will choose the default number of rounds (5000).
|
|
||||||
# The values must be inside the 1000-999999999 range.
|
|
||||||
# If only one of the MIN or MAX values is set, then this value will be used.
|
|
||||||
# If MIN > MAX, the highest value will be used.
|
|
||||||
#
|
|
||||||
# SHA_CRYPT_MIN_ROUNDS 5000
|
|
||||||
# SHA_CRYPT_MAX_ROUNDS 5000
|
|
||||||
|
|
||||||
################# OBSOLETED BY PAM ##############
|
|
||||||
# #
|
|
||||||
# These options are now handled by PAM. Please #
|
|
||||||
# edit the appropriate file in /etc/pam.d/ to #
|
|
||||||
# enable the equivalents of them.
|
|
||||||
#
|
|
||||||
###############
|
|
||||||
|
|
||||||
#MOTD_FILE
|
|
||||||
#DIALUPS_CHECK_ENAB
|
|
||||||
#LASTLOG_ENAB
|
|
||||||
#MAIL_CHECK_ENAB
|
|
||||||
#OBSCURE_CHECKS_ENAB
|
|
||||||
#PORTTIME_CHECKS_ENAB
|
|
||||||
#SU_WHEEL_ONLY
|
|
||||||
#CRACKLIB_DICTPATH
|
|
||||||
#PASS_CHANGE_TRIES
|
|
||||||
#PASS_ALWAYS_WARN
|
|
||||||
#ENVIRON_FILE
|
|
||||||
#NOLOGINS_FILE
|
|
||||||
#ISSUE_FILE
|
|
||||||
#PASS_MIN_LEN
|
|
||||||
#PASS_MAX_LEN
|
|
||||||
#ULIMIT
|
|
||||||
#ENV_HZ
|
|
||||||
#CHFN_AUTH
|
|
||||||
#CHSH_AUTH
|
|
||||||
#FAIL_DELAY
|
|
||||||
|
|
||||||
################# OBSOLETED #######################
|
|
||||||
# #
|
|
||||||
# These options are no more handled by shadow. #
|
|
||||||
# #
|
|
||||||
# Shadow utilities will display a warning if they #
|
|
||||||
# still appear. #
|
|
||||||
# #
|
|
||||||
###################################################
|
|
||||||
|
|
||||||
# CLOSE_SESSIONS
|
|
||||||
# LOGIN_STRING
|
|
||||||
# NO_PASSWORD_CONSOLE
|
|
||||||
# QMAIL_DIR
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -1,117 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
# /etc/modules: kernel modules to load at boot time.
|
|
||||||
#
|
|
||||||
# This file contains the names of kernel modules that should be loaded
|
|
||||||
# at boot time, one per line. Lines beginning with "#" are ignored.
|
|
||||||
# Parameters can be specified after the module name.
|
|
||||||
|
|
||||||
# Arch
|
|
||||||
# ----
|
|
||||||
#
|
|
||||||
# Modules for certain builds, containing support modules and some CPU-specific optimizations.
|
|
||||||
|
|
||||||
{% if arch == "x86_64" -%}
|
|
||||||
# Optimize for x86_64 cryptographic features
|
|
||||||
twofish-x86_64-3way
|
|
||||||
twofish-x86_64
|
|
||||||
aes-x86_64
|
|
||||||
salsa20-x86_64
|
|
||||||
blowfish-x86_64
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
{% if cpuVendor == "intel" -%}
|
|
||||||
# Intel-specific optimizations
|
|
||||||
ghash-clmulni-intel
|
|
||||||
aesni-intel
|
|
||||||
kvm-intel
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
{% if cpuVendor == "amd" -%}
|
|
||||||
# AMD-specific optimizations
|
|
||||||
kvm-amd
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
kvm
|
|
||||||
|
|
||||||
|
|
||||||
# Crypto
|
|
||||||
# ------
|
|
||||||
|
|
||||||
# Some core modules which comprise strong cryptography.
|
|
||||||
blowfish_common
|
|
||||||
blowfish_generic
|
|
||||||
ctr
|
|
||||||
cts
|
|
||||||
lrw
|
|
||||||
lzo
|
|
||||||
rmd160
|
|
||||||
rmd256
|
|
||||||
rmd320
|
|
||||||
serpent
|
|
||||||
sha512_generic
|
|
||||||
twofish_common
|
|
||||||
twofish_generic
|
|
||||||
xts
|
|
||||||
zlib
|
|
||||||
|
|
||||||
|
|
||||||
# Drivers
|
|
||||||
# -------
|
|
||||||
|
|
||||||
# Basics
|
|
||||||
lp
|
|
||||||
rtc
|
|
||||||
loop
|
|
||||||
|
|
||||||
# Filesystems
|
|
||||||
ext2
|
|
||||||
btrfs
|
|
||||||
|
|
||||||
{% if desktop_enable -%}
|
|
||||||
# Desktop
|
|
||||||
psmouse
|
|
||||||
snd
|
|
||||||
snd_ac97_codec
|
|
||||||
snd_intel8x0
|
|
||||||
snd_page_alloc
|
|
||||||
snd_pcm
|
|
||||||
snd_timer
|
|
||||||
soundcore
|
|
||||||
usbhid
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
# Lib
|
|
||||||
# ---
|
|
||||||
xz
|
|
||||||
|
|
||||||
|
|
||||||
# Net
|
|
||||||
# ---
|
|
||||||
|
|
||||||
# All packets needed for netfilter rules (ie iptables, ebtables).
|
|
||||||
ip_tables
|
|
||||||
x_tables
|
|
||||||
iptable_filter
|
|
||||||
iptable_nat
|
|
||||||
|
|
||||||
# Targets
|
|
||||||
ipt_LOG
|
|
||||||
ipt_REJECT
|
|
||||||
|
|
||||||
# Modules
|
|
||||||
xt_connlimit
|
|
||||||
xt_tcpudp
|
|
||||||
xt_recent
|
|
||||||
xt_limit
|
|
||||||
xt_conntrack
|
|
||||||
nf_conntrack
|
|
||||||
nf_conntrack_ipv4
|
|
||||||
nf_defrag_ipv4
|
|
||||||
xt_state
|
|
||||||
nf_nat
|
|
||||||
|
|
||||||
# Addons
|
|
||||||
xt_pknock
|
|
||||||
|
|
@ -1,11 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
Name: passwdqc password strength enforcement
|
|
||||||
Default: yes
|
|
||||||
Priority: 1024
|
|
||||||
Conflicts: cracklib
|
|
||||||
Password-Type: Primary
|
|
||||||
Password:
|
|
||||||
requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }}
|
|
||||||
|
|
@ -1,8 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
# Disable core dumps via soft limits for all users. Compliance to this setting
|
|
||||||
# is voluntary and can be modified by users up to a hard limit. This setting is
|
|
||||||
# a sane default.
|
|
||||||
ulimit -S -c 0 > /dev/null 2>&1
|
|
||||||
|
|
@ -1,11 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
# A list of TTYs, from which root can log in
|
|
||||||
# see `man securetty` for reference
|
|
||||||
{% if ttys -%}
|
|
||||||
{% for tty in ttys -%}
|
|
||||||
{{ tty }}
|
|
||||||
{% endfor -%}
|
|
||||||
{% endif -%}
|
|
||||||
|
|
@ -1,14 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
Name: tally2 lockout after failed attempts enforcement
|
|
||||||
Default: yes
|
|
||||||
Priority: 1024
|
|
||||||
Conflicts: cracklib
|
|
||||||
Auth-Type: Primary
|
|
||||||
Auth-Initial:
|
|
||||||
required pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }}
|
|
||||||
Account-Type: Primary
|
|
||||||
Account-Initial:
|
|
||||||
required pam_tally2.so
|
|
||||||
|
|
@ -1,17 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from os import path

# Directory holding the template files shipped with this package, resolved
# relative to this module so it works wherever the package is installed.
TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.mysql.checks import config
|
|
||||||
|
|
||||||
|
|
||||||
def run_mysql_checks():
    """Run all MySQL hardening audits.

    Fetches the configured audits from the config module and asks each one
    to bring the system into compliance.
    """
    log("Starting MySQL hardening checks.", level=DEBUG)

    for audit in config.get_audits():
        log("Running '%s' check" % (audit.__class__.__name__), level=DEBUG)
        audit.ensure_compliance()

    log("MySQL hardening checks complete.", level=DEBUG)
|
|
||||||
|
|
@ -1,87 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import six
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
WARNING,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import (
|
|
||||||
FilePermissionAudit,
|
|
||||||
DirectoryPermissionAudit,
|
|
||||||
TemplatedFile,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
    """Get MySQL hardening config audits.

    :returns: list of audits (empty when mysql is not installed)
    """
    # Skip hardening entirely when no mysql client binary is on PATH.
    if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0:
        log("MySQL does not appear to be installed on this node - "
            "skipping mysql hardening", level=WARNING)
        return []

    hardening = utils.get_settings('mysql')['hardening']
    my_cnf = hardening['mysql-conf']

    return [
        # Main config file must only be readable/writable by root.
        FilePermissionAudit(paths=[my_cnf], user='root',
                            group='root', mode=0o0600),

        # Render the hardening overrides and restart mysql on change.
        TemplatedFile(hardening['hardening-conf'],
                      MySQLConfContext(),
                      TEMPLATES_DIR,
                      mode=0o0750,
                      user='mysql',
                      group='root',
                      service_actions=[{'service': 'mysql',
                                        'actions': ['restart']}]),

        # MySQL and Percona charms do not allow configuration of the
        # data directory, so use the default.
        DirectoryPermissionAudit('/var/lib/mysql',
                                 user='mysql',
                                 group='mysql',
                                 recursive=False,
                                 mode=0o755),

        DirectoryPermissionAudit('/etc/mysql',
                                 user='root',
                                 group='root',
                                 recursive=False,
                                 mode=0o700),
    ]
|
|
||||||
|
|
||||||
|
|
||||||
class MySQLConfContext(object):
    """Defines the set of key/value pairs to set in a mysql config file.

    This context, when called, will return a dictionary containing the
    key/value pairs of setting to specify in the
    /etc/mysql/conf.d/hardening.cnf file.
    """

    def __call__(self):
        security = utils.get_settings('mysql')['security']
        # six.iteritems keeps this working on both python2 and python3.
        return {'mysql_settings': list(six.iteritems(security))}
|
|
||||||
|
|
@ -1,12 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
[mysqld]
|
|
||||||
{% for setting, value in mysql_settings -%}
|
|
||||||
{% if value == 'True' -%}
|
|
||||||
{{ setting }}
|
|
||||||
{% elif value != 'None' and value != None -%}
|
|
||||||
{{ setting }} = {{ value }}
|
|
||||||
{% endif -%}
|
|
||||||
{% endfor -%}
|
|
||||||
|
|
@ -1,17 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from os import path
|
|
||||||
|
|
||||||
TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.ssh.checks import config
|
|
||||||
|
|
||||||
|
|
||||||
def run_ssh_checks():
    """Run all SSH hardening audits and enforce compliance.

    Iterates over the audits returned by ``config.get_audits()`` and
    calls ``ensure_compliance()`` on each one, logging progress at
    DEBUG level.
    """
    log("Starting SSH hardening checks.", level=DEBUG)
    for audit in config.get_audits():
        log("Running '%s' check" % (audit.__class__.__name__), level=DEBUG)
        audit.ensure_compliance()

    log("SSH hardening checks complete.", level=DEBUG)
|
|
||||||
|
|
@ -1,435 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
|
|
||||||
from charmhelpers.contrib.network.ip import (
|
|
||||||
get_address_in_network,
|
|
||||||
get_iface_addr,
|
|
||||||
is_ip,
|
|
||||||
)
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
)
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
apt_install,
|
|
||||||
apt_update,
|
|
||||||
)
|
|
||||||
from charmhelpers.core.host import (
|
|
||||||
lsb_release,
|
|
||||||
CompareHostReleases,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import (
|
|
||||||
TemplatedFile,
|
|
||||||
FileContentAudit,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
    """Get SSH hardening config audits.

    :returns: list of audits
    """
    return [
        SSHConfig(),
        SSHDConfig(),
        SSHConfigFileContentAudit(),
        SSHDConfigFileContentAudit(),
    ]
|
|
||||||
|
|
||||||
|
|
||||||
class SSHConfigContext(object):
    """Template context for the system-wide ssh *client* config.

    Subclassed by SSHDConfigContext for the server side; ``type``
    selects which section of the hardening settings is consulted by
    ``__call__``.
    """

    type = 'client'

    def get_macs(self, allow_weak_mac):
        """Return the comma-separated MAC algorithm list for this host.

        :param allow_weak_mac: if True, additionally permit hmac-sha1.
        :returns: comma-separated string of MAC algorithms
        """
        if allow_weak_mac:
            weak_macs = 'weak'
        else:
            weak_macs = 'default'

        default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
        macs = {'default': default,
                'weak': default + ',hmac-sha1'}

        default = ('hmac-sha2-512-etm@openssh.com,'
                   'hmac-sha2-256-etm@openssh.com,'
                   'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
                   'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
        macs_66 = {'default': default,
                   'weak': default + ',hmac-sha1'}

        # Use newer ciphers on Ubuntu Trusty and above
        _release = lsb_release()['DISTRIB_CODENAME'].lower()
        if CompareHostReleases(_release) >= 'trusty':
            log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
            macs = macs_66

        return macs[weak_macs]

    def get_kexs(self, allow_weak_kex):
        """Return the comma-separated key-exchange algorithm list.

        :param allow_weak_kex: if True, additionally permit the weaker
                               sha1-based exchanges.
        :returns: comma-separated string of kex algorithms
        """
        if allow_weak_kex:
            weak_kex = 'weak'
        else:
            weak_kex = 'default'

        default = 'diffie-hellman-group-exchange-sha256'
        weak = (default + ',diffie-hellman-group14-sha1,'
                'diffie-hellman-group-exchange-sha1,'
                'diffie-hellman-group1-sha1')
        kex = {'default': default,
               'weak': weak}

        default = ('curve25519-sha256@libssh.org,'
                   'diffie-hellman-group-exchange-sha256')
        weak = (default + ',diffie-hellman-group14-sha1,'
                'diffie-hellman-group-exchange-sha1,'
                'diffie-hellman-group1-sha1')
        kex_66 = {'default': default,
                  'weak': weak}

        # Use newer kex on Ubuntu Trusty and above
        _release = lsb_release()['DISTRIB_CODENAME'].lower()
        if CompareHostReleases(_release) >= 'trusty':
            log('Detected Ubuntu 14.04 or newer, using new key exchange '
                'algorithms', level=DEBUG)
            kex = kex_66

        return kex[weak_kex]

    def get_ciphers(self, cbc_required):
        """Return the comma-separated cipher list.

        :param cbc_required: if True, additionally permit the aes-cbc
                             ciphers.
        :returns: comma-separated string of ciphers
        """
        if cbc_required:
            weak_ciphers = 'weak'
        else:
            weak_ciphers = 'default'

        default = 'aes256-ctr,aes192-ctr,aes128-ctr'
        # NOTE: a ',' separator was previously missing before the cbc
        # suite, producing the invalid entry 'aes128-ctraes256-cbc'.
        cipher = {'default': default,
                  'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}

        default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
                   'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
        ciphers_66 = {'default': default,
                      'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}

        # Use newer ciphers on ubuntu Trusty and above
        _release = lsb_release()['DISTRIB_CODENAME'].lower()
        if CompareHostReleases(_release) >= 'trusty':
            log('Detected Ubuntu 14.04 or newer, using new ciphers',
                level=DEBUG)
            cipher = ciphers_66

        return cipher[weak_ciphers]

    def get_listening(self, listen=None):
        """Returns a list of addresses SSH can listen on

        Turns input into a sensible list of IPs SSH can listen on. Input
        must be a python list of interface names, IPs and/or CIDRs.

        :param listen: list of IPs, CIDRs, interface names
                       (defaults to ['0.0.0.0'])

        :returns: list of IPs available on the host
        """
        # NOTE: default is None rather than a mutable list literal so
        # that the default object is never shared between calls.
        if listen is None:
            listen = ['0.0.0.0']
        if listen == ['0.0.0.0']:
            return listen

        value = []
        for network in listen:
            try:
                ip = get_address_in_network(network=network, fatal=True)
            except ValueError:
                if is_ip(network):
                    ip = network
                else:
                    try:
                        ip = get_iface_addr(iface=network, fatal=False)[0]
                    except IndexError:
                        # Interface has no address; skip it.
                        continue
            value.append(ip)
        if value == []:
            # Nothing resolved - fall back to listening everywhere.
            return ['0.0.0.0']
        return value

    def __call__(self):
        settings = utils.get_settings('ssh')
        if settings['common']['network_ipv6_enable']:
            addr_family = 'any'
        else:
            addr_family = 'inet'

        ctxt = {
            'addr_family': addr_family,
            'remote_hosts': settings['common']['remote_hosts'],
            'password_auth_allowed':
            settings['client']['password_authentication'],
            'ports': settings['common']['ports'],
            'ciphers': self.get_ciphers(settings['client']['cbc_required']),
            'macs': self.get_macs(settings['client']['weak_hmac']),
            'kexs': self.get_kexs(settings['client']['weak_kex']),
            'roaming': settings['client']['roaming'],
        }
        return ctxt
|
|
||||||
|
|
||||||
|
|
||||||
class SSHConfig(TemplatedFile):
    """Templated audit that renders /etc/ssh/ssh_config."""

    def __init__(self):
        path = '/etc/ssh/ssh_config'
        super(SSHConfig, self).__init__(path=path,
                                        template_dir=TEMPLATES_DIR,
                                        context=SSHConfigContext(),
                                        user='root',
                                        group='root',
                                        mode=0o0644)

    def pre_write(self):
        """Install the ssh client package and ensure /etc/ssh exists."""
        settings = utils.get_settings('ssh')
        apt_update(fatal=True)
        apt_install(settings['client']['package'])
        if not os.path.exists('/etc/ssh'):
            # BUG FIX: os.makedir does not exist in the os module; the
            # original raised AttributeError when /etc/ssh was missing.
            os.makedirs('/etc/ssh')
        # NOTE: don't recurse
        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
                                 maxdepth=0)

    def post_write(self):
        """Re-assert /etc/ssh permissions after rendering the template."""
        # NOTE: don't recurse
        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
                                 maxdepth=0)
|
|
||||||
|
|
||||||
|
|
||||||
class SSHDConfigContext(SSHConfigContext):
    """Template context for /etc/ssh/sshd_config (server side)."""

    type = 'server'

    def __call__(self):
        settings = utils.get_settings('ssh')
        common = settings['common']
        server = settings['server']
        addr_family = 'any' if common['network_ipv6_enable'] else 'inet'

        return {
            'ssh_ip': self.get_listening(server['listen_to']),
            'password_auth_allowed': server['password_authentication'],
            'ports': common['ports'],
            'addr_family': addr_family,
            'ciphers': self.get_ciphers(server['cbc_required']),
            'macs': self.get_macs(server['weak_hmac']),
            'kexs': self.get_kexs(server['weak_kex']),
            'host_key_files': server['host_key_files'],
            'allow_root_with_key': server['allow_root_with_key'],
            'password_authentication': server['password_authentication'],
            'use_priv_sep': server['use_privilege_separation'],
            'use_pam': server['use_pam'],
            'allow_x11_forwarding': server['allow_x11_forwarding'],
            'print_motd': server['print_motd'],
            'print_last_log': server['print_last_log'],
            'client_alive_interval': server['alive_interval'],
            'client_alive_count': server['alive_count'],
            'allow_tcp_forwarding': server['allow_tcp_forwarding'],
            'allow_agent_forwarding': server['allow_agent_forwarding'],
            'deny_users': server['deny_users'],
            'allow_users': server['allow_users'],
            'deny_groups': server['deny_groups'],
            'allow_groups': server['allow_groups'],
            'use_dns': server['use_dns'],
            'sftp_enable': server['sftp_enable'],
            'sftp_group': server['sftp_group'],
            'sftp_chroot': server['sftp_chroot'],
            'max_auth_tries': server['max_auth_tries'],
            'max_sessions': server['max_sessions'],
        }
|
|
||||||
|
|
||||||
|
|
||||||
class SSHDConfig(TemplatedFile):
    """Templated audit that renders /etc/ssh/sshd_config.

    Restarts the ssh service whenever the rendered file changes.
    """

    def __init__(self):
        path = '/etc/ssh/sshd_config'
        super(SSHDConfig, self).__init__(path=path,
                                         template_dir=TEMPLATES_DIR,
                                         context=SSHDConfigContext(),
                                         user='root',
                                         group='root',
                                         mode=0o0600,
                                         service_actions=[{'service': 'ssh',
                                                           'actions':
                                                           ['restart']}])

    def pre_write(self):
        """Install the ssh server package and ensure /etc/ssh exists."""
        settings = utils.get_settings('ssh')
        apt_update(fatal=True)
        apt_install(settings['server']['package'])
        if not os.path.exists('/etc/ssh'):
            # BUG FIX: os.makedir does not exist in the os module; the
            # original raised AttributeError when /etc/ssh was missing.
            os.makedirs('/etc/ssh')
        # NOTE: don't recurse
        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
                                 maxdepth=0)

    def post_write(self):
        """Re-assert /etc/ssh permissions after rendering the template."""
        # NOTE: don't recurse
        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
                                 maxdepth=0)
|
|
||||||
|
|
||||||
|
|
||||||
class SSHConfigFileContentAudit(FileContentAudit):
    """Content audit for the ssh *client* config (/etc/ssh/ssh_config)."""

    def __init__(self):
        self.path = '/etc/ssh/ssh_config'
        super(SSHConfigFileContentAudit, self).__init__(self.path, {})

    def is_compliant(self, *args, **kwargs):
        """Build pass/fail regex cases from settings, then delegate.

        Populates self.pass_cases / self.fail_cases based on the 'client'
        hardening settings and the host release, then defers to the base
        class for the actual file-content matching.
        """
        self.pass_cases = []
        self.fail_cases = []
        settings = utils.get_settings('ssh')

        _release = lsb_release()['DISTRIB_CODENAME'].lower()
        if CompareHostReleases(_release) >= 'trusty':
            # BUG FIX: this client-config audit previously consulted
            # settings['server'] here (copy-paste from the sshd audit);
            # it now reads settings['client'] like the pre-trusty branch.
            if not settings['client']['weak_hmac']:
                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
            else:
                self.pass_cases.append(r'^MACs.+,hmac-sha1$')

            if settings['client']['weak_kex']:
                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
            else:
                self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$')  # noqa
                self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?')  # noqa

            if settings['client']['cbc_required']:
                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
            else:
                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
                self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+')  # noqa
                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
        else:
            if not settings['client']['weak_hmac']:
                self.fail_cases.append(r'^MACs.+,hmac-sha1$')
            else:
                self.pass_cases.append(r'^MACs.+,hmac-sha1$')

            if settings['client']['weak_kex']:
                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
            else:
                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$')  # noqa
                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa

            if settings['client']['cbc_required']:
                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
            else:
                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')

        if settings['client']['roaming']:
            self.pass_cases.append(r'^UseRoaming yes$')
        else:
            self.fail_cases.append(r'^UseRoaming yes$')

        return super(SSHConfigFileContentAudit, self).is_compliant(*args,
                                                                   **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
class SSHDConfigFileContentAudit(FileContentAudit):
    """Content audit for the ssh *server* config (/etc/ssh/sshd_config)."""

    def __init__(self):
        self.path = '/etc/ssh/sshd_config'
        super(SSHDConfigFileContentAudit, self).__init__(self.path, {})

    def is_compliant(self, *args, **kwargs):
        """Build pass/fail regex cases from settings, then delegate.

        Assembles the regex lists locally, assigns them to
        self.pass_cases / self.fail_cases, and defers to the base class
        for the actual file-content matching.
        """
        passes = []
        fails = []
        settings = utils.get_settings('ssh')
        server = settings['server']

        _release = lsb_release()['DISTRIB_CODENAME'].lower()
        if CompareHostReleases(_release) >= 'trusty':
            if server['weak_hmac']:
                passes.append(r'^MACs.+,hmac-sha1$')
            else:
                passes.append(r'^MACs.+,hmac-ripemd160$')

            if server['weak_kex']:
                fails.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
                passes.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
                passes.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
                passes.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
            else:
                passes.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$')  # noqa
                fails.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?')  # noqa

            if server['cbc_required']:
                passes.append(r'^Ciphers\s.*-cbc[,\s]?')
                fails.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
                fails.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
                fails.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
            else:
                fails.append(r'^Ciphers\s.*-cbc[,\s]?')
                passes.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+')  # noqa
                passes.append(r'^Ciphers\s.*aes128-ctr$')
                passes.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
                passes.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
        else:
            if server['weak_hmac']:
                passes.append(r'^MACs.+,hmac-sha1$')
            else:
                passes.append(r'^MACs.+,hmac-ripemd160$')

            if server['weak_kex']:
                fails.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
                passes.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
                passes.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
                passes.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
            else:
                passes.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$')  # noqa
                fails.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
                fails.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
                fails.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa

            if server['cbc_required']:
                passes.append(r'^Ciphers\s.*-cbc[,\s]?')
                fails.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
                fails.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
                fails.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
            else:
                fails.append(r'^Ciphers\s.*-cbc[,\s]?')
                passes.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
                passes.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
                passes.append(r'^Ciphers\s.*aes256-ctr[,\s]?')

        if server['sftp_enable']:
            passes.append(r'^Subsystem\ssftp')
        else:
            fails.append(r'^Subsystem\ssftp')

        self.pass_cases = passes
        self.fail_cases = fails
        return super(SSHDConfigFileContentAudit, self).is_compliant(*args,
                                                                    **kwargs)
|
|
||||||
|
|
@ -1,70 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
# This is the ssh client system-wide configuration file. See
|
|
||||||
# ssh_config(5) for more information. This file provides defaults for
|
|
||||||
# users, and the values can be changed in per-user configuration files
|
|
||||||
# or on the command line.
|
|
||||||
|
|
||||||
# Configuration data is parsed as follows:
|
|
||||||
# 1. command line options
|
|
||||||
# 2. user-specific file
|
|
||||||
# 3. system-wide file
|
|
||||||
# Any configuration value is only changed the first time it is set.
|
|
||||||
# Thus, host-specific definitions should be at the beginning of the
|
|
||||||
# configuration file, and defaults at the end.
|
|
||||||
|
|
||||||
# Site-wide defaults for some commonly used options. For a comprehensive
|
|
||||||
# list of available options, their meanings and defaults, please see the
|
|
||||||
# ssh_config(5) man page.
|
|
||||||
|
|
||||||
# Restrict the following configuration to be limited to this Host.
|
|
||||||
{% if remote_hosts -%}
|
|
||||||
Host {{ ' '.join(remote_hosts) }}
|
|
||||||
{% endif %}
|
|
||||||
ForwardAgent no
|
|
||||||
ForwardX11 no
|
|
||||||
ForwardX11Trusted yes
|
|
||||||
RhostsRSAAuthentication no
|
|
||||||
RSAAuthentication yes
|
|
||||||
PasswordAuthentication {{ password_auth_allowed }}
|
|
||||||
HostbasedAuthentication no
|
|
||||||
GSSAPIAuthentication no
|
|
||||||
GSSAPIDelegateCredentials no
|
|
||||||
GSSAPIKeyExchange no
|
|
||||||
GSSAPITrustDNS no
|
|
||||||
BatchMode no
|
|
||||||
CheckHostIP yes
|
|
||||||
AddressFamily {{ addr_family }}
|
|
||||||
ConnectTimeout 0
|
|
||||||
StrictHostKeyChecking ask
|
|
||||||
IdentityFile ~/.ssh/identity
|
|
||||||
IdentityFile ~/.ssh/id_rsa
|
|
||||||
IdentityFile ~/.ssh/id_dsa
|
|
||||||
# The port at the destination should be defined
|
|
||||||
{% for port in ports -%}
|
|
||||||
Port {{ port }}
|
|
||||||
{% endfor %}
|
|
||||||
Protocol 2
|
|
||||||
Cipher 3des
|
|
||||||
{% if ciphers -%}
|
|
||||||
Ciphers {{ ciphers }}
|
|
||||||
{%- endif %}
|
|
||||||
{% if macs -%}
|
|
||||||
MACs {{ macs }}
|
|
||||||
{%- endif %}
|
|
||||||
{% if kexs -%}
|
|
||||||
KexAlgorithms {{ kexs }}
|
|
||||||
{%- endif %}
|
|
||||||
EscapeChar ~
|
|
||||||
Tunnel no
|
|
||||||
TunnelDevice any:any
|
|
||||||
PermitLocalCommand no
|
|
||||||
VisualHostKey no
|
|
||||||
RekeyLimit 1G 1h
|
|
||||||
SendEnv LANG LC_*
|
|
||||||
HashKnownHosts yes
|
|
||||||
{% if roaming -%}
|
|
||||||
UseRoaming {{ roaming }}
|
|
||||||
{% endif %}
|
|
||||||
|
|
@ -1,159 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
# Package generated configuration file
|
|
||||||
# See the sshd_config(5) manpage for details
|
|
||||||
|
|
||||||
# What ports, IPs and protocols we listen for
|
|
||||||
{% for port in ports -%}
|
|
||||||
Port {{ port }}
|
|
||||||
{% endfor -%}
|
|
||||||
AddressFamily {{ addr_family }}
|
|
||||||
# Use these options to restrict which interfaces/protocols sshd will bind to
|
|
||||||
{% if ssh_ip -%}
|
|
||||||
{% for ip in ssh_ip -%}
|
|
||||||
ListenAddress {{ ip }}
|
|
||||||
{% endfor %}
|
|
||||||
{%- else -%}
|
|
||||||
ListenAddress ::
|
|
||||||
ListenAddress 0.0.0.0
|
|
||||||
{% endif -%}
|
|
||||||
Protocol 2
|
|
||||||
{% if ciphers -%}
|
|
||||||
Ciphers {{ ciphers }}
|
|
||||||
{% endif -%}
|
|
||||||
{% if macs -%}
|
|
||||||
MACs {{ macs }}
|
|
||||||
{% endif -%}
|
|
||||||
{% if kexs -%}
|
|
||||||
KexAlgorithms {{ kexs }}
|
|
||||||
{% endif -%}
|
|
||||||
# HostKeys for protocol version 2
|
|
||||||
{% for keyfile in host_key_files -%}
|
|
||||||
HostKey {{ keyfile }}
|
|
||||||
{% endfor -%}
|
|
||||||
|
|
||||||
# Privilege Separation is turned on for security
|
|
||||||
{% if use_priv_sep -%}
|
|
||||||
UsePrivilegeSeparation {{ use_priv_sep }}
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
# Lifetime and size of ephemeral version 1 server key
|
|
||||||
KeyRegenerationInterval 3600
|
|
||||||
ServerKeyBits 1024
|
|
||||||
|
|
||||||
# Logging
|
|
||||||
SyslogFacility AUTH
|
|
||||||
LogLevel VERBOSE
|
|
||||||
|
|
||||||
# Authentication:
|
|
||||||
LoginGraceTime 30s
|
|
||||||
{% if allow_root_with_key -%}
|
|
||||||
PermitRootLogin without-password
|
|
||||||
{% else -%}
|
|
||||||
PermitRootLogin no
|
|
||||||
{% endif %}
|
|
||||||
PermitTunnel no
|
|
||||||
PermitUserEnvironment no
|
|
||||||
StrictModes yes
|
|
||||||
|
|
||||||
RSAAuthentication yes
|
|
||||||
PubkeyAuthentication yes
|
|
||||||
AuthorizedKeysFile %h/.ssh/authorized_keys
|
|
||||||
|
|
||||||
# Don't read the user's ~/.rhosts and ~/.shosts files
|
|
||||||
IgnoreRhosts yes
|
|
||||||
# For this to work you will also need host keys in /etc/ssh_known_hosts
|
|
||||||
RhostsRSAAuthentication no
|
|
||||||
# similar for protocol version 2
|
|
||||||
HostbasedAuthentication no
|
|
||||||
# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
|
|
||||||
IgnoreUserKnownHosts yes
|
|
||||||
|
|
||||||
# To enable empty passwords, change to yes (NOT RECOMMENDED)
|
|
||||||
PermitEmptyPasswords no
|
|
||||||
|
|
||||||
# Change to yes to enable challenge-response passwords (beware issues with
|
|
||||||
# some PAM modules and threads)
|
|
||||||
ChallengeResponseAuthentication no
|
|
||||||
|
|
||||||
# Change to no to disable tunnelled clear text passwords
|
|
||||||
PasswordAuthentication {{ password_authentication }}
|
|
||||||
|
|
||||||
# Kerberos options
|
|
||||||
KerberosAuthentication no
|
|
||||||
KerberosGetAFSToken no
|
|
||||||
KerberosOrLocalPasswd no
|
|
||||||
KerberosTicketCleanup yes
|
|
||||||
|
|
||||||
# GSSAPI options
|
|
||||||
GSSAPIAuthentication no
|
|
||||||
GSSAPICleanupCredentials yes
|
|
||||||
|
|
||||||
X11Forwarding {{ allow_x11_forwarding }}
|
|
||||||
X11DisplayOffset 10
|
|
||||||
X11UseLocalhost yes
|
|
||||||
GatewayPorts no
|
|
||||||
PrintMotd {{ print_motd }}
|
|
||||||
PrintLastLog {{ print_last_log }}
|
|
||||||
TCPKeepAlive no
|
|
||||||
UseLogin no
|
|
||||||
|
|
||||||
ClientAliveInterval {{ client_alive_interval }}
|
|
||||||
ClientAliveCountMax {{ client_alive_count }}
|
|
||||||
AllowTcpForwarding {{ allow_tcp_forwarding }}
|
|
||||||
AllowAgentForwarding {{ allow_agent_forwarding }}
|
|
||||||
|
|
||||||
MaxStartups 10:30:100
|
|
||||||
#Banner /etc/issue.net
|
|
||||||
|
|
||||||
# Allow client to pass locale environment variables
|
|
||||||
AcceptEnv LANG LC_*
|
|
||||||
|
|
||||||
# Set this to 'yes' to enable PAM authentication, account processing,
|
|
||||||
# and session processing. If this is enabled, PAM authentication will
|
|
||||||
# be allowed through the ChallengeResponseAuthentication and
|
|
||||||
# PasswordAuthentication. Depending on your PAM configuration,
|
|
||||||
# PAM authentication via ChallengeResponseAuthentication may bypass
|
|
||||||
# the setting of "PermitRootLogin without-password".
|
|
||||||
# If you just want the PAM account and session checks to run without
|
|
||||||
# PAM authentication, then enable this but set PasswordAuthentication
|
|
||||||
# and ChallengeResponseAuthentication to 'no'.
|
|
||||||
UsePAM {{ use_pam }}
|
|
||||||
|
|
||||||
{% if deny_users -%}
|
|
||||||
DenyUsers {{ deny_users }}
|
|
||||||
{% endif -%}
|
|
||||||
{% if allow_users -%}
|
|
||||||
AllowUsers {{ allow_users }}
|
|
||||||
{% endif -%}
|
|
||||||
{% if deny_groups -%}
|
|
||||||
DenyGroups {{ deny_groups }}
|
|
||||||
{% endif -%}
|
|
||||||
{% if allow_groups -%}
|
|
||||||
AllowGroups {{ allow_groups }}
|
|
||||||
{% endif -%}
|
|
||||||
UseDNS {{ use_dns }}
|
|
||||||
MaxAuthTries {{ max_auth_tries }}
|
|
||||||
MaxSessions {{ max_sessions }}
|
|
||||||
|
|
||||||
{% if sftp_enable -%}
|
|
||||||
# Configuration, in case SFTP is used
|
|
||||||
## override default of no subsystems
|
|
||||||
## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
|
|
||||||
Subsystem sftp internal-sftp -l VERBOSE
|
|
||||||
|
|
||||||
## These lines must appear at the *end* of sshd_config
|
|
||||||
Match Group {{ sftp_group }}
|
|
||||||
ForceCommand internal-sftp -l VERBOSE
|
|
||||||
ChrootDirectory {{ sftp_chroot }}
|
|
||||||
{% else -%}
|
|
||||||
# Configuration, in case SFTP is used
|
|
||||||
## override default of no subsystems
|
|
||||||
## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
|
|
||||||
## These lines must appear at the *end* of sshd_config
|
|
||||||
Match Group sftponly
|
|
||||||
ForceCommand internal-sftp -l VERBOSE
|
|
||||||
ChrootDirectory /sftpchroot/home/%u
|
|
||||||
{% endif %}
|
|
||||||
|
|
@ -1,73 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
WARNING,
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
from jinja2 import FileSystemLoader, Environment
|
|
||||||
except ImportError:
|
|
||||||
from charmhelpers.fetch import apt_install
|
|
||||||
from charmhelpers.fetch import apt_update
|
|
||||||
apt_update(fatal=True)
|
|
||||||
if six.PY2:
|
|
||||||
apt_install('python-jinja2', fatal=True)
|
|
||||||
else:
|
|
||||||
apt_install('python3-jinja2', fatal=True)
|
|
||||||
from jinja2 import FileSystemLoader, Environment
|
|
||||||
|
|
||||||
|
|
||||||
# NOTE: function separated from main rendering code to facilitate easier
|
|
||||||
# mocking in unit tests.
|
|
||||||
def write(path, data):
    """Persist a bytes payload to disk, replacing any existing file.

    Kept as a tiny standalone function so unit tests can mock file
    writes without patching ``open`` globally.

    :param path: destination file path
    :param data: bytes to write
    """
    out = open(path, 'wb')
    try:
        out.write(data)
    finally:
        out.close()
|
|
||||||
|
|
||||||
|
|
||||||
def get_template_path(template_dir, path):
    """Return the template file which would be used to render ``path``.

    The template is assumed to share its basename with the target file
    and to live directly under ``template_dir``.

    :param template_dir: the directory the templates are located in
    :param path: the file path to be written to.
    :returns: path to the template file
    """
    template_name = os.path.basename(path)
    return os.path.join(template_dir, template_name)
|
|
||||||
|
|
||||||
|
|
||||||
def render_and_write(template_dir, path, context):
    """Render a jinja2 template and write the result to ``path``.

    The template used is the file in ``template_dir`` whose name matches
    the basename of ``path``. If rendering yields no content, the target
    file is left untouched.

    :param template_dir: the directory to load the template from
    :param path: the path to write the templated contents to
    :param context: the parameters to pass to the rendering engine
    """
    loader = FileSystemLoader(template_dir)
    template = Environment(loader=loader).get_template(os.path.basename(path))
    log('Rendering from template: %s' % template.name, level=DEBUG)

    rendered_content = template.render(context)
    if not rendered_content:
        log("Render returned None - skipping '%s'" % path, level=WARNING)
        return

    write(path, rendered_content.encode('utf-8').strip())
    log('Wrote template %s' % path, level=DEBUG)
|
|
||||||
|
|
@ -1,155 +0,0 @@
|
||||||
# Copyright 2016-2021 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import glob
|
|
||||||
import grp
|
|
||||||
import os
|
|
||||||
import pwd
|
|
||||||
import six
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
INFO,
|
|
||||||
WARNING,
|
|
||||||
ERROR,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
# Global settings cache. Since each hook fire entails a fresh module import it
|
|
||||||
# is safe to hold this in memory and not risk missing config changes (since
|
|
||||||
# they will result in a new hook fire and thus re-import).
|
|
||||||
__SETTINGS__ = {}
|
|
||||||
|
|
||||||
|
|
||||||
def _get_defaults(modules):
    """Load the default config for the provided modules.

    :param modules: stack modules config defaults to lookup.
    :returns: modules default config dictionary.
    """
    default = os.path.join(os.path.dirname(__file__),
                           'defaults/%s.yaml' % (modules))
    # Use a context manager so the handle is closed deterministically
    # rather than leaking until garbage collection.
    with open(default) as fd:
        return yaml.safe_load(fd)
|
|
||||||
|
|
||||||
|
|
||||||
def _get_schema(modules):
    """Load the config schema for the provided modules.

    NOTE: this schema is intended to have a 1-1 relationship with the
    keys in the default config and is used as a means to verify valid
    overrides provided by the user.

    :param modules: stack modules config schema to lookup.
    :returns: modules default schema dictionary.
    """
    schema = os.path.join(os.path.dirname(__file__),
                          'defaults/%s.yaml.schema' % (modules))
    # Use a context manager so the handle is closed deterministically
    # rather than leaking until garbage collection.
    with open(schema) as fd:
        return yaml.safe_load(fd)
|
|
||||||
|
|
||||||
|
|
||||||
def _get_user_provided_overrides(modules):
    """Load user-provided config overrides.

    Looks for a ``hardening.yaml`` in the charm root (JUJU_CHARM_DIR)
    and returns the section matching ``modules``, if any.

    :param modules: stack modules to lookup in user overrides yaml file.
    :returns: overrides dictionary (empty if none found).
    """
    overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
                             'hardening.yaml')
    if os.path.exists(overrides):
        log("Found user-provided config overrides file '%s'" %
            (overrides), level=DEBUG)
        # Close the file handle deterministically instead of leaking it.
        with open(overrides) as fd:
            settings = yaml.safe_load(fd)
        if settings and settings.get(modules):
            log("Applying '%s' overrides" % (modules), level=DEBUG)
            return settings.get(modules)

        log("No overrides found for '%s'" % (modules), level=DEBUG)
    else:
        log("No hardening config overrides file '%s' found in charm "
            "root dir" % (overrides), level=DEBUG)

    return {}
|
|
||||||
|
|
||||||
|
|
||||||
def _apply_overrides(settings, overrides, schema):
|
|
||||||
"""Get overrides config overlaid onto modules defaults.
|
|
||||||
|
|
||||||
:param modules: require stack modules config.
|
|
||||||
:returns: dictionary of modules config with user overrides applied.
|
|
||||||
"""
|
|
||||||
if overrides:
|
|
||||||
for k, v in six.iteritems(overrides):
|
|
||||||
if k in schema:
|
|
||||||
if schema[k] is None:
|
|
||||||
settings[k] = v
|
|
||||||
elif type(schema[k]) is dict:
|
|
||||||
settings[k] = _apply_overrides(settings[k], overrides[k],
|
|
||||||
schema[k])
|
|
||||||
else:
|
|
||||||
raise Exception("Unexpected type found in schema '%s'" %
|
|
||||||
type(schema[k]), level=ERROR)
|
|
||||||
else:
|
|
||||||
log("Unknown override key '%s' - ignoring" % (k), level=INFO)
|
|
||||||
|
|
||||||
return settings
|
|
||||||
|
|
||||||
|
|
||||||
def get_settings(modules):
    """Return the effective hardening config for ``modules``.

    Bundled defaults are loaded, user overrides are validated against the
    schema and overlaid, and the merged result is memoised in the
    module-level ``__SETTINGS__`` cache.

    :param modules: stack modules config to look up.
    :returns: dictionary of config with overrides applied.
    """
    global __SETTINGS__
    if modules not in __SETTINGS__:
        schema = _get_schema(modules)
        defaults = _get_defaults(modules)
        user_overrides = _get_user_provided_overrides(modules)
        __SETTINGS__[modules] = _apply_overrides(defaults, user_overrides,
                                                 schema)
    return __SETTINGS__[modules]
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_permissions(path, user, group, permissions, maxdepth=-1):
    """Ensure ownership and mode for path.

    If path is a file, apply to file and return. If path is a directory,
    apply recursively (if required) to directory contents and return.

    :param path: file or directory to update
    :param user: user name
    :param group: group name
    :param permissions: octal permissions
    :param maxdepth: maximum recursion depth. A negative maxdepth allows
                     infinite recursion and maxdepth=0 means no recursion.
    :returns: None
    """
    if not os.path.exists(path):
        log("File '%s' does not exist - cannot set permissions" % (path),
            level=WARNING)
        return

    uid = pwd.getpwnam(user).pw_uid
    gid = grp.getgrnam(group).gr_gid
    os.chown(path, uid, gid)
    os.chmod(path, permissions)

    if maxdepth == 0:
        log("Max recursion depth reached - skipping further recursion",
            level=DEBUG)
        return

    if maxdepth > 0:
        maxdepth -= 1

    if os.path.isdir(path):
        for entry in glob.glob("%s/*" % (path)):
            ensure_permissions(entry, user=user, group=group,
                               permissions=permissions, maxdepth=maxdepth)
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue