update mod: ubuntu and nrpe

This commit is contained in:
AnonSaber 2023-04-12 14:25:36 +08:00
parent 654e782025
commit 852cc0f247
780 changed files with 157455 additions and 0 deletions

View File

@ -12,4 +12,6 @@ charm pull cs:~containers/calico-812
charm pull cs:~containers/kubeapi-load-balancer-786
charm pull cs:~containers/keepalived-85
charm pull cs:~containers/coredns-20
charm pull cs:~containers/ubuntu-20
charm pull cs:~containers/nrpe-75
```

75
nrpe/Makefile Normal file
View File

@ -0,0 +1,75 @@
# Makefile for building and testing the nrpe charm.
#
# CHARM_BUILD_DIR may be supplied by the environment (e.g. by CI);
# otherwise it defaults to a ".build" directory next to this Makefile.
PYTHON := /usr/bin/python3
PROJECTPATH := $(dir $(realpath $(MAKEFILE_LIST)))
ifndef CHARM_BUILD_DIR
CHARM_BUILD_DIR := ${PROJECTPATH}.build
endif
METADATA_FILE="metadata.yaml"
# Charm name as declared in metadata.yaml ("name: <value>").
# Simply-expanded (:=) so the shell command runs once at parse time,
# not on every reference; a single awk replaces the cat|grep|awk chain.
CHARM_NAME := $(shell awk '/^name:/ {print $$2}' ${PROJECTPATH}/${METADATA_FILE})

help:
	@echo "This project supports the following targets"
	@echo ""
	@echo " make help - show this text"
	@echo " make clean - remove unneeded files"
	@echo " make submodules - make sure that the submodules are up-to-date"
	@echo " make submodules-update - update submodules to latest changes on remote branch"
	@echo " make build - build the charm"
	@echo " make release - run clean and build targets"
	@echo " make lint - run flake8 and black --check"
	@echo " make black - run black and reformat files"
	@echo " make proof - run charm proof"
	@echo " make unittests - run the tests defined in the unittest subdirectory"
	@echo " make functional - run the tests defined in the functional subdirectory"
	@echo " make test - run lint, proof, unittests and functional targets"
	@echo ""

clean:
	@echo "Cleaning files"
	@git clean -ffXd -e '!.idea'
	@echo "Cleaning existing build"
	@rm -rf ${CHARM_BUILD_DIR}/${CHARM_NAME}

submodules:
	@echo "Cloning submodules"
	@git submodule update --init --recursive

submodules-update:
	@echo "Pulling latest updates for submodules"
	@git submodule update --init --recursive --remote --merge

build: submodules-update
	@echo "Building charm to base directory ${CHARM_BUILD_DIR}/${CHARM_NAME}"
	@-git rev-parse --abbrev-ref HEAD > ./repo-info
	@-git describe --always > ./version
	@mkdir -p ${CHARM_BUILD_DIR}/${CHARM_NAME}
	@cp -a ./* ${CHARM_BUILD_DIR}/${CHARM_NAME}

release: clean build
	@echo "Charm is built at ${CHARM_BUILD_DIR}/${CHARM_NAME}"

lint:
	@echo "Running lint checks"
	@tox -e lint

black:
	@echo "Reformat files with black"
	@tox -e black

proof:
	@echo "Running charm proof"
	@-charm proof

unittests: submodules-update
	@echo "Running unit tests"
	@tox -e unit

functional: build
	@echo "Executing functional tests in ${CHARM_BUILD_DIR}"
	@CHARM_BUILD_DIR=${CHARM_BUILD_DIR} tox -e func

test: lint proof unittests functional
	@echo "Charm ${CHARM_NAME} has been tested"

# The targets below don't depend on a file
.PHONY: help submodules submodules-update clean build release lint black proof unittests functional test

225
nrpe/README.md Normal file
View File

@ -0,0 +1,225 @@
Introduction
============
This subordinate charm is used to configure nrpe (Nagios Remote Plugin
Executor). It can be related to the nagios charm via the monitors relation and
will pass a monitors yaml to nagios informing it of what checks to monitor.
Principal Relations
===================
This charm can be attached to any principal charm (via the juju-info relation)
regardless of whether it has implemented the local-monitors or
nrpe-external-master relations. For example:
juju deploy ubuntu
juju deploy nrpe
juju deploy nagios
juju add-relation ubuntu nrpe
juju add-relation nrpe:monitors nagios:monitors
If joined via the juju-info relation the default checks are configured and
additional checks can be added via the monitors config option (see below).
The local-monitors relations allows the principal to request checks to be setup
by passing a monitors yaml and listing them in the 'local' section. It can
also list checks that it has configured by listing them in the remote nrpe
section and finally it can request external monitors are setup by using one of
the other remote types. See "Monitors yaml" below.
Other Subordinate Charms
========================
If another subordinate charm deployed to the same principal has a
local-monitors or nrpe-external-master relation then it can also be related to
the local nrpe charm. For example:
echo -e "glance:\n vip: 10.5.106.1" > glance.yaml
juju deploy -n3 --config glance.yaml glance
juju deploy hacluster glance-hacluster
juju deploy nrpe glance-nrpe
juju deploy nagios
juju add-relation glance glance-hacluster
juju add-relation glance-nrpe:monitors nagios:monitors
juju add-relation glance glance-nrpe
juju add-relation glance-hacluster glance-nrpe
The glance-hacluster charm will pass monitoring information to glance-nrpe
which will amalgamate all monitor definitions before passing them to nagios.
Check sources
=============
Check definitions can come from three places:
Default Checks
--------------
This charm creates a base set of checks in /etc/nagios/nrpe.d, including
check\_load, check\_users, check\_disk\_root. All of the options for these are
configurable but sensible defaults have been set in config.yaml.
For example to increase the alert threshold for number of processes:
juju config nrpe load="-w 10,10,10 -c 25,25,25"
Default checks maybe disabled by setting them to the empty string.
Principal Requested Checks
--------------------------
Monitors passed to this charm by the principal charm via the local-monitors
or nrpe-external-master relation. The principal charm can write its own
check definition into */etc/nagios/nrpe.d* and then inform this charm via the
monitors setting. It can also request a direct external check of a service
without using nrpe. See "Monitors yaml" below for examples.
User Requested Checks
---------------------
This works in the same way as the Principal requested except the monitors yaml
is set by the user via the monitors config option. For example to add a monitor
for the rsyslog process:
juju config nrpe monitors="
monitors:
local:
procrunning:
rsyslogd:
min: 1
max: 1
executable: rsyslogd
"
External Nagios
===============
If the nagios server is not deployed in the juju environment then the charm can
be configured, via the export\_nagios\_definitions, to write out nagios config
fragments to /var/lib/nagios/export. Rsync is then configured to allow a host
(specified by nagios\_master) to collect the fragments. An rsync stanza is created
allowing the Nagios server to pick up configs from /var/lib/nagios/export (as
a target called "external-nagios"), which will also be configured to allow
connections from the hostname or IP address as specified for the
"nagios\_master" variable.
It is up to you to configure the Nagios master to pull the configs needed, which
will then cause it to connect back to the instances in question to run the nrpe
checks you have defined.
Monitors yaml
=============
The list of monitors passed down the monitors relation is an amalgamation of the
lists provided via the principal, the user and the default checks.
The monitors yaml is of the following form:
# Version of the spec, mostly ignored but 0.3 is the current one
version: '0.3'
# Dict with just 'local' and 'remote' as parts
monitors:
# local monitors need an agent to be handled. See nrpe charm for
# some example implementations
local:
# procrunning checks for a running process named X (no path)
procrunning:
# Multiple procrunning can be defined, this is the "name" of it
nagios3:
min: 1
max: 1
executable: nagios3
# Remote monitors can be polled directly by a remote system
remote:
# do a request on the HTTP protocol
http:
nagios:
port: 80
path: /nagios3/
# expected status response (otherwise just look for 200)
status: 'HTTP/1.1 401'
# Use as the Host: header (the server address will still be used to connect() to)
host: www.fewbar.com
mysql:
# Named basic check
basic:
username: monitors
password: abcdefg123456
nrpe:
apache2:
command: check_apache2
Before a monitor is added it is checked to see if it is in the 'local' section.
If it is, this charm needs to convert it into an nrpe check. Only a small
number of check types are currently supported (see below). These checks can
then be called by the nagios charm via the nrpe service. So for each check
listed in the local section:
1. The definition is read and a check definition is written to /etc/nagios/nrpe.d
2. The check is defined as a remote nrpe check in the yaml passed to nagios
In the example above a check\_proc\_nagios3\_user.cfg file would be written
out which contains:
# Check process nagios3 is running (user)
command[check_proc_nagios3_user]=/usr/lib/nagios/plugins/check_procs -w 1 -c 1 -C nagios3
And the monitors yaml passed to nagios would include:
monitors:
nrpe:
check_proc_nagios3_user:
command: check_proc_nagios3_user
The principal charm, or the user via the monitors config option, can request an
external check by adding it to the remote section of the monitors yaml. In the
example above direct checks of a webserver and of mysql are being requested.
This charm passes those on to nagios unaltered.
Local check types
-----------------
Supported nrpe checks are:
procrunning:
min: Minimum number of 'executable' processes
max: Maximum number of 'executable' processes
executable: Name of executable to look for in process list
processcount:
min: Minimum total number of processes
max: Maximum total number of processes
executable: Name of executable to look for in process list
disk:
path: Directory to monitor space usage of
custom:
check: the name of the check to execute
plugin_path: (optional) Absolute path to the directory containing the
custom plugin. Default value is /var/lib/nagios/plugins
description: (optional) Description of the check
params: (optional) Parameters to pass to the check on invocation
Remote check types
------------------
Supported remote types:
http, mysql, nrpe, tcp, rpc, pgsql
(See Nagios charm for up-to-date list and options)
Spaces
======
By defining 'monitors' binding, you can influence which nrpe's IP will be reported
back to Nagios. This can be very handy if nrpe is placed on machines with multiple
IPs/networks.
Actions
=======
The charm defines 2 actions, 'list-nrpe-checks' that gives a list of all the
nrpe checks defined for this unit and what commands they use. The other is
run-nrpe-check, which allows you to run a specified nrpe check and get the
output. This is useful to confirm if an alert is actually resolved.

9
nrpe/actions.yaml Normal file
View File

@ -0,0 +1,9 @@
list-nrpe-checks:
description: Lists all NRPE checks defined on this unit
run-nrpe-check:
description: Run a specific NRPE check defined on this unit
params:
name:
type: string
description: Check name to run
required: [name]

16
nrpe/actions/list-nrpe-checks Executable file
View File

@ -0,0 +1,16 @@
#!/bin/bash
# List all NRPE checks configured on this unit.
#
# For each /etc/nagios/nrpe.d/*.cfg file, report the check name (taken
# from the command[...] stanza, with '_' mapped to '-' for action keys)
# and the command it runs, via the Juju action-set tool.
nrpedir=/etc/nagios/nrpe.d
if [ ! -d "$nrpedir" ]; then
    action-fail "No $nrpedir exists"
    exit 1
else
    for i in "$nrpedir"/*.cfg; do
        # With no matching files the glob stays literal; skip it rather
        # than grepping a non-existent path.
        [ -e "$i" ] || continue
        # Check name: text inside command[...] on the command line(s).
        check=$(grep command "$i" | awk -F "=" '{ print $1 }' | sed -e 's/command\[//' -e 's/\]//' -e 's/_/-/g');
        # Command: everything after the '=' on the same line(s).
        command=$(grep command "$i" | awk -F "=" '{ print $2 }');
        action-set checks."$check"="$command";
    done
fi
action-set timestamp="$(date)"

15
nrpe/actions/run-nrpe-check Executable file
View File

@ -0,0 +1,15 @@
#!/bin/bash
# Run a single NRPE check on demand and report its output.
#
# The check name comes from the action parameter "name"; hyphens are
# mapped to underscores to match the .cfg file naming scheme.
check=$(action-get name | sed -e 's/-/_/g')
nrpedir="/etc/nagios/nrpe.d"
checkfile="$nrpedir/${check}.cfg"
if [ -f "$checkfile" ]; then
    # Extract the plugin invocation from the command[...]=<cmd> stanza.
    # Restrict awk to the command line so comment lines in the .cfg file
    # don't contribute stray empty lines to $command.
    command=$(awk -F "=" '/^command/ { print $2 }' "$checkfile")
    # Run as the nagios user, exactly as nrpe itself would.
    # $command is intentionally unquoted: it must word-split into the
    # plugin path plus its arguments.
    output=$(sudo -u nagios $command)
    action-set check-output="$output"
else
    action-fail "$checkfile does not exist"
fi

210
nrpe/config.yaml Normal file
View File

@ -0,0 +1,210 @@
options:
nagios_master:
default: "None"
type: string
description: |
IP address of the nagios master from which to allow rsync access
server_port:
default: 5666
type: int
description: |
Port on which nagios-nrpe-server will listen
nagios_address_type:
default: "private"
type: string
description: |
Determines whether the nagios host check should use the private
or public IP address of an instance. Can be "private" or "public".
nagios_host_context:
default: "juju"
type: string
description: |
A string which will be prepended to instance name to set the host name
in nagios. So for instance the hostname would be something like:
juju-postgresql-0
If you're running multiple environments with the same services in them
this allows you to differentiate between them.
nagios_hostname_type:
default: "auto"
type: string
description: |
Determines whether a server is identified by its unit name or
host name. If you're in a virtual environment, "unit" is
probably best. If you're using MaaS, you may prefer "host".
Use "auto" to have nrpe automatically distinguish between
metal and non-metal hosts.
dont_blame_nrpe:
default: False
type: boolean
description: |
Setting dont_blame_nrpe to True sets dont_blame_nrpe=1 in nrpe.cfg
This config option allows specifying arguments to nrpe scripts.
This can be a security risk so it is disabled by default. Nrpe is
compiled with --enable-command-args option by default, which this
option enables.
debug:
default: False
type: boolean
description: |
Setting debug to True enables debug=1 in nrpe.cfg
disk_root:
default: "-u GB -w 25% -c 20% -K 5%"
type: string
description: |
Root disk check. This can be made to also check non-root disk systems
as follows:
-u GB -w 20% -c 15% -r '/srv/juju/vol-' -C -u GB -w 25% -c 20%
The string '-p /' will be appended to this check, so you must finish
the string taking that into account. See the nagios check_disk plugin
help for further details.
.
Set to '' in order to disable this check.
zombies:
default: ""
type: string
description: |
Zombie processes check; defaults to disabled. To enable, set the desired
check_procs arguments pertaining to zombies, for example: "-w 3 -c 6 -s Z"
procs:
default: ""
type: string
description: |
Set thresholds for number of running processes. Defaults to disabled;
to enable, specify 'auto' for the charm to generate thresholds based
on processor count, or manually provide arguments for check_procs, for
example: "-k -w 250 -c 300" to set warning and critical levels
manually and exclude kernel threads.
load:
default: "auto"
type: string
description: |
Load check arguments (e.g. "-w 8,8,8 -c 15,15,15"); if 'auto' is set,
thresholds will be set to multipliers of processor count for 1m, 5m
and 15m thresholds, with warning as "(4, 2, 1)", and critical set to
"(8, 4, 2)". So if you have two processors, you'd get thresholds of
"-w 8,4,2 -c 16,8,4".
.
Set to '' in order to disable this check.
conntrack:
default: "-w 80 -c 90"
type: string
description: |
Check conntrack (net.netfilter.nf_conntrack_count) against thresholds.
.
Set to '' in order to disable this check.
users:
default: ""
type: string
description: |
Set thresholds for number of logged-in users. Defaults to disabled;
to enable, manually provide arguments for check_user, for example:
"-w 20 -c 25"
swap:
default: ''
type: string
description: |
Check swap utilisation. See the nagios check_swap plugin help for
further details. The format looks like "-w 40% -c 25%"
.
Set to '' in order to disable this check.
swap_activity:
default: "-i 5 -w 10240 -c 40960"
type: string
description: |
Swapout activity check. Thresholds are expressed in kB, interval in
seconds.
.
Set to '' in order to disable this check.
mem:
default: "-C -h -u -w 85 -c 90"
type: string
description: |
Check memory % used.
By default, thresholds are applied to the non-hugepages portion of the
memory.
.
Set to '' in order to disable this check.
lacp_bonds:
default: ''
type: string
description: |
LACP bond interfaces, space-delimited (ie. 'bond0 bond1')
netlinks:
default: ''
type: string
description: |
Network interfaces to monitor for correct link state, MTU size
and speed negotiated. The first argument is either an interface name or
a CIDR expression. Parsed keywords are "mtu", "speed", and "op". Other
keywords are ignored.
.
Note that CIDR expressions can match multiple devices.
.
For example (multi-line starts with pipe):
- 10.1.2.0/24 mtu:9000 speed:25000
- eth0 mtu:9000 speed:25000
- lo mtu:65536 op:unknown
- br0-mgmt mtu:9000
- br0-sta mtu:9000
- br0-stc mtu:9000
- br0-api mtu:1500
- bond0 mtu:9000 speed:50000
- bond0.25 mtu:1500 speed:50000
- ens3 mtu:1500 speed:-1 desc:openstack_iface
- ...
netlinks_skip_unfound_ifaces:
default: False
type: boolean
description: |
add --skip-unfound-ifaces to check_netlinks.py.
monitors:
default: ''
type: string
description: |
Additional monitors defined in the monitors yaml format (see README)
hostgroups:
default: ""
type: string
description: Comma separated list of hostgroups to add for these hosts
hostcheck_inherit:
default: "server"
type: string
description: Hostcheck to inherit
export_nagios_definitions:
default: False
type: boolean
description: |
If True nagios check definitions are written to
'/var/lib/nagios/export' and rsync is configured to allow nagios_master
to collect them. Useful when Nagios is outside of the juju environment
sub_postfix:
default: ""
type: string
description: |
A string to be appended onto all the nrpe checks created by this charm
to avoid potential clashes with existing checks
xfs_errors:
default: ""
type: string
description: |
dmesg history length to check for xfs errors, in minutes
.
Defaults to disabled, set the time to enable.
ro_filesystem_excludes:
default: "/snap/,/sys/fs/cgroup,/run/containerd,/var/lib/docker"
type: string
description: |
Comma separated list of mount points to exclude from checks for readonly filesystem.
Can be a substring rather than the entire mount point, e.g. /sys will match all filesystems
beginning with the string /sys.
The check is disabled on all LXD units, and also for non-container units if this parameter is
set to ''.
cpu_governor:
default: ""
type: string
description: |
CPU governor check. The string value here will be checked against all CPUs in
/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor. The supported values are
'ondemand', 'performance', 'powersave'. Unset value means the check will be disabled.
There is a relation key called requested_cpu_governor='string', but the charm config value
will take precedence over the relation data.

53
nrpe/copyright Normal file
View File

@ -0,0 +1,53 @@
Format: http://dep.debian.net/deps/dep5/
Files: *
Copyright: Copyright 2012, Canonical Ltd., All Rights Reserved.
License: GPL-3
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Files: files/plugins/check_exit_status.pl
Copyright: Copyright (C) 2011 Chad Columbus <ccolumbu@hotmail.com>
License: GPL-2
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Files: files/plugins/check_mem.pl
Copyright: Copyright (c) 2011 justin@techadvise.com
License: MIT/X11
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
.
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

7
nrpe/files/default_rsync Normal file
View File

@ -0,0 +1,7 @@
#------------------------------------------------
# This file is juju managed
#------------------------------------------------
RSYNC_ENABLE=true
RSYNC_NICE=''
RSYNC_OPTS=''

View File

@ -0,0 +1,84 @@
#!/usr/bin/env python
"""Nagios plugin for python2.7."""
# Copyright (C) 2005, 2006, 2007, 2012 James Troup <james.troup@canonical.com>
import os
import stat
import time
import traceback
import sys
################################################################################
class CriticalError(Exception):
    """This indicates a critical error (try_check exits with status 2)."""

    pass


class WarnError(Exception):
    """This indicates a warning condition (try_check exits with status 1)."""

    pass


class UnknownError(Exception):
    """This indicates an unknown error was encountered (try_check exits with status 3)."""

    pass
def try_check(function, *args, **kwargs):
    """Perform a check with error/warn/unknown handling.

    Runs function(*args, **kwargs) and translates the plugin exceptions
    into Nagios exit codes: UnknownError -> 3, CriticalError -> 2,
    WarnError -> 1.  Any other exception prints a traceback and exits 3.
    """
    try:
        function(*args, **kwargs)
    except UnknownError, msg:  # noqa: E999
        print msg
        sys.exit(3)
    except CriticalError, msg:  # noqa: E999
        print msg
        sys.exit(2)
    except WarnError, msg:  # noqa: E999
        print msg
        sys.exit(1)
    except:  # noqa: E722
        # Unexpected failure: show which check raised and its traceback,
        # then report UNKNOWN (3) to Nagios.
        print "%s raised unknown exception '%s'" % (function, sys.exc_info()[0])
        print "=" * 60
        traceback.print_exc(file=sys.stdout)
        print "=" * 60
        sys.exit(3)
################################################################################
def check_file_freshness(filename, newer_than=600):
    """Check a file.

    It checks that the file exists, is readable and is newer than <n>
    seconds (where <n> defaults to 600).

    Raises CriticalError if the file is missing, unreadable, older than
    newer_than seconds, or has a modification time in the future.
    """
    # First check the file exists and is readable
    if not os.path.exists(filename):
        raise CriticalError("%s: does not exist." % (filename))
    if os.access(filename, os.R_OK) == 0:
        raise CriticalError("%s: is not readable." % (filename))
    # Then ensure the file is up-to-date enough
    mtime = os.stat(filename)[stat.ST_MTIME]
    last_modified = time.time() - mtime
    if last_modified > newer_than:
        raise CriticalError(
            "%s: was last modified on %s and is too old (> %s seconds)."
            % (filename, time.ctime(mtime), newer_than)
        )
    # A negative age means the mtime is ahead of the current clock.
    if last_modified < 0:
        raise CriticalError(
            "%s: was last modified on %s which is in the future."
            % (filename, time.ctime(mtime))
        )

View File

@ -0,0 +1,85 @@
#!/usr/bin/env python3
"""Nagios plugin for python3."""
# Copyright (C) 2005, 2006, 2007, 2012, 2017 James Troup <james.troup@canonical.com>
import os
import stat
import sys
import time
import traceback
###############################################################################
class CriticalError(Exception):
    """This indicates a critical error (try_check exits with status 2)."""

    pass


class WarnError(Exception):
    """This indicates a warning condition (try_check exits with status 1)."""

    pass


class UnknownError(Exception):
    """This indicates an unknown error was encountered (try_check exits with status 3)."""

    pass
def try_check(function, *args, **kwargs):
    """Perform a check with error/warn/unknown handling.

    Runs ``function(*args, **kwargs)`` and translates the plugin
    exceptions into Nagios exit codes (unknown=3, critical=2, warn=1).
    Any other exception prints a traceback and exits 3.
    """

    def report_and_exit(problem, code):
        # Print the problem and terminate with the Nagios status code.
        print(problem)
        sys.exit(code)

    try:
        function(*args, **kwargs)
    except UnknownError as exc:
        report_and_exit(exc, 3)
    except CriticalError as exc:
        report_and_exit(exc, 2)
    except WarnError as exc:
        report_and_exit(exc, 1)
    except:  # noqa: E722
        # Unexpected failure: show which check raised, dump the
        # traceback, and report UNKNOWN (3) to Nagios.
        print("{} raised unknown exception '{}'".format(function, sys.exc_info()[0]))
        print("=" * 60)
        traceback.print_exc(file=sys.stdout)
        print("=" * 60)
        sys.exit(3)
###############################################################################
def check_file_freshness(filename, newer_than=600):
    """Check a file.

    It checks that the file exists, is readable and is newer than <n>
    seconds (where <n> defaults to 600).

    Raises CriticalError when any of those conditions fails, including
    a modification time that lies in the future.
    """
    # Existence and readability come first: a missing or unreadable
    # file is an immediate CRITICAL.
    if not os.path.exists(filename):
        raise CriticalError("%s: does not exist." % (filename))
    if os.access(filename, os.R_OK) == 0:
        raise CriticalError("%s: is not readable." % (filename))
    # Age check: the mtime must fall inside the freshness window.
    mtime = os.stat(filename)[stat.ST_MTIME]
    age = time.time() - mtime
    if age > newer_than:
        raise CriticalError(
            "%s: was last modified on %s and is too old (> %s "
            "seconds)." % (filename, time.ctime(mtime), newer_than)
        )
    # A negative age means the mtime is ahead of the current clock.
    if age < 0:
        raise CriticalError(
            "%s: was last modified on %s which is in the "
            "future." % (filename, time.ctime(mtime))
        )

View File

@ -0,0 +1,89 @@
#!/usr/bin/env python3
"""Check arp cache usage and alert."""
# -*- coding: us-ascii -*-
# Copyright (C) 2019 Canonical
# All rights reserved
import argparse
import os
from nagios_plugin3 import (
CriticalError,
UnknownError,
WarnError,
try_check,
)
def check_arp_cache(warn, crit):
    """Check the usage of arp cache against gc_thresh.

    Alerts when the number of arp entries exceeds a threshold of gc_thresh3.
    See https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt for
    full details.

    :param warn: integer, % level of hard limit at which to raise Warning
    :param crit: integer, % level of hard limit at which to raise Critical
    """
    arp_table = "/proc/net/arp"
    gc_thresh3_path = "/proc/sys/net/ipv4/neigh/default/gc_thresh3"

    # Both proc entries must exist before anything can be compared.
    if not os.path.exists(arp_table):
        raise UnknownError("No arp table found!")
    if not os.path.exists(gc_thresh3_path):
        raise UnknownError("sysctl entry net.ipv4.neigh.default.gc_thresh3 not found!")

    with open(gc_thresh3_path) as f:
        gc_thresh3 = int(f.read())
    with open(arp_table) as f:
        entries = f.read().count("\n") - 1  # remove header

    extra_info = "arp cache entries: {}".format(entries)

    # Compare the entry count against percentage thresholds of the
    # hard limit (gc_thresh3); critical takes precedence over warning.
    if entries >= gc_thresh3 * crit / 100:
        raise CriticalError(
            "CRITICAL: arp cache is more than {} of limit, {}".format(crit, extra_info)
        )
    if entries >= gc_thresh3 * warn / 100:
        raise WarnError(
            "WARNING: arp cache is more than {} of limit, {}".format(warn, extra_info)
        )
    print("OK: arp cache is healthy: {}".format(extra_info))
def parse_args():
    """Parse command-line options.

    :returns: argparse.Namespace with ``warn`` and ``crit`` integer
        percentages (defaults 60 and 80).
    """
    # NB: description fixed — it previously read "Check bond status",
    # a copy-paste from another plugin.
    parser = argparse.ArgumentParser(description="Check arp cache usage")
    # '%' must be escaped as '%%' in argparse help strings: help text is
    # %-formatted, so a bare '%' makes --help raise ValueError.
    parser.add_argument(
        "--warn",
        "-w",
        type=int,
        help="%% of gc_thresh3 to exceed for warning",
        default=60,
    )
    parser.add_argument(
        "--crit",
        "-c",
        type=int,
        help="%% of gc_thresh3 to exceed for critical",
        default=80,
    )
    args = parser.parse_args()
    return args
def main():
    """Parse args and check the arp cache."""
    args = parse_args()
    # try_check converts plugin exceptions into Nagios exit codes
    # (defined in nagios_plugin3).
    try_check(check_arp_cache, args.warn, args.crit)


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,79 @@
#!/bin/sh
# This file is managed by juju. Do not make local changes.
# Copyright (C) 2013, 2016 Canonical Ltd.
# Author: Haw Loeung <haw.loeung@canonical.com>
#         Paul Gear <paul.gear@canonical.com>
#
# Nagios check: alert when current conntrack entries
# (net.netfilter.nf_conntrack_count) exceeds a certain percentage of max
# (net.netfilter.nf_conntrack_max), to detect when we're about to fill
# the table up and start dropping packets.
#
# Usage: check_conntrack.sh [-w warningpercent] [-c criticalpercent]

set -eu

# Standard Nagios plugin exit codes.
STATE_OK=0
STATE_WARNING=1
STATE_CRITICAL=2
STATE_UNKNOWN=3

# No conntrack module loaded means there is nothing to monitor.
if ! lsmod | grep -q conntrack; then
    echo "OK: no conntrack modules present"
    exit $STATE_OK
fi

if ! [ -e /proc/sys/net/netfilter/nf_conntrack_max ]; then
    echo "OK: conntrack not available"
    exit $STATE_OK
fi

# sysctl prints "name = value"; the value is the third field.
max=$(sysctl net.netfilter.nf_conntrack_max 2>/dev/null | awk '{ print $3 }')
if [ -z "$max" ]; then
    echo "UNKNOWN: unable to retrieve value of net.netfilter.nf_conntrack_max"
    exit $STATE_UNKNOWN
fi

current=$(sysctl net.netfilter.nf_conntrack_count 2>/dev/null | awk '{ print $3 }')
if [ -z "$current" ]; then
    echo "UNKNOWN: unable to retrieve value of net.netfilter.nf_conntrack_count"
    exit $STATE_UNKNOWN
fi

# default thresholds (percent of nf_conntrack_max)
crit=90
warn=80

# parse command line
# getopt returns non-zero on a bad option; suspend -e around it so we
# can print a usage message instead of dying silently.
set +e
OPTIONS=$(getopt w:c: "$@")
if [ $? -ne 0 ]; then
    echo "Usage: $0 [-w warningpercent] [-c criticalpercent]" >&2
    echo " Check nf_conntrack_count against nf_conntrack_max" >&2
    exit $STATE_UNKNOWN
fi
set -e

# Re-set the positional parameters to getopt's normalised output
# (intentionally unquoted so it word-splits back into arguments).
set -- $OPTIONS
while true; do
    case "$1" in
        -w) warn=$2; shift 2 ;;
        -c) crit=$2; shift 2 ;;
        --) shift; break ;;
        *) break ;;
    esac
done

# Integer percentage in use, plus a Nagios perfdata string.
percent=$((current * 100 / max))
stats="| current=$current max=$max percent=$percent;$warn;$crit"

# Critical threshold first: it takes precedence over warning.
threshold=$((max * crit / 100))
if [ $current -gt $threshold ]; then
    echo "CRITICAL: conntrack table nearly full. $stats"
    exit $STATE_CRITICAL
fi

threshold=$((max * warn / 100))
if [ $current -gt $threshold ]; then
    echo "WARNING: conntrack table filling. $stats"
    exit $STATE_WARNING
fi

echo "OK: conntrack table normal $stats"
exit $STATE_OK

View File

@ -0,0 +1,58 @@
#!/usr/bin/env python3
"""Check CPU governor scaling and alert."""
import argparse
import os
import re
from nagios_plugin3 import (
CriticalError,
try_check,
)
def wanted_governor(governor):
    """Check /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor.

    Raises CriticalError listing every CPU whose scaling_governor does
    not contain the requested governor string.
    """
    entries = os.listdir("/sys/devices/system/cpu")
    cpu_pattern = re.compile("(cpu[0-9][0-9]*)")
    # Count the cpuN directories; governors are then read for cpu0..cpuN-1.
    numcpus = len([entry for entry in entries if cpu_pattern.match(entry)])
    mismatched = set()
    for cpu in range(numcpus):
        path = f"/sys/devices/system/cpu/cpu{cpu}/cpufreq/scaling_governor"
        with open(path) as f:
            current = f.readline().strip()
        if governor not in current:
            mismatched.add(f"CPU{cpu}")
    if mismatched:
        mismatched = ",".join(mismatched)
        raise CriticalError(f"CRITICAL: {mismatched} not set to {governor}")
    print(f"OK: All CPUs set to {governor}.")
def parse_args():
    """Parse command-line options.

    :returns: argparse.Namespace with a ``governor`` string
        (default "performance").
    """
    argp = argparse.ArgumentParser(description="Check CPU governor")
    argp.add_argument(
        "--governor",
        "-g",
        type=str,
        default="performance",
        help="The requested governor to check for each CPU",
    )
    return argp.parse_args()
def main():
    """Check the CPU governors."""
    args = parse_args()
    # try_check converts plugin exceptions into Nagios exit codes
    # (defined in nagios_plugin3).
    try_check(wanted_governor, args.governor)


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,189 @@
#!/usr/bin/perl
################################################################################
# #
# Copyright (C) 2011 Chad Columbus <ccolumbu@hotmail.com> #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA #
# #
################################################################################
use strict;
use Getopt::Std;
# Unbuffer stdout so nagios always sees the status line promptly.
$| = 1;
my %opts;
getopts('heronp:s:', \%opts);
my $VERSION = "Version 1.0";
my $AUTHOR = '(c) 2011 Chad Columbus <ccolumbu@hotmail.com>';
# Default values:
my $script_to_check;
my $pattern = 'is running';
my $cmd;
my $message;
my $error;
# Exit codes
my $STATE_OK = 0;
my $STATE_WARNING = 1;
my $STATE_CRITICAL = 2;
my $STATE_UNKNOWN = 3;
# Parse command line options: -h (or no options at all) prints help.
if ($opts{'h'} || scalar(%opts) == 0) {
&print_help();
exit($STATE_OK);
}
# Make sure script is provided:
if ($opts{'s'} eq '') {
# Script to run not provided
print "\nYou must provide a script to run. Example: -s /etc/init.d/httpd\n";
exit($STATE_UNKNOWN);
} else {
$script_to_check = $opts{'s'};
}
# Make sure only a-z, 0-9, /, _, and - are used in the script.
# (whitelist guard, since the path is interpolated into a shell command below)
if ($script_to_check =~ /[^a-z0-9\_\-\/\.]/) {
# Script contains illegal characters exit.
print "\nScript to check can only contain Letters, Numbers, Periods, Underscores, Hyphens, and/or Slashes\n";
exit($STATE_UNKNOWN);
}
# See if script is executable
if (! -x "$script_to_check") {
print "\nIt appears you can't execute $script_to_check, $!\n";
exit($STATE_UNKNOWN);
}
# If a pattern is provided use it:
if ($opts{'p'} ne '') {
$pattern = $opts{'p'};
}
# If -r run command via sudo as root:
if ($opts{'r'}) {
$cmd = "sudo -n $script_to_check status" . ' 2>&1';
} else {
$cmd = "$script_to_check status" . ' 2>&1';
}
# Run "<script> status" and capture combined stdout/stderr.
my $cmd_result = `$cmd`;
chomp($cmd_result);
if ($cmd_result =~ /sudo/i) {
# This means it could not run the sudo command
$message = "$script_to_check CRITICAL - Could not run: 'sudo -n $script_to_check status'. Result is $cmd_result";
$error = $STATE_UNKNOWN;
} else {
# Check exitstatus instead of output:
if ($opts{'e'} == 1) {
# $? still holds the backtick command's status here.
if ($? != 0) {
# error
$message = "$script_to_check CRITICAL - Exit code: $?\.";
if ($opts{'o'} == 0) {
$message .= " $cmd_result";
}
$error = $STATE_CRITICAL;
} else {
# success
$message = "$script_to_check OK - Exit code: $?\.";
if ($opts{'o'} == 0) {
$message .= " $cmd_result";
}
$error = $STATE_OK;
}
} else {
# Pattern mode: -n inverts the match (output must NOT contain pattern).
my $not_check = 1;
if ($opts{'n'} == 1) {
$not_check = 0;
}
if (($cmd_result =~ /$pattern/i) == $not_check) {
$message = "$script_to_check OK";
if ($opts{'o'} == 0) {
$message .= " - $cmd_result";
}
$error = $STATE_OK;
} else {
$message = "$script_to_check CRITICAL";
if ($opts{'o'} == 0) {
$message .= " - $cmd_result";
}
$error = $STATE_CRITICAL;
}
}
}
if ($message eq '') {
print "Error: program failed in an unknown way\n";
exit($STATE_UNKNOWN);
}
# Any non-zero state (WARNING/CRITICAL/UNKNOWN) exits with that state.
if ($error) {
print "$message\n";
exit($error);
} else {
# If we get here we are OK
print "$message\n";
exit($STATE_OK);
}
####################################
# Start Subs:
####################################
# Print the usage/help text (invoked with -h, or when no options are given).
# FIX: corrected user-visible typo "exitstaus" -> "exitstatus" in the -e text.
sub print_help() {
print << "EOF";
Check the output or exit status of a script.
$VERSION
$AUTHOR
Options:
-h
Print detailed help screen
-s
'FULL PATH TO SCRIPT' (required)
This is the script to run, the script is designed to run scripts in the
/etc/init.d dir (but can run any script) and will call the script with
a 'status' argument. So if you use another script make sure it will
work with /path/script status, example: /etc/init.d/httpd status
-e
This is the "exitstatus" flag, it means check the exit status
code instead of looking for a pattern in the output of the script.
-p 'REGEX'
This is a pattern to look for in the output of the script to confirm it
is running, default is 'is running', but not all init.d scripts output
(iptables), so you can specify an arbitrary pattern.
All patterns are case insensitive.
-n
This is the "NOT" flag, it means not the -p pattern, so if you want to
make sure the output of the script does NOT contain -p 'REGEX'
-r
This is the "ROOT" flag, it means run as root via sudo. You will need a
line in your /etc/sudoers file like:
nagios ALL=(root) NOPASSWD: /etc/init.d/* status
-o
This is the "SUPPRESS OUTPUT" flag. Some programs have a long output
(like iptables), this flag suppresses that output so it is not printed
as a part of the nagios message.
EOF
}

View File

@ -0,0 +1,133 @@
#!/usr/bin/env python3
"""Check lacp bonds and alert."""
# -*- coding: us-ascii -*-
# Copyright (C) 2017 Canonical
# All rights reserved
# Author: Alvaro Uria <alvaro.uria@canonical.com>
import argparse
import glob
import os
import sys
from nagios_plugin3 import CriticalError, WarnError, try_check
# LACPDU port states in binary
# LACPDU port states in binary
LACPDU_ACTIVE = 0b1  # 1 = Active, 0 = Passive
LACPDU_RATE = 0b10  # 1 = Short Timeout, 0 = Long Timeout
LACPDU_AGGREGATED = 0b100  # 1 = Yes, 0 = No (individual link)
LACPDU_SYNC = 0b1000  # 1 = In sync, 0 = Not in sync
LACPDU_COLLECT = 0b10000  # Mux is accepting traffic received on this port
LACPDU_DIST = 0b100000  # Mux is sending traffic using this port
LACPDU_DEFAULT = 0b1000000  # 1 = default settings, 0 = via LACP PDU
LACPDU_EXPIRED = 0b10000000  # In an expired state


def check_lacpdu_port(actor_port, partner_port):
    """Return message for LACPDU port state mismatch.

    XORs the actor and partner state bitmasks and describes each flag
    (of the subset we monitor) that differs; returns "" when the
    monitored flags all agree.
    """
    mismatch = int(actor_port) ^ int(partner_port)
    flag_messages = (
        (LACPDU_RATE, "lacp rate mismatch"),
        (LACPDU_AGGREGATED, "not aggregated"),
        (LACPDU_SYNC, "not in sync"),
        (LACPDU_COLLECT, "not collecting"),
    )
    return ", ".join(text for flag, text in flag_messages if mismatch & flag)
def check_lacp_bond(iface):
    """Check LACP bonds are correctly configured (AD Aggregator IDs match).

    Raises CriticalError if iface is not a bond, WarnError on any
    per-slave mismatch; prints an OK summary line otherwise.
    """
    bond_aggr_template = "/sys/class/net/{0}/bonding/ad_aggregator"
    bond_slaves_template = "/sys/class/net/{0}/bonding/slaves"
    bond_mode_template = "/sys/class/net/{0}/bonding/mode"
    slave_template = "/sys/class/net/{0}/bonding_slave/ad_aggregator_id"
    actor_port_state = "/sys/class/net/{0}/bonding_slave/ad_actor_oper_port_state"
    # NOTE(review): "partnet" looks like a typo for "partner"; the variable is
    # local, so renaming would be safe, but it is out of scope for a doc pass.
    partnet_port_state = "/sys/class/net/{0}/bonding_slave/ad_partner_oper_port_state"
    bond_aggr = bond_aggr_template.format(iface)
    bond_slaves = bond_slaves_template.format(iface)
    # The ad_aggregator file only exists for bonding interfaces.
    if os.path.exists(bond_aggr):
        with open(bond_mode_template.format(iface)) as fd:
            bond_mode = fd.readline()
        if "802.3ad" not in bond_mode:
            msg = "WARNING: {} is not in lacp mode".format(iface)
            raise WarnError(msg)
        with open(bond_aggr) as fd:
            bond_aggr_value = fd.readline().strip()
        # d_bond maps iface/slave name -> aggregator id, for the OK summary.
        d_bond = {iface: bond_aggr_value}
        with open(bond_slaves) as fd:
            slaves = fd.readline().strip().split(" ")
        for slave in slaves:
            # Check aggregator ID
            with open(slave_template.format(slave)) as fd:
                slave_aggr_value = fd.readline().strip()
            d_bond[slave] = slave_aggr_value
            if slave_aggr_value != bond_aggr_value:
                # If we can report then only 1/2 the bond is down
                msg = "WARNING: aggregator_id mismatch "
                msg += "({0}:{1} - {2}:{3})"
                msg = msg.format(iface, bond_aggr_value, slave, slave_aggr_value)
                raise WarnError(msg)
            # Check LACPDU port state
            with open(actor_port_state.format(slave)) as fd:
                actor_port_value = fd.readline().strip()
            with open(partnet_port_state.format(slave)) as fd:
                partner_port_value = fd.readline().strip()
            if actor_port_value != partner_port_value:
                res = check_lacpdu_port(actor_port_value, partner_port_value)
                # NOTE(review): the format places the mismatch description
                # (res) before the slave name, producing "(<desc>: <slave> -
                # ...)"; "(<slave>: <desc>)" may have been intended — confirm
                # before changing the alert text.
                msg = (
                    "WARNING: LACPDU port state mismatch "
                    "({0}: {1} - actor_port_state={2}, "
                    "partner_port_state={3})".format(
                        res, slave, actor_port_value, partner_port_value
                    )
                )
                raise WarnError(msg)
    else:
        msg = "CRITICAL: {} is not a bonding interface".format(iface)
        raise CriticalError(msg)
    extra_info = "{0}:{1}".format(iface, d_bond[iface])
    for k_iface, v_aggrid in d_bond.items():
        if k_iface == iface:
            continue
        extra_info += ", {0}:{1}".format(k_iface, v_aggrid)
    print("OK: bond config is healthy: {}".format(extra_info))
def parse_args():
    """Parse command-line options.

    Exits with status 1 (printing the available bond ifaces) when no
    --iface was supplied.
    """
    parser = argparse.ArgumentParser(description="Check bond status")
    parser.add_argument("--iface", "-i", help="bond iface name")
    parsed = parser.parse_args()
    if parsed.iface:
        return parsed
    candidates = map(os.path.basename, glob.glob("/sys/class/net/bond?"))
    print(
        "UNKNOWN: Please specify one of these bond "
        "ifaces: {}".format(",".join(candidates))
    )
    sys.exit(1)
def main():
    """Parse args and check the lacp bonds."""
    # try_check converts raised Critical/Warn errors into exit codes.
    try_check(check_lacp_bond, parse_args().iface)


if __name__ == "__main__":
    main()

412
nrpe/files/plugins/check_mem.pl Executable file
View File

@ -0,0 +1,412 @@
#!/usr/bin/perl -w
# Heavily based on the script from:
# check_mem.pl Copyright (C) 2000 Dan Larsson <dl@tyfon.net>
# heavily modified by
# Justin Ellison <justin@techadvise.com>
#
# The MIT License (MIT)
# Copyright (c) 2011 justin@techadvise.com
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
# OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Tell Perl what we need to use
use strict;
use Getopt::Std;
#TODO - Convert to Nagios::Plugin
#TODO - Use an alarm
# Predefined exit codes for Nagios
# Option flags (populated by getopts in init()) and the nagios exit-code map.
use vars qw($opt_c $opt_f $opt_u $opt_w $opt_C $opt_v $opt_h %exit_codes);
%exit_codes = ('UNKNOWN' , 3,
               'OK' , 0,
               'WARNING' , 1,
               'CRITICAL', 2,
               );
# Get our variables, do our checking:
init();
# Get the numbers:
my ($free_memory_kb,$used_memory_kb,$caches_kb,$hugepages_kb) = get_memory_info();
print "$free_memory_kb Free\n$used_memory_kb Used\n$caches_kb Cache\n" if ($opt_v);
print "$hugepages_kb Hugepages\n" if ($opt_v and $opt_h);
if ($opt_C) { #Do we count caches as free?
    $used_memory_kb -= $caches_kb;
    $free_memory_kb += $caches_kb;
}
# -h: subtract hugepage reservations from the "used" figure.
if ($opt_h) {
    $used_memory_kb -= $hugepages_kb;
}
print "$used_memory_kb Used (after Hugepages)\n" if ($opt_v);
# Round to the nearest KB
$free_memory_kb = sprintf('%d',$free_memory_kb);
$used_memory_kb = sprintf('%d',$used_memory_kb);
$caches_kb = sprintf('%d',$caches_kb);
# Tell Nagios what we came up with
tell_nagios($used_memory_kb,$free_memory_kb,$caches_kb,$hugepages_kb);
# Compare the percentage free (-f) or used (-u) against the -w/-c thresholds
# and exit via finish() with the matching nagios state and perfdata string.
sub tell_nagios {
    my ($used,$free,$caches,$hugepages) = @_;
    # Calculate Total Memory
    my $total = $free + $used;
    print "$total Total\n" if ($opt_v);
    # Perfdata thresholds are absolute kB derived from the percent options.
    my $perf_warn;
    my $perf_crit;
    if ( $opt_u ) {
        $perf_warn = int(${total} * $opt_w / 100);
        $perf_crit = int(${total} * $opt_c / 100);
    } else {
        $perf_warn = int(${total} * ( 100 - $opt_w ) / 100);
        $perf_crit = int(${total} * ( 100 - $opt_c ) / 100);
    }
    my $perfdata = "|TOTAL=${total}KB;;;; USED=${used}KB;${perf_warn};${perf_crit};; FREE=${free}KB;;;; CACHES=${caches}KB;;;;";
    $perfdata .= " HUGEPAGES=${hugepages}KB;;;;" if ($opt_h);
    if ($opt_f) {
        # FREE mode: alert when the free percentage drops below thresholds.
        my $percent = sprintf "%.1f", ($free / $total * 100);
        if ($percent <= $opt_c) {
            finish("CRITICAL - $percent% ($free kB) free!$perfdata",$exit_codes{'CRITICAL'});
        }
        elsif ($percent <= $opt_w) {
            finish("WARNING - $percent% ($free kB) free!$perfdata",$exit_codes{'WARNING'});
        }
        else {
            finish("OK - $percent% ($free kB) free.$perfdata",$exit_codes{'OK'});
        }
    }
    elsif ($opt_u) {
        # USED mode: alert when the used percentage rises above thresholds.
        my $percent = sprintf "%.1f", ($used / $total * 100);
        if ($percent >= $opt_c) {
            finish("CRITICAL - $percent% ($used kB) used!$perfdata",$exit_codes{'CRITICAL'});
        }
        elsif ($percent >= $opt_w) {
            finish("WARNING - $percent% ($used kB) used!$perfdata",$exit_codes{'WARNING'});
        }
        else {
            finish("OK - $percent% ($used kB) used.$perfdata",$exit_codes{'OK'});
        }
    }
}
# Show usage
# Print usage text and exit with the UNKNOWN state.
sub usage() {
    print "\ncheck_mem.pl v1.0 - Nagios Plugin\n\n";
    print "usage:\n";
    print " check_mem.pl -<f|u> -w <warnlevel> -c <critlevel>\n\n";
    print "options:\n";
    print " -f Check FREE memory\n";
    print " -u Check USED memory\n";
    print " -C Count OS caches as FREE memory\n";
    print " -h Remove hugepages from the total memory count\n";
    print " -w PERCENT Percent free/used when to warn\n";
    print " -c PERCENT Percent free/used when critical\n";
    print "\nCopyright (C) 2000 Dan Larsson <dl\@tyfon.net>\n";
    print "check_mem.pl comes with absolutely NO WARRANTY either implied or explicit\n";
    print "This program is licensed under the terms of the\n";
    print "MIT License (check source code for details)\n";
    exit $exit_codes{'UNKNOWN'};
}
# Gather memory figures for the current platform and return the 4-tuple
# ($free_memory_kb, $used_memory_kb, $caches_kb, $hugepages_kb), all in kB.
# Dispatches on the output of `uname -a`; every branch fills the same scalars.
sub get_memory_info {
    my $used_memory_kb = 0;
    my $free_memory_kb = 0;
    my $total_memory_kb = 0;
    my $caches_kb = 0;
    my $hugepages_nr = 0;
    my $hugepages_size = 0;
    my $hugepages_kb = 0;
    my $uname;
    if ( -e '/usr/bin/uname') {
        $uname = `/usr/bin/uname -a`;
    }
    elsif ( -e '/bin/uname') {
        $uname = `/bin/uname -a`;
    }
    else {
        die "Unable to find uname in /usr/bin or /bin!\n";
    }
    print "uname returns $uname" if ($opt_v);
    if ( $uname =~ /Linux/ ) {
        # Linux: parse /proc/meminfo line by line.
        my @meminfo = `/bin/cat /proc/meminfo`;
        foreach (@meminfo) {
            chomp;
            if (/^Mem(Total|Free):\s+(\d+) kB/) {
                my $counter_name = $1;
                if ($counter_name eq 'Free') {
                    $free_memory_kb = $2;
                }
                elsif ($counter_name eq 'Total') {
                    $total_memory_kb = $2;
                }
            }
            elsif (/^MemAvailable:\s+(\d+) kB/) {
                $caches_kb += $1;
            }
            elsif (/^(Buffers|Cached|SReclaimable):\s+(\d+) kB/) {
                $caches_kb += $2;
            }
            elsif (/^Shmem:\s+(\d+) kB/) {
                # Shared memory is reported inside Cached but is not freeable.
                $caches_kb -= $1;
            }
            # These variables will most likely be overwritten once we look into
            # /sys/kernel/mm/hugepages, unless we are running on linux <2.6.27
            # and have to rely on them
            elsif (/^HugePages_Total:\s+(\d+)/) {
                $hugepages_nr = $1;
            }
            elsif (/^Hugepagesize:\s+(\d+) kB/) {
                $hugepages_size = $1;
            }
        }
        $hugepages_kb = $hugepages_nr * $hugepages_size;
        $used_memory_kb = $total_memory_kb - $free_memory_kb;
        # Read hugepages info from the newer sysfs interface if available
        my $hugepages_sysfs_dir = '/sys/kernel/mm/hugepages';
        if ( -d $hugepages_sysfs_dir ) {
            # Reset what we read from /proc/meminfo
            $hugepages_kb = 0;
            opendir(my $dh, $hugepages_sysfs_dir)
                || die "Can't open $hugepages_sysfs_dir: $!";
            while (my $entry = readdir $dh) {
                # One subdir per page size, e.g. "hugepages-2048kB".
                if ($entry =~ /^hugepages-(\d+)kB/) {
                    $hugepages_size = $1;
                    my $hugepages_nr_file = "$hugepages_sysfs_dir/$entry/nr_hugepages";
                    open(my $fh, '<', $hugepages_nr_file)
                        || die "Can't open $hugepages_nr_file for reading: $!";
                    $hugepages_nr = <$fh>;
                    close($fh);
                    $hugepages_kb += $hugepages_nr * $hugepages_size;
                }
            }
            closedir($dh);
        }
    }
    elsif ( $uname =~ /HP-UX/ ) {
        # HP-UX, thanks to Christoph Fürstaller
        my @meminfo = `/usr/bin/sudo /usr/local/bin/kmeminfo`;
        foreach (@meminfo) {
            chomp;
            if (/^Physical memory\s\s+=\s+(\d+)\s+(\d+.\d)g/) {
                $total_memory_kb = ($2 * 1024 * 1024);
            }
            elsif (/^Free memory\s\s+=\s+(\d+)\s+(\d+.\d)g/) {
                $free_memory_kb = ($2 * 1024 * 1024);
            }
        }
        $used_memory_kb = $total_memory_kb - $free_memory_kb;
    }
    elsif ( $uname =~ /FreeBSD/ ) {
        # The FreeBSD case. 2013-03-19 www.claudiokuenzler.com
        # free mem = Inactive*Page Size + Cache*Page Size + Free*Page Size
        my $pagesize = `sysctl vm.stats.vm.v_page_size`;
        $pagesize =~ s/[^0-9]//g;
        my $mem_inactive = 0;
        my $mem_cache = 0;
        my $mem_free = 0;
        my $mem_total = 0;
        my $free_memory = 0;
        my @meminfo = `/sbin/sysctl vm.stats.vm`;
        foreach (@meminfo) {
            chomp;
            if (/^vm.stats.vm.v_inactive_count:\s+(\d+)/) {
                $mem_inactive = ($1 * $pagesize);
            }
            elsif (/^vm.stats.vm.v_cache_count:\s+(\d+)/) {
                $mem_cache = ($1 * $pagesize);
            }
            elsif (/^vm.stats.vm.v_free_count:\s+(\d+)/) {
                $mem_free = ($1 * $pagesize);
            }
            elsif (/^vm.stats.vm.v_page_count:\s+(\d+)/) {
                $mem_total = ($1 * $pagesize);
            }
        }
        $free_memory = $mem_inactive + $mem_cache + $mem_free;
        $free_memory_kb = ( $free_memory / 1024);
        $total_memory_kb = ( $mem_total / 1024);
        $used_memory_kb = $total_memory_kb - $free_memory_kb;
        $caches_kb = ($mem_cache / 1024);
    }
    elsif ( $uname =~ /joyent/ ) {
        # The SmartOS case. 2014-01-10 www.claudiokuenzler.com
        # free mem = pagesfree * pagesize
        my $pagesize = `pagesize`;
        my $phys_pages = `kstat -p unix:0:system_pages:pagestotal | awk '{print \$NF}'`;
        my $free_pages = `kstat -p unix:0:system_pages:pagesfree | awk '{print \$NF}'`;
        my $arc_size = `kstat -p zfs:0:arcstats:size | awk '{print \$NF}'`;
        my $arc_size_kb = $arc_size / 1024;
        print "Pagesize is $pagesize" if ($opt_v);
        print "Total pages is $phys_pages" if ($opt_v);
        print "Free pages is $free_pages" if ($opt_v);
        print "Arc size is $arc_size" if ($opt_v);
        # Count the ZFS ARC as reclaimable cache.
        $caches_kb += $arc_size_kb;
        $total_memory_kb = $phys_pages * $pagesize / 1024;
        $free_memory_kb = $free_pages * $pagesize / 1024;
        $used_memory_kb = $total_memory_kb - $free_memory_kb;
    }
    elsif ( $uname =~ /SunOS/ ) {
        eval "use Sun::Solaris::Kstat";
        if ($@) { #Kstat not available
            if ($opt_C) {
                print "You can't report on Solaris caches without Sun::Solaris::Kstat available!\n";
                exit $exit_codes{UNKNOWN};
            }
            # Fall back to vmstat (last sample line) + prtconf for totals.
            my @vmstat = `/usr/bin/vmstat 1 2`;
            my $line;
            foreach (@vmstat) {
                chomp;
                $line = $_;
            }
            $free_memory_kb = (split(/ /,$line))[5] / 1024;
            my @prtconf = `/usr/sbin/prtconf`;
            foreach (@prtconf) {
                if (/^Memory size: (\d+) Megabytes/) {
                    $total_memory_kb = $1 * 1024;
                }
            }
            $used_memory_kb = $total_memory_kb - $free_memory_kb;
        }
        else { # We have kstat
            my $kstat = Sun::Solaris::Kstat->new();
            my $phys_pages = ${kstat}->{unix}->{0}->{system_pages}->{physmem};
            my $free_pages = ${kstat}->{unix}->{0}->{system_pages}->{freemem};
            # We probably should account for UFS caching here, but it's unclear
            # to me how to determine UFS's cache size. There's inode_cache,
            # and maybe the physmem variable in the system_pages module??
            # In the real world, it looks to be so small as not to really matter,
            # so we don't grab it. If someone can give me code that does this,
            # I'd be glad to put it in.
            my $arc_size = (exists ${kstat}->{zfs} && ${kstat}->{zfs}->{0}->{arcstats}->{size}) ?
                ${kstat}->{zfs}->{0}->{arcstats}->{size} / 1024
                : 0;
            $caches_kb += $arc_size;
            my $pagesize = `pagesize`;
            $total_memory_kb = $phys_pages * $pagesize / 1024;
            $free_memory_kb = $free_pages * $pagesize / 1024;
            $used_memory_kb = $total_memory_kb - $free_memory_kb;
        }
    }
    elsif ( $uname =~ /Darwin/ ) {
        # macOS: totals from sysctl, free/cache figures from vm_stat.
        $total_memory_kb = (split(/ /,`/usr/sbin/sysctl hw.memsize`))[1]/1024;
        my $pagesize = (split(/ /,`/usr/sbin/sysctl hw.pagesize`))[1];
        $caches_kb = 0;
        my @vm_stat = `/usr/bin/vm_stat`;
        foreach (@vm_stat) {
            chomp;
            if (/^(Pages free):\s+(\d+)\.$/) {
                $free_memory_kb = $2*$pagesize/1024;
            }
            # 'caching' concept works different on MACH
            # this should be a reasonable approximation
            elsif (/^Pages (inactive|purgable):\s+(\d+).$/) {
                $caches_kb += $2*$pagesize/1024;
            }
        }
        $used_memory_kb = $total_memory_kb - $free_memory_kb;
    }
    elsif ( $uname =~ /AIX/ ) {
        # AIX: vmstat -vh reports counts of 4 kB pages, hence the *4.
        my @meminfo = `/usr/bin/vmstat -vh`;
        foreach (@meminfo) {
            chomp;
            if (/^\s*([0-9.]+)\s+(.*)/) {
                my $counter_name = $2;
                if ($counter_name eq 'memory pages') {
                    $total_memory_kb = $1*4;
                }
                if ($counter_name eq 'free pages') {
                    $free_memory_kb = $1*4;
                }
                if ($counter_name eq 'file pages') {
                    $caches_kb = $1*4;
                }
                if ($counter_name eq 'Number of 4k page frames loaned') {
                    $free_memory_kb += $1*4;
                }
            }
        }
        $used_memory_kb = $total_memory_kb - $free_memory_kb;
    }
    else {
        # Unknown platform: generic vmstat fallback (no cache figure).
        if ($opt_C) {
            print "You can't report on $uname caches!\n";
            exit $exit_codes{UNKNOWN};
        }
        my $command_line = `vmstat | tail -1 | awk '{print \$4,\$5}'`;
        chomp $command_line;
        my @memlist = split(/ /, $command_line);
        # Define the calculating scalars
        $used_memory_kb = $memlist[0]/1024;
        $free_memory_kb = $memlist[1]/1024;
        $total_memory_kb = $used_memory_kb + $free_memory_kb;
    }
    return ($free_memory_kb,$used_memory_kb,$caches_kb,$hugepages_kb);
}
# Parse and validate command-line options; any problem prints a message and
# falls through to usage(), which exits with the UNKNOWN state.
sub init {
    # Get the options
    # NOTE(review): 'le' is a *string* comparison; numeric '<=' was probably
    # intended. It happens to behave for small argument counts ($#ARGV is -1
    # with no arguments), but it is fragile — confirm before changing.
    if ($#ARGV le 0) {
        &usage;
    }
    else {
        getopts('c:fuChvw:');
    }
    # Shortcircuit the switches
    if (!$opt_w or $opt_w == 0 or !$opt_c or $opt_c == 0) {
        print "*** You must define WARN and CRITICAL levels!\n";
        &usage;
    }
    elsif (!$opt_f and !$opt_u) {
        print "*** You must select to monitor either USED or FREE memory!\n";
        &usage;
    }
    # Check if levels are sane
    if ($opt_w <= $opt_c and $opt_f) {
        print "*** WARN level must not be less than CRITICAL when checking FREE memory!\n";
        &usage;
    }
    elsif ($opt_w >= $opt_c and $opt_u) {
        print "*** WARN level must not be greater than CRITICAL when checking USED memory!\n";
        &usage;
    }
}
# Print the status line and terminate with the given nagios state code.
sub finish {
    my ($msg,$state) = @_;
    print "$msg\n";
    exit $state;
}

View File

@ -0,0 +1,134 @@
#!/usr/bin/env python3
"""Check netlinks and alert."""
# -*- coding: us-ascii -*-
# Copyright (C) 2017 Canonical
# All rights reserved
# Author: Alvaro Uria <alvaro.uria@canonical.com>
#
# check_netlinks.py -i eth0 -o up -m 1500 -s 1000
import argparse
import glob
import os
import sys
from nagios_plugin3 import (
CriticalError,
WarnError,
try_check,
)
# Metrics that may be checked per interface ("speed" is skipped for
# bridges and the loopback device inside check_iface).
FILTER = ("operstate", "mtu", "speed")


def check_iface(iface, skiperror, crit_thr):
    """Return /sys/class/net/<iface>/<FILTER> values.

    Raises CriticalError when a metric differs from the target value in
    crit_thr, WarnError when the iface is missing (unless skiperror).
    NOTE(review): crit_thr is mutated in place (unchecked metrics become
    "n/a" and "iface" is added) — callers should not reuse the dict.
    """
    file_path = "/sys/class/net/{0}/{1}"
    # NOTE(review): "filter" shadows the builtin; harmless but worth renaming.
    filter = ["operstate", "mtu"]
    # Bridges and loopback have no meaningful "speed" attribute.
    if not os.path.exists(file_path.format(iface, "bridge")) and iface != "lo":
        filter.append("speed")
    for metric_key in filter:
        try:
            with open(file_path.format(iface, metric_key)) as fd:
                metric_value = fd.readline().strip()
        except FileNotFoundError:
            if not skiperror:
                raise WarnError("WARNING: {} iface does not exist".format(iface))
            return
        except OSError as e:
            # Reading "speed" on a down link yields EINVAL; tolerate it only
            # when the expected operstate is "down".
            if (
                metric_key == "speed"
                and "Invalid argument" in str(e)
                and crit_thr["operstate"] == "down"
            ):
                filter = [f for f in filter if f != "speed"]
                continue
            else:
                raise CriticalError(
                    "CRITICAL: {} ({} returns "
                    "invalid argument)".format(iface, metric_key)
                )
        if metric_key == "operstate" and metric_value != "up":
            if metric_value != crit_thr["operstate"]:
                raise CriticalError(
                    "CRITICAL: {} link state is {}".format(iface, metric_value)
                )
        if metric_value != crit_thr[metric_key]:
            raise CriticalError(
                "CRITICAL: {}/{} is {} (target: "
                "{})".format(iface, metric_key, metric_value, crit_thr[metric_key])
            )
    # Mark metrics that were not checked so the OK line shows "n/a".
    for metric in crit_thr:
        if metric not in filter:
            crit_thr[metric] = "n/a"
    crit_thr["iface"] = iface
    print(
        "OK: {iface} matches thresholds: "
        "o:{operstate}, m:{mtu}, s:{speed}".format(**crit_thr)
    )
def parse_args():
    """Parse command-line options.

    Exits with status 1 (listing available ifaces) when --iface is not
    supplied.
    """
    parser = argparse.ArgumentParser(description="check ifaces status")
    parser.add_argument(
        "--iface",
        "-i",
        type=str,
        help="interface to monitor; listed in /sys/class/net/*)",
    )
    parser.add_argument(
        "--skip-unfound-ifaces",
        "-q",
        default=False,
        action="store_true",
        help="ignores unfound ifaces; otherwise, alert will be triggered",
    )
    parser.add_argument(
        "--operstate",
        "-o",
        default="up",
        type=str,
        help="operstate: up, down, unknown (default: up)",
    )
    parser.add_argument(
        "--mtu", "-m", default="1500", type=str, help="mtu size (default: 1500)"
    )
    parser.add_argument(
        "--speed",
        "-s",
        default="10000",
        type=str,
        help="link speed in Mbps (default 10000)",
    )
    parsed = parser.parse_args()
    if parsed.iface:
        return parsed
    known = map(os.path.basename, glob.glob("/sys/class/net/*"))
    print(
        "UNKNOWN: Please specify one of these "
        "ifaces: {}".format(",".join(known))
    )
    sys.exit(1)
def main():
    """Parse args and check the netlinks."""
    options = parse_args()
    # Target values each metric must match exactly (compared as strings).
    thresholds = {
        "operstate": options.operstate.lower(),
        "mtu": options.mtu,
        "speed": options.speed,
    }
    try_check(check_iface, options.iface, options.skip_unfound_ifaces, thresholds)


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,52 @@
#!/bin/bash
# Copyright (c) 2014 Canonical, Ltd
# Author: Brad Marshall <brad.marshall@canonical.com>
# Checks if a network namespace is responding by doing an ip a in each one.
. /usr/lib/nagios/plugins/utils.sh
# Exit CRITICAL with the supplied message when the previous command failed.
# $1: exit status of the command; $2: error output to report.
check_ret_value() {
    RET=$1
    if [[ $RET -ne 0 ]];then
        echo "CRIT: $2"
        # BUG FIX: utils.sh defines STATE_CRITICAL, not STATE_CRIT; the old
        # name expanded to nothing, so a bare "exit" returned the status of
        # the preceding echo (0 = OK) on failure.
        exit $STATE_CRITICAL
    fi
}
# Smoke-test that a network namespace can be created and deleted; any
# failure aborts via check_ret_value with the command's combined output.
check_netns_create() {
    RET_VAL=$(ip netns add nrpe-check 2>&1)
    check_ret_value $? "$RET_VAL"
    RET_VAL=$(ip netns delete nrpe-check 2>&1)
    check_ret_value $? "$RET_VAL"
}
netnsok=()
netnscrit=()
# Run "ip a" inside every existing namespace (skipping our own probe ns).
for ns in $(ip netns list |awk '!/^nrpe-check$/ {print $1}'); do
    output=$(ip netns exec $ns ip a 2>/dev/null)
    err=$?
    if [ $err -eq 0 ]; then
        netnsok=("${netnsok[@]}" $ns)
    else
        netnscrit=("${netnscrit[@]}" $ns)
    fi
done
if [ ${#netnscrit[@]} -eq 0 ]; then
    if [ ${#netnsok[@]} -eq 0 ]; then
        # No namespaces exist at all: verify we can at least create one.
        check_netns_create
        echo "OK: no namespaces defined"
        exit $STATE_OK
    else
        echo "OK: ${netnsok[@]} are responding"
        exit $STATE_OK
    fi
else
    echo "CRIT: ${netnscrit[@]} aren't responding"
    # BUG FIX: STATE_CRIT is not defined by utils.sh (STATE_CRITICAL is), so
    # the script previously exited 0 here and never raised a critical alert.
    exit $STATE_CRITICAL
fi

View File

@ -0,0 +1,80 @@
#!/usr/bin/env python3
"""Check readonly filesystems and alert."""
# -*- coding: us-ascii -*-
# Copyright (C) 2020 Canonical
# All rights reserved
#
import argparse
from nagios_plugin3 import (
CriticalError,
UnknownError,
try_check,
)
# Mount-point prefixes that are expected to be read-only and never alert.
EXCLUDE = {"/snap/", "/sys/fs/cgroup"}


def check_ro_filesystem(excludes=""):
    """Loop /proc/mounts looking for readonly mounts.

    :param excludes: list of mount points to exclude from checks
    """
    # read /proc/mounts, add each line to a list
    try:
        with open("/proc/mounts") as fd:
            mounts = [mount.strip() for mount in fd.readlines()]
    except Exception as e:
        raise UnknownError("UNKNOWN: unable to read mounts with {}".format(e))
    exclude_mounts = EXCLUDE
    ro_filesystems = []
    # if excludes != "" and excludes is not None:
    if excludes:
        try:
            exclude_mounts = EXCLUDE.union(set(excludes.split(",")))
        except Exception as e:
            msg = "UNKNOWN: unable to read list of mounts to exclude {}".format(e)
            raise UnknownError(msg)
    for mount in mounts:
        # for each line in the list, split by space to a new list
        split_mount = mount.split()
        # if mount[1] matches EXCLUDE_FS then next, else check it's not readonly
        # NOTE(review): this is a *prefix* match (startswith), while the
        # --exclude help text promises substring matching — confirm which
        # behaviour is intended before changing either.
        if not any(
            split_mount[1].startswith(exclusion.strip()) for exclusion in exclude_mounts
        ):
            # Field 4 of /proc/mounts is the comma-separated option list.
            mount_options = split_mount[3].split(",")
            if "ro" in mount_options:
                ro_filesystems.append(split_mount[1])
    if len(ro_filesystems) > 0:
        msg = "CRITICAL: filesystem(s) {} readonly".format(",".join(ro_filesystems))
        raise CriticalError(msg)
    print("OK: no readonly filesystems found")
def parse_args():
    """Parse command-line options."""
    parser = argparse.ArgumentParser(description="Check for readonly filesystems")
    parser.add_argument(
        "--exclude",
        "-e",
        type=str,
        default="",
        help="""Comma separated list of mount points to exclude from checks for readonly filesystem.
        Can be just a substring of the whole mount point.""",
    )
    return parser.parse_args()
def main():
    """Parse args and check the readonly filesystem."""
    options = parse_args()
    # try_check maps raised Critical/Unknown errors to nagios exit codes.
    try_check(check_ro_filesystem, options.exclude)


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,71 @@
#!/usr/bin/env python3
"""Read file and return nagios status based on its content."""
# --------------------------------------------------------
# This file is managed by Juju
# --------------------------------------------------------
#
# Copyright 2014 Canonical Ltd.
#
# Author: Jacek Nykis <jacek.nykis@canonical.com>
#
import re
import nagios_plugin3 as nagios_plugin
def parse_args():
    """Parse command-line options."""
    import argparse

    parser = argparse.ArgumentParser(
        description="Read file and return nagios status based on its content",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("-f", "--status-file", required=True, help="Status file path")
    # The four severity options share the same shape; build them from a table.
    for short_flag, long_flag, default, label in (
        ("-c", "--critical-text", "CRITICAL", "critical"),
        ("-w", "--warning-text", "WARNING", "warning"),
        ("-o", "--ok-text", "OK", "OK"),
        ("-u", "--unknown-text", "UNKNOWN", "unknown"),
    ):
        parser.add_argument(
            short_flag,
            long_flag,
            default=default,
            help="String indicating %s status" % label,
        )
    return parser.parse_args()
def check_status(args):
    """Return nagios status.

    Raises Critical/Warn/Unknown errors when a line of the status file
    matches the corresponding pattern; other lines are printed as-is.
    """
    # Treat a status file older than 12 hours (43200 s) as stale.
    nagios_plugin.check_file_freshness(args.status_file, 43200)
    with open(args.status_file, "r") as f:
        content = [line.strip() for line in f.readlines()]
    for line in content:
        # The *-text options are regexes, searched anywhere in the line.
        if re.search(args.critical_text, line):
            raise nagios_plugin.CriticalError(line)
        elif re.search(args.warning_text, line):
            raise nagios_plugin.WarnError(line)
        elif re.search(args.unknown_text, line):
            raise nagios_plugin.UnknownError(line)
        else:
            # NOTE(review): args.ok_text is accepted on the CLI but never
            # consulted here; any non-matching line falls through to print.
            print(line)
if __name__ == "__main__":
    # try_check converts raised Critical/Warn/Unknown errors into the
    # corresponding nagios exit codes.
    args = parse_args()
    nagios_plugin.try_check(check_status, args)

View File

@ -0,0 +1,78 @@
#!/bin/bash
# This script checks swap pageouts and reports number of kbytes moved
# from physical ram to swap space in a given number of seconds
#
# Usage: "check_swap_activity -i interval -w warning_kbyts -c critical_kbytes
#
#
set -eu
. /usr/lib/nagios/plugins/utils.sh
# Print usage; callers exit 3 (UNKNOWN) after invoking this.
help() {
cat << EOH
usage: $0 [ -i ## ] -w ## -c ##
Measures page-outs to swap over a given interval, by default 5 seconds.
-i time in seconds to monitor (defaults to 5 seconds)
-w warning Level in kbytes
-c critical Level in kbytes
EOH
}
# Used only to pluralise the human-readable message.
TIMEWORD=seconds
WARN_LVL=
CRIT_LVL=
INTERVAL=5
## FETCH ARGUMENTS
while getopts "i:w:c:" OPTION; do
    case "${OPTION}" in
        i)
            INTERVAL=${OPTARG}
            if [ $INTERVAL -eq 1 ]; then
                TIMEWORD=second
            fi
            ;;
        w)
            WARN_LVL=${OPTARG}
            ;;
        c)
            CRIT_LVL=${OPTARG}
            ;;
        ?)
            help
            exit 3
            ;;
    esac
done
# Both thresholds are mandatory.
if [ -z ${WARN_LVL} ] || [ -z ${CRIT_LVL} ] ; then
    help
    exit 3
fi
## Get swap pageouts over $INTERVAL
# vmstat's second sample covers the interval; assumes field 8 is the "so"
# (swapped-out) column of `vmstat -w` — verify against the local vmstat.
PAGEOUTS=$(vmstat -w ${INTERVAL} 2 | tail -n 1 | awk '{print $8}')
SUMMARY="| swapout_size=${PAGEOUTS}KB;${WARN_LVL};${CRIT_LVL};"
if [ ${PAGEOUTS} -lt ${WARN_LVL} ]; then
    # pageouts are below threshold
    echo "OK - ${PAGEOUTS} kb swapped out in last ${INTERVAL} ${TIMEWORD} $SUMMARY"
    exit $STATE_OK
elif [ ${PAGEOUTS} -ge ${CRIT_LVL} ]; then
    ## SWAP IS IN CRITICAL STATE
    echo "CRITICAL - ${PAGEOUTS} kb swapped out in last ${INTERVAL} ${TIMEWORD} $SUMMARY"
    exit $STATE_CRITICAL
elif [ ${PAGEOUTS} -ge ${WARN_LVL} ] && [ ${PAGEOUTS} -lt ${CRIT_LVL} ]; then
    ## SWAP IS IN WARNING STATE
    echo "WARNING - ${PAGEOUTS} kb swapped out in last ${INTERVAL} ${TIMEWORD} $SUMMARY"
    exit $STATE_WARNING
else
    # Non-numeric PAGEOUTS (vmstat failure) lands here via test errors.
    echo "CRITICAL: Failure to process pageout information $SUMMARY"
    exit $STATE_UNKNOWN
fi

View File

@ -0,0 +1,48 @@
#!/usr/bin/python3
"""Check systemd service and alert."""
#
# Copyright 2016 Canonical Ltd
#
# Author: Brad Marshall <brad.marshall@canonical.com>
#
# Based on check_upstart_job and
# https://zignar.net/2014/09/08/getting-started-with-dbus-python-systemd/
#
import sys
import dbus
# Unit to query, given as the first CLI argument ("ssh" -> "ssh.service").
service_arg = sys.argv[1]
service_name = "%s.service" % service_arg
try:
    bus = dbus.SystemBus()
    systemd = bus.get_object("org.freedesktop.systemd1", "/org/freedesktop/systemd1")
    manager = dbus.Interface(systemd, dbus_interface="org.freedesktop.systemd1.Manager")
    try:
        # LoadUnit returns the unit's object path even if it is inactive.
        service_unit = manager.LoadUnit(service_name)
        service_proxy = bus.get_object("org.freedesktop.systemd1", str(service_unit))
        # FIX: dropped an unused dbus.Interface(service_proxy, ...) local
        # ("service") that was constructed and never referenced.
        # Read the unit's SubState property; "running" means healthy.
        service_res = service_proxy.Get(
            "org.freedesktop.systemd1.Unit",
            "SubState",
            dbus_interface="org.freedesktop.DBus.Properties",
        )
        if service_res == "running":
            print("OK: %s is running" % service_name)
            sys.exit(0)
        else:
            print("CRITICAL: %s is not running" % service_name)
            sys.exit(2)
    except dbus.DBusException:
        print("CRITICAL: unable to find %s in systemd" % service_name)
        sys.exit(2)
except dbus.DBusException:
    print("CRITICAL: unable to connect to system for %s" % service_name)
    sys.exit(2)

View File

@ -0,0 +1,72 @@
#!/usr/bin/python
#
# Copyright 2012, 2013 Canonical Ltd.
#
# Author: Paul Collins <paul.collins@canonical.com>
#
# Based on http://www.eurion.net/python-snippets/snippet/Upstart%20service%20status.html
#
import sys
import dbus
class Upstart(object):
    """Thin wrapper around Upstart's D-Bus API on the system bus."""

    def __init__(self):
        self._bus = dbus.SystemBus()
        self._upstart = self._bus.get_object('com.ubuntu.Upstart',
                                             '/com/ubuntu/Upstart')

    def get_job(self, job_name):
        # Resolve a job name to its D-Bus job object.
        path = self._upstart.GetJobByName(job_name,
                                          dbus_interface='com.ubuntu.Upstart0_6')
        return self._bus.get_object('com.ubuntu.Upstart', path)

    def get_properties(self, job):
        # Properties of the job's single instance (GetInstance raises over
        # D-Bus when the job is multi-instance).
        path = job.GetInstance([], dbus_interface='com.ubuntu.Upstart0_6.Job')
        instance = self._bus.get_object('com.ubuntu.Upstart', path)
        return instance.GetAll('com.ubuntu.Upstart0_6.Instance',
                               dbus_interface=dbus.PROPERTIES_IFACE)

    def get_job_instances(self, job_name):
        # All instance objects of a (possibly multi-instance) job.
        job = self.get_job(job_name)
        paths = job.GetAllInstances([], dbus_interface='com.ubuntu.Upstart0_6.Job')
        return [self._bus.get_object('com.ubuntu.Upstart', path) for path in paths]

    def get_job_instance_properties(self, job):
        return job.GetAll('com.ubuntu.Upstart0_6.Instance',
                          dbus_interface=dbus.PROPERTIES_IFACE)
try:
upstart = Upstart()
try:
job = upstart.get_job(sys.argv[1])
props = upstart.get_properties(job)
if props['state'] == 'running':
print 'OK: %s is running' % sys.argv[1]
sys.exit(0)
else:
print 'CRITICAL: %s is not running' % sys.argv[1]
sys.exit(2)
except dbus.DBusException as e:
instances = upstart.get_job_instances(sys.argv[1])
propses = [upstart.get_job_instance_properties(instance) for instance in instances]
states = dict([(props['name'], props['state']) for props in propses])
if len(states) != states.values().count('running'):
not_running = []
for name in states.keys():
if states[name] != 'running':
not_running.append(name)
print 'CRITICAL: %d instances of %s not running: %s' % \
(len(not_running), sys.argv[1], not_running.join(', '))
sys.exit(2)
else:
print 'OK: %d instances of %s running' % (len(states), sys.argv[1])
except dbus.DBusException as e:
print 'CRITICAL: failed to get properties of \'%s\' from upstart' % sys.argv[1]
sys.exit(2)

View File

@ -0,0 +1,47 @@
#!/usr/bin/env python3
"""Check for xfs errors and alert."""
#
# Copyright 2017 Canonical Ltd
#
# Author: Jill Rouleau <jill.rouleau@canonical.com>
#
# Check for xfs errors and alert
#
import re
import subprocess
import sys
from datetime import datetime, timedelta

# error messages commonly seen in dmesg on xfs errors
raw_xfs_errors = [
    "XFS_WANT_CORRUPTED_",
    "xfs_error_report",
    "corruption detected at xfs_",
    "Unmount and run xfs_repair",
]
xfs_regex = [re.compile(i) for i in raw_xfs_errors]
# nagios can't read from kern.log, so we look at dmesg - this does present
# a known limitation if a node is rebooted or dmesg is otherwise cleared.
# BUG FIX: subprocess.getoutput() expects a command *string*; the original
# passed a one-element list, which only worked by accident of shell=True.
log_lines = subprocess.getoutput("dmesg -T").split("\n")
# Keep any line that matches one of the known XFS error patterns.
err_results = [line for line in log_lines for rgx in xfs_regex if re.search(rgx, line)]
# Look for errors within the last N minutes, specified in the check definition
check_delta = int(sys.argv[1])
# dmesg -T formatted timestamps are inside [], so we need to add them
datetime_delta = datetime.now() - timedelta(minutes=check_delta)
# i[1:25] strips the surrounding brackets from the "[%c]" timestamp prefix.
recent_logs = [
    i for i in err_results if datetime.strptime(i[1:25], "%c") >= datetime_delta
]
if recent_logs:
    print("CRITICAL: Recent XFS errors in kern.log." + "\n" + "{}".format(recent_logs))
    sys.exit(2)
else:
    print("OK")
    sys.exit(0)

13
nrpe/files/rsyncd.conf Normal file
View File

@ -0,0 +1,13 @@
#------------------------------------------------
# This file is juju managed
#------------------------------------------------
uid = nobody
gid = nogroup
pid file = /var/run/rsyncd.pid
syslog facility = daemon
socket options = SO_KEEPALIVE
timeout = 7200
&merge /etc/rsync-juju.d
&include /etc/rsync-juju.d

1
nrpe/hooks/charmhelpers Symbolic link
View File

@ -0,0 +1 @@
../mod/charmhelpers/charmhelpers

1
nrpe/hooks/config-changed Symbolic link
View File

@ -0,0 +1 @@
nrpe_hooks.py

View File

@ -0,0 +1 @@
nrpe_hooks.py

View File

@ -0,0 +1 @@
nrpe_hooks.py

1
nrpe/hooks/install Symbolic link
View File

@ -0,0 +1 @@
nrpe_hooks.py

View File

@ -0,0 +1 @@
nrpe_hooks.py

View File

@ -0,0 +1 @@
nrpe_hooks.py

View File

@ -0,0 +1 @@
nrpe_hooks.py

View File

@ -0,0 +1 @@
nrpe_hooks.py

View File

@ -0,0 +1 @@
nrpe_hooks.py

View File

@ -0,0 +1 @@
nrpe_hooks.py

696
nrpe/hooks/nrpe_helpers.py Normal file
View File

@ -0,0 +1,696 @@
"""Nrpe helpers module."""
import glob
import ipaddress
import os
import socket
import subprocess
from charmhelpers.core import hookenv
from charmhelpers.core.host import is_container
from charmhelpers.core.services import helpers
import yaml
NETLINKS_ERROR = False
class InvalidCustomCheckException(Exception):
    """Raised when a user-supplied custom nrpe check definition is invalid."""
class Monitors(dict):
    """Aggregate of checks that a remote Nagios server can query over nrpe."""

    def __init__(self, version="0.3"):
        """Initialise the payload with an empty remote/nrpe section."""
        self["monitors"] = {"remote": {"nrpe": {}}}
        self["version"] = version

    def add_monitors(self, mdict, monitor_label="default"):
        """Merge the monitor definitions in ``mdict`` into this payload."""
        if not mdict or not mdict.get("monitors"):
            return
        # Remote checks merge per check type; an existing non-empty type is
        # updated in place, otherwise the whole definition is adopted.
        for checktype in mdict["monitors"].get("remote", []):
            details = mdict["monitors"]["remote"][checktype]
            existing = self["monitors"]["remote"].get(checktype)
            if existing:
                existing.update(details)
            else:
                self["monitors"]["remote"][checktype] = details
        # Local checks are translated into remote nrpe stubs.
        for checktype in mdict["monitors"].get("local", []):
            converted = self.convert_local_checks(
                mdict["monitors"]["local"],
                monitor_label,
            )
            self["monitors"]["remote"]["nrpe"].update(converted)

    def add_nrpe_check(self, check_name, command):
        """Register a single check under the remote nrpe monitors."""
        self["monitors"]["remote"]["nrpe"][check_name] = command

    def convert_local_checks(self, monitors, monitor_src):
        """Convert check from local checks to remote nrpe checks.

        monitors -- monitor dict
        monitor_src -- Monitor source principal, subordinate or user
        """
        converted = {}
        for checktype, checks in monitors.items():
            for checkname in checks:
                try:
                    ctxt = NRPECheckCtxt(
                        checktype,
                        checks[checkname],
                        monitor_src,
                    )
                    converted[ctxt["cmd_name"]] = {"command": ctxt["cmd_name"]}
                except InvalidCustomCheckException as e:
                    # Skip the broken check but keep processing the rest.
                    hookenv.log(
                        "Error encountered configuring local check "
                        '"{check}": {err}'.format(check=checkname, err=str(e)),
                        hookenv.ERROR,
                    )
        return converted
def get_ingress_address(binding, external=False):
    """Get ingress IP address for a binding.

    Returns a local IP address for incoming requests to NRPE.

    :param binding: name of the binding, e.g. 'monitors'
    :param external: bool, if True return the public address if charm config requests
        otherwise return the local address which would be used for incoming
        nrpe requests.
    """
    # using network-get to retrieve the address details if available.
    hookenv.log("Getting ingress IP address for binding %s" % binding)
    if hookenv.config("nagios_address_type").lower() == "public" and external:
        return hookenv.unit_get("public-address")
    ip_address = None
    try:
        network_info = hookenv.network_get(binding)
        if network_info is not None and "ingress-addresses" in network_info:
            try:
                # NOTE(review): the guard above checks "ingress-addresses" but
                # the value read here comes from "bind-addresses" -- confirm
                # this mismatch is intentional (bind address is what NRPE
                # would listen on).
                ip_address = network_info["bind-addresses"][0]["addresses"][0][
                    "address"
                ]
                hookenv.log("Using ingress-addresses, found %s" % ip_address)
            except KeyError:
                # Expected keys missing: fall back to the older
                # network_get_primary_address API.
                hookenv.log("Using primary-addresses")
                ip_address = hookenv.network_get_primary_address(binding)
    except (NotImplementedError, FileNotFoundError) as e:
        # network-get unavailable on this Juju version; ip_address stays None.
        hookenv.log(
            "Unable to determine inbound IP address for binding {} with {}".format(
                binding, e
            ),
            level=hookenv.ERROR,
        )
    return ip_address
class MonitorsRelation(helpers.RelationContext):
    """Define a monitors relation."""

    name = "monitors"
    interface = "monitors"

    def __init__(self, *args, **kwargs):
        """Build superclass and principal relation."""
        self.principal_relation = PrincipalRelation()
        super(MonitorsRelation, self).__init__(*args, **kwargs)

    def is_ready(self):
        """Return true if the principal relation is ready."""
        return self.principal_relation.is_ready()

    def get_subordinate_monitors(self):
        """Return default monitors defined by this charm."""
        monitors = Monitors()
        for check in SubordinateCheckDefinitions()["checks"]:
            # Checks with empty cmd_params are disabled and are not
            # advertised to Nagios.
            if check["cmd_params"]:
                monitors.add_nrpe_check(check["cmd_name"], check["cmd_name"])
        return monitors

    def get_user_defined_monitors(self):
        """Return monitors defined by monitors config option."""
        monitors = Monitors()
        monitors.add_monitors(yaml.safe_load(hookenv.config("monitors")), "user")
        return monitors

    def get_principal_monitors(self):
        """Return monitors passed by relation with principal."""
        return self.principal_relation.get_monitors()

    def get_monitor_dicts(self):
        """Return all monitor dicts, keyed by their source."""
        monitor_dicts = {
            "principal": self.get_principal_monitors(),
            "subordinate": self.get_subordinate_monitors(),
            "user": self.get_user_defined_monitors(),
        }
        return monitor_dicts

    def get_monitors(self):
        """Return monitor dict.

        All monitors merged together and local
        monitors converted to remote nrpe checks.
        """
        all_monitors = Monitors()
        monitors = [
            self.get_principal_monitors(),
            self.get_subordinate_monitors(),
            self.get_user_defined_monitors(),
        ]
        for mon in monitors:
            all_monitors.add_monitors(mon)
        return all_monitors

    def egress_subnets(self, relation_data):
        """Return egress subnets.

        This behaves the same as charmhelpers.core.hookenv.egress_subnets().
        If it can't determine the egress subnets it will fall back to
        ingress-address or finally private-address.
        """
        if "egress-subnets" in relation_data:
            return relation_data["egress-subnets"]
        if "ingress-address" in relation_data:
            return relation_data["ingress-address"]
        return relation_data["private-address"]

    def get_data(self):
        """Get relation data and derive the allowed-hosts list from it."""
        super(MonitorsRelation, self).get_data()
        if not hookenv.relation_ids(self.name):
            return
        # self['monitors'] comes from the superclass helpers.RelationContext
        # and contains relation data for each 'monitors' relation (to/from
        # Nagios).
        subnets = [self.egress_subnets(info) for info in self["monitors"]]
        self["monitor_allowed_hosts"] = ",".join(subnets)

    def provide_data(self):
        """Return relation info sent back to Nagios."""
        # get the address to send to Nagios for host definition
        address = get_ingress_address("monitors", external=True)
        relation_info = {
            "target-id": self.principal_relation.nagios_hostname(),
            "monitors": self.get_monitors(),
            "private-address": address,
            "ingress-address": address,
            "target-address": address,
            "machine_id": os.environ["JUJU_MACHINE_ID"],
            "model_id": hookenv.model_uuid(),
        }
        return relation_info
class PrincipalRelation(helpers.RelationContext):
    """Define a principal relation."""

    def __init__(self, *args, **kwargs):
        """Set name and interface.

        Picks whichever principal-facing relation exists, in order of
        preference: nrpe-external-master, general-info (juju-info), then
        local-monitors.  If none exist, neither attribute is set here.
        """
        if hookenv.relations_of_type("nrpe-external-master"):
            self.name = "nrpe-external-master"
            self.interface = "nrpe-external-master"
        elif hookenv.relations_of_type("general-info"):
            self.name = "general-info"
            self.interface = "juju-info"
        elif hookenv.relations_of_type("local-monitors"):
            self.name = "local-monitors"
            self.interface = "local-monitors"
        super(PrincipalRelation, self).__init__(*args, **kwargs)

    def is_ready(self):
        """Return true if the relation is connected."""
        if self.name not in self:
            return False
        # Ready once the first related unit has published its unit name.
        return "__unit__" in self[self.name][0]

    def nagios_hostname(self):
        """Return the string that nagios will use to identify this host."""
        host_context = hookenv.config("nagios_host_context")
        if host_context:
            host_context += "-"
        hostname_type = hookenv.config("nagios_hostname_type")
        # Detect bare metal hosts
        if hostname_type == "auto":
            is_metal = "none" in subprocess.getoutput("/usr/bin/systemd-detect-virt")
            if is_metal:
                hostname_type = "host"
            else:
                hostname_type = "unit"
        if hostname_type == "host" or not self.is_ready():
            nagios_hostname = "{}{}".format(host_context, socket.gethostname())
            return nagios_hostname
        else:
            principal_unitname = hookenv.principal_unit()
            # Fallback to using "primary" if it exists.
            if not principal_unitname:
                for relunit in self[self.name]:
                    if relunit.get("primary", "False").lower() == "true":
                        principal_unitname = relunit["__unit__"]
                        break
            nagios_hostname = "{}{}".format(host_context, principal_unitname)
            # Unit names contain '/', which is not valid in a Nagios host name.
            nagios_hostname = nagios_hostname.replace("/", "-")
            return nagios_hostname

    def get_monitors(self):
        """Return monitors passed by services on the self.interface relation."""
        if not self.is_ready():
            return
        monitors = Monitors()
        for rel in self[self.name]:
            if rel.get("monitors"):
                # BUG FIX: use yaml.safe_load instead of yaml.load.  The
                # monitors blob arrives over the relation (untrusted input);
                # full yaml.load allows arbitrary object construction and,
                # under PyYAML >= 6, raises without an explicit Loader.  This
                # also matches safe_load usage elsewhere in this module.
                monitors.add_monitors(yaml.safe_load(rel["monitors"]), "principal")
        return monitors

    def provide_data(self):
        """Return nagios hostname and nagios host context."""
        # Provide this data to principals because get_nagios_hostname expects
        # them in charmhelpers/contrib/charmsupport/nrpe when writing principal
        # service__* files
        return {
            "nagios_hostname": self.nagios_hostname(),
            "nagios_host_context": hookenv.config("nagios_host_context"),
        }
class NagiosInfo(dict):
    """Collect the values needed when rendering NRPE/Nagios configuration."""

    def __init__(self):
        """Resolve addresses and flags from charm config and relations."""
        self.principal_relation = PrincipalRelation()
        # Localhost is always allowed; append the configured master, if any.
        master_cfg = hookenv.config("nagios_master")
        self["external_nagios_master"] = "127.0.0.1"
        if master_cfg != "None":
            self["external_nagios_master"] = "{},{}".format(
                "127.0.0.1", master_cfg
            )
        self["nagios_hostname"] = self.principal_relation.nagios_hostname()
        # export_host.cfg.tmpl host definition for Nagios
        self["nagios_ipaddress"] = get_ingress_address("monitors", external=True)
        # Address configured for NRPE to listen on
        self["nrpe_ipaddress"] = get_ingress_address("monitors")
        # nrpe.cfg wants "0"/"1" strings rather than booleans.
        self["dont_blame_nrpe"] = "1" if hookenv.config("dont_blame_nrpe") else "0"
        self["debug"] = "1" if hookenv.config("debug") else "0"
class RsyncEnabled(helpers.RelationContext):
    """Relation context exposing whether nagios definitions are exported."""

    def __init__(self):
        """Derive export_nagios_definitions from charm configuration."""
        export = hookenv.config("export_nagios_definitions")
        master = hookenv.config("nagios_master")
        # An explicitly configured external nagios master implies exporting.
        if master and master != "None":
            export = True
        self["export_nagios_definitions"] = export

    def is_ready(self):
        """Return true if relation is ready (i.e. exporting is enabled)."""
        return self["export_nagios_definitions"]
class NRPECheckCtxt(dict):
    """Convert a local monitor definition.

    Create a dict needed for writing the nrpe check definition.
    """

    def __init__(self, checktype, check_opts, monitor_src):
        """Fill in cmd_exec/description/cmd_name/cmd_params for the check."""
        plugin_path = "/usr/lib/nagios/plugins"
        if checktype == "procrunning":
            # check_procs constrained to a specific executable name.
            self["cmd_exec"] = plugin_path + "/check_procs"
            self["description"] = "Check process {executable} is running".format(
                **check_opts
            )
            self["cmd_name"] = "check_proc_" + check_opts["executable"]
            self["cmd_params"] = "-w {min} -c {max} -C {executable}".format(
                **check_opts
            )
        elif checktype == "processcount":
            # Total process count; "min" (warning threshold) is optional.
            self["cmd_exec"] = plugin_path + "/check_procs"
            self["description"] = "Check process count"
            self["cmd_name"] = "check_proc_principal"
            if "min" in check_opts:
                self["cmd_params"] = "-w {min} -c {max}".format(**check_opts)
            else:
                self["cmd_params"] = "-c {max}".format(**check_opts)
        elif checktype == "disk":
            self["cmd_exec"] = plugin_path + "/check_disk"
            self["description"] = "Check disk usage " + check_opts["path"].replace(
                "/", "_"
            )
            self["cmd_name"] = "check_disk_principal"
            self["cmd_params"] = "-w 20 -c 10 -p " + check_opts["path"]
        elif checktype == "custom":
            # Custom plugins may live outside the default plugin directory,
            # but the directory must exist and be absolute.
            custom_path = check_opts.get("plugin_path", plugin_path)
            if not custom_path.startswith(os.path.sep):
                custom_path = os.path.join(os.path.sep, custom_path)
            if not os.path.isdir(custom_path):
                raise InvalidCustomCheckException(
                    'Specified plugin_path "{}" does not exist or is not a '
                    "directory.".format(custom_path)
                )
            check = check_opts["check"]
            self["cmd_exec"] = os.path.join(custom_path, check)
            self["description"] = check_opts.get("desc", "Check %s" % check)
            self["cmd_name"] = check
            self["cmd_params"] = check_opts.get("params", "") or ""
        # Tag the source (principal/subordinate/user) onto every check so
        # identically-named checks from different sources cannot collide.
        self["description"] += " ({})".format(monitor_src)
        self["cmd_name"] += "_" + monitor_src
class SubordinateCheckDefinitions(dict):
    """Return dict of checks the charm configures."""

    def __init__(self):
        """Build the full list of checks from charm config and host facts."""
        self.procs = self.proc_count()
        load_thresholds = self._get_load_thresholds()
        proc_thresholds = self._get_proc_thresholds()
        disk_root_thresholds = self._get_disk_root_thresholds()
        pkg_plugin_dir = "/usr/lib/nagios/plugins/"
        local_plugin_dir = "/usr/local/lib/nagios/plugins/"
        # Baseline checks valid everywhere, containers included.
        checks = [
            {
                "description": "Number of Zombie processes",
                "cmd_name": "check_zombie_procs",
                "cmd_exec": pkg_plugin_dir + "check_procs",
                "cmd_params": hookenv.config("zombies"),
            },
            {
                "description": "Number of processes",
                "cmd_name": "check_total_procs",
                "cmd_exec": pkg_plugin_dir + "check_procs",
                "cmd_params": proc_thresholds,
            },
            {
                "description": "Number of Users",
                "cmd_name": "check_users",
                "cmd_exec": pkg_plugin_dir + "check_users",
                "cmd_params": hookenv.config("users"),
            },
            {
                "description": "Connnection tracking table",
                "cmd_name": "check_conntrack",
                "cmd_exec": local_plugin_dir + "check_conntrack.sh",
                "cmd_params": hookenv.config("conntrack"),
            },
        ]
        # Host-level checks (disk, load, swap, memory, ...) are skipped in
        # containers, where those resources belong to the host.
        if not is_container():
            checks.extend(
                [
                    {
                        "description": "Root disk",
                        "cmd_name": "check_disk_root",
                        "cmd_exec": pkg_plugin_dir + "check_disk",
                        "cmd_params": disk_root_thresholds,
                    },
                    {
                        "description": "System Load",
                        "cmd_name": "check_load",
                        "cmd_exec": pkg_plugin_dir + "check_load",
                        "cmd_params": load_thresholds,
                    },
                    {
                        "description": "Swap",
                        "cmd_name": "check_swap",
                        "cmd_exec": pkg_plugin_dir + "check_swap",
                        "cmd_params": hookenv.config("swap").strip(),
                    },
                    # Note: check_swap_activity *must* be listed after check_swap, else
                    # check_swap_activity will be removed during installation of
                    # check_swap.
                    {
                        "description": "Swap Activity",
                        "cmd_name": "check_swap_activity",
                        "cmd_exec": local_plugin_dir + "check_swap_activity",
                        "cmd_params": hookenv.config("swap_activity"),
                    },
                    {
                        "description": "Memory",
                        "cmd_name": "check_mem",
                        "cmd_exec": local_plugin_dir + "check_mem.pl",
                        "cmd_params": hookenv.config("mem"),
                    },
                    {
                        "description": "XFS Errors",
                        "cmd_name": "check_xfs_errors",
                        "cmd_exec": local_plugin_dir + "check_xfs_errors.py",
                        "cmd_params": hookenv.config("xfs_errors"),
                    },
                    {
                        "description": "ARP cache entries",
                        "cmd_name": "check_arp_cache",
                        "cmd_exec": os.path.join(
                            local_plugin_dir, "check_arp_cache.py"
                        ),
                        "cmd_params": "-w 60 -c 80",
                    },
                ]
            )
            ro_filesystem_excludes = hookenv.config("ro_filesystem_excludes")
            if ro_filesystem_excludes == "":
                # specify cmd_params = '' to disable/remove the check from nrpe
                check_ro_filesystem = {
                    "description": "Readonly filesystems",
                    "cmd_name": "check_ro_filesystem",
                    "cmd_exec": os.path.join(
                        local_plugin_dir, "check_ro_filesystem.py"
                    ),
                    "cmd_params": "",
                }
            else:
                check_ro_filesystem = {
                    "description": "Readonly filesystems",
                    "cmd_name": "check_ro_filesystem",
                    "cmd_exec": os.path.join(
                        local_plugin_dir, "check_ro_filesystem.py"
                    ),
                    "cmd_params": "-e {}".format(
                        hookenv.config("ro_filesystem_excludes")
                    ),
                }
            checks.append(check_ro_filesystem)
        # One LACP check per configured bond interface that actually exists.
        if hookenv.config("lacp_bonds").strip():
            for bond_iface in hookenv.config("lacp_bonds").strip().split():
                if os.path.exists("/sys/class/net/{}".format(bond_iface)):
                    description = "LACP Check {}".format(bond_iface)
                    cmd_name = "check_lacp_{}".format(bond_iface)
                    cmd_exec = local_plugin_dir + "check_lacp_bond.py"
                    cmd_params = "-i {}".format(bond_iface)
                    lacp_check = {
                        "description": description,
                        "cmd_name": cmd_name,
                        "cmd_exec": cmd_exec,
                        "cmd_params": cmd_params,
                    }
                    checks.append(lacp_check)
        # One netlink check per interface resolved from the config value.
        if hookenv.config("netlinks"):
            ifaces = yaml.safe_load(hookenv.config("netlinks"))
            cmd_exec = local_plugin_dir + "check_netlinks.py"
            if hookenv.config("netlinks_skip_unfound_ifaces"):
                cmd_exec += " --skip-unfound-ifaces"
            d_ifaces = self.parse_netlinks(ifaces)
            for iface in d_ifaces:
                description = "Netlinks status ({})".format(iface)
                cmd_name = "check_netlinks_{}".format(iface)
                cmd_params = d_ifaces[iface]
                netlink_check = {
                    "description": description,
                    "cmd_name": cmd_name,
                    "cmd_exec": cmd_exec,
                    "cmd_params": cmd_params,
                }
                checks.append(netlink_check)
        # Checking if CPU governor is supported by the system and add nrpe check
        cpu_governor_paths = "/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor"
        cpu_governor_supported = glob.glob(cpu_governor_paths)
        requested_cpu_governor = hookenv.relation_get("requested_cpu_governor")
        cpu_governor_config = hookenv.config("cpu_governor")
        # Explicit charm config wins over a value requested via the relation.
        wanted_cpu_governor = cpu_governor_config or requested_cpu_governor
        if wanted_cpu_governor and cpu_governor_supported:
            description = "Check CPU governor scaler"
            cmd_name = "check_cpu_governor"
            cmd_exec = local_plugin_dir + "check_cpu_governor.py"
            cmd_params = "--governor {}".format(wanted_cpu_governor)
            cpu_governor_check = {
                "description": description,
                "cmd_name": cmd_name,
                "cmd_exec": cmd_exec,
                "cmd_params": cmd_params,
            }
            checks.append(cpu_governor_check)
        self["checks"] = []
        sub_postfix = str(hookenv.config("sub_postfix"))
        # Automatically use _sub for checks shipped on a unit with the nagios
        # charm. Mostly for backwards compatibility.
        principal_unit = hookenv.principal_unit()
        if sub_postfix == "" and principal_unit:
            md = hookenv._metadata_unit(principal_unit)
            if md and md.pop("name", None) == "nagios":
                sub_postfix = "_sub"
        nrpe_config_sub_tmpl = "/etc/nagios/nrpe.d/{}_*.cfg"
        nrpe_config_tmpl = "/etc/nagios/nrpe.d/{}.cfg"
        for check in checks:
            # This can be used to clean up old files before rendering the new
            # ones
            nrpe_configfiles_sub = nrpe_config_sub_tmpl.format(check["cmd_name"])
            nrpe_configfiles = nrpe_config_tmpl.format(check["cmd_name"])
            check["matching_files"] = glob.glob(nrpe_configfiles_sub)
            check["matching_files"].extend(glob.glob(nrpe_configfiles))
            check["description"] += " (sub)"
            check["cmd_name"] += sub_postfix
            self["checks"].append(check)

    def _get_proc_thresholds(self):
        """Return suitable processor thresholds.

        "auto" scales warning/critical thresholds with the CPU count.
        """
        if hookenv.config("procs") == "auto":
            proc_thresholds = "-k -w {} -c {}".format(
                25 * self.procs + 100, 50 * self.procs + 100
            )
        else:
            proc_thresholds = hookenv.config("procs")
        return proc_thresholds

    def _get_load_thresholds(self):
        """Return suitable load thresholds."""
        if hookenv.config("load") == "auto":
            # Give 1min load alerts higher thresholds than 15 min load alerts
            warn_multipliers = (4, 2, 1)
            crit_multipliers = (8, 4, 2)
            load_thresholds = ("-w %s -c %s") % (
                ",".join([str(m * self.procs) for m in warn_multipliers]),
                ",".join([str(m * self.procs) for m in crit_multipliers]),
            )
        else:
            load_thresholds = hookenv.config("load")
        return load_thresholds

    def _get_disk_root_thresholds(self):
        """Return suitable disk thresholds.

        An empty string disables/removes the root-disk check.
        """
        if hookenv.config("disk_root"):
            disk_root_thresholds = hookenv.config("disk_root") + " -p / "
        else:
            disk_root_thresholds = ""
        return disk_root_thresholds

    def proc_count(self):
        """Return the number of processing units."""
        return int(subprocess.check_output(["nproc", "--all"]))

    def parse_netlinks(self, ifaces):
        """Parse a list of strings, or a single string.

        Looks if the interfaces exist and configures extra parameters (or
        properties) -> ie. ['mtu:9000', 'speed:1000', 'op:up']
        """
        iface_path = "/sys/class/net/{}"
        props_dict = {"mtu": "-m {}", "speed": "-s {}", "op": "-o {}"}
        if type(ifaces) == str:
            ifaces = [ifaces]
        d_ifaces = {}
        for iface in ifaces:
            iface_props = iface.strip().split()
            # no ifaces defined; SKIP
            if len(iface_props) == 0:
                continue
            target = iface_props[0]
            try:
                # The first token may be a CIDR; expand it to adapter names.
                matches = match_cidr_to_ifaces(target)
            except Exception as e:
                # Log likely unintentional errors and set flag for blocked status,
                # if appropriate.
                if isinstance(e, ValueError) and "has host bits set" in e.args[0]:
                    hookenv.log(
                        "Error parsing netlinks: {}".format(e.args[0]),
                        level=hookenv.ERROR,
                    )
                    set_netlinks_error()
                # Treat target as explicit interface name
                matches = [target]
            iface_devs = [
                target
                for target in matches
                if os.path.exists(iface_path.format(target))
            ]
            # no ifaces found; SKIP
            if not iface_devs:
                continue
            # parse extra parameters (properties)
            del iface_props[0]
            extra_params = ""
            for prop in iface_props:
                # wrong format (key:value); SKIP
                if prop.find(":") < 0:
                    continue
                # only one ':' expected
                kv = prop.split(":")
                if len(kv) == 2 and kv[0].lower() in props_dict:
                    extra_params += " "
                    extra_params += props_dict[kv[0].lower()].format(kv[1])
            for iface_dev in iface_devs:
                d_ifaces[iface_dev] = "-i {}{}".format(iface_dev, extra_params)
        return d_ifaces
def match_cidr_to_ifaces(cidr):
    """Use CIDR expression to search for matching network adapters.

    Returns a list of adapter names.
    """
    import netifaces  # Avoid import error before this dependency gets installed

    network = ipaddress.IPv4Network(cidr)
    matching_adapters = []
    for adapter in netifaces.interfaces():
        # Only IPv4 addresses are considered; adapters without any are skipped.
        addr_structs = netifaces.ifaddresses(adapter).get(netifaces.AF_INET, [])
        adapter_addrs = (
            ipaddress.IPv4Address(struct["addr"]) for struct in addr_structs
        )
        if any(addr in network for addr in adapter_addrs):
            matching_adapters.append(adapter)
    return matching_adapters
def has_netlinks_error():
    """Return True in case of netlinks related errors."""
    # Module-level flag, raised by set_netlinks_error() during config parsing.
    return NETLINKS_ERROR
def set_netlinks_error():
    """Set the flag indicating a netlinks related error."""
    # Sticky for the lifetime of the hook process; there is no reset helper.
    global NETLINKS_ERROR
    NETLINKS_ERROR = True

6
nrpe/hooks/nrpe_hooks.py Executable file
View File

@ -0,0 +1,6 @@
#!/usr/bin/python3
"""Nrpe hooks module."""
import services

# Every hook symlink points at this file; the services framework in
# services.manage() decides what needs to run for the current hook.
services.manage()

275
nrpe/hooks/nrpe_utils.py Normal file
View File

@ -0,0 +1,275 @@
"""Nrpe utils module."""
import glob
import os
import shutil
import subprocess
from charmhelpers import fetch
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core.services import helpers
from charmhelpers.core.services.base import (
ManagerCallback,
PortManagerCallback,
)
from charmhelpers.core.templating import render
import nrpe_helpers
import yaml
def restart_rsync(service_name):
    """Restart rsync.

    service_name is unused; the signature matches the services framework
    callback convention.
    """
    host.service_restart("rsync")
def restart_nrpe(service_name):
    """Restart nrpe.

    service_name is unused; the signature matches the services framework
    callback convention.
    """
    host.service_restart("nagios-nrpe-server")
def determine_packages():
    """Return a list of packages this charm needs installed."""
    base_packages = [
        "nagios-nrpe-server",
        "nagios-plugins-basic",
        "nagios-plugins-standard",
        "python3",
        "python3-netifaces",
    ]
    packages = list(base_packages)
    # rsync is required whenever nagios definitions get exported, either via
    # the explicit config flag or a configured external nagios master.
    if hookenv.config("export_nagios_definitions"):
        packages.append("rsync")
    if hookenv.config("nagios_master") not in ["None", "", None]:
        packages.append("rsync")
    return packages
def install_packages(service_name):
    """Install packages.

    service_name is unused; the signature matches the services framework
    callback convention.
    """
    fetch.apt_update()
    apt_options = [
        # avoid installing rpcbind LP#1873171
        "--no-install-recommends",
        # and retain the default option too
        "--option=Dpkg::Options::=--force-confold",
    ]
    fetch.apt_install(determine_packages(), options=apt_options, fatal=True)
def remove_host_export_fragments(service_name):
    """Remove nagios host config fragment.

    Deletes every previously exported host__* definition so stale host
    fragments do not linger in /var/lib/nagios/export.
    """
    for fragment in glob.iglob("/var/lib/nagios/export/host__*"):
        os.remove(fragment)
def install_charm_files(service_name):
    """Install files shipped with charm."""
    # The preinst script of nagios-nrpe-server deb package will add nagios user
    # and create this dir as home
    # ref: https://git.launchpad.net/ubuntu/+source/nagios-nrpe/tree/debian/nagios-nrpe-server.preinst#n28 # NOQA: E501
    nagios_home = "/var/lib/nagios"
    # it's possible dir owner be changed to root by other process, e.g.: LP1866382
    # here we ensure owner is nagios, but didn't apply it resursively intentionally.
    shutil.chown(nagios_home, user="nagios", group="nagios")
    # the `2` in mode will setgid for group, set dir permission to `drwxr-sr-x`.
    # the `s` (setgid) will ensure any file created in this dir inherits parent dir
    # group `nagios`, regardless of the effective user, such as root.
    os.chmod(nagios_home, 0o2755)  # 2 will set the s flag for group
    nag_dirs = [
        "/etc/nagios/nrpe.d/",
        "/usr/local/lib/nagios/plugins",
        "/var/lib/nagios/export/",
    ]
    for nag_dir in nag_dirs:
        if not os.path.exists(nag_dir):
            host.mkdir(nag_dir, perms=0o755)
    charm_file_dir = os.path.join(hookenv.charm_dir(), "files")
    charm_plugin_dir = os.path.join(charm_file_dir, "plugins")
    pkg_plugin_dir = "/usr/lib/nagios/plugins/"
    local_plugin_dir = "/usr/local/lib/nagios/plugins/"
    # Ship the nagios_plugin helper modules alongside the packaged plugins.
    shutil.copy2(
        os.path.join(charm_file_dir, "nagios_plugin.py"),
        pkg_plugin_dir + "/nagios_plugin.py",
    )
    shutil.copy2(
        os.path.join(charm_file_dir, "nagios_plugin3.py"),
        pkg_plugin_dir + "/nagios_plugin3.py",
    )
    shutil.copy2(os.path.join(charm_file_dir, "default_rsync"), "/etc/default/rsync")
    shutil.copy2(os.path.join(charm_file_dir, "rsyncd.conf"), "/etc/rsyncd.conf")
    host.mkdir("/etc/rsync-juju.d", perms=0o755)
    # --executability keeps the +x bit on the copied plugin scripts.
    host.rsync(charm_plugin_dir, "/usr/local/lib/nagios/", options=["--executability"])
    # Symlink the helper modules into the local plugin dir if not present.
    for nagios_plugin in ("nagios_plugin.py", "nagios_plugin3.py"):
        if not os.path.exists(local_plugin_dir + nagios_plugin):
            os.symlink(pkg_plugin_dir + nagios_plugin, local_plugin_dir + nagios_plugin)
def render_nrpe_check_config(checkctxt):
    """Write nrpe check definition.

    checkctxt -- dict with cmd_name/cmd_exec/cmd_params/description keys
    (e.g. an NRPECheckCtxt).
    """
    # Only render if we actually have cmd parameters
    if checkctxt["cmd_params"]:
        render(
            "nrpe_command.tmpl",
            "/etc/nagios/nrpe.d/{}.cfg".format(checkctxt["cmd_name"]),
            checkctxt,
        )
def render_nrped_files(service_name):
    """Render each of the predefined checks.

    service_name is unused; the signature matches the services framework
    callback convention.
    """
    for checkctxt in nrpe_helpers.SubordinateCheckDefinitions()["checks"]:
        # Clean up existing files
        for fname in checkctxt["matching_files"]:
            try:
                os.unlink(fname)
            except FileNotFoundError:
                # Don't clean up non-existent files
                pass
        render_nrpe_check_config(checkctxt)
    # Relation-provided and user-configured checks are rendered afterwards.
    process_local_monitors()
    process_user_monitors()
def process_user_monitors():
    """Collect the user defined local monitors from config."""
    if hookenv.config("monitors"):
        monitors = yaml.safe_load(hookenv.config("monitors"))
    else:
        # Nothing configured; nothing to render.
        return
    try:
        local_user_checks = monitors["monitors"]["local"].keys()
    except KeyError as e:
        # The config only carried remote monitors (or none at all).
        hookenv.log("no local monitors found in monitors config: {}".format(e))
        return
    for checktype in local_user_checks:
        for check in monitors["monitors"]["local"][checktype].keys():
            check_def = nrpe_helpers.NRPECheckCtxt(
                checktype, monitors["monitors"]["local"][checktype][check], "user"
            )
            render_nrpe_check_config(check_def)
def process_local_monitors():
    """Get all the monitor dicts and write out and local checks."""
    monitor_dicts = nrpe_helpers.MonitorsRelation().get_monitor_dicts()
    # Sources are principal, subordinate and user (see get_monitor_dicts).
    for monitor_src in monitor_dicts.keys():
        monitor_dict = monitor_dicts[monitor_src]
        if not (monitor_dict and "local" in monitor_dict["monitors"]):
            continue
        monitors = monitor_dict["monitors"]["local"]
        for checktype in monitors:
            for check in monitors[checktype]:
                render_nrpe_check_config(
                    nrpe_helpers.NRPECheckCtxt(
                        checktype,
                        monitors[checktype][check],
                        monitor_src,
                    )
                )
def update_nrpe_external_master_relation(service_name):
    """Update nrpe external master relation.

    Send updated nagios_hostname to charms attached
    to nrpe_external_master relation.

    service_name is unused; the signature matches the services framework
    callback convention.
    """
    principal_relation = nrpe_helpers.PrincipalRelation()
    for rid in hookenv.relation_ids("nrpe-external-master"):
        hookenv.relation_set(
            relation_id=rid, relation_settings=principal_relation.provide_data()
        )
def update_monitor_relation(service_name):
    """Send updated monitor yaml to charms attached to monitor relation.

    service_name is unused; the signature matches the services framework
    callback convention.
    """
    monitor_relation = nrpe_helpers.MonitorsRelation()
    for rid in hookenv.relation_ids("monitors"):
        hookenv.relation_set(
            relation_id=rid, relation_settings=monitor_relation.provide_data()
        )
def has_consumer():
    """Check for the monitor relation or external monitor config."""
    # An explicitly configured external nagios master counts as a consumer;
    # short-circuit before touching relation data.
    if hookenv.config("nagios_master") not in ["None", "", None]:
        return True
    return bool(hookenv.relation_ids("monitors"))
class TolerantPortManagerCallback(PortManagerCallback):
    """Manage unit ports.

    Specialization of the PortManagerCallback. It will open or close
    ports as its superclass, but will not raise an error on conflicts
    for opening ports

    For context, see:
    https://bugs.launchpad.net/juju/+bug/1750079 and
    https://github.com/juju/charm-helpers/pull/152
    """

    def __call__(self, manager, service_name, event_name):
        """Open unit ports."""
        service = manager.get_service(service_name)
        new_ports = service.get("ports", [])
        # Ports opened on a previous run are remembered in a dot-file so
        # stale ones can be closed when the configuration changes.
        port_file = os.path.join(hookenv.charm_dir(), ".{}.ports".format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(",")
            for old_port in old_ports:
                if bool(old_port) and not self.ports_contains(old_port, new_ports):
                    # NOTE(review): closed without a protocol argument even
                    # though one is passed to open_port below -- confirm this
                    # asymmetry is intentional.
                    hookenv.close_port(old_port)
        with open(port_file, "w") as fp:
            fp.write(",".join(str(port) for port in new_ports))
        for port in new_ports:
            # A port is either a number or 'ICMP'
            protocol = "TCP"
            if str(port).upper() == "ICMP":
                protocol = "ICMP"
            if event_name == "start":
                try:
                    hookenv.open_port(port, protocol)
                except subprocess.CalledProcessError as err:
                    if err.returncode == 1:
                        # Conflict opening the port; log and carry on, see
                        # LP#1750079 linked in the class docstring.
                        hookenv.log(
                            "open_port returns: {}, ignoring".format(err),
                            level=hookenv.INFO,
                        )
                    else:
                        raise
            elif event_name == "stop":
                hookenv.close_port(port, protocol)
maybe_open_ports = TolerantPortManagerCallback()
class ExportManagerCallback(ManagerCallback):
    """Defer lookup of nagios_hostname.

    This class exists in order to defer lookup of nagios_hostname()
    until the template is ready to be rendered. This should reduce the
    incidence of incorrectly-rendered hostnames in /var/lib/nagios/exports.

    See charmhelpers.core.services.base.ManagerCallback and
    charmhelpers.core.services.helpers.TemplateCallback for more background.
    """

    def __call__(self, manager, service_name, event_name):
        """Render export_host.cfg."""
        # The hostname is resolved only now, at render time (see class doc).
        nag_hostname = nrpe_helpers.PrincipalRelation().nagios_hostname()
        target = "/var/lib/nagios/export/host__{}.cfg".format(nag_hostname)
        renderer = helpers.render_template(
            source="export_host.cfg.tmpl",
            target=target,
            perms=0o644,
        )
        renderer(manager, service_name, event_name)
create_host_export_fragment = ExportManagerCallback()

94
nrpe/hooks/services.py Normal file
View File

@ -0,0 +1,94 @@
"""Nrpe service definifition."""
import os
from charmhelpers.core import hookenv
from charmhelpers.core.hookenv import status_set
from charmhelpers.core.services import helpers
from charmhelpers.core.services.base import ServiceManager
import nrpe_helpers
import nrpe_utils
def get_revision():
    """Get charm revision str.

    Reads the first line of the local ``version`` file (if present) and
    formats it for inclusion in the unit's status message. Returns an
    empty string when no version file exists.
    """
    if not os.path.exists("version"):
        return ""
    with open("version") as fh:
        commit = fh.readline().strip()
    # The first 8 characters are enough to identify which version of
    # the charm is deployed.
    if len(commit) > 8:
        return " (source version/commit {}...)".format(commit[:8])
    return " (source version/commit {})".format(commit)
def manage():
    """Manage nrpe service.

    Builds the charmhelpers service definitions, lets the ServiceManager
    bring them up to date, then derives a final workload status.
    """
    status_set("maintenance", "starting")
    config = hookenv.config()

    # Package installation and charm payload files.
    install_service = {
        "service": "nrpe-install",
        "data_ready": [
            nrpe_utils.install_packages,
            nrpe_utils.install_charm_files,
        ],
        "start": [],
        "stop": [],
    }

    # Main nrpe configuration, rendered once all required relation data
    # is available.
    config_service = {
        "service": "nrpe-config",
        "required_data": [
            config,
            nrpe_helpers.MonitorsRelation(),
            nrpe_helpers.PrincipalRelation(),
            nrpe_helpers.NagiosInfo(),
        ],
        "data_ready": [
            nrpe_utils.update_nrpe_external_master_relation,
            nrpe_utils.update_monitor_relation,
            nrpe_utils.create_host_export_fragment,
            nrpe_utils.render_nrped_files,
            helpers.render_template(
                source="nrpe.tmpl", target="/etc/nagios/nrpe.cfg"
            ),
        ],
        "provided_data": [nrpe_helpers.PrincipalRelation()],
        "ports": [hookenv.config("server_port"), "ICMP"],
        "start": [nrpe_utils.maybe_open_ports, nrpe_utils.restart_nrpe],
        "stop": [],
    }

    # Rsync configuration used to export host fragments to Nagios.
    rsync_service = {
        "service": "nrpe-rsync",
        "required_data": [
            config,
            nrpe_helpers.PrincipalRelation(),
            nrpe_helpers.RsyncEnabled(),
            nrpe_helpers.NagiosInfo(),
        ],
        "data_ready": [
            nrpe_utils.remove_host_export_fragments,
            helpers.render_template(
                source="rsync-juju.d.tmpl",
                target="/etc/rsync-juju.d/010-nrpe-external-master.conf",
            ),
            nrpe_utils.create_host_export_fragment,
        ],
        "start": [nrpe_utils.restart_rsync],
        "stop": [],
    }

    ServiceManager([install_service, config_service, rsync_service]).manage()

    if not nrpe_utils.has_consumer():
        status_set("blocked", "Nagios server not configured or related")
    elif nrpe_helpers.has_netlinks_error():
        status_set("blocked", "Netlinks parsing encountered failure; see logs")
    else:
        status_set("active", "Ready{}".format(get_revision()))

1
nrpe/hooks/start Symbolic link
View File

@ -0,0 +1 @@
nrpe_hooks.py

1
nrpe/hooks/stop Symbolic link
View File

@ -0,0 +1 @@
nrpe_hooks.py

23
nrpe/hooks/update-status Executable file
View File

@ -0,0 +1,23 @@
#!/usr/bin/env python3
"""Nrpe update-status hook"""
import os
import subprocess
from charmhelpers.core.hookenv import status_set
from services import get_revision
SERVICE = "nagios-nrpe-server"
def update_status():
    """Update Nrpe Juju status.

    Sets the workload status based on whether the nagios-nrpe-server
    systemd unit is currently active.
    """
    # systemctl exits 0 when the unit is active; any other exit code
    # means the service is not running.
    is_active = (
        subprocess.call(["systemctl", "is-active", "--quiet", SERVICE]) == 0
    )
    if is_active:
        status_set("active", "Ready{}".format(get_revision()))
    else:
        status_set("blocked", "{} service inactive.".format(SERVICE))
if __name__ == '__main__':
update_status()

1
nrpe/hooks/upgrade-charm Symbolic link
View File

@ -0,0 +1 @@
nrpe_hooks.py

212
nrpe/icon.svg Normal file
View File

@ -0,0 +1,212 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="96"
height="96"
id="svg6517"
version="1.1"
inkscape:version="0.91+devel r"
sodipodi:docname="nagios01.svg"
viewBox="0 0 96 96">
<defs
id="defs6519">
<filter
style="color-interpolation-filters:sRGB"
inkscape:label="Inner Shadow"
id="filter1121">
<feFlood
flood-opacity="0.59999999999999998"
flood-color="rgb(0,0,0)"
result="flood"
id="feFlood1123" />
<feComposite
in="flood"
in2="SourceGraphic"
operator="out"
result="composite1"
id="feComposite1125" />
<feGaussianBlur
in="composite1"
stdDeviation="1"
result="blur"
id="feGaussianBlur1127" />
<feOffset
dx="0"
dy="2"
result="offset"
id="feOffset1129" />
<feComposite
in="offset"
in2="SourceGraphic"
operator="atop"
result="composite2"
id="feComposite1131" />
</filter>
<filter
style="color-interpolation-filters:sRGB"
inkscape:label="Drop Shadow"
id="filter950">
<feFlood
flood-opacity="0.25"
flood-color="rgb(0,0,0)"
result="flood"
id="feFlood952" />
<feComposite
in="flood"
in2="SourceGraphic"
operator="in"
result="composite1"
id="feComposite954" />
<feGaussianBlur
in="composite1"
stdDeviation="1"
result="blur"
id="feGaussianBlur956" />
<feOffset
dx="0"
dy="1"
result="offset"
id="feOffset958" />
<feComposite
in="SourceGraphic"
in2="offset"
operator="over"
result="composite2"
id="feComposite960" />
</filter>
<linearGradient
id="Background">
<stop
id="stop4178"
offset="0"
style="stop-color:#22779e;stop-opacity:1" />
<stop
id="stop4180"
offset="1"
style="stop-color:#2991c0;stop-opacity:1" />
</linearGradient>
<clipPath
clipPathUnits="userSpaceOnUse"
id="clipPath873">
<g
transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
id="g875"
inkscape:label="Layer 1"
style="display:inline;fill:#ff00ff;fill-opacity:1;stroke:none">
<path
style="display:inline;fill:#ff00ff;fill-opacity:1;stroke:none"
d="M 46.702703,898.22775 H 97.297297 C 138.16216,898.22775 144,904.06497 144,944.92583 v 50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 H 46.702703 C 5.8378378,1042.3622 0,1036.525 0,995.66429 v -50.73846 c 0,-40.86086 5.8378378,-46.69808 46.702703,-46.69808 z"
id="path877"
inkscape:connector-curvature="0"
sodipodi:nodetypes="sssssssss" />
</g>
</clipPath>
<style
id="style867"
type="text/css"><![CDATA[
.fil0 {fill:#1F1A17}
]]></style>
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="5.0931702"
inkscape:cx="51.311597"
inkscape:cy="9.1743059"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
inkscape:window-width="1920"
inkscape:window-height="1029"
inkscape:window-x="0"
inkscape:window-y="24"
inkscape:window-maximized="1"
showborder="true"
showguides="true"
inkscape:guide-bbox="true"
inkscape:showpageshadow="false"
inkscape:snap-global="false"
inkscape:snap-bbox="true"
inkscape:bbox-paths="true"
inkscape:bbox-nodes="true"
inkscape:snap-bbox-edge-midpoints="true"
inkscape:snap-bbox-midpoints="true"
inkscape:object-paths="true"
inkscape:snap-intersection-paths="true"
inkscape:object-nodes="true"
inkscape:snap-smooth-nodes="true"
inkscape:snap-midpoints="true"
inkscape:snap-object-midpoints="true"
inkscape:snap-center="true"
inkscape:snap-text-baseline="true">
<inkscape:grid
type="xygrid"
id="grid821" />
<sodipodi:guide
orientation="1,0"
position="16,48"
id="guide823"
inkscape:locked="false" />
<sodipodi:guide
orientation="0,1"
position="64,80"
id="guide825"
inkscape:locked="false" />
<sodipodi:guide
orientation="1,0"
position="80,40"
id="guide827"
inkscape:locked="false" />
<sodipodi:guide
orientation="0,1"
position="64,16"
id="guide829"
inkscape:locked="false" />
</sodipodi:namedview>
<metadata
id="metadata6522">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="BACKGROUND"
inkscape:groupmode="layer"
id="layer1"
transform="translate(268,-635.29076)"
style="display:inline">
<path
style="display:inline;fill:#383838;fill-opacity:1;stroke:none"
d="M 48 0 A 48 48 0 0 0 0 48 A 48 48 0 0 0 48 96 A 48 48 0 0 0 96 48 A 48 48 0 0 0 48 0 z "
transform="translate(-268,635.29076)"
id="path6455" />
<path
style="opacity:1;fill:#ffffff;stroke-width:0.49836424"
d="m -245.09662,711.72332 c 0.11462,-2.99034 3.05007,-3.91613 4.84239,-4.18797 1.71688,-0.26041 5.37891,0.68965 12.09436,0.58011 6.71546,-0.10955 12.86651,-0.84046 13.96291,-0.88947 1.9861,-0.0888 5.3861,0.76742 8.80105,1.10369 4.31039,0.42444 6.80977,-0.52091 9.2513,0.84518 1.61577,0.90407 1.08624,3.01777 -0.74659,4.38113 -1.33442,0.99261 -1.63017,1.00618 -10.21647,0.46871 -5.17466,-0.32392 -9.97919,-0.38052 -11.58328,-0.13645 -1.35912,0.2068 -6.41923,0.46913 -11.10369,0.29831 -2.63405,-0.096 -4.79303,-0.60312 -6.97952,-0.66401 -2.52649,-0.0704 -3.8034,0.41679 -4.70411,0.38205 -2.3943,-0.0924 -3.67838,-0.61513 -3.61835,-2.18128 z m 5.08818,-11.30155 c -2.22213,-1.35482 -2.60399,-2.66785 -2.60399,-8.95375 0,-1.80081 -0.61411,-3.70956 0,-5.40241 0.30925,-0.85248 1.16285,-1.39184 1.74428,-2.08776 1.99091,-2.38297 2.18499,-3.96166 0.73705,-5.99511 -2.15336,-3.02411 -2.55856,-5.26728 -2.5057,-13.87166 0.0276,-4.48536 0.36374,-8.50768 0.51938,-9.03208 0.39979,-1.34711 1.27377,-1.54835 3.07627,-1.29531 1.70445,0.23927 3.95595,-0.20898 5.39827,-0.90867 2.60236,-1.26243 3.2066,-0.51959 4.20906,0.37736 0.59637,0.5336 1.3041,1.99758 1.7922,3.12788 0.56626,1.31131 1.45544,2.99812 3.01987,3.48855 2.97891,0.93386 3.54465,3.48769 6.64802,8.00186 2.48359,3.61262 5.05929,7.14477 5.7238,7.84924 2.77866,2.94574 3.73548,0.83339 3.37029,-7.44054 -0.10452,-2.36805 -0.60796,-4.45632 -0.35748,-6.22263 0.44969,-3.17117 -0.064,-6.30696 1.06018,-8.13995 0.72523,-1.18253 2.25821,-0.84224 4.94907,-0.82731 5.87758,0.0326 7.51589,1.06692 8.04026,7.66048 0.17784,2.23625 -0.0448,5.06655 0.0935,8.77532 0.21258,5.69922 0.36565,9.89449 -0.19419,13.9542 -0.33257,2.4116 -0.23954,5.19203 0.11854,7.85689 0.65813,4.89781 0.10092,7.46463 -1.97891,9.11584 -1.22508,0.97261 -1.74021,1.04732 -5.2449,0.76061 -2.13752,-0.17486 -4.77629,-0.67399 -5.86393,-1.10918 -2.47417,-0.98996 -5.12777,-3.97168 -7.68278,-8.63275 -1.08686,-1.98272 -3.08563,-4.87223 -4.44173,-6.42113 -1.35609,-1.5489 -3.21923,-3.99357 
-4.14032,-5.4326 -1.7631,-2.75454 -3.20325,-3.36232 -4.08098,-1.72228 -0.26319,0.49178 -0.61704,4.2482 -0.78633,8.3476 -0.0761,1.84209 0.29872,3.56974 0.0707,5.40334 -0.27023,2.17271 -1.51256,3.76156 -0.90944,4.81483 1.14113,1.99282 0.59074,2.41331 0.15055,3.26026 -0.85686,1.64863 -7.25181,2.33409 -9.93055,0.70086 z"
id="path4279"
inkscape:connector-curvature="0"
sodipodi:nodetypes="sssssssssssssssssssssssssssssssssssssssssssssss" />
</g>
</svg>

After

Width:  |  Height:  |  Size: 8.4 KiB

32
nrpe/metadata.yaml Normal file
View File

@ -0,0 +1,32 @@
name: nrpe
format: 2
summary: Nagios Remote Plugin Executor Server
maintainer: LMA Charmers <llama-charmers@lists.ubuntu.com>
subordinate: true
description: |
Nagios is a host/service/network monitoring and management system. The
purpose of this addon is to allow you to execute Nagios plugins on a
remote host in as transparent a manner as possible. This program runs
as a background process on the remote host and processes command
execution requests from the check_nrpe plugin on the Nagios host.
tags:
- misc
provides:
nrpe:
interface: nrpe
monitors:
interface: monitors
requires:
nrpe-external-master:
interface: nrpe-external-master
scope: container
general-info:
interface: juju-info
scope: container
local-monitors:
interface: local-monitors
scope: container
series:
- bionic
- focal
- xenial

View File

@ -0,0 +1,17 @@
*.pyc
__pycache__/
dist/
build/
MANIFEST
charmhelpers.egg-info/
charmhelpers/version.py
.coverage
.env/
coverage.xml
docs/_build
.idea
.project
.pydevproject
.settings
.venv
.venv3

View File

@ -0,0 +1,46 @@
name: charm-helpers CI
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
build:
runs-on: ubuntu-18.04
strategy:
matrix:
include:
- python-version: 2.7
env: pep8,py27
- python-version: 3.4
env: pep8,py34
- python-version: 3.5
env: pep8,py35
- python-version: 3.6
env: pep8,py36
- python-version: 3.7
env: pep8,py37
- python-version: 3.8
env: pep8,py38
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install juju
run: |
sudo snap install juju --classic
- name: Install packages
run: |
sudo apt -qq update
sudo apt install --yes libapt-pkg-dev # For python-apt wheel build
sudo apt install --yes bzr
- name: Install tox
run: pip install tox
- name: Test
run: tox -c tox.ini -e ${{ matrix.env }}

125
nrpe/mod/charmhelpers/.gitignore vendored Normal file
View File

@ -0,0 +1,125 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv*
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
*.pyc
__pycache__/
dist/
build/
MANIFEST
charmhelpers.egg-info/
charmhelpers/version.py
.coverage
.env/
coverage.xml
docs/_build
.idea
.project
.pydevproject
.settings
.venv
.venv3
.bzr
.unit-state.db
AUTHORS
ChangeLog

View File

@ -0,0 +1,102 @@
# Hacking on charmhelpers
## Run testsuite (tox method)
CAUTION: the charm-helpers library has some unit tests which do unsavory things
such as making real, unmocked calls out to sudo foo, juju binaries, and perhaps
other things. This is not ideal for a number of reasons. One of those reasons
is that it pollutes the test runner (your) system.
The current recommendation for testing locally is to do so in a fresh Xenial
(16.04) lxc container. 16.04 is selected for consistency with what is available
in the Travis CI test gates. As of this writing, 18.04 is not available there.
The fresh Xenial lxc system container will need to have the following packages
installed in order to satisfy test runner dependencies:
sudo apt install git bzr tox libapt-pkg-dev python-dev python3-dev build-essential juju -y
The tests can be executed as follows:
tox -e pep8
tox -e py3
tox -e py2
See also: .travis.yaml for what is happening in the test gate.
## Run testsuite (legacy Makefile method)
make test
Run `make` without arguments for more options.
## Test it in a charm
Use following instructions to build a charm that uses your own development branch of
charmhelpers.
Step 1: Make sure your version of charmhelpers is recognised as the latest version
by appending `dev0` to the version number in the `VERSION` file.
Step 2: Create an override file `override-wheelhouse.txt` that points to your own
charmhelpers branch. *The format of this file is the same as pip's
[`requirements.txt`](https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format)
file.*
# Override charmhelpers by the version found in folder
-e /path/to/charmhelpers
# Or point it to a github repo with
-e git+https://github.com/<myuser>/charm-helpers#egg=charmhelpers
Step 3: Build the charm specifying the override file. *You might need to install the
candidate channel of the charm snap*
charm build <mycharm> -w override-wheelhouse.txt
Now when you deploy your charm, it will use your own branch of charmhelpers.
*Note: If you want to verify this or change the charmhelpers code on a built
charm, get the path of the installed charmhelpers by running following command.*
python3 -c "import charmhelpers; print(charmhelpers.__file__)"
# Hacking on Docs
Install html doc dependencies:
```bash
sudo apt-get install python-flake8 python-shelltoolbox python-tempita \
python-nose python-mock python-testtools python-jinja2 python-coverage \
python-git python-netifaces python-netaddr python-pip zip
```
To build the html documentation:
```bash
make docs
```
To browse the html documentation locally:
```bash
make docs
cd docs/_build/html
python -m SimpleHTTPServer 8765
# point web browser to http://localhost:8765
```
To build and upload package and doc updates to PyPI:
```bash
make release
# note: if the package version already exists on PyPI
# this command will upload doc updates only
```
# PyPI Package and Docs
The published package and docs currently live at:
https://pypi.python.org/pypi/charmhelpers
http://pythonhosted.org/charmhelpers/

View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,7 @@
include *.txt
include Makefile
include VERSION
include MANIFEST.in
include scripts/*
include README.rst
recursive-include debian *

View File

@ -0,0 +1,89 @@
PROJECT=charmhelpers
PYTHON := /usr/bin/env python
SUITE=unstable
TESTS=tests/

# Command targets are not files; declare them phony so a stray file with
# the same name cannot mask them, and so they always run when requested.
.PHONY: all sdeb deb source clean userinstall test test2 test3 ftest lint docs release build

all:
	@echo "make source - Create source package"
	@echo "make sdeb - Create debian source package"
	@echo "make deb - Create debian package"
	@echo "make clean"
	@echo "make userinstall - Install locally"
	@echo "make docs - Build html documentation"
	@echo "make release - Build and upload package and docs to PyPI"
	@echo "make test"

sdeb: source
	scripts/build source

deb: source
	scripts/build

source: setup.py
	scripts/update-revno
	$(PYTHON) setup.py sdist

clean:
	-$(PYTHON) setup.py clean
	rm -rf build/ MANIFEST
	find . -name '*.pyc' -delete
	find . -name '__pycache__' -delete
	rm -rf dist/*
	rm -rf .venv
	rm -rf .venv3
	(which dh_clean && dh_clean) || true

userinstall:
	scripts/update-revno
	$(PYTHON) setup.py install --user

.venv:
	dpkg-query -W -f='$${status}' gcc python-dev python-virtualenv 2>/dev/null | grep --invert-match "not-installed" || sudo apt-get install -y python-dev python-virtualenv
	virtualenv .venv --system-site-packages
	.venv/bin/pip install -U pip
	.venv/bin/pip install -I -r test-requirements.txt
	.venv/bin/pip install bzr

.venv3:
	dpkg-query -W -f='$${status}' gcc python3-dev python-virtualenv python3-apt 2>/dev/null | grep --invert-match "not-installed" || sudo apt-get install -y python3-dev python-virtualenv python3-apt
	virtualenv .venv3 --python=python3 --system-site-packages
	.venv3/bin/pip install -U pip
	.venv3/bin/pip install -I -r test-requirements.txt

# Note we don't even attempt to run tests if lint isn't passing.
test: lint test2 test3
	@echo OK

test2:
	@echo Starting Py2 tests...
	.venv/bin/nosetests -s --nologcapture tests/

test3:
	@echo Starting Py3 tests...
	.venv3/bin/nosetests -s --nologcapture tests/

ftest: lint
	@echo Starting fast tests...
	.venv/bin/nosetests --attr '!slow' --nologcapture tests/
	.venv3/bin/nosetests --attr '!slow' --nologcapture tests/

lint: .venv .venv3
	@echo Checking for Python syntax...
	@.venv/bin/flake8 --ignore=E402,E501,W504 $(PROJECT) $(TESTS) tools/ \
	    && echo Py2 OK
	@.venv3/bin/flake8 --ignore=E402,E501,W504 $(PROJECT) $(TESTS) tools/ \
	    && echo Py3 OK

docs:
	- [ -z "`dpkg -l | grep python-sphinx`" ] && sudo apt-get install python-sphinx -y
	- [ -z "`dpkg -l | grep python-pip`" ] && sudo apt-get install python-pip -y
	- [ -z "`pip list | grep -i sphinx-pypi-upload`" ] && sudo pip install sphinx-pypi-upload
	- [ -z "`pip list | grep -i sphinx_rtd_theme`" ] && sudo pip install sphinx_rtd_theme
	cd docs && make html && cd -

release: docs
	$(PYTHON) setup.py sdist upload upload_sphinx

build: test lint docs

View File

@ -0,0 +1,52 @@
CharmHelpers |badge|
--------------------
.. |badge| image:: https://github.com/juju/charm-helpers/actions/workflows/build.yml/badge.svg?branch=master
:target: https://github.com/juju/charm-helpers/actions/workflows/build.yml
Overview
========
CharmHelpers provides an opinionated set of tools for building Juju charms.
The full documentation is available online at: https://charm-helpers.readthedocs.io/
Common Usage Examples
=====================
* interaction with charm-specific Juju unit agents via hook tools;
* processing of events and execution of decorated functions based on event names;
* handling of persistent storage between independent charm invocations;
* rendering of configuration file templates;
* modification of system configuration files;
* installation of packages;
* retrieval of machine-specific details;
* implementation of application-specific code reused in similar charms.
Why Python?
===========
* Python is an extremely popular, easy to learn, and powerful language which is also common in automation tools;
* An interpreted language helps with charm portability across different CPU architectures;
* Doesn't require debugging symbols (just use pdb in-place);
* An author or a user is able to make debugging changes without recompiling a charm.
Dev/Test
========
See the HACKING.md file for information about testing and development.
License
=======
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,4 @@
This directory contains executables for accessing charmhelpers functionality
Please see charmhelpers.cli for the recommended way to add scripts.

8
nrpe/mod/charmhelpers/bin/chlp Executable file
View File

@ -0,0 +1,8 @@
#!/usr/bin/env python
# Entry point for the `chlp` command-line tool.
# The star import is required for its side effect: importing
# charmhelpers.cli.commands pulls in the submodules whose decorated
# functions register themselves as subcommands on `cmdline`.
from charmhelpers.cli import cmdline
from charmhelpers.cli.commands import *  # noqa: F401,F403 -- registers subcommands

if __name__ == '__main__':
    # Parse argv, dispatch to the selected subcommand and print its result.
    cmdline.run()

View File

@ -0,0 +1,31 @@
#!/usr/bin/env python
"""Command-line helper for common charm tasks (currently only ``execd``)."""
import argparse

from charmhelpers.contrib.charmsupport import execd


def run_execd(args):
    """Run every executable in the chosen exec.d module directory.

    ``die_on_error=True`` makes a failing script abort the whole run.
    """
    execd.execd_run(args.module, args.dir, die_on_error=True)


def parse_args():
    """Build the argument parser and parse sys.argv.

    The subcommand is made mandatory: on Python 3, argparse subparsers
    are optional by default, so an empty command line would previously
    slip through and the later ``args.func`` access would raise
    AttributeError instead of printing a usage error.
    """
    parser = argparse.ArgumentParser(description='Perform common charm tasks')
    subparsers = parser.add_subparsers(help='Commands', dest='command')
    subparsers.required = True
    execd_parser = subparsers.add_parser('execd',
                                         help='Execute a directory of commands')
    execd_parser.add_argument('--module', default='charm-pre-install',
                              help='module to run (default: charm-pre-install)')
    execd_parser.add_argument('--dir',
                              help="Override the exec.d directory path")
    execd_parser.set_defaults(func=run_execd)
    return parser.parse_args()


def main():
    """Parse arguments and dispatch to the selected subcommand."""
    arguments = parse_args()
    arguments.func(arguments)


if __name__ == '__main__':
    exit(main())

View File

@ -0,0 +1,11 @@
#!/usr/bin/env python
'''
Directly call a salt command in the modules, does not require a running salt
minion to run.
'''
# Thin wrapper: delegate straight to Salt's own CLI entry point so the
# charm can run salt-call style (masterless) commands.
from salt.scripts import salt_call

if __name__ == '__main__':
    salt_call()

View File

@ -0,0 +1,99 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
from __future__ import print_function
from __future__ import absolute_import

import functools
import inspect
import subprocess
import sys

# Best-effort dependency bootstrap: a charm may import charmhelpers before
# its Python dependencies are installed.  If `six` or `yaml` is missing,
# install the apt package matching the running interpreter (python-* for
# Python 2, python3-* otherwise) and retry the import.  Requires root and
# a working apt; check_call raises if the install fails.
try:
    import six  # NOQA:F401
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
    import six  # NOQA:F401

try:
    import yaml  # NOQA:F401
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
    import yaml  # NOQA:F401
# Holds a mapping of mangled function names that have been deprecated
# using the @deprecate decorator below. This is so that the warning is only
# printed once for each usage of the function.
__deprecated_functions = {}


def deprecate(warning, date=None, log=None):
    """Add a deprecation warning the first time the function is used.

    The date, which is a string in semi-ISO 8601 format (YYYY-MM),
    indicates the year-month that the function is officially going to be
    removed.

    usage:

    @deprecate('use core/fetch/add_source() instead', '2017-04')
    def contributed_add_source_thing(...):
        ...

    And it then prints to the log ONCE that the function is deprecated.

    The reason for passing the logging function (log) is so that hookenv.log
    can be used for a charm if needed.

    :param warning: String to indicate what is to be used instead.
    :param date: Optional string in YYYY-MM format to indicate when the
                 function will definitely (probably) be removed.
    :param log: The log function to call in order to log. If None, logs to
                stdout
    """
    def wrap(f):
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            try:
                module = inspect.getmodule(f)
                file = inspect.getsourcefile(f)
                # inspect.getsourcelines() returns (source_lines, lineno);
                # use the starting line NUMBER for the dedup key.  The old
                # code formatted lines[0] -- the entire list of source
                # lines -- into the key, producing a huge string.
                lineno = inspect.getsourcelines(f)[1]
                f_name = "{}-{}-{}-{}".format(
                    module.__name__, file, lineno, f.__name__)
            except (IOError, TypeError):
                # assume it was local, so just use the name of the function
                f_name = f.__name__
            if f_name not in __deprecated_functions:
                # First use of this function: record it and emit the
                # warning exactly once.
                __deprecated_functions[f_name] = True
                s = "DEPRECATION WARNING: Function {} is being removed".format(
                    f.__name__)
                if date:
                    s = "{} on/around {}".format(s, date)
                if warning:
                    s = "{} : {}".format(s, warning)
                if log:
                    log(s)
                else:
                    print(s)
            return f(*args, **kwargs)
        return wrapped_f
    return wrap

View File

@ -0,0 +1,57 @@
==========
Commandant
==========
-----------------------------------------------------
Automatic command-line interfaces to Python functions
-----------------------------------------------------
One of the benefits of ``libvirt`` is the uniformity of the interface: the C API (as well as the bindings in other languages) is a set of functions that accept parameters that are nearly identical to the command-line arguments. If you run ``virsh``, you get an interactive command prompt that supports all of the same commands that your shell scripts use as ``virsh`` subcommands.
Command execution and stdio manipulation is the greatest common factor across all development systems in the POSIX environment. By exposing your functions as commands that manipulate streams of text, you can make life easier for all the Ruby and Erlang and Go programmers in your life.
Goals
=====
* Single decorator to expose a function as a command.
* now two decorators - one "automatic" and one that allows authors to manipulate the arguments for fine-grained control. (MW)
* Automatic analysis of function signature through ``inspect.getargspec()`` on python 2 or ``inspect.getfullargspec()`` on python 3
* Command argument parser built automatically with ``argparse``
* Interactive interpreter loop object made with ``Cmd``
* Options to output structured return value data via ``pprint``, ``yaml`` or ``json`` dumps.
Other Important Features that need writing
------------------------------------------
* Help and Usage documentation can be automatically generated, but it will be important to let users override this behaviour
* The decorator should allow specifying further parameters to the parser's add_argument() calls, to specify types or to make arguments behave as boolean flags, etc.
- Filename arguments are important, as good practice is for functions to accept file objects as parameters.
- choices arguments help to limit bad input before the function is called
* Some automatic behaviour could make for better defaults, once the user can override them.
- We could automatically detect arguments that default to False or True, and automatically support --no-foo for foo=True.
- We could automatically support hyphens as alternates for underscores
- Arguments defaulting to sequence types could support the ``append`` action.
-----------------------------------------------------
Implementing subcommands
-----------------------------------------------------
(WIP)
So as to avoid dependencies on the cli module, subcommands should be defined separately from their implementations. The recommendation would be to place definitions into separate modules near the implementations which they expose.
Some examples::
from charmhelpers.cli import CommandLine
from charmhelpers.payload import execd
from charmhelpers.foo import bar
cli = CommandLine()
cli.subcommand(execd.execd_run)
@cli.subcommand_builder("bar", help="Bar baz qux")
def barcmd_builder(subparser):
subparser.add_argument('argument1', help="yackety")
return bar

View File

@ -0,0 +1,196 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import argparse
import sys
import six
from six.moves import zip
import charmhelpers.core.unitdata
class OutputFormatter(object):
    """Render a subcommand's return value in one of several text formats.

    Every name in ``formats`` corresponds to a method on this class that
    writes the given data to ``outfile`` in that format.
    """
    def __init__(self, outfile=sys.stdout):
        self.formats = (
            "raw",
            "json",
            "py",
            "yaml",
            "csv",
            "tab",
        )
        self.outfile = outfile

    def add_arguments(self, argument_parser):
        """Register --format plus one short/long flag per format name.

        All of the flags are mutually exclusive and share dest='format'.
        """
        group = argument_parser.add_mutually_exclusive_group()
        choices = self.supported_formats
        group.add_argument("--format", metavar='FMT',
                           help="Select output format for returned data, "
                                "where FMT is one of: {}".format(choices),
                           choices=choices, default='raw')
        for name in self.formats:
            handler = getattr(self, name)
            group.add_argument("-{}".format(name[0]),
                               "--{}".format(name), action='store_const',
                               const=name, dest='format',
                               help=handler.__doc__)

    @property
    def supported_formats(self):
        # The tuple of valid format names.
        return self.formats

    def raw(self, output):
        """Output data as raw string (default)"""
        text = output
        if isinstance(text, (list, tuple)):
            text = '\n'.join(map(str, text))
        self.outfile.write(str(text))

    def py(self, output):
        """Output data as a nicely-formatted python data structure"""
        import pprint
        pprint.pprint(output, stream=self.outfile)

    def json(self, output):
        """Output data in JSON format"""
        import json
        json.dump(output, self.outfile)

    def yaml(self, output):
        """Output data in YAML format"""
        import yaml
        yaml.safe_dump(output, self.outfile)

    def csv(self, output):
        """Output data as excel-compatible CSV"""
        import csv
        csv.writer(self.outfile).writerows(output)

    def tab(self, output):
        """Output data in excel-compatible tab-delimited format"""
        import csv
        csv.writer(self.outfile, dialect=csv.excel_tab).writerows(output)

    def format_output(self, output, fmt='raw'):
        # Dispatch by name: each supported format is a method on self.
        getattr(self, fmt)(output)
class CommandLine(object):
    """Registry and dispatcher for charm-helpers subcommands.

    Functions are registered with the decorator methods below; run()
    parses sys.argv, invokes the selected function with arguments pulled
    from the parsed namespace, and prints its return value through the
    OutputFormatter.
    """
    argument_parser = None
    subparsers = None
    formatter = None
    exit_code = 0

    def __init__(self):
        # Lazily build the parser/formatter/subparser machinery so a
        # subclass (or prior setup) may pre-populate these attributes.
        if not self.argument_parser:
            self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
        if not self.formatter:
            self.formatter = OutputFormatter()
            self.formatter.add_arguments(self.argument_parser)
        if not self.subparsers:
            self.subparsers = self.argument_parser.add_subparsers(help='Commands')

    def subcommand(self, command_name=None):
        """
        Decorate a function as a subcommand. Use its arguments as the
        command-line arguments"""
        def wrapper(decorated):
            cmd_name = command_name or decorated.__name__
            subparser = self.subparsers.add_parser(cmd_name,
                                                  description=decorated.__doc__)
            # Map the function's own parameters onto argparse arguments.
            for args, kwargs in describe_arguments(decorated):
                subparser.add_argument(*args, **kwargs)
            subparser.set_defaults(func=decorated)
            return decorated
        return wrapper

    def test_command(self, decorated):
        """
        Subcommand is a boolean test function, so bool return values should be
        converted to a 0/1 exit code.
        """
        decorated._cli_test_command = True
        return decorated

    def no_output(self, decorated):
        """
        Subcommand is not expected to return a value, so don't print a spurious None.
        """
        decorated._cli_no_output = True
        return decorated

    def subcommand_builder(self, command_name, description=None):
        """
        Decorate a function that builds a subcommand. Builders should accept a
        single argument (the subparser instance) and return the function to be
        run as the command."""
        def wrapper(decorated):
            subparser = self.subparsers.add_parser(command_name)
            func = decorated(subparser)
            subparser.set_defaults(func=func)
            subparser.description = description or func.__doc__
            # Return the builder so the decorated name stays bound to a
            # callable; previously the wrapper implicitly returned None,
            # silently rebinding the module-level name to None.
            return decorated
        return wrapper

    def run(self):
        "Run cli, processing arguments and executing subcommands."
        arguments = self.argument_parser.parse_args()
        # getargspec was removed from Python 3.11; pick the right API.
        if six.PY2:
            argspec = inspect.getargspec(arguments.func)
        else:
            argspec = inspect.getfullargspec(arguments.func)
        # Rebuild the positional call from the parsed namespace, in the
        # order the target function declares its parameters.
        vargs = []
        for arg in argspec.args:
            vargs.append(getattr(arguments, arg))
        if argspec.varargs:
            vargs.extend(getattr(arguments, argspec.varargs))
        output = arguments.func(*vargs)
        if getattr(arguments.func, '_cli_test_command', False):
            # Boolean test commands report via exit code, not stdout.
            self.exit_code = 0 if output else 1
            output = ''
        if getattr(arguments.func, '_cli_no_output', False):
            output = ''
        self.formatter.format_output(output, arguments.format)
        # Persist any unitdata changes made during the command.
        if charmhelpers.core.unitdata._KV:
            charmhelpers.core.unitdata._KV.flush()
cmdline = CommandLine()
def describe_arguments(func):
    """
    Analyze a function's signature and return a data structure suitable for
    passing in as arguments to an argparse parser's add_argument() method."""
    # getargspec was removed in newer Python 3; pick the available API.
    if six.PY2:
        argspec = inspect.getargspec(func)
    else:
        argspec = inspect.getfullargspec(func)
    # we should probably raise an exception somewhere if func includes **kwargs
    defaults = argspec.defaults
    if defaults:
        # Parameters with defaults become optional --flags; the rest stay
        # positional.
        n_keyword = len(defaults)
        positional_args = argspec.args[:-n_keyword]
        for name, default in zip(argspec.args[-n_keyword:], defaults):
            yield ('--{}'.format(name),), {'default': default}
    else:
        positional_args = list(argspec.args)
    for name in positional_args:
        yield (name,), {}
    if argspec.varargs:
        # *args maps onto a zero-or-more positional argument.
        yield (argspec.varargs,), {'nargs': '*'}

View File

@ -0,0 +1,34 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import cmdline
from charmhelpers.contrib.benchmark import Benchmark
@cmdline.subcommand(command_name='benchmark-start')
def start():
    # Mark the beginning of a benchmark run.
    Benchmark.start()


@cmdline.subcommand(command_name='benchmark-finish')
def finish():
    # Mark the end of a benchmark run.
    Benchmark.finish()


@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
def service(subparser):
    # Builder: registers the three positional arguments and returns
    # Benchmark.set_composite_score, which run() will call with
    # (value, units, direction).
    subparser.add_argument("value", help="The composite score.")
    subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
    subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
    return Benchmark.set_composite_score

View File

@ -0,0 +1,30 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module loads sub-modules into the python runtime so they can be
discovered via the inspect module. In order to prevent flake8 from (rightfully)
telling us these are unused modules, throw a ' # noqa' at the end of each import
so that the warning is suppressed.
"""
from . import CommandLine # noqa
"""
Import the sub-modules which have decorated subcommands to register with chlp.
"""
from . import host # noqa
from . import benchmark # noqa
from . import unitdata # noqa
from . import hookenv # noqa

View File

@ -0,0 +1,21 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import cmdline
from charmhelpers.core import hookenv
# Expose selected hookenv helpers as chlp subcommands.  relation_id and
# remote_service_name are registered through their `._wrapped` attribute --
# presumably the undecorated function behind a wrapper in hookenv, so the
# real signature can be inspected for argument parsing (TODO: confirm
# against charmhelpers.core.hookenv).
cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
cmdline.subcommand('service-name')(hookenv.service_name)
cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)

View File

@ -0,0 +1,29 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import cmdline
from charmhelpers.core import host
@cmdline.subcommand()
def mounts():
    "List mounts"
    return host.mounts()


@cmdline.subcommand_builder('service', description="Control system services")
def service(subparser):
    # Builder: `chlp service <action> <service_name>` delegates to
    # charmhelpers.core.host.service(action, service_name).
    subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
    subparser.add_argument("service_name", help="Name of the service to control")
    return host.service

View File

@ -0,0 +1,46 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import cmdline
from charmhelpers.core import unitdata
@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
def unitdata_cmd(subparser):
    """Build the 'unitdata' subcommand with get/getrange/set sub-actions."""
    nested = subparser.add_subparsers()

    get_cmd = nested.add_parser('get', help='Retrieve data')
    get_cmd.add_argument('key', help='Key to retrieve the value of')
    # 'value' defaults to None so _unitdata_cmd always receives all three
    # parameters regardless of which sub-action was chosen.
    get_cmd.set_defaults(action='get', value=None)

    getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data')
    getrange_cmd.add_argument('key', metavar='prefix',
                              help='Prefix of the keys to retrieve')
    getrange_cmd.set_defaults(action='getrange', value=None)

    set_cmd = nested.add_parser('set', help='Store data')
    set_cmd.add_argument('key', help='Key to set')
    set_cmd.add_argument('value', help='Value to store')
    set_cmd.set_defaults(action='set')

    def _unitdata_cmd(action, key, value):
        # Dispatch on the chosen sub-action.  'set' flushes immediately so
        # the change is persisted; it returns '' to avoid printing None.
        if action == 'get':
            return unitdata.kv().get(key)
        elif action == 'getrange':
            return unitdata.kv().getrange(key)
        elif action == 'set':
            unitdata.kv().set(key, value)
            unitdata.kv().flush()
            return ''
    return _unitdata_cmd

View File

@ -0,0 +1,205 @@
# Copyright 2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
A Pythonic API to interact with the charm hook environment.
:author: Stuart Bishop <stuart.bishop@canonical.com>
'''
import six
from charmhelpers.core import hookenv
from collections import OrderedDict
if six.PY3:
from collections import UserDict # pragma: nocover
else:
from UserDict import IterableUserDict as UserDict # pragma: nocover
class Relations(OrderedDict):
    '''Mapping relation name -> relation id -> Relation.

    >>> rels = Relations()
    >>> rels['sprog']['sprog:12']['client/6']['widget']
    'remote widget'
    >>> rels['sprog']['sprog:12'].local['widget'] = 'local widget'
    >>> rels['sprog']['sprog:12'].local['widget']
    'local widget'
    >>> rels.peer.local['widget']
    'local widget on the peer relation'
    '''
    def __init__(self):
        super(Relations, self).__init__()
        for relname in sorted(hookenv.relation_types()):
            self[relname] = OrderedDict()
            relids = hookenv.relation_ids(relname)
            # Sort numerically by the id suffix ('sprog:12' -> 12) so
            # iteration order is stable and matches relation age.
            relids.sort(key=lambda x: int(x.split(':', 1)[-1]))
            for relid in relids:
                self[relname][relid] = Relation(relid)

    @property
    def peer(self):
        # The Relation for this unit's peer relation, or None (implicitly)
        # if no known relation matches the peer relation id.
        peer_relid = hookenv.peer_relation_id()
        for rels in self.values():
            if peer_relid in rels:
                return rels[peer_relid]
class Relation(OrderedDict):
    '''Mapping of unit -> remote RelationInfo for a relation.

    This is an OrderedDict mapping, ordered numerically by unit number.

    Also provides access to the local RelationInfo, and peer RelationInfo
    instances by the 'local' and 'peers' attributes.

    >>> r = Relation('sprog:12')
    >>> r.keys()
    ['client/9', 'client/10'] # Ordered numerically
    >>> r['client/10']['widget'] # A remote RelationInfo setting
    'remote widget'
    >>> r.local['widget'] # The local RelationInfo setting
    'local widget'
    '''
    relid = None  # The relation id.
    relname = None  # The relation name (also known as relation type).
    service = None  # The remote service name, if known.
    local = None  # The local end's RelationInfo.
    peers = None  # Map of peer -> RelationInfo. None if no peer relation.

    def __init__(self, relid):
        remote_units = hookenv.related_units(relid)
        # Order units numerically by their '/N' suffix.
        remote_units.sort(key=lambda u: int(u.split('/', 1)[-1]))
        super(Relation, self).__init__((unit, RelationInfo(relid, unit))
                                       for unit in remote_units)

        self.relname = relid.split(':', 1)[0]
        self.relid = relid
        self.local = RelationInfo(relid, hookenv.local_unit())

        # The remote service name is the same for every remote unit, so
        # take it from the first RelationInfo (stays None if no units yet).
        for relinfo in self.values():
            self.service = relinfo.service
            break

        # If we have peers, and they have joined both the provided peer
        # relation and this relation, we can peek at their data too.
        # This is useful for creating consensus without leadership.
        peer_relid = hookenv.peer_relation_id()
        if peer_relid and peer_relid != relid:
            peers = hookenv.related_units(peer_relid)
            if peers:
                peers.sort(key=lambda u: int(u.split('/', 1)[-1]))
                # NOTE: each peer's RelationInfo is built against *this*
                # relation (relid), not the peer relation -- we read what
                # the peer has published here.
                self.peers = OrderedDict((peer, RelationInfo(relid, peer))
                                         for peer in peers)
            else:
                self.peers = OrderedDict()
        else:
            self.peers = None

    def __str__(self):
        return '{} ({})'.format(self.relid, self.service)
class RelationInfo(UserDict):
    '''The bag of data at an end of a relation.

    Every unit participating in a relation has a single bag of
    data associated with that relation. This is that bag.

    The bag of data for the local unit may be updated. Remote data
    is immutable and will remain static for the duration of the hook.

    Changes made to the local units relation data only become visible
    to other units after the hook completes successfully. If the hook
    does not complete successfully, the changes are rolled back.

    Unlike standard Python mappings, setting an item to None is the
    same as deleting it.

    >>> relinfo = RelationInfo('db:12') # Default is the local unit.
    >>> relinfo['user'] = 'fred'
    >>> relinfo['user']
    'fred'
    >>> relinfo['user'] = None
    >>> 'fred' in relinfo
    False

    This class wraps hookenv.relation_get and hookenv.relation_set.
    All caching is left up to these two methods to avoid synchronization
    issues. Data is only loaded on demand.
    '''
    relid = None  # The relation id.
    relname = None  # The relation name (also known as the relation type).
    unit = None  # The unit id.
    number = None  # The unit number (integer).
    service = None  # The service name.

    def __init__(self, relid, unit):
        self.relname = relid.split(':', 1)[0]
        self.relid = relid
        self.unit = unit
        # Unit ids look like 'service/3'.
        self.service, num = self.unit.split('/', 1)
        self.number = int(num)

    def __str__(self):
        return '{} ({})'.format(self.relid, self.unit)

    @property
    def data(self):
        # Loaded on demand; any caching is delegated to relation_get.
        return hookenv.relation_get(rid=self.relid, unit=self.unit)

    def __setitem__(self, key, value):
        # Only the local end of the relation may be written to.
        if self.unit != hookenv.local_unit():
            raise TypeError('Attempting to set {} on remote unit {}'
                            ''.format(key, self.unit))
        if value is not None and not isinstance(value, six.string_types):
            # We don't do implicit casting. This would cause simple
            # types like integers to be read back as strings in subsequent
            # hooks, and mutable types would require a lot of wrapping
            # to ensure relation-set gets called when they are mutated.
            raise ValueError('Only string values allowed')
        hookenv.relation_set(self.relid, {key: value})

    def __delitem__(self, key):
        # Deleting a key and setting it to null is the same thing in
        # Juju relations.
        self[key] = None
class Leader(UserDict):
    '''Juju leadership settings exposed as a mutable mapping.

    Reads (via ``data``) work on any unit; writes raise TypeError unless
    this unit is the leader.  As with RelationInfo, setting a key to None
    deletes it.
    '''
    def __init__(self):
        pass  # Don't call superclass initializer, as it will nuke self.data

    @property
    def data(self):
        # Loaded on demand from the leadership settings.
        return hookenv.leader_get()

    def __setitem__(self, key, value):
        if not hookenv.is_leader():
            raise TypeError('Not the leader. Cannot change leader settings.')
        if value is not None and not isinstance(value, six.string_types):
            # We don't do implicit casting. This would cause simple
            # types like integers to be read back as strings in subsequent
            # hooks, and mutable types would require a lot of wrapping
            # to ensure leader-set gets called when they are mutated.
            raise ValueError('Only string values allowed')
        hookenv.leader_set({key: value})

    def __delitem__(self, key):
        # Deleting a key and setting it to null is the same thing in
        # Juju leadership settings.
        self[key] = None

View File

@ -0,0 +1,13 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,306 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
"""
The ansible package enables you to easily use the configuration management
tool `Ansible`_ to setup and configure your charm. All of your charm
configuration options and relation-data are available as regular Ansible
variables which can be used in your playbooks and templates.
.. _Ansible: https://www.ansible.com/
Usage
=====
Here is an example directory structure for a charm to get you started::
charm-ansible-example/
|-- ansible
| |-- playbook.yaml
| `-- templates
| `-- example.j2
|-- config.yaml
|-- copyright
|-- icon.svg
|-- layer.yaml
|-- metadata.yaml
|-- reactive
| `-- example.py
|-- README.md
Running a playbook called ``playbook.yaml`` when the ``install`` hook is run
can be as simple as::
from charmhelpers.contrib import ansible
from charms.reactive import hook
@hook('install')
def install():
ansible.install_ansible_support()
ansible.apply_playbook('ansible/playbook.yaml')
Here is an example playbook that uses the ``template`` module to template the
file ``example.j2`` to the charm host and then uses the ``debug`` module to
print out all the host and Juju variables that you can use in your playbooks.
Note that you must target ``localhost`` as the playbook is run locally on the
charm host::
---
- hosts: localhost
tasks:
- name: Template a file
template:
src: templates/example.j2
dest: /tmp/example.j2
- name: Print all variables available to Ansible
debug:
var: vars
Read more online about `playbooks`_ and standard Ansible `modules`_.
.. _playbooks: https://docs.ansible.com/ansible/latest/user_guide/playbooks.html
.. _modules: https://docs.ansible.com/ansible/latest/user_guide/modules.html
A further feature of the Ansible hooks is to provide a light weight "action"
scripting tool. This is a decorator that you apply to a function, and that
function can now receive cli args, and can pass extra args to the playbook::
@hooks.action()
def some_action(amount, force="False"):
"Usage: some-action AMOUNT [force=True]" # <-- shown on error
# process the arguments
# do some calls
# return extra-vars to be passed to ansible-playbook
return {
'amount': int(amount),
'type': force,
}
You can now create a symlink to hooks.py that can be invoked like a hook, but
with cli params::
# link actions/some-action to hooks/hooks.py
actions/some-action amount=10 force=true
Install Ansible via pip
=======================
If you want to install a specific version of Ansible via pip instead of
``install_ansible_support`` which uses APT, consider using the layer options
of `layer-basic`_ to install Ansible in a virtualenv::
options:
basic:
python_packages: ['ansible==2.9.0']
include_system_packages: true
use_venv: true
.. _layer-basic: https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#layer-configuration
"""
import os
import json
import stat
import subprocess
import functools
import charmhelpers.contrib.templating.contexts
import charmhelpers.core.host
import charmhelpers.core.hookenv
import charmhelpers.fetch
# Charm root directory as provided by Juju ('' if CHARM_DIR is unset).
charm_dir = os.environ.get('CHARM_DIR', '')
# Inventory file written by install_ansible_support().
ansible_hosts_path = '/etc/ansible/hosts'
# Ansible will automatically include any vars in the following
# file in its inventory when run locally.
ansible_vars_path = '/etc/ansible/host_vars/localhost'
def install_ansible_support(from_ppa=True, ppa_location='ppa:ansible/ansible'):
    """Installs Ansible via APT.

    By default this installs Ansible from the `PPA`_ linked from
    the Ansible `website`_ or from a PPA set in ``ppa_location``.

    .. _PPA: https://launchpad.net/~ansible/+archive/ubuntu/ansible
    .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu

    If ``from_ppa`` is ``False``, then Ansible will be installed from
    Ubuntu's Universe repositories.
    """
    if from_ppa:
        charmhelpers.fetch.add_source(ppa_location)
        charmhelpers.fetch.apt_update(fatal=True)
    charmhelpers.fetch.apt_install('ansible')
    # Write a one-host inventory: playbooks target localhost over a local
    # connection (no SSH), with remote_tmp under /root/.ansible/tmp.
    # 'w+' truncates any previous inventory.
    with open(ansible_hosts_path, 'w+') as hosts_file:
        hosts_file.write('localhost ansible_connection=local ansible_remote_tmp=/root/.ansible/tmp')
def apply_playbook(playbook, tags=None, extra_vars=None):
    """Run a playbook.

    This helper runs a playbook with juju state variables as context,
    therefore variables set in application config can be used directly.
    List of tags (--tags) and dictionary with extra_vars (--extra-vars)
    can be passed as additional parameters.

    Read more about playbook `_variables`_ online.

    .. _variables: https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html

    Example::

        # Run ansible/playbook.yaml with tag install and pass extra
        # variables var_a and var_b
        apply_playbook(
            playbook='ansible/playbook.yaml',
            tags=['install'],
            extra_vars={'var_a': 'val_a', 'var_b': 'val_b'}
        )

        # Run ansible/playbook.yaml with tag config and extra variable nested,
        # which is passed as json and can be used as dictionary in playbook
        apply_playbook(
            playbook='ansible/playbook.yaml',
            tags=['config'],
            extra_vars={'nested': {'a': 'value1', 'b': 'value2'}}
        )

        # Custom config file can be passed within extra_vars
        apply_playbook(
            playbook='ansible/playbook.yaml',
            extra_vars="@some_file.json"
        )

    :param playbook: Path to the playbook to run.
    :param tags: Optional list of tag names; joined into the --tags value.
    :param extra_vars: Optional data serialised to JSON for --extra-vars.
    """
    tags = tags or []
    tags = ",".join(tags)
    # Dump Juju config/relation state into the localhost host_vars file so
    # the playbook sees it as ordinary Ansible variables.  Mode is
    # owner-read/write only.
    charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
        ansible_vars_path, namespace_separator='__',
        allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR))

    # we want ansible's log output to be unbuffered
    env = os.environ.copy()
    # Propagate any model proxy settings to the ansible-playbook process.
    proxy_settings = charmhelpers.core.hookenv.env_proxy_settings()
    if proxy_settings:
        env.update(proxy_settings)
    env['PYTHONUNBUFFERED'] = "1"
    call = [
        'ansible-playbook',
        '-c',
        'local',
        playbook,
    ]
    if tags:
        call.extend(['--tags', '{}'.format(tags)])
    if extra_vars:
        # JSON is accepted by --extra-vars and preserves nested structures.
        call.extend(['--extra-vars', json.dumps(extra_vars)])
    subprocess.check_call(call, env=env)
class AnsibleHooks(charmhelpers.core.hookenv.Hooks):
    """Run a playbook with the hook-name as the tag.

    This helper builds on the standard hookenv.Hooks helper,
    but additionally runs the playbook with the hook-name specified
    using --tags (ie. running all the tasks tagged with the hook-name).

    Example::

        hooks = AnsibleHooks(playbook_path='ansible/my_machine_state.yaml')

        # All the tasks within my_machine_state.yaml tagged with 'install'
        # will be run automatically after do_custom_work()
        @hooks.hook()
        def install():
            do_custom_work()

        # For most of your hooks, you won't need to do anything other
        # than run the tagged tasks for the hook:
        @hooks.hook('config-changed', 'start', 'stop')
        def just_use_playbook():
            pass

        # As a convenience, you can avoid the above noop function by specifying
        # the hooks which are handled by ansible-only and they'll be registered
        # for you:
        # hooks = AnsibleHooks(
        #     'ansible/my_machine_state.yaml',
        #     default_hooks=['config-changed', 'start', 'stop'])

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)
    """

    def __init__(self, playbook_path, default_hooks=None):
        """Register any hooks handled by ansible.

        :param str playbook_path: Playbook run after every hook/action.
        :param list default_hooks: Hook names handled entirely by the
            playbook; a no-op python handler is registered for each.
        """
        super(AnsibleHooks, self).__init__()

        self._actions = {}
        self.playbook_path = playbook_path

        default_hooks = default_hooks or []

        def noop(*args, **kwargs):
            pass

        for hook in default_hooks:
            self.register(hook, noop)

    def register_action(self, name, function):
        """Register a charm action handler under ``name``."""
        self._actions[name] = function

    def execute(self, args):
        """Execute the hook followed by the playbook using the hook as tag.

        For registered actions the handler's return value is forwarded to
        the playbook as --extra-vars; for plain hooks the base class
        handler runs first and the playbook gets no extra vars.
        """
        hook_name = os.path.basename(args[0])
        extra_vars = None
        if hook_name in self._actions:
            extra_vars = self._actions[hook_name](args[1:])
        else:
            super(AnsibleHooks, self).execute(args)

        charmhelpers.contrib.ansible.apply_playbook(
            self.playbook_path, tags=[hook_name], extra_vars=extra_vars)

    def action(self, *action_names):
        """Decorator, registering them as actions.

        NOTE: ``action_names`` is currently unused; the action is
        registered under the decorated function's name (plus a
        hyphenated alias when the name contains underscores).
        """
        def action_wrapper(decorated):

            @functools.wraps(decorated)
            def wrapper(argv):
                # Split on the first '=' only so that values which
                # themselves contain '=' (e.g. base64 payloads) survive;
                # an unbounded split made dict() raise ValueError for
                # such 'key=a=b' arguments.
                kwargs = dict(arg.split('=', 1) for arg in argv)
                try:
                    return decorated(**kwargs)
                except TypeError as e:
                    # Surface the action's docstring to help diagnose a
                    # wrong/missing argument.
                    if decorated.__doc__:
                        e.args += (decorated.__doc__,)
                    raise

            self.register_action(decorated.__name__, wrapper)
            if '_' in decorated.__name__:
                self.register_action(
                    decorated.__name__.replace('_', '-'), wrapper)

            return wrapper
        return action_wrapper

View File

@ -0,0 +1,124 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import time
import os
from distutils.spawn import find_executable
from charmhelpers.core.hookenv import (
in_relation_hook,
relation_ids,
relation_set,
relation_get,
)
def action_set(key, val):
    """Publish an action result via the ``action-set`` hook tool.

    Nested dicts are flattened into dotted keys (``key.subkey=...``).
    Returns True when the tool is available (i.e. we are running inside
    a juju action context), False otherwise.
    """
    if not find_executable('action-set'):
        return False
    if isinstance(val, dict):
        # Recurse so nested structures become dotted key names.
        for subkey, subval in val.items():
            action_set('%s.%s' % (key, subkey), subval)
        return True
    subprocess.check_call(['action-set', '%s=%s' % (key, val)])
    return True
class Benchmark():
    """
    Helper class for the `benchmark` interface.

    :param list actions: Define the actions that are also benchmarks

    From inside the benchmark-relation-changed hook, you would
    Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom'])

    Examples:

        siege = Benchmark(['siege'])
        siege.start()
        [... run siege ...]
        # The higher the score, the better the benchmark
        siege.set_composite_score(16.70, 'trans/sec', 'desc')
        siege.finish()
    """

    BENCHMARK_CONF = '/etc/benchmark.conf'  # Replaced in testing

    # Relation keys that must all be present before the conf is written.
    required_keys = [
        'hostname',
        'port',
        'graphite_port',
        'graphite_endpoint',
        'api_port'
    ]

    def __init__(self, benchmarks=None):
        if in_relation_hook():
            if benchmarks is not None:
                for rid in sorted(relation_ids('benchmark')):
                    relation_set(relation_id=rid, relation_settings={
                        'benchmarks': ",".join(benchmarks)
                    })

            # Check the relation data; only write the conf file when every
            # required key is present on the relation.
            config = {}
            for key in self.required_keys:
                val = relation_get(key)
                if val is not None:
                    config[key] = val
                else:
                    # We don't have all of the required keys
                    config = {}
                    break

            if len(config):
                with open(self.BENCHMARK_CONF, 'w') as f:
                    for key, val in iter(config.items()):
                        f.write("%s=%s\n" % (key, val))

    @staticmethod
    def start():
        """Record the benchmark start timestamp as an action result."""
        action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))

        # If the collectd charm is also installed, tell it to send a snapshot
        # of the current profile data.  (This note was previously a bare
        # string literal placed after the call, which Python evaluated and
        # silently discarded at runtime.)
        COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
        if os.path.exists(COLLECT_PROFILE_DATA):
            subprocess.check_output([COLLECT_PROFILE_DATA])

    @staticmethod
    def finish():
        """Record the benchmark stop timestamp as an action result."""
        action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ'))

    @staticmethod
    def set_composite_score(value, units, direction='asc'):
        """
        Set the composite score for a benchmark run. This is a single number
        representative of the benchmark results. This could be the most
        important metric, or an amalgamation of metric scores.
        """
        return action_set(
            "meta.composite",
            {'value': value, 'units': units, 'direction': direction}
        )

View File

@ -0,0 +1,4 @@
Source: lp:charm-tools/trunk
charm-tools/helpers/python/charmhelpers/__init__.py -> charmhelpers/charmhelpers/contrib/charmhelpers/__init__.py
charm-tools/helpers/python/charmhelpers/tests/test_charmhelpers.py -> charmhelpers/tests/contrib/charmhelpers/test_charmhelpers.py

View File

@ -0,0 +1,203 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) # noqa
import operator
import tempfile
import time
import yaml
import subprocess
import six
if six.PY3:
from urllib.request import urlopen
from urllib.error import (HTTPError, URLError)
else:
from urllib2 import (urlopen, HTTPError, URLError)
"""Helper functions for writing Juju charms in Python."""
__metaclass__ = type
__all__ = [
# 'get_config', # core.hookenv.config()
# 'log', # core.hookenv.log()
# 'log_entry', # core.hookenv.log()
# 'log_exit', # core.hookenv.log()
# 'relation_get', # core.hookenv.relation_get()
# 'relation_set', # core.hookenv.relation_set()
# 'relation_ids', # core.hookenv.relation_ids()
# 'relation_list', # core.hookenv.relation_units()
# 'config_get', # core.hookenv.config()
# 'unit_get', # core.hookenv.unit_get()
# 'open_port', # core.hookenv.open_port()
# 'close_port', # core.hookenv.close_port()
# 'service_control', # core.host.service()
'unit_info', # client-side, NOT IMPLEMENTED
'wait_for_machine', # client-side, NOT IMPLEMENTED
'wait_for_page_contents', # client-side, NOT IMPLEMENTED
'wait_for_relation', # client-side, NOT IMPLEMENTED
'wait_for_unit', # client-side, NOT IMPLEMENTED
]
SLEEP_AMOUNT = 0.1
# We wrap `juju status` in a helper because it makes testing much,
# much easier.
def juju_status():
    """Return the raw YAML output of ``juju status``.

    Bug fix: this previously used subprocess.check_call(), which returns
    the command's exit status rather than its output, so callers doing
    yaml.safe_load(juju_status()) (e.g. unit_info, get_machine_data)
    always parsed None.
    """
    return subprocess.check_output(['juju', 'status', '--format=yaml'])
# re-implemented as charmhelpers.fetch.configure_sources()
# def configure_source(update=False):
# source = config_get('source')
# if ((source.startswith('ppa:') or
# source.startswith('cloud:') or
# source.startswith('http:'))):
# run('add-apt-repository', source)
# if source.startswith("http:"):
# run('apt-key', 'import', config_get('key'))
# if update:
# run('apt-get', 'update')
# DEPRECATED: client-side only
def make_charm_config_file(charm_config):
    """Dump *charm_config* as YAML into a named temporary file.

    The NamedTemporaryFile instance is returned instead of just the name
    because we want to take advantage of garbage collection-triggered
    deletion of the temp file when it goes out of scope in the caller.
    """
    config_file = tempfile.NamedTemporaryFile(mode='w+')
    config_file.write(yaml.dump(charm_config))
    config_file.flush()
    return config_file
# DEPRECATED: client-side only
def unit_info(service_name, item_name, data=None, unit=None):
    """Look up *item_name* for one unit of *service_name*.

    :param data: Parsed `juju status` dict; fetched (and YAML-parsed)
        when None.
    :param unit: Specific unit name; when None the lowest-sorted unit
        of the service is used.
    :return: The item value, or '' when the service is not (yet) listed.
    """
    if data is None:
        data = yaml.safe_load(juju_status())
    service = data['services'].get(service_name)
    if service is None:
        # XXX 2012-02-08 gmb:
        #     Cope with the race between deploying a service and it
        #     appearing in `juju status`; returning '' lets callers poll
        #     until it shows up (could fail more noisily after a while).
        return ''
    units = service['units']
    if unit is None:
        # Sorting makes the lookup deterministic: with no unit given,
        # the unit with the lowest-sorted name is always the one used.
        unit = sorted(units.keys())[0]
    return units[unit][item_name]
# DEPRECATED: client-side only
def get_machine_data():
    """Return the ``machines`` section of ``juju status`` as a dict.

    NOTE(review): relies on juju_status() returning YAML text suitable
    for yaml.safe_load -- verify against that helper's implementation.
    """
    return yaml.safe_load(juju_status())['machines']
# DEPRECATED: client-side only
def wait_for_machine(num_machines=1, timeout=300):
    """Wait `timeout` seconds for `num_machines` machines to come up.

    This wait_for... function can be called by other wait_for functions
    whose timeouts might be too short in situations where only a bare
    Juju setup has been bootstrapped.

    :return: A tuple of (num_machines, time_taken). This is used for
        testing.
    :raises RuntimeError: if the timeout elapses before enough machines
        reach the 'running' state.
    """
    # You may think this is a hack, and you'd be right. The easiest way
    # to tell what environment we're working in (LXC vs EC2) is to check
    # the dns-name of the first machine. If it's localhost we're in LXC
    # and we can just return here.
    if get_machine_data()[0]['dns-name'] == 'localhost':
        return 1, 0
    start_time = time.time()
    while True:
        # Drop the first machine, since it's the Zookeeper and that's
        # not a machine that we need to wait for. This will only work
        # for EC2 environments, which is why we return early above if
        # we're in LXC.
        machine_data = get_machine_data()
        non_zookeeper_machines = [
            machine_data[key] for key in list(machine_data.keys())[1:]]
        if len(non_zookeeper_machines) >= num_machines:
            all_machines_running = True
            for machine in non_zookeeper_machines:
                if machine.get('instance-state') != 'running':
                    all_machines_running = False
                    break
            if all_machines_running:
                break
        if time.time() - start_time >= timeout:
            raise RuntimeError('timeout waiting for service to start')
        # Poll gently rather than hammering `juju status`.
        time.sleep(SLEEP_AMOUNT)
    return num_machines, time.time() - start_time
# DEPRECATED: client-side only
def wait_for_unit(service_name, timeout=480):
    """Wait `timeout` seconds for a given service name to come up.

    Polls the unit's agent-state until it is 'started' or contains
    'error'; raises RuntimeError on timeout or on an error state.
    """
    wait_for_machine(num_machines=1)
    start_time = time.time()
    while True:
        state = unit_info(service_name, 'agent-state')
        # An error state also ends the wait loop -- the final check
        # below then raises with the offending agent-state.
        if 'error' in state or state == 'started':
            break
        if time.time() - start_time >= timeout:
            raise RuntimeError('timeout waiting for service to start')
        time.sleep(SLEEP_AMOUNT)
    if state != 'started':
        raise RuntimeError('unit did not start, agent-state: ' + state)
# DEPRECATED: client-side only
def wait_for_relation(service_name, relation_name, timeout=120):
    """Wait `timeout` seconds for a given relation to come up.

    Polls the service's relations in `juju status` until
    *relation_name* reports state 'up'; raises RuntimeError on timeout.
    """
    start_time = time.time()
    while True:
        relation = unit_info(service_name, 'relations').get(relation_name)
        if relation is not None and relation['state'] == 'up':
            break
        if time.time() - start_time >= timeout:
            raise RuntimeError('timeout waiting for relation to be up')
        time.sleep(SLEEP_AMOUNT)
# DEPRECATED: client-side only
def wait_for_page_contents(url, contents, timeout=120, validate=None):
    """Poll *url* until *contents* is found in the fetched page.

    :param validate: Two-argument predicate ``validate(page, contents)``;
        defaults to a containment check.
    :return: The page body that satisfied the predicate.
    :raises RuntimeError: if the timeout elapses first.
    """
    if validate is None:
        validate = operator.contains
    deadline = time.time() + timeout
    while True:
        try:
            stream = urlopen(url)
        except (HTTPError, URLError):
            # Server not up (yet); keep polling until the deadline.
            pass
        else:
            page = stream.read()
            if validate(page, contents):
                return page
        if time.time() >= deadline:
            raise RuntimeError('timeout waiting for contents of ' + url)
        time.sleep(SLEEP_AMOUNT)

View File

@ -0,0 +1,14 @@
Source: lp:charmsupport/trunk
charmsupport/charmsupport/execd.py -> charm-helpers/charmhelpers/contrib/charmsupport/execd.py
charmsupport/charmsupport/hookenv.py -> charm-helpers/charmhelpers/contrib/charmsupport/hookenv.py
charmsupport/charmsupport/host.py -> charm-helpers/charmhelpers/contrib/charmsupport/host.py
charmsupport/charmsupport/nrpe.py -> charm-helpers/charmhelpers/contrib/charmsupport/nrpe.py
charmsupport/charmsupport/volumes.py -> charm-helpers/charmhelpers/contrib/charmsupport/volumes.py
charmsupport/tests/test_execd.py -> charm-helpers/tests/contrib/charmsupport/test_execd.py
charmsupport/tests/test_hookenv.py -> charm-helpers/tests/contrib/charmsupport/test_hookenv.py
charmsupport/tests/test_host.py -> charm-helpers/tests/contrib/charmsupport/test_host.py
charmsupport/tests/test_nrpe.py -> charm-helpers/tests/contrib/charmsupport/test_nrpe.py
charmsupport/bin/charmsupport -> charm-helpers/bin/contrib/charmsupport/charmsupport

View File

@ -0,0 +1,13 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,522 @@
# Copyright 2012-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compatibility with the nrpe-external-master charm"""
#
# Authors:
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
import glob
import grp
import os
import pwd
import re
import shlex
import shutil
import subprocess
import yaml
from charmhelpers.core.hookenv import (
config,
hook_name,
local_unit,
log,
relation_get,
relation_ids,
relation_set,
relations_of_type,
)
from charmhelpers.core.host import service
from charmhelpers.core import host
# This module adds compatibility with the nrpe-external-master and plain nrpe
# subordinate charms. To use it in your charm:
#
# 1. Update metadata.yaml
#
# provides:
# (...)
# nrpe-external-master:
# interface: nrpe-external-master
# scope: container
#
# and/or
#
# provides:
# (...)
# local-monitors:
# interface: local-monitors
# scope: container
#
# 2. Add the following to config.yaml
#
# nagios_context:
# default: "juju"
# type: string
# description: |
# Used by the nrpe subordinate charms.
# A string that will be prepended to instance name to set the host name
# in nagios. So for instance the hostname would be something like:
# juju-myservice-0
# If you're running multiple environments with the same services in them
# this allows you to differentiate between them.
# nagios_servicegroups:
# default: ""
# type: string
# description: |
# A comma-separated list of nagios servicegroups.
# If left empty, the nagios_context will be used as the servicegroup
#
# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
#
# 4. Update your hooks.py with something like this:
#
# from charmsupport.nrpe import NRPE
# (...)
# def update_nrpe_config():
# nrpe_compat = NRPE()
# nrpe_compat.add_check(
# shortname = "myservice",
# description = "Check MyService",
# check_cmd = "check_http -w 2 -c 10 http://localhost"
# )
# nrpe_compat.add_check(
# "myservice_other",
# "Check for widget failures",
# check_cmd = "/srv/myapp/scripts/widget_check"
# )
# nrpe_compat.write()
#
# def config_changed():
# (...)
# update_nrpe_config()
#
# def nrpe_external_master_relation_changed():
# update_nrpe_config()
#
# def local_monitors_relation_changed():
# update_nrpe_config()
#
# 4.a If your charm is a subordinate charm set primary=False
#
# from charmsupport.nrpe import NRPE
# (...)
# def update_nrpe_config():
# nrpe_compat = NRPE(primary=False)
#
# 5. ln -s hooks.py nrpe-external-master-relation-changed
# ln -s hooks.py local-monitors-relation-changed
class CheckException(Exception):
    """Raised when an NRPE check definition is invalid."""
    pass
class Check(object):
    """A single NRPE check.

    Writes the nrpe command definition under NRPE.nrpe_confdir and, when
    the nagios export directory is accessible, the matching nagios
    service definition for the nagios server to collect.
    """

    # Characters permitted in a check shortname.
    shortname_re = '[A-Za-z0-9-_.@]+$'
    # Nagios service definition; filled in by write_service_config().
    service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
    use                             active-service
    host_name                       {nagios_hostname}
    service_description             {nagios_hostname}[{shortname}] """
                        """{description}
    check_command                   check_nrpe!{command}
    servicegroups                   {nagios_servicegroup}
{service_config_overrides}
}}
""")

    def __init__(self, shortname, description, check_cmd, max_check_attempts=None):
        """Create a check definition.

        :param str shortname: Identifier matching ``shortname_re``.
        :param str description: Human-readable service description.
        :param str check_cmd: Plugin invocation; the plugin name is
            resolved against the standard nagios plugin directories.
        :param max_check_attempts: Optional nagios max_check_attempts
            override for the exported service definition.
        :raises CheckException: if ``shortname`` has invalid characters.
        """
        super(Check, self).__init__()
        # XXX: could be better to calculate this from the service name
        if not re.match(self.shortname_re, shortname):
            raise CheckException("shortname must match {}".format(
                Check.shortname_re))
        self.shortname = shortname
        self.command = "check_{}".format(shortname)
        # Note: a set of invalid characters is defined by the
        # Nagios server config
        # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
        self.description = description
        self.check_cmd = self._locate_cmd(check_cmd)
        self.max_check_attempts = max_check_attempts

    def _get_check_filename(self):
        """Path of the nrpe command definition file for this check."""
        return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))

    def _get_service_filename(self, hostname):
        """Path of the exported nagios service definition file."""
        return os.path.join(NRPE.nagios_exportdir,
                            'service__{}_{}.cfg'.format(hostname, self.command))

    def _locate_cmd(self, check_cmd):
        """Resolve the plugin in ``check_cmd`` to an absolute path.

        Returns the full command line with the plugin path substituted,
        or '' (after logging) when the plugin cannot be found.
        """
        search_path = (
            '/usr/lib/nagios/plugins',
            '/usr/local/lib/nagios/plugins',
        )
        parts = shlex.split(check_cmd)
        for path in search_path:
            if os.path.exists(os.path.join(path, parts[0])):
                command = os.path.join(path, parts[0])
                if len(parts) > 1:
                    command += " " + " ".join(parts[1:])
                return command
        log('Check command not found: {}'.format(parts[0]))
        return ''

    def _remove_service_files(self):
        """Delete any exported service definitions for this check."""
        if not os.path.exists(NRPE.nagios_exportdir):
            return
        for f in os.listdir(NRPE.nagios_exportdir):
            if f.endswith('_{}.cfg'.format(self.command)):
                os.remove(os.path.join(NRPE.nagios_exportdir, f))

    def remove(self, hostname):
        """Remove this check's command and service definition files."""
        nrpe_check_file = self._get_check_filename()
        if os.path.exists(nrpe_check_file):
            os.remove(nrpe_check_file)
        self._remove_service_files()

    def write(self, nagios_context, hostname, nagios_servicegroups):
        """Write the nrpe command definition (and the service config
        when the nagios export directory is accessible)."""
        nrpe_check_file = self._get_check_filename()
        with open(nrpe_check_file, 'w') as nrpe_check_config:
            nrpe_check_config.write("# check {}\n".format(self.shortname))
            if nagios_servicegroups:
                nrpe_check_config.write(
                    "# The following header was added automatically by juju\n")
                nrpe_check_config.write(
                    "# Modifying it will affect nagios monitoring and alerting\n")
                nrpe_check_config.write(
                    "# servicegroups: {}\n".format(nagios_servicegroups))
            nrpe_check_config.write("command[{}]={}\n".format(
                self.command, self.check_cmd))

        if not os.path.exists(NRPE.nagios_exportdir):
            log('Not writing service config as {} is not accessible'.format(
                NRPE.nagios_exportdir))
        else:
            self.write_service_config(nagios_context, hostname,
                                      nagios_servicegroups)

    def write_service_config(self, nagios_context, hostname,
                             nagios_servicegroups):
        """Export the nagios service definition for this check."""
        self._remove_service_files()

        if self.max_check_attempts:
            service_config_overrides = '    max_check_attempts              {}'.format(
                self.max_check_attempts
            )  # Note indentation is here rather than in the template to avoid trailing spaces
        else:
            service_config_overrides = ''  # empty string to avoid printing 'None'
        templ_vars = {
            'nagios_hostname': hostname,
            'nagios_servicegroup': nagios_servicegroups,
            'description': self.description,
            'shortname': self.shortname,
            'command': self.command,
            'service_config_overrides': service_config_overrides,
        }
        nrpe_service_text = Check.service_template.format(**templ_vars)
        nrpe_service_file = self._get_service_filename(hostname)
        with open(nrpe_service_file, 'w') as nrpe_service_config:
            nrpe_service_config.write(str(nrpe_service_text))

    def run(self):
        """Execute the check command locally.

        Bug fix: subprocess.call() was previously given the whole
        command line as a single string; without shell=True that string
        is treated as the program *name*, so any check with arguments
        failed.  Split it into an argv list instead.
        """
        subprocess.call(shlex.split(self.check_cmd))
class NRPE(object):
    """Collects Check objects for this unit and writes them out.

    write() renders each queued check's config files, restarts
    nagios-nrpe-server (except during update-status hooks) and publishes
    the resulting monitor set on any local-monitors /
    nrpe-external-master relations.
    """

    nagios_logdir = '/var/log/nagios'
    nagios_exportdir = '/var/lib/nagios/export'
    nrpe_confdir = '/etc/nagios/nrpe.d'
    homedir = '/var/lib/nagios'  # home dir provided by nagios-nrpe-server

    def __init__(self, hostname=None, primary=True):
        """Load charm config and determine the nagios hostname.

        :param hostname: Explicit nagios hostname; when None it is taken
            from the nrpe relation, falling back to
            "<nagios_context>-<unit name>".
        :param primary: Published on any nrpe-external-master relation
            when running inside such a hook.
        """
        super(NRPE, self).__init__()
        self.config = config()
        self.primary = primary
        self.nagios_context = self.config['nagios_context']
        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
            self.nagios_servicegroups = self.config['nagios_servicegroups']
        else:
            # No servicegroups configured: fall back to the context.
            self.nagios_servicegroups = self.nagios_context
        self.unit_name = local_unit().replace('/', '-')
        if hostname:
            self.hostname = hostname
        else:
            nagios_hostname = get_nagios_hostname()
            if nagios_hostname:
                self.hostname = nagios_hostname
            else:
                self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
        self.checks = []
        # Iff in an nrpe-external-master relation hook, set primary status
        relation = relation_ids('nrpe-external-master')
        if relation:
            log("Setting charm primary status {}".format(primary))
            for rid in relation:
                relation_set(relation_id=rid, relation_settings={'primary': self.primary})
        self.remove_check_queue = set()

    @classmethod
    def does_nrpe_conf_dir_exist(cls):
        """Return True if the nrpe_confdir directory exists."""
        return os.path.isdir(cls.nrpe_confdir)

    def add_check(self, *args, **kwargs):
        """Queue a Check for the next write(); takes Check() arguments.

        Also cancels any pending removal of the same shortname.
        """
        shortname = None
        if kwargs.get('shortname') is None:
            if len(args) > 0:
                shortname = args[0]
        else:
            shortname = kwargs['shortname']

        self.checks.append(Check(*args, **kwargs))
        try:
            self.remove_check_queue.remove(shortname)
        except KeyError:
            pass

    def remove_check(self, *args, **kwargs):
        """Remove a check's files now and queue its removal from the
        relation data on the next write().

        :raises ValueError: if no ``shortname`` keyword is given.
        """
        if kwargs.get('shortname') is None:
            raise ValueError('shortname of check must be specified')

        # Use sensible defaults if they're not specified - these are not
        # actually used during removal, but they're required for constructing
        # the Check object; check_disk is chosen because it's part of the
        # nagios-plugins-basic package.
        if kwargs.get('check_cmd') is None:
            kwargs['check_cmd'] = 'check_disk'
        if kwargs.get('description') is None:
            kwargs['description'] = ''

        check = Check(*args, **kwargs)
        check.remove(self.hostname)
        self.remove_check_queue.add(kwargs['shortname'])

    def write(self):
        """Write all queued checks and publish them on monitor relations.

        Silently returns when the nagios user is absent or the nrpe conf
        dir does not exist (nagios-nrpe-server not installed yet).
        """
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except Exception:
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        # nrpe_monitors is mutated in the loop below; `monitors` keeps a
        # live reference to it for the relation payload.
        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}

        # check that the charm can write to the conf dir. If not, then nagios
        # probably isn't installed, and we can defer.
        if not self.does_nrpe_conf_dir_exist():
            return

        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }
            # If we were passed max_check_attempts, add that to the relation data
            if nrpecheck.max_check_attempts is not None:
                nrpe_monitors[nrpecheck.shortname]['max_check_attempts'] = nrpecheck.max_check_attempts

        # update-status hooks are configured to firing every 5 minutes by
        # default. When nagios-nrpe-server is restarted, the nagios server
        # reports checks failing causing unnecessary alerts. Let's not restart
        # on update-status hooks.
        if not hook_name() == 'update-status':
            service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            reldata = relation_get(unit=local_unit(), rid=rid)
            if 'monitors' in reldata:
                # update the existing set of monitors with the new data
                old_monitors = yaml.safe_load(reldata['monitors'])
                old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
                # remove keys that are in the remove_check_queue
                old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
                                     if k not in self.remove_check_queue}
                # update/add nrpe_monitors
                old_nrpe_monitors.update(nrpe_monitors)
                old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
                # write back to the relation
                relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
            else:
                # write a brand new set of monitors, as no existing ones.
                relation_set(relation_id=rid, monitors=yaml.dump(monitors))

        self.remove_check_queue.clear()
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_host_context

    :param str relation_name: Name of relation nrpe sub joined to
    """
    contexts = (rel['nagios_host_context']
                for rel in relations_of_type(relation_name)
                if 'nagios_host_context' in rel)
    # None when no related unit has published a host context yet.
    return next(contexts, None)
def get_nagios_hostname(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_hostname

    :param str relation_name: Name of relation nrpe sub joined to
    """
    hostnames = (rel['nagios_hostname']
                 for rel in relations_of_type(relation_name)
                 if 'nagios_hostname' in rel)
    # None when no related unit has published a hostname yet.
    return next(hostnames, None)
def get_nagios_unit_name(relation_name='nrpe-external-master'):
    """
    Return the nagios unit name prepended with host_context if needed

    :param str relation_name: Name of relation nrpe sub joined to
    """
    host_context = get_nagios_hostcontext(relation_name)
    if not host_context:
        return local_unit()
    return "%s:%s" % (host_context, local_unit())
def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
    """
    Add checks for each service in list

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    :param bool immediate_check: For sysv init, run the service check immediately
    """
    for svc in services:
        # Don't add a check for these services from neutron-gateway.
        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
            # Bug fix: this was a bare ``next`` expression, which just
            # evaluates the builtin and discards it (a no-op), so these
            # services were never actually skipped.
            continue

        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc

        if host.init_is_systemd(service_name=svc):
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_systemd.py %s' % svc
            )
        elif os.path.exists(upstart_init):
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_upstart_job %s' % svc
            )
        elif os.path.exists(sysv_init):
            # sysv has nothing to query directly: poll the status via cron
            # and have the nrpe check read the result file.
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
            croncmd = (
                '/usr/local/lib/nagios/plugins/check_exit_status.pl '
                '-e -s /etc/init.d/%s status' % svc
            )
            cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
            with open(cronpath, 'w') as f:
                f.write(cron_file)
            nrpe.add_check(
                shortname=svc,
                description='service check {%s}' % unit_name,
                check_cmd='check_status_file.py -f %s' % checkpath,
            )
            # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
            # (LP: #1670223).
            if immediate_check and os.path.isdir(nrpe.homedir):
                with open(checkpath, 'w') as f:
                    subprocess.call(
                        croncmd.split(),
                        stdout=f,
                        stderr=subprocess.STDOUT
                    )
                os.chmod(checkpath, 0o644)
def copy_nrpe_checks(nrpe_files_dir=None):
    """
    Copy the nrpe checks into place

    Copies every ``check_*`` file from *nrpe_files_dir* (auto-detected
    under $CHARM_DIR when None) into /usr/local/lib/nagios/plugins.
    """
    NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
    if nrpe_files_dir is None:
        # "charmhelpers" may live at CHARM_DIR or CHARM_DIR/hooks.
        candidates = [
            os.path.abspath(os.path.join(
                os.getenv('CHARM_DIR'),
                segment,
                'charmhelpers',
                'contrib',
                'openstack',
                'files'))
            for segment in ['.', 'hooks']]
        nrpe_files_dir = next(
            (path for path in candidates if os.path.isdir(path)), None)
        if nrpe_files_dir is None:
            raise RuntimeError("Couldn't find charmhelpers directory")
    if not os.path.exists(NAGIOS_PLUGINS):
        os.makedirs(NAGIOS_PLUGINS)
    for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
        if os.path.isfile(fname):
            shutil.copy2(fname,
                         os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
def add_haproxy_checks(nrpe, unit_name):
    """
    Add checks for each service in list

    :param NRPE nrpe: NRPE object to add check to
    :param str unit_name: Unit name to use in check description
    """
    haproxy_checks = (
        ('haproxy_servers',
         'Check HAProxy {%s}' % unit_name,
         'check_haproxy.sh'),
        ('haproxy_queue',
         'Check HAProxy queue depth {%s}' % unit_name,
         'check_haproxy_queue_depth.sh'),
    )
    for shortname, description, check_cmd in haproxy_checks:
        nrpe.add_check(
            shortname=shortname,
            description=description,
            check_cmd=check_cmd)
def remove_deprecated_check(nrpe, deprecated_services):
    """
    Remove checks for deprecated services in list

    :param nrpe: NRPE object to remove check from
    :type nrpe: NRPE
    :param deprecated_services: List of deprecated services that are removed
    :type deprecated_services: list
    """
    for service_name in deprecated_services:
        log('Deprecated service: {}'.format(service_name))
        nrpe.remove_check(shortname=service_name)

View File

@ -0,0 +1,173 @@
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Functions for managing volumes in juju units. One volume is supported per unit.
Subordinates may have their own storage, provided it is on its own partition.
Configuration stanzas::
volume-ephemeral:
type: boolean
default: true
description: >
If false, a volume is mounted as specified in "volume-map"
If true, ephemeral storage will be used, meaning that log data
will only exist as long as the machine. YOU HAVE BEEN WARNED.
volume-map:
type: string
default: {}
description: >
YAML map of units to device names, e.g:
"{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
Service units will raise a configure-error if volume-ephemeral
is 'true' and no volume-map value is set. Use 'juju set' to set a
value and 'juju resolved' to complete configuration.
Usage::
from charmsupport.volumes import configure_volume, VolumeConfigurationError
from charmsupport.hookenv import log, ERROR
def post_mount_hook():
stop_service('myservice')
def post_mount_hook():
start_service('myservice')
if __name__ == '__main__':
try:
configure_volume(before_change=pre_mount_hook,
after_change=post_mount_hook)
except VolumeConfigurationError:
log('Storage could not be configured', ERROR)
'''
# XXX: Known limitations
# - fstab is neither consulted nor updated
import os
from charmhelpers.core import hookenv
from charmhelpers.core import host
import yaml
# Base directory beneath which per-unit volume mountpoints are created.
MOUNT_BASE = '/srv/juju/volumes'


class VolumeConfigurationError(Exception):
    '''Volume configuration data is missing or invalid'''
    pass
def get_config():
    '''Gather and sanity-check volume configuration data.

    Reads the 'volume-ephemeral' and 'volume-map' charm options and this
    unit's name from the environment.

    :returns: dict with keys 'ephemeral' (bool), 'device' (str or None)
              and 'mountpoint' (str), or None if the configuration is
              invalid or inconsistent.
    '''
    volume_config = {}
    config = hookenv.config()
    errors = False
    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
        volume_config['ephemeral'] = True
    else:
        volume_config['ephemeral'] = False
    # Initialise before the try block: previously a YAML parse failure left
    # volume_map undefined and the checks below raised NameError instead of
    # returning None.
    volume_map = {}
    try:
        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
    except yaml.YAMLError as e:
        hookenv.log("Error parsing YAML volume-map: {}".format(e),
                    hookenv.ERROR)
        errors = True
    if volume_map is None:
        # probably an empty string
        volume_map = {}
    elif not isinstance(volume_map, dict):
        hookenv.log("Volume-map should be a dictionary, not {}".format(
            type(volume_map)), hookenv.ERROR)
        errors = True
        # Reset so the lookup below cannot raise AttributeError and the
        # error is reported by returning None as intended.
        volume_map = {}
    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
    if volume_config['device'] and volume_config['ephemeral']:
        # asked for ephemeral storage but also defined a volume ID
        hookenv.log('A volume is defined for this unit, but ephemeral '
                    'storage was requested', hookenv.ERROR)
        errors = True
    elif not volume_config['device'] and not volume_config['ephemeral']:
        # asked for persistent storage but did not define a volume ID
        hookenv.log('Persistent storage was requested, but there is no '
                    'volume defined for this unit.', hookenv.ERROR)
        errors = True
    unit_mount_name = hookenv.local_unit().replace('/', '-')
    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
    if errors:
        return None
    return volume_config
def mount_volume(config):
    '''Mount config['device'] on config['mountpoint'], persisting the mount.

    Creates the mountpoint directory if needed and remounts if something
    is already mounted there. Raises VolumeConfigurationError when the
    mountpoint exists but is not a directory, or when mounting fails.
    '''
    mountpoint = config['mountpoint']
    if not os.path.exists(mountpoint):
        host.mkdir(mountpoint)
    elif not os.path.isdir(mountpoint):
        hookenv.log('Not a directory: {}'.format(mountpoint))
        raise VolumeConfigurationError()
    # Replace any existing mount so the configured device ends up mounted.
    if os.path.ismount(mountpoint):
        unmount_volume(config)
    if not host.mount(config['device'], mountpoint, persist=True):
        raise VolumeConfigurationError()
def unmount_volume(config):
    '''Unmount config['mountpoint'] if mounted, removing the persisted entry.

    Raises VolumeConfigurationError if the unmount fails; a no-op when
    nothing is mounted there.
    '''
    mountpoint = config['mountpoint']
    if not os.path.ismount(mountpoint):
        return
    if not host.umount(mountpoint, persist=True):
        raise VolumeConfigurationError()
def managed_mounts():
    '''Iterable of all mounted managed volumes.

    Yields (mountpoint, device) entries from host.mounts() whose
    mountpoint lives under MOUNT_BASE.
    '''
    return filter(lambda entry: entry[0].startswith(MOUNT_BASE),
                  host.mounts())
def configure_volume(before_change=lambda: None, after_change=lambda: None):
    '''Set up storage (or don't) according to the charm's volume configuration.

    Returns the mount point or "ephemeral". before_change and after_change
    are optional callbacks invoked around any change to the volume state.

    Raises VolumeConfigurationError if the configuration cannot be read.
    '''
    config = get_config()
    if not config:
        hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
        raise VolumeConfigurationError()
    mountpoint = config['mountpoint']
    mounted = os.path.ismount(mountpoint)
    if config['ephemeral']:
        # Ephemeral storage requested: ensure nothing managed stays mounted.
        if mounted:
            before_change()
            unmount_volume(config)
            after_change()
        return 'ephemeral'
    # Persistent storage requested.
    if mounted:
        # Only remount when the mounted device differs from the configured one.
        if dict(managed_mounts()).get(mountpoint) != config['device']:
            before_change()
            unmount_volume(config)
            mount_volume(config)
            after_change()
    else:
        before_change()
        mount_volume(config)
        after_change()
    return mountpoint

View File

@ -0,0 +1,11 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,840 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper for working with a MySQL database"""
import collections
import copy
import json
import re
import sys
import platform
import os
import glob
import six
# from string import upper
from charmhelpers.core.host import (
CompareHostReleases,
lsb_release,
mkdir,
pwgen,
write_file
)
from charmhelpers.core.hookenv import (
config as config_get,
relation_get,
related_units,
unit_get,
log,
DEBUG,
ERROR,
INFO,
WARNING,
leader_get,
leader_set,
is_leader,
)
from charmhelpers.fetch import (
apt_install,
apt_update,
filter_installed_packages,
)
from charmhelpers.contrib.network.ip import get_host_ip
try:
import MySQLdb
except ImportError:
apt_update(fatal=True)
if six.PY2:
apt_install(filter_installed_packages(['python-mysqldb']), fatal=True)
else:
apt_install(filter_installed_packages(['python3-mysqldb']), fatal=True)
import MySQLdb
class MySQLSetPasswordError(Exception):
    """Raised when a MySQL password cannot be updated or verified.

    Carries the human-readable reason and the underlying MySQLdb
    exception as arguments.
    """
    pass
class MySQLHelper(object):
    """Helper for managing databases, users and grants on a MySQL server.

    Passwords are kept in Juju leader storage when available, with an
    on-disk fallback rendered from the supplied path templates.
    """
    def __init__(self, rpasswdf_template, upasswdf_template, host='localhost',
                 migrate_passwd_to_leader_storage=True,
                 delete_ondisk_passwd_file=True, user="root", password=None,
                 port=None, connect_timeout=None):
        self.user = user
        self.host = host
        self.password = password
        self.port = port
        # default timeout of 30 seconds.
        self.connect_timeout = connect_timeout or 30
        # Password file path templates
        self.root_passwd_file_template = rpasswdf_template
        self.user_passwd_file_template = upasswdf_template
        self.migrate_passwd_to_leader_storage = migrate_passwd_to_leader_storage
        # If we migrate we have the option to delete local copy of root passwd
        self.delete_ondisk_passwd_file = delete_ondisk_passwd_file
        self.connection = None
    def connect(self, user='root', password=None, host=None, port=None,
                connect_timeout=None):
        """Open a connection and store it on self.connection.

        Arguments default to the values given at construction time.
        Re-raises any MySQLdb connection error after logging it.
        """
        _connection_info = {
            "user": user or self.user,
            "passwd": password or self.password,
            "host": host or self.host
        }
        # set the connection timeout; for mysql8 it can hang forever, so some
        # timeout is required.
        timeout = connect_timeout or self.connect_timeout
        if timeout:
            _connection_info["connect_timeout"] = timeout
        # port cannot be None but we also do not want to specify it unless it
        # has been explicit set.
        port = port or self.port
        if port is not None:
            _connection_info["port"] = port
        log("Opening db connection for %s@%s" % (user, host), level=DEBUG)
        try:
            self.connection = MySQLdb.connect(**_connection_info)
        except Exception as e:
            log("Failed to connect to database due to '{}'".format(str(e)),
                level=ERROR)
            raise
    def database_exists(self, db_name):
        """Return True if a database named db_name exists."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("SHOW DATABASES")
            databases = [i[0] for i in cursor.fetchall()]
        finally:
            cursor.close()
        return db_name in databases
    def create_database(self, db_name):
        """Create database db_name with the UTF8 character set."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("CREATE DATABASE `{}` CHARACTER SET UTF8"
                           .format(db_name))
        finally:
            cursor.close()
    def grant_exists(self, db_name, db_user, remote_ip):
        """Return True if db_user@remote_ip already has ALL on db_name.*."""
        cursor = self.connection.cursor()
        priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
                      "TO '{}'@'{}'".format(db_name, db_user, remote_ip)
        try:
            cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
                                                              remote_ip))
            grants = [i[0] for i in cursor.fetchall()]
        except MySQLdb.OperationalError:
            # e.g. the user does not exist yet - no grants then.
            return False
        finally:
            cursor.close()
        # TODO: review for different grants
        return priv_string in grants
    def create_grant(self, db_name, db_user, remote_ip, password):
        """Grant ALL on db_name.* to db_user@remote_ip (creates the user)."""
        # NOTE(review): SQL is built by string formatting from relation data;
        # values are charm-controlled but parameterization would be safer.
        cursor = self.connection.cursor()
        try:
            # TODO: review for different grants
            cursor.execute("GRANT ALL PRIVILEGES ON `{}`.* TO '{}'@'{}' "
                           "IDENTIFIED BY '{}'".format(db_name,
                                                       db_user,
                                                       remote_ip,
                                                       password))
        finally:
            cursor.close()
    def create_admin_grant(self, db_user, remote_ip, password):
        """Grant ALL on *.* (server-wide admin) to db_user@remote_ip."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' "
                           "IDENTIFIED BY '{}'".format(db_user,
                                                       remote_ip,
                                                       password))
        finally:
            cursor.close()
    def cleanup_grant(self, db_user, remote_ip):
        """Remove the mysql.user entry for db_user@remote_ip."""
        cursor = self.connection.cursor()
        try:
            # Fixed: 'DROP FROM mysql.user' is not valid SQL; rows are
            # removed with DELETE FROM.
            cursor.execute("DELETE FROM mysql.user WHERE user='{}' "
                           "AND HOST='{}'".format(db_user,
                                                  remote_ip))
        finally:
            cursor.close()
    def flush_priviledges(self):
        """Run FLUSH PRIVILEGES (name misspelling kept for compatibility)."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("FLUSH PRIVILEGES")
        finally:
            cursor.close()
    def execute(self, sql):
        """Execute arbitrary SQL against the database."""
        cursor = self.connection.cursor()
        try:
            cursor.execute(sql)
        finally:
            cursor.close()
    def select(self, sql):
        """
        Execute arbitrary SQL select query against the database
        and return the results.
        :param sql: SQL select query to execute
        :type sql: string
        :returns: SQL select query result
        :rtype: list of lists
        :raises: MySQLdb.Error
        """
        cursor = self.connection.cursor()
        try:
            cursor.execute(sql)
            results = [list(i) for i in cursor.fetchall()]
        finally:
            cursor.close()
        return results
    def migrate_passwords_to_leader_storage(self, excludes=None):
        """Migrate any passwords storage on disk to leader storage."""
        if not is_leader():
            log("Skipping password migration as not the lead unit",
                level=DEBUG)
            return
        dirname = os.path.dirname(self.root_passwd_file_template)
        path = os.path.join(dirname, '*.passwd')
        for f in glob.glob(path):
            if excludes and f in excludes:
                log("Excluding %s from leader storage migration" % (f),
                    level=DEBUG)
                continue
            key = os.path.basename(f)
            with open(f, 'r') as passwd:
                _value = passwd.read().strip()
            try:
                leader_set(settings={key: _value})
                if self.delete_ondisk_passwd_file:
                    os.unlink(f)
            except ValueError:
                # NOTE cluster relation not yet ready - skip for now
                pass
    def get_mysql_password_on_disk(self, username=None, password=None):
        """Retrieve, generate or store a mysql password for the provided
        username on disk."""
        if username:
            template = self.user_passwd_file_template
            passwd_file = template.format(username)
        else:
            passwd_file = self.root_passwd_file_template
        _password = None
        if os.path.exists(passwd_file):
            log("Using existing password file '%s'" % passwd_file, level=DEBUG)
            with open(passwd_file, 'r') as passwd:
                _password = passwd.read().strip()
        else:
            log("Generating new password file '%s'" % passwd_file, level=DEBUG)
            if not os.path.isdir(os.path.dirname(passwd_file)):
                # NOTE: need to ensure this is not mysql root dir (which needs
                # to be mysql readable)
                mkdir(os.path.dirname(passwd_file), owner='root', group='root',
                      perms=0o770)
                # Force permissions - for some reason the chmod in makedirs
                # fails
                os.chmod(os.path.dirname(passwd_file), 0o770)
            _password = password or pwgen(length=32)
            write_file(passwd_file, _password, owner='root', group='root',
                       perms=0o660)
        return _password
    def passwd_keys(self, username):
        """Generator to return keys used to store passwords in peer store.
        NOTE: we support both legacy and new format to support mysql
        charm prior to refactor. This is necessary to avoid LP 1451890.
        """
        keys = []
        if username == 'mysql':
            log("Bad username '%s'" % (username), level=WARNING)
        if username:
            # IMPORTANT: *newer* format must be returned first
            keys.append('mysql-%s.passwd' % (username))
            keys.append('%s.passwd' % (username))
        else:
            keys.append('mysql.passwd')
        for key in keys:
            yield key
    def get_mysql_password(self, username=None, password=None):
        """Retrieve, generate or store a mysql password for the provided
        username using peer relation cluster."""
        excludes = []
        # First check peer relation.
        try:
            for key in self.passwd_keys(username):
                _password = leader_get(key)
                if _password:
                    break
            # If root password available don't update peer relation from local
            if _password and not username:
                excludes.append(self.root_passwd_file_template)
        except ValueError:
            # cluster relation is not yet started; use on-disk
            _password = None
        # If none available, generate new one
        if not _password:
            _password = self.get_mysql_password_on_disk(username, password)
        # Put on wire if required
        if self.migrate_passwd_to_leader_storage:
            self.migrate_passwords_to_leader_storage(excludes=excludes)
        return _password
    def get_mysql_root_password(self, password=None):
        """Retrieve or generate mysql root password for service units."""
        return self.get_mysql_password(username=None, password=password)
    def set_mysql_password(self, username, password, current_password=None):
        """Update a mysql password for the provided username changing the
        leader settings
        To update root's password pass `None` in the username
        :param username: Username to change password of
        :type username: str
        :param password: New password for user.
        :type password: str
        :param current_password: Existing password for user.
        :type current_password: str
        """
        if username is None:
            username = 'root'
        # get root password via leader-get, it may be that in the past (when
        # changes to root-password were not supported) the user changed the
        # password, so leader-get is more reliable source than
        # config.previous('root-password').
        rel_username = None if username == 'root' else username
        if not current_password:
            current_password = self.get_mysql_password(rel_username)
        # password that needs to be set
        new_passwd = password
        # update password for all users (e.g. root@localhost, root@::1, etc)
        try:
            self.connect(user=username, password=current_password)
            cursor = self.connection.cursor()
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError(('Cannot connect using password in '
                                         'leader settings (%s)') % ex, ex)
        try:
            # NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account
            # fails when using SET PASSWORD so using UPDATE against the
            # mysql.user table is needed, but changes to this table are not
            # replicated across the cluster, so this update needs to run in
            # all the nodes. More info at
            # http://galeracluster.com/documentation-webpages/userchanges.html
            release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
            if release < 'bionic':
                SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = "
                                     "PASSWORD( %s ) WHERE user = %s;")
            else:
                # PXC 5.7 (introduced in Bionic) uses authentication_string
                SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET "
                                     "authentication_string = "
                                     "PASSWORD( %s ) WHERE user = %s;")
            cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username))
            cursor.execute('FLUSH PRIVILEGES;')
            self.connection.commit()
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError('Cannot update password: %s' % str(ex),
                                        ex)
        finally:
            cursor.close()
        # check the password was changed
        try:
            self.connect(user=username, password=new_passwd)
            self.execute('select 1;')
        except MySQLdb.OperationalError as ex:
            raise MySQLSetPasswordError(('Cannot connect using new password: '
                                         '%s') % str(ex), ex)
        if not is_leader():
            log('Only the leader can set a new password in the relation',
                level=DEBUG)
            return
        for key in self.passwd_keys(rel_username):
            _password = leader_get(key)
            if _password:
                log('Updating password for %s (%s)' % (key, rel_username),
                    level=DEBUG)
                leader_set(settings={key: new_passwd})
    def set_mysql_root_password(self, password, current_password=None):
        """Update mysql root password changing the leader settings
        :param password: New password for user.
        :type password: str
        :param current_password: Existing password for user.
        :type current_password: str
        """
        self.set_mysql_password(
            'root',
            password,
            current_password=current_password)
    def normalize_address(self, hostname):
        """Ensure that address returned is an IP address (i.e. not fqdn)"""
        if config_get('prefer-ipv6'):
            # TODO: add support for ipv6 dns
            return hostname
        if hostname != unit_get('private-address'):
            return get_host_ip(hostname, fallback=hostname)
        # Otherwise assume localhost
        return '127.0.0.1'
    def get_allowed_units(self, database, username, relation_id=None, prefix=None):
        """Get list of units with access grants for database with username.
        This is typically used to provide shared-db relations with a list of
        which units have been granted access to the given database.
        """
        if not self.connection:
            self.connect(password=self.get_mysql_root_password())
        allowed_units = set()
        if not prefix:
            prefix = database
        for unit in related_units(relation_id):
            settings = relation_get(rid=relation_id, unit=unit)
            # First check for setting with prefix, then without
            for attr in ["%s_hostname" % (prefix), 'hostname']:
                hosts = settings.get(attr, None)
                if hosts:
                    break
            if hosts:
                # hostname can be json-encoded list of hostnames
                try:
                    hosts = json.loads(hosts)
                except ValueError:
                    hosts = [hosts]
            else:
                hosts = [settings['private-address']]
            if hosts:
                for host in hosts:
                    host = self.normalize_address(host)
                    if self.grant_exists(database, username, host):
                        log("Grant exists for host '%s' on db '%s'" %
                            (host, database), level=DEBUG)
                        if unit not in allowed_units:
                            allowed_units.add(unit)
                    else:
                        log("Grant does NOT exist for host '%s' on db '%s'" %
                            (host, database), level=DEBUG)
            else:
                log("No hosts found for grant check", level=INFO)
        return allowed_units
    def configure_db(self, hostname, database, username, admin=False):
        """Configure access to database for username from hostname.

        Creates the database and grant if missing; returns the password
        to hand back over the relation.
        """
        if not self.connection:
            self.connect(password=self.get_mysql_root_password())
        if not self.database_exists(database):
            self.create_database(database)
        remote_ip = self.normalize_address(hostname)
        password = self.get_mysql_password(username)
        if not self.grant_exists(database, username, remote_ip):
            if not admin:
                self.create_grant(database, username, remote_ip, password)
            else:
                self.create_admin_grant(username, remote_ip, password)
            self.flush_priviledges()
        return password
# `_singleton_config_helper` stores the instance of the helper class that is
# being used during a hook invocation.
_singleton_config_helper = None
def get_mysql_config_helper():
    # Lazily create and return the per-hook singleton MySQLConfigHelper.
    global _singleton_config_helper
    if _singleton_config_helper is None:
        _singleton_config_helper = MySQLConfigHelper()
    return _singleton_config_helper
class MySQLConfigHelper(object):
    """Base configuration helper for MySQL."""
    # Going for the biggest page size to avoid wasted bytes.
    # InnoDB page size is 16MB
    DEFAULT_PAGE_SIZE = 16 * 1024 * 1024
    DEFAULT_INNODB_BUFFER_FACTOR = 0.50
    DEFAULT_INNODB_BUFFER_SIZE_MAX = 512 * 1024 * 1024
    # Validation and lookups for InnoDB configuration
    INNODB_VALID_BUFFERING_VALUES = [
        'none',
        'inserts',
        'deletes',
        'changes',
        'purges',
        'all'
    ]
    INNODB_FLUSH_CONFIG_VALUES = {
        'fast': 2,
        'safest': 1,
        'unsafe': 0,
    }
    def human_to_bytes(self, human):
        """Convert human readable configuration options to bytes.

        Accepts a plain number, a K/M/G/T suffix, or a percentage of
        total system RAM (page-size aligned).

        :param human: value such as '512', '32M' or '50%'
        :type human: str
        :returns: number of bytes
        :rtype: int
        :raises ValueError: for an unrecognised suffix
        """
        num_re = re.compile('^[0-9]+$')
        if num_re.match(human):
            # Fixed: return an int, not the original string - callers
            # compare and do arithmetic with this value, and a str here
            # raised TypeError in get_innodb_buffer_pool_size().
            return int(human)
        factors = {
            'K': 1024,
            'M': 1048576,
            'G': 1073741824,
            'T': 1099511627776
        }
        modifier = human[-1]
        if modifier in factors:
            return int(human[:-1]) * factors[modifier]
        if modifier == '%':
            total_ram = self.human_to_bytes(self.get_mem_total())
            if self.is_32bit_system() and total_ram > self.sys_mem_limit():
                total_ram = self.sys_mem_limit()
            factor = int(human[:-1]) * 0.01
            pctram = total_ram * factor
            # Round down to a whole number of InnoDB pages.
            return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE))
        raise ValueError("Can only convert K,M,G, or T")
    def is_32bit_system(self):
        """Determine whether system is 32 or 64 bit."""
        try:
            return sys.maxsize < 2 ** 32
        except OverflowError:
            return False
    def sys_mem_limit(self):
        """Determine the default memory limit for the current service unit."""
        if platform.machine() in ['armv7l']:
            _mem_limit = self.human_to_bytes('2700M')  # experimentally determined
        else:
            # Limit for x86 based 32bit systems
            _mem_limit = self.human_to_bytes('4G')
        return _mem_limit
    def get_mem_total(self):
        """Calculate the total memory in the current service unit.

        Returns a human-readable string such as '16384K' read from
        /proc/meminfo, or None if MemTotal is not present.
        """
        with open('/proc/meminfo') as meminfo_file:
            for line in meminfo_file:
                key, mem = line.split(':', 2)
                if key == 'MemTotal':
                    mtot, modifier = mem.strip().split(' ')
                    return '%s%s' % (mtot, modifier[0].upper())
    def get_innodb_flush_log_at_trx_commit(self):
        """Get value for innodb_flush_log_at_trx_commit.
        Use the innodb-flush-log-at-trx-commit or the tunning-level setting
        translated by INNODB_FLUSH_CONFIG_VALUES to get the
        innodb_flush_log_at_trx_commit value.
        :returns: Numeric value for innodb_flush_log_at_trx_commit
        :rtype: Union[None, int]
        """
        _iflatc = config_get('innodb-flush-log-at-trx-commit')
        _tuning_level = config_get('tuning-level')
        if _iflatc:
            return _iflatc
        elif _tuning_level:
            return self.INNODB_FLUSH_CONFIG_VALUES.get(_tuning_level, 1)
    def get_innodb_change_buffering(self):
        """Get value for innodb_change_buffering.
        Use the innodb-change-buffering validated against
        INNODB_VALID_BUFFERING_VALUES to get the innodb_change_buffering value.
        :returns: String value for innodb_change_buffering.
        :rtype: Union[None, str]
        """
        _icb = config_get('innodb-change-buffering')
        if _icb and _icb in self.INNODB_VALID_BUFFERING_VALUES:
            return _icb
    def get_innodb_buffer_pool_size(self):
        """Get value for innodb_buffer_pool_size.
        Return the number value of innodb-buffer-pool-size or dataset-size. If
        neither is set, calculate a sane default based on total memory.
        :returns: Numeric value for innodb_buffer_pool_size.
        :rtype: int
        """
        total_memory = self.human_to_bytes(self.get_mem_total())
        dataset_bytes = config_get('dataset-size')
        innodb_buffer_pool_size = config_get('innodb-buffer-pool-size')
        if innodb_buffer_pool_size:
            innodb_buffer_pool_size = self.human_to_bytes(
                innodb_buffer_pool_size)
        elif dataset_bytes:
            log("Option 'dataset-size' has been deprecated, please use"
                "innodb_buffer_pool_size option instead", level="WARN")
            innodb_buffer_pool_size = self.human_to_bytes(
                dataset_bytes)
        else:
            # NOTE(jamespage): pick the smallest of 50% of RAM or 512MB
            #                  to ensure that deployments in containers
            #                  without constraints don't try to consume
            #                  silly amounts of memory.
            innodb_buffer_pool_size = min(
                int(total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR),
                self.DEFAULT_INNODB_BUFFER_SIZE_MAX
            )
        if innodb_buffer_pool_size > total_memory:
            log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format(
                innodb_buffer_pool_size,
                total_memory), level='WARN')
        return innodb_buffer_pool_size
class PerconaClusterHelper(MySQLConfigHelper):
    """Percona-cluster specific configuration helper."""
    def parse_config(self):
        """Parse charm configuration and calculate values for config files.

        :returns: mapping of mysql option name to value, ready for
                  rendering into the cluster configuration template.
        :rtype: dict
        """
        config = config_get()
        mysql_config = {}
        if 'max-connections' in config:
            mysql_config['max_connections'] = config['max-connections']
        if 'wait-timeout' in config:
            mysql_config['wait_timeout'] = config['wait-timeout']
        if self.get_innodb_flush_log_at_trx_commit() is not None:
            mysql_config['innodb_flush_log_at_trx_commit'] = \
                self.get_innodb_flush_log_at_trx_commit()
        # Use the validated value from the helper rather than re-reading the
        # raw config option, so both branches consistently apply the
        # INNODB_VALID_BUFFERING_VALUES validation.
        innodb_change_buffering = self.get_innodb_change_buffering()
        if innodb_change_buffering is not None:
            mysql_config['innodb_change_buffering'] = innodb_change_buffering
        if 'innodb-io-capacity' in config:
            mysql_config['innodb_io_capacity'] = config['innodb-io-capacity']
        # Set a sane default key_buffer size
        mysql_config['key_buffer'] = self.human_to_bytes('32M')
        mysql_config['innodb_buffer_pool_size'] = self.get_innodb_buffer_pool_size()
        return mysql_config
class MySQL8Helper(MySQLHelper):
    # MySQL 8 variant: grants no longer support IDENTIFIED BY, so users
    # must be created explicitly before granting.
    def grant_exists(self, db_name, db_user, remote_ip):
        """Return True if db_user@remote_ip already has ALL on db_name.*."""
        cursor = self.connection.cursor()
        priv_string = ("GRANT ALL PRIVILEGES ON {}.* "
                       "TO {}@{}".format(db_name, db_user, remote_ip))
        try:
            cursor.execute("SHOW GRANTS FOR '{}'@'{}'".format(db_user,
                                                              remote_ip))
            grants = [i[0] for i in cursor.fetchall()]
        except MySQLdb.OperationalError:
            # e.g. the user does not exist yet - no grants then.
            return False
        finally:
            cursor.close()
        # Different versions of MySQL use ' or `. Ignore these in the check.
        return priv_string in [
            i.replace("'", "").replace("`", "") for i in grants]
    def create_grant(self, db_name, db_user, remote_ip, password):
        """Grant ALL on db_name.* to db_user@remote_ip, creating the user."""
        if self.grant_exists(db_name, db_user, remote_ip):
            return
        # Make sure the user exists
        # MySQL8 must create the user before the grant
        self.create_user(db_user, remote_ip, password)
        cursor = self.connection.cursor()
        try:
            cursor.execute("GRANT ALL PRIVILEGES ON `{}`.* TO '{}'@'{}'"
                           .format(db_name, db_user, remote_ip))
        finally:
            cursor.close()
    def create_user(self, db_user, remote_ip, password):
        """Create db_user@remote_ip with the given password (idempotent:
        an already-existing user is logged and ignored)."""
        SQL_USER_CREATE = (
            "CREATE USER '{db_user}'@'{remote_ip}' "
            "IDENTIFIED BY '{password}'")
        cursor = self.connection.cursor()
        try:
            cursor.execute(SQL_USER_CREATE.format(
                db_user=db_user,
                remote_ip=remote_ip,
                password=password)
            )
        # NOTE(review): MySQLdb._exceptions is a private module path; it
        # works with mysqlclient but consider MySQLdb.OperationalError.
        except MySQLdb._exceptions.OperationalError:
            log("DB user {} already exists.".format(db_user),
                "WARNING")
        finally:
            cursor.close()
    def create_router_grant(self, db_user, remote_ip, password):
        """Create db_user@remote_ip and apply the grants MySQL Router needs
        (metadata schema, mysql.user and replication status tables)."""
        # Make sure the user exists
        # MySQL8 must create the user before the grant
        self.create_user(db_user, remote_ip, password)
        # Mysql-Router specific grants
        cursor = self.connection.cursor()
        try:
            cursor.execute("GRANT CREATE USER ON *.* TO '{}'@'{}' WITH GRANT "
                           "OPTION".format(db_user, remote_ip))
            cursor.execute("GRANT SELECT, INSERT, UPDATE, DELETE, EXECUTE ON "
                           "mysql_innodb_cluster_metadata.* TO '{}'@'{}'"
                           .format(db_user, remote_ip))
            cursor.execute("GRANT SELECT ON mysql.user TO '{}'@'{}'"
                           .format(db_user, remote_ip))
            cursor.execute("GRANT SELECT ON "
                           "performance_schema.replication_group_members "
                           "TO '{}'@'{}'".format(db_user, remote_ip))
            cursor.execute("GRANT SELECT ON "
                           "performance_schema.replication_group_member_stats "
                           "TO '{}'@'{}'".format(db_user, remote_ip))
            cursor.execute("GRANT SELECT ON "
                           "performance_schema.global_variables "
                           "TO '{}'@'{}'".format(db_user, remote_ip))
        finally:
            cursor.close()
    def configure_router(self, hostname, username):
        """Create the router user/grants for username from hostname and
        return the password to hand back over the relation."""
        if self.connection is None:
            self.connect(password=self.get_mysql_root_password())
        remote_ip = self.normalize_address(hostname)
        password = self.get_mysql_password(username)
        self.create_user(username, remote_ip, password)
        self.create_router_grant(username, remote_ip, password)
        return password
def get_prefix(requested, keys=None):
    """Return existing prefix or None.
    :param requested: Request string. i.e. novacell0_username
    :type requested: str
    :param keys: Keys to determine prefix. Defaults set in function.
    :type keys: List of str keys
    :returns: String prefix i.e. novacell0
    :rtype: Union[None, str]
    """
    # Shared-DB default suffixes when the caller supplies none.
    suffixes = keys if keys is not None else [
        "_database", "_username", "_hostname"]
    for suffix in suffixes:
        if requested.endswith(suffix):
            return requested[:-len(suffix)]
    return None
def get_db_data(relation_data, unprefixed):
    """Organize database requests into a collections.OrderedDict
    :param relation_data: shared-db relation data
    :type relation_data: dict
    :param unprefixed: Prefix to use for requests without a prefix. This should
                       be unique for each side of the relation to avoid
                       conflicts.
    :type unprefixed: str
    :returns: Order dict of databases and users
    :rtype: collections.OrderedDict
    """
    # Deep copy to avoid unintentionally changing relation data
    settings = copy.deepcopy(relation_data)
    # Drop transport-level keys that are not database requests.
    for transport_key in ("egress-subnets", "ingress-address",
                          "private-address"):
        if transport_key in settings:
            settings.pop(transport_key)
    # Requests without a prefix get the caller-supplied one.
    if {"database", "username", "hostname"}.issubset(settings):
        for bare_key in ("hostname", "database", "username"):
            settings["{}_{}".format(unprefixed, bare_key)] = (
                settings.pop(bare_key))
    # Group remaining keys by their prefix (text before the first '_').
    databases = collections.OrderedDict()
    for key, value in settings.items():
        prefix, _, field = key.partition("_")
        databases.setdefault(prefix, collections.OrderedDict())[field] = value
    return databases

View File

@ -0,0 +1,13 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,90 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import os
from charmhelpers.core import host
from charmhelpers.core.hookenv import (
config as config_get,
relation_get,
relation_ids,
related_units as relation_list,
log,
INFO,
)
# This file contains the CA cert from the charms ssl_ca configuration
# option, in future the file name should be updated reflect that.
CONFIG_CA_CERT_FILE = 'keystone_juju_ca_cert'
def get_cert(cn=None):
    """Return an (ssl_cert, ssl_key) pair for this service.

    Prefers the charm's own config; falls back to scanning
    identity-service relations. Either element may be None.

    :param cn: optional common name used to look up per-CN relation keys.
    """
    # TODO: deal with multiple https endpoints via charm config
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if cert and key:
        return (cert, key)
    log("Inspecting identity-service relations for SSL certificate.",
        level=INFO)
    cert = key = None
    if cn:
        cert_attr = 'ssl_cert_{}'.format(cn)
        key_attr = 'ssl_key_{}'.format(cn)
    else:
        cert_attr = 'ssl_cert'
        key_attr = 'ssl_key'
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # Keep the first non-empty value found for each of cert/key.
            cert = cert or relation_get(cert_attr, rid=r_id, unit=unit)
            key = key or relation_get(key_attr, rid=r_id, unit=unit)
    return (cert, key)
def get_ca_cert():
    """Return the CA certificate, or None.

    Uses the 'ssl_ca' charm option when set, otherwise searches the
    identity-service and identity-credentials relations.
    """
    ca_cert = config_get('ssl_ca')
    if ca_cert is not None:
        return ca_cert
    log("Inspecting identity-service relations for CA SSL certificate.",
        level=INFO)
    relation_id_list = (relation_ids('identity-service') +
                        relation_ids('identity-credentials'))
    for r_id in relation_id_list:
        for unit in relation_list(r_id):
            if ca_cert is None:
                ca_cert = relation_get('ca_cert', rid=r_id, unit=unit)
    return ca_cert
def retrieve_ca_cert(cert_file):
    """Return the raw bytes of cert_file, or None if it is not a file."""
    if not os.path.isfile(cert_file):
        return None
    with open(cert_file, 'rb') as crt:
        return crt.read()
def install_ca_cert(ca_cert):
    # Thin wrapper: install the given CA certificate system-wide under the
    # charm-wide CONFIG_CA_CERT_FILE name via the charmhelpers host helper.
    host.install_ca_cert(ca_cert, CONFIG_CA_CERT_FILE)

View File

@ -0,0 +1,451 @@
# Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2012 Canonical Ltd.
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
"""
Helpers for clustering and determining "cluster leadership" and other
clustering-related helpers.
"""
import functools
import subprocess
import os
import time
from socket import gethostname as get_unit_hostname
import six
from charmhelpers.core.hookenv import (
log,
relation_ids,
related_units as relation_list,
relation_get,
config as config_get,
INFO,
DEBUG,
WARNING,
unit_get,
is_leader as juju_is_leader,
status_set,
)
from charmhelpers.core.host import (
modulo_distribution,
)
from charmhelpers.core.decorators import (
retry_on_exception,
)
from charmhelpers.core.strutils import (
bool_from_string,
)
# Name of the pacemaker resource checked when determining the
# Designated Controller.
DC_RESOURCE_NAME = 'DC'
class HAIncompleteConfig(Exception):
    """Signals incomplete HA configuration."""
    pass
class HAIncorrectConfig(Exception):
    """Signals incorrect (as opposed to missing) HA configuration."""
    pass
class CRMResourceNotFound(Exception):
    """Signals that a requested CRM resource could not be found."""
    pass
class CRMDCNotFound(Exception):
    """Signals that the pacemaker Designated Controller could not be
    determined (crm failed, or no DC elected yet)."""
    pass
def is_elected_leader(resource):
    """Return True if this unit is the elected cluster leader.

    Leadership is determined by, in order of preference:

    1. Juju's native leadership election, when supported.
    2. The corosync/pacemaker CRM, when the unit is clustered.
    3. Otherwise the oldest surviving peer (lowest unit number) wins.

    :param resource: CRM resource name used for the corosync check.
    :returns: bool
    """
    try:
        return juju_is_leader()
    except NotImplementedError:
        # Older juju without leadership election: fall through to the
        # CRM / unit-number based mechanisms below.
        log('Juju leadership election feature not enabled'
            ', using fallback support',
            level=WARNING)

    if is_clustered():
        if is_crm_leader(resource):
            return True
        log('Deferring action to CRM leader.', level=INFO)
        return False

    peers = peer_units()
    if peers and not oldest_peer(peers):
        log('Deferring action to oldest service unit.', level=INFO)
        return False
    return True
def is_clustered():
    """Return True if any unit on an 'ha' relation reports 'clustered'."""
    return any(
        relation_get('clustered', rid=rid, unit=unit)
        for rid in (relation_ids('ha') or [])
        for unit in (relation_list(rid) or [])
    )
def is_crm_dc():
    """Determine leadership by querying the pacemaker Designated Controller.

    :returns: True when this host is the current DC, False otherwise.
    :raises CRMDCNotFound: if 'crm status' fails or reports no DC.
    """
    try:
        status = subprocess.check_output(['crm', 'status'],
                                         stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as ex:
        raise CRMDCNotFound(str(ex))

    if not isinstance(status, six.text_type):
        status = six.text_type(status, "utf-8")

    current_dc = ''
    for line in status.split('\n'):
        if line.startswith('Current DC'):
            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
            current_dc = line.split(':')[1].split()[0]
    if current_dc == get_unit_hostname():
        return True
    elif current_dc == 'NONE':
        raise CRMDCNotFound('Current DC: NONE')
    return False
@retry_on_exception(5, base_delay=2,
                    exc_type=(CRMResourceNotFound, CRMDCNotFound))
def is_crm_leader(resource, retry=False):
    """Return True if this unit is the elected corosync leader for resource.

    Queries the external "crm" tool; the decorator retries to avoid a
    false negative (see LP #1396246).

    :param resource: CRM resource name; DC_RESOURCE_NAME triggers a
                     Designated Controller check instead.
    :param retry: unused here; retained for interface compatibility.
    :raises CRMResourceNotFound: when crm reports the resource not running.
    """
    if resource == DC_RESOURCE_NAME:
        return is_crm_dc()

    try:
        status = subprocess.check_output(
            ['crm', 'resource', 'show', resource],
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        # crm failed entirely: treat as "not the leader".
        return False

    if not isinstance(status, six.text_type):
        status = six.text_type(status, "utf-8")

    if get_unit_hostname() in status:
        return True
    if "resource %s is NOT running" % (resource) in status:
        raise CRMResourceNotFound("CRM resource %s not found" % (resource))
    return False
def is_leader(resource):
    """Deprecated alias: logs a warning then delegates to is_crm_leader()."""
    log("is_leader is deprecated. Please consider using is_crm_leader "
        "instead.", level=WARNING)
    return is_crm_leader(resource)
def peer_units(peer_relation="cluster"):
    """Return a flat list of all units on the given peer relation."""
    return [unit
            for rid in (relation_ids(peer_relation) or [])
            for unit in (relation_list(rid) or [])]
def peer_ips(peer_relation='cluster', addr_key='private-address'):
    '''Return a dict of peers and their private-address'''
    return {
        unit: relation_get(addr_key, rid=rid, unit=unit)
        for rid in relation_ids(peer_relation)
        for unit in relation_list(rid)
    }
def oldest_peer(peers):
    """Return True if the local unit has the lowest unit number.

    Unit numbers are the "<app>/<n>" suffix of each unit name; the local
    number is read from the JUJU_UNIT_NAME environment variable.
    """
    local_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    remote_nos = (int(peer.split('/')[1]) for peer in peers)
    return all(local_no <= n for n in remote_nos)
def eligible_leader(resource):
    """Deprecated alias: logs a warning then delegates to is_elected_leader()."""
    log("eligible_leader is deprecated. Please consider using "
        "is_elected_leader instead.", level=WARNING)
    return is_elected_leader(resource)
def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS.

    returns: boolean
    '''
    use_https = config_get('use-https')
    if use_https and bool_from_string(use_https):
        return True

    if config_get('ssl_cert') and config_get('ssl_key'):
        return True

    for r_id in relation_ids('certificates'):
        for unit in relation_list(r_id):
            if relation_get('ca', rid=r_id, unit=unit):
                return True

    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # TODO - needs fixing for new helper as ssl_cert/key suffixes
            # with CN
            rel_state = [
                relation_get('https_keystone', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            ]
            # NOTE: works around (LP: #1203241)
            if all(v not in (None, '') for v in rel_state):
                return True
    return False
def determine_api_port(public_port, singlenode_mode=False):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the API service
    '''
    shift = 0
    # One step down behind haproxy (single node, peered, or clustered).
    if singlenode_mode or len(peer_units()) > 0 or is_clustered():
        shift += 1
    # Another step down behind the HTTPS reverse proxy.
    if https():
        shift += 1
    return public_port - (shift * 10)
def determine_apache_port(public_port, singlenode_mode=False):
    '''
    Description: Determine correct apache listening port based on public IP +
    state of the cluster.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the HAProxy service
    '''
    shift = 0
    # Shift one step down when haproxy fronts the service.
    if singlenode_mode or len(peer_units()) > 0 or is_clustered():
        shift += 1
    return public_port - (shift * 10)
# Convenience shorthand: determine_apache_port() with singlenode_mode
# pre-bound to True.
determine_apache_port_single = functools.partial(
    determine_apache_port, singlenode_mode=True)
def get_hacluster_config(exclude_keys=None):
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip, os-internal-hostname,
        os-admin-hostname, os-public-hostname, os-access-hostname

    param: exclude_keys: list of setting key(s) to be excluded.
    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing or incorrect.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
                'os-admin-hostname', 'os-public-hostname',
                'os-access-hostname']
    conf = {key: config_get(key)
            for key in settings
            if not (exclude_keys and key in exclude_keys)}
    if not valid_hacluster_config():
        raise HAIncorrectConfig('Insufficient or incorrect config data to '
                                'configure hacluster.')
    return conf
def valid_hacluster_config():
    '''
    Validate HA charm configuration.

    Exactly one of 'vip' or 'dns-ha' must be set; when dns-ha is used at
    least one of the os-*-hostname options is also required.

    :returns: boolean: valid config returns true.
    raises: HAIncorrectConfig if both or neither of vip/dns-ha are set.
    raises: HAIncompleteConfig if dns-ha is set without any hostname.
    '''
    vip = config_get('vip')
    dns = config_get('dns-ha')
    if bool(vip) == bool(dns):
        # Neither, or both: either way the config is unusable.
        msg = ('HA: Either vip or dns-ha must be set but not both in order to '
               'use high availability')
        status_set('blocked', msg)
        raise HAIncorrectConfig(msg)

    if not dns:
        log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
        return True

    # DNS HA: it is unknown which network spaces are in HA, so require
    # that at least one hostname (the minimum) is configured.
    for setting in ('os-internal-hostname', 'os-admin-hostname',
                    'os-public-hostname', 'os-access-hostname'):
        value = config_get(setting)
        if value:
            log('DNS HA: At least one hostname is set {}: {}'
                ''.format(setting, value),
                level=DEBUG)
            return True

    msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
           'DNS HA')
    status_set('blocked', msg)
    raise HAIncompleteConfig(msg)
def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs    : OSTemplateRenderer: A config tempating object to inspect for
                  a complete https context.

    :vip_setting:                str: Setting in charm config that specifies
                                      VIP address.
    '''
    scheme = 'https' if 'https' in configs.complete_contexts() else 'http'
    if is_clustered():
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)
def distributed_wait(modulo=None, wait=None, operation_name='operation'):
    ''' Distribute operations by waiting based on modulo_distribution

    If modulo and or wait are not set, check config_get for those values.
    If config values are not set, default to modulo=3 and wait=30.

    :param modulo: int The modulo number creates the group distribution
    :param wait: int The constant time wait value
    :param operation_name: string Operation name for status message
                           i.e.  'restart'
    :side effect: Calls config_get()
    :side effect: Calls log()
    :side effect: Calls status_set()
    :side effect: Calls time.sleep()
    '''
    if modulo is None:
        modulo = config_get('modulo-nodes') or 3
    if wait is None:
        wait = config_get('known-wait') or 30
    if juju_is_leader():
        # The leader should never wait
        calculated_wait = 0
    else:
        # non_zero_wait=True guarantees the non-leader who gets modulo 0
        # will still wait
        calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
                                              non_zero_wait=True)
    msg = "Waiting {} seconds for {} ...".format(calculated_wait,
                                                 operation_name)
    log(msg, DEBUG)
    status_set('maintenance', msg)
    time.sleep(calculated_wait)
def get_managed_services_and_ports(services, external_ports,
                                   external_services=None,
                                   port_conv_f=determine_apache_port_single):
    """Get the services and ports managed by this charm.

    Return only the services and corresponding ports that are managed by this
    charm. This excludes haproxy when there is a relation with hacluster. This
    is because this charm passes responsibility for stopping and starting
    haproxy to hacluster.

    Similarly, if a relation with hacluster exists then the ports returned by
    this method correspond to those managed by the apache server rather than
    haproxy.

    Note: the supplied ``services`` list is filtered in place.

    :param services: List of services.
    :type services: List[str]
    :param external_ports: List of ports managed by external services.
    :type external_ports: List[int]
    :param external_services: List of services to be removed if ha relation is
                              present.
    :type external_services: List[str]
    :param port_conv_f: Function to apply to ports to calculate the ports
                        managed by services controlled by this charm.
    :type port_convert_func: f()
    :returns: A tuple containing a list of services first followed by a list of
              ports.
    :rtype: Tuple[List[str], List[int]]
    """
    if external_services is None:
        external_services = ['haproxy']
    if relation_ids('ha'):
        for svc in external_services:
            if svc in services:
                # Hand management of this service over to hacluster.
                services.remove(svc)
        external_ports = [port_conv_f(p) for p in external_ports]
    return services, external_ports

View File

@ -0,0 +1,38 @@
# Juju charm-helpers hardening library
## Description
This library provides multiple implementations of system and application
hardening that conform to the standards of http://hardening.io/.
Current implementations include:
* OS
* SSH
* MySQL
* Apache
## Requirements
* Juju Charms
## Usage
1. Synchronise this library into your charm and add the harden() decorator
(from contrib.hardening.harden) to any functions or methods you want to use
to trigger hardening of your application/system.
2. Add a config option called 'harden' to your charm config.yaml and set it to
a space-delimited list of hardening modules you want to run e.g. "os ssh"
3. Override any config defaults (contrib.hardening.defaults) by adding a file
called hardening.yaml to your charm root containing the name(s) of the
modules whose settings you want to override at the root level, followed by
any settings to override e.g.
os:
general:
desktop_enable: True
4. Now just run your charm as usual and hardening will be applied each time the
hook runs.

View File

@ -0,0 +1,13 @@
# Copyright 2016 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,17 @@
# Copyright 2016 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path

# Absolute path to this package's bundled template files, resolved
# relative to this module so it is independent of the caller's cwd.
TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')

View File

@ -0,0 +1,29 @@
# Copyright 2016 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charmhelpers.core.hookenv import (
log,
DEBUG,
)
from charmhelpers.contrib.hardening.apache.checks import config
def run_apache_checks():
    """Run every registered Apache hardening audit to completion."""
    log("Starting Apache hardening checks.", level=DEBUG)
    for audit in config.get_audits():
        log("Running '%s' check" % (audit.__class__.__name__), level=DEBUG)
        audit.ensure_compliance()
    log("Apache hardening checks complete.", level=DEBUG)

View File

@ -0,0 +1,104 @@
# Copyright 2016 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import six
import subprocess
from charmhelpers.core.hookenv import (
log,
INFO,
)
from charmhelpers.contrib.hardening.audits.file import (
FilePermissionAudit,
DirectoryPermissionAudit,
NoReadWriteForOther,
TemplatedFile,
DeletedFile
)
from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit
from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR
from charmhelpers.contrib.hardening import utils
def get_audits():
    """Get Apache hardening config audits.

    :returns: list of audits (empty when apache2 is not installed).
    """
    # Nothing to do on hosts without apache2 installed.
    if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0:
        log("Apache server does not appear to be installed on this node - "
            "skipping apache hardening", level=INFO)
        return []

    context = ApacheConfContext()
    settings = utils.get_settings('apache')
    apache_dir = settings['common']['apache_dir']
    restart_apache = [{'service': 'apache2', 'actions': ['restart']}]

    audits = []
    # Main config file must be root-owned and not world/group writable.
    audits.append(FilePermissionAudit(
        paths=os.path.join(apache_dir, 'apache2.conf'),
        user='root', group='root', mode=0o0640))
    # Rendered hardening templates; apache restarts when they change.
    audits.append(TemplatedFile(
        os.path.join(apache_dir, 'mods-available/alias.conf'),
        context,
        TEMPLATES_DIR,
        mode=0o0640,
        user='root',
        service_actions=restart_apache))
    audits.append(TemplatedFile(
        os.path.join(apache_dir, 'conf-enabled/99-hardening.conf'),
        context,
        TEMPLATES_DIR,
        mode=0o0640,
        user='root',
        service_actions=restart_apache))
    audits.append(DirectoryPermissionAudit(apache_dir,
                                           user='root',
                                           group='root',
                                           mode=0o0750))
    audits.append(DisabledModuleAudit(
        settings['hardening']['modules_to_disable']))
    audits.append(NoReadWriteForOther(apache_dir))
    audits.append(DeletedFile(['/var/www/html/index.html']))
    return audits
class ApacheConfContext(object):
    """Defines the set of key/value pairs to set in a apache config file.

    This context, when called, will return a dictionary containing the
    key/value pairs of setting to specify in the
    /etc/apache/conf-enabled/hardening.conf file.
    """

    def __call__(self):
        settings = utils.get_settings('apache')
        ctxt = settings['hardening']

        # Derive the running Apache version from 'apache2 -v' output.
        version_output = subprocess.check_output(['apache2', '-v'])
        if six.PY3:
            version_output = version_output.decode('utf-8')
        match = re.search(r'.+version: Apache/(.+?)\s.+', version_output)
        ctxt['apache_version'] = match.group(1)
        ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
        return ctxt

View File

@ -0,0 +1,32 @@
###############################################################################
# WARNING: This configuration file is maintained by Juju. Local changes may
# be overwritten.
###############################################################################
# Restrict HTTP request methods everywhere to the configured allow-list.
<Location / >
<LimitExcept {{ allowed_http_methods }} >
# http://httpd.apache.org/docs/2.4/upgrading.html
# NOTE(review): this is a lexicographic string comparison of the version
# (e.g. '2.10' would compare less than '2.2') - confirm acceptable for
# the supported Apache releases.
{% if apache_version > '2.2' -%}
Require all granted
{% else -%}
Order Allow,Deny
Deny from all
{% endif %}
</LimitExcept>
</Location>
# Disable directory listings and symlink following filesystem-wide and
# for the default docroot; disallow .htaccess overrides.
<Directory />
Options -Indexes -FollowSymLinks
AllowOverride None
</Directory>
<Directory /var/www/>
Options -Indexes -FollowSymLinks
AllowOverride None
</Directory>
# Remaining values are template placeholders filled in from the
# hardening settings context.
TraceEnable {{ traceenable }}
ServerTokens {{ servertokens }}
SSLHonorCipherOrder {{ honor_cipher_order }}
SSLCipherSuite {{ cipher_suite }}

View File

@ -0,0 +1,31 @@
###############################################################################
# WARNING: This configuration file is maintained by Juju. Local changes may
# be overwritten.
###############################################################################
<IfModule alias_module>
#
# Aliases: Add here as many aliases as you need (with no limit). The format is
# Alias fakename realname
#
# Note that if you include a trailing / on fakename then the server will
# require it to be present in the URL.  So "/icons" isn't aliased in this
# example, only "/icons/".  If the fakename is slash-terminated, then the
# realname must also be slash terminated, and if the fakename omits the
# trailing slash, the realname must also omit it.
#
# We include the /icons/ alias for FancyIndexed directory listings.  If
# you do not use FancyIndexing, you may comment this out.
#
Alias /icons/ "{{ apache_icondir }}/"
<Directory "{{ apache_icondir }}">
Options -Indexes -MultiViews -FollowSymLinks
AllowOverride None
# NOTE(review): this equality only matches when the version string is
# exactly '2.4'; a full version such as '2.4.18' falls through to the
# legacy Allow/Deny directives - confirm this is intended.
{% if apache_version == '2.4' -%}
Require all granted
{% else -%}
Order allow,deny
Allow from all
{% endif %}
</Directory>
</IfModule>

View File

@ -0,0 +1,54 @@
# Copyright 2016 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class BaseAudit(object):  # NO-QA
    """Common base class for hardening checks.

    Subclasses implement ensure_compliance() to verify (and, where
    possible, enforce) a particular hardening criterion.  The optional
    ``unless`` keyword suppresses enforcement: it may be a plain truthy
    value or a zero-argument callable evaluated at action time.
    """

    def __init__(self, *args, **kwargs):
        # Truthy value (or callable returning one) disables the action.
        self.unless = kwargs.get('unless', None)
        super(BaseAudit, self).__init__()

    def ensure_compliance(self):
        """Check whether the system complies with this audit.

        Subclasses override this; implementations should raise when the
        check performed is not in compliance.
        """
        pass

    def _take_action(self):
        """Return True when the audit's action should be performed.

        The result is the inverse truthiness of ``unless``; callables
        are invoked (with no arguments) before being negated.
        """
        if self.unless is None:
            # No override configured: always act.
            return True
        if callable(self.unless):
            return not self.unless()
        return not self.unless

View File

@ -0,0 +1,105 @@
# Copyright 2016 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
import six
from charmhelpers.core.hookenv import (
log,
INFO,
ERROR,
)
from charmhelpers.contrib.hardening.audits import BaseAudit
class DisabledModuleAudit(BaseAudit):
    """Audits Apache2 modules.

    Determines if the apache2 modules are enabled. If the modules are enabled
    then they are removed in the ensure_compliance.
    """
    def __init__(self, modules, **kwargs):
        """
        :param modules: a module name, a list of module names, or None.
        """
        # BUGFIX: invoke the base initializer so BaseAudit state (notably
        # self.unless, read by the inherited _take_action()) is defined;
        # the original skipped this, leaving self.unless unset.
        super(DisabledModuleAudit, self).__init__(**kwargs)
        if modules is None:
            self.modules = []
        elif isinstance(modules, six.string_types):
            # Accept a single module name given as a plain string.
            self.modules = [modules]
        else:
            self.modules = modules

    def ensure_compliance(self):
        """Ensures that the modules are not loaded."""
        if not self.modules:
            return

        try:
            loaded_modules = self._get_loaded_modules()
            non_compliant_modules = []
            for module in self.modules:
                if module in loaded_modules:
                    log("Module '%s' is enabled but should not be." %
                        (module), level=INFO)
                    non_compliant_modules.append(module)

            if len(non_compliant_modules) == 0:
                return

            for module in non_compliant_modules:
                self._disable_module(module)
            self._restart_apache()
        except subprocess.CalledProcessError as e:
            log('Error occurred auditing apache module compliance. '
                'This may have been already reported. '
                'Output is: %s' % e.output, level=ERROR)

    @staticmethod
    def _get_loaded_modules():
        """Returns the modules which are enabled in Apache."""
        output = subprocess.check_output(['apache2ctl', '-M'])
        if six.PY3:
            output = output.decode('utf-8')
        modules = []
        for line in output.splitlines():
            # Each line of the enabled module output looks like:
            #  module_name (static|shared)
            # Plus a header line at the top of the output which is stripped
            # out by the regex.
            matcher = re.search(r'^ (\S*)_module (\S*)', line)
            if matcher:
                modules.append(matcher.group(1))
        return modules

    @staticmethod
    def _disable_module(module):
        """Disables the specified module in Apache."""
        try:
            subprocess.check_call(['a2dismod', module])
        except subprocess.CalledProcessError as e:
            # Note: catch error here to allow the attempt of disabling
            # multiple modules in one go rather than failing after the
            # first module fails.
            log('Error occurred disabling module %s. '
                'Output is: %s' % (module, e.output), level=ERROR)

    @staticmethod
    def _restart_apache():
        """Restarts the apache process"""
        subprocess.check_output(['service', 'apache2', 'restart'])

    @staticmethod
    def is_ssl_enabled():
        """Check if SSL module is enabled or not"""
        return 'ssl' in DisabledModuleAudit._get_loaded_modules()

View File

@ -0,0 +1,104 @@
# Copyright 2016 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import # required for external apt import
from six import string_types
from charmhelpers.fetch import (
apt_cache,
apt_purge
)
from charmhelpers.core.hookenv import (
log,
DEBUG,
WARNING,
)
from charmhelpers.contrib.hardening.audits import BaseAudit
from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg
class AptConfig(BaseAudit):
    """Audits APT configuration values against expected settings.

    ``config`` is a list of dicts, each with 'key', 'expected' and an
    optional 'default' entry; mismatches are logged as warnings.
    """

    def __init__(self, config, **kwargs):
        # BUGFIX: call the base initializer so BaseAudit state (e.g.
        # self.unless, used by the inherited _take_action()) is defined;
        # the original omitted this call.
        super(AptConfig, self).__init__(**kwargs)
        self.config = config

    def verify_config(self):
        """Log a warning for each APT setting differing from 'expected'."""
        apt_pkg.init()
        for cfg in self.config:
            value = apt_pkg.config.get(cfg['key'], cfg.get('default', ''))
            if value and value != cfg['expected']:
                log("APT config '%s' has unexpected value '%s' "
                    "(expected='%s')" %
                    (cfg['key'], value, cfg['expected']), level=WARNING)

    def ensure_compliance(self):
        """Audit-only check: report (but do not change) non-compliance."""
        self.verify_config()
class RestrictedPackages(BaseAudit):
    """Class used to audit restricted packages on the system.

    Any installed package named in ``pkgs`` is purged; virtual packages
    are resolved to the concrete packages that provide them.
    """
    def __init__(self, pkgs, **kwargs):
        super(RestrictedPackages, self).__init__(**kwargs)
        if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
            # A plain string is treated as a whitespace-separated list.
            # NOTE(review): a non-iterable, non-string value would fail
            # on .split() here - assumed not to occur in practice.
            self.pkgs = pkgs.split()
        else:
            self.pkgs = pkgs

    def ensure_compliance(self):
        """Purge any restricted packages found installed on the system."""
        cache = apt_cache()

        for p in self.pkgs:
            # Skip names unknown to the apt cache entirely.
            if p not in cache:
                continue

            pkg = cache[p]
            if not self.is_virtual_package(pkg):
                if not pkg.current_ver:
                    # Known to apt but not installed: nothing to purge.
                    log("Package '%s' is not installed." % pkg.name,
                        level=DEBUG)
                    continue
                else:
                    log("Restricted package '%s' is installed" % pkg.name,
                        level=WARNING)
                    self.delete_package(cache, pkg)
            else:
                # Virtual package: purge whatever provides it.
                log("Checking restricted virtual package '%s' provides" %
                    pkg.name, level=DEBUG)
                self.delete_package(cache, pkg)

    def delete_package(self, cache, pkg):
        """Deletes the package from the system.

        Deletes the package from the system, properly handling virtual
        packages.

        :param cache: the apt cache
        :param pkg: the package to remove
        """
        if self.is_virtual_package(pkg):
            log("Package '%s' appears to be virtual - purging provides" %
                pkg.name, level=DEBUG)
            # Recurse into each concrete package providing this virtual
            # one (provides_list entries are apt_pkg-style tuples whose
            # third element carries the providing version).
            for _p in pkg.provides_list:
                self.delete_package(cache, _p[2].parent_pkg)
        elif not pkg.current_ver:
            log("Package '%s' not installed" % pkg.name, level=DEBUG)
            return
        else:
            log("Purging package '%s'" % pkg.name, level=DEBUG)
            apt_purge(pkg.name)

    def is_virtual_package(self, pkg):
        # A package that provides others but has no versions of its own
        # is virtual (mapping-style pkg interface from ubuntu_apt_pkg).
        return (pkg.get('has_provides', False) and
                not pkg.get('has_versions', False))

View File

@ -0,0 +1,550 @@
# Copyright 2016 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grp
import os
import pwd
import re
from subprocess import (
CalledProcessError,
check_output,
check_call,
)
from traceback import format_exc
from six import string_types
from stat import (
S_ISGID,
S_ISUID
)
from charmhelpers.core.hookenv import (
log,
DEBUG,
INFO,
WARNING,
ERROR,
)
from charmhelpers.core import unitdata
from charmhelpers.core.host import file_hash
from charmhelpers.contrib.hardening.audits import BaseAudit
from charmhelpers.contrib.hardening.templating import (
get_template_path,
render_and_write,
)
from charmhelpers.contrib.hardening import utils
class BaseFileAudit(BaseAudit):
    """Base class for audits applied to one or more filesystem paths.

    Subclasses provide is_compliant()/comply(); this class drives the
    check-then-enforce flow across every registered path.
    """

    def __init__(self, paths, always_comply=False, *args, **kwargs):
        """
        :param paths: string path of list of paths of files we want to apply
                      compliance checks are criteria to.
        :param always_comply: if true compliance criteria is always applied
                              else compliance is skipped for non-existent
                              paths.
        """
        super(BaseFileAudit, self).__init__(*args, **kwargs)
        self.always_comply = always_comply
        if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
            # Normalise a single path to a one-element list.
            self.paths = [paths]
        else:
            self.paths = paths

    def ensure_compliance(self):
        """Check every registered path, enforcing criteria where needed."""
        for path in self.paths:
            if not os.path.exists(path):
                if not self.always_comply:
                    log("Non-existent path '%s' - skipping compliance check"
                        % (path), level=INFO)
                    continue
            elif self.is_compliant(path):
                # Already compliant: nothing to do for this path.
                continue
            else:
                log('File %s is not in compliance.' % path, level=INFO)

            if self._take_action():
                log("Applying compliance criteria to '%s'" % (path),
                    level=INFO)
                self.comply(path)

    def is_compliant(self, path):
        """Return True when *path* meets the audit criteria (abstract).

        :param path: the path to the file that should be checked.
        """
        raise NotImplementedError

    def comply(self, path):
        """Bring *path* into compliance (abstract).

        :param path: the path to the file that should be enforced.
        """
        raise NotImplementedError

    @classmethod
    def _get_stat(cls, path):
        """Return the POSIX os.stat() result for *path*.

        :param path: the path to get the st_stat information for.
        :returns: an os.stat_result for the path (raises if it is absent).
        """
        return os.stat(path)
class FilePermissionAudit(BaseFileAudit):
    """Implements an audit for file permissions and ownership for a user.

    This class implements functionality that ensures that a specific
    user/group will own the file(s) specified and that the permissions
    specified are applied properly to the file.
    """
    def __init__(self, paths, user, group=None, mode=0o600, **kwargs):
        self.user = user
        self.group = group
        self.mode = mode
        # BUGFIX: the original forwarded (user, group, mode) positionally,
        # so BaseFileAudit received the user name in its 'always_comply'
        # slot (any truthy name => enforcement even for missing paths).
        # Only the paths (plus explicit keyword args) belong upstream.
        super(FilePermissionAudit, self).__init__(paths, **kwargs)

    @property
    def user(self):
        # pwd struct for the configured owner (None when lookup failed).
        return self._user

    @user.setter
    def user(self, name):
        try:
            user = pwd.getpwnam(name)
        except KeyError:
            log('Unknown user %s' % name, level=ERROR)
            user = None
        self._user = user

    @property
    def group(self):
        # grp struct for the configured group (None when lookup failed).
        return self._group

    @group.setter
    def group(self, name):
        try:
            group = None
            if name:
                group = grp.getgrnam(name)
            else:
                # No group supplied: default to the owner's primary group.
                # NOTE(review): assumes the user lookup succeeded; an
                # unknown user name leaves self.user as None here.
                group = grp.getgrgid(self.user.pw_gid)
        except KeyError:
            log('Unknown group %s' % name, level=ERROR)
        self._group = group

    def is_compliant(self, path):
        """Checks if the path is in compliance.

        Used to determine if the path specified meets the necessary
        requirements to be in compliance with the check itself.

        :param path: the file path to check
        :returns: True if the path is compliant, False otherwise.
        """
        stat = self._get_stat(path)
        user = self.user
        group = self.group
        compliant = True
        if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid:
            log('File %s is not owned by %s:%s.' % (path, user.pw_name,
                                                    group.gr_name),
                level=INFO)
            compliant = False

        # POSIX refers to the st_mode bits as corresponding to both the
        # file type and file permission bits, where the least significant 12
        # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the
        # file permission bits (8-0)
        perms = stat.st_mode & 0o7777
        if perms != self.mode:
            log('File %s has incorrect permissions, currently set to %s' %
                (path, oct(stat.st_mode & 0o7777)), level=INFO)
            compliant = False
        return compliant

    def comply(self, path):
        """Issues a chown and chmod to the file paths specified."""
        utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name,
                                 self.mode)
class DirectoryPermissionAudit(FilePermissionAudit):
    """Performs a permission check for the specified directory path."""
    def __init__(self, paths, user, group=None, mode=0o600,
                 recursive=True, **kwargs):
        super(DirectoryPermissionAudit, self).__init__(paths, user, group,
                                                       mode, **kwargs)
        # When True, walk the tree instead of checking only the top path.
        self.recursive = recursive

    def is_compliant(self, path):
        """Checks if the directory is compliant.

        Used to determine if the path specified and all of its children
        directories are in compliance with the check itself.

        :param path: the directory path to check
        :returns: True if the directory tree is compliant, otherwise False.
        """
        if not os.path.isdir(path):
            log('Path specified %s is not a directory.' % path, level=ERROR)
            raise ValueError("%s is not a directory." % path)

        if not self.recursive:
            return super(DirectoryPermissionAudit, self).is_compliant(path)

        compliant = True
        for root, dirs, _ in os.walk(path):
            # NOTE(review): only leaf directories (those with no
            # subdirectories) are actually checked; intermediate
            # directories are skipped. Confirm this is the intended scope.
            if len(dirs) > 0:
                continue

            if not super(DirectoryPermissionAudit, self).is_compliant(root):
                compliant = False
                continue
        return compliant

    def comply(self, path):
        # NOTE(review): the inverse of is_compliant() above - only
        # directories that *have* subdirectories are fixed here, so leaf
        # directories are never complied. This looks inconsistent; verify
        # intent before relying on enforcement.
        for root, dirs, _ in os.walk(path):
            if len(dirs) > 0:
                super(DirectoryPermissionAudit, self).comply(root)
class ReadOnly(BaseFileAudit):
    """Audits that files and folders are read only."""

    def __init__(self, paths, *args, **kwargs):
        super(ReadOnly, self).__init__(paths=paths, *args, **kwargs)

    def is_compliant(self, path):
        """Check that no file under ``path`` is group/other writable.

        :param path: path handed to find(1) for the scan.
        :returns: True when compliant, False on findings or scan error.
        """
        try:
            output = check_output(['find', path, '-perm', '-go+w',
                                   '-type', 'f']).strip()

            # The find above will find any files which have permission sets
            # which allow too broad of write access. As such, the path is
            # compliant if there is no output.
            if output:
                return False

            return True
        except CalledProcessError as e:
            # BUGFIX: traceback.format_exc() takes an optional traceback
            # *limit*, not an exception; passing ``e`` raised TypeError
            # (Python 3) inside the error handler itself.
            log('Error occurred checking finding writable files for %s. '
                'Error information is: command %s failed with returncode '
                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
                                           format_exc()), level=ERROR)
            return False

    def comply(self, path):
        """Strip group/other write permission recursively under ``path``."""
        try:
            check_output(['chmod', 'go-w', '-R', path])
        except CalledProcessError as e:
            # BUGFIX: same format_exc(e) -> format_exc() correction as above.
            log('Error occurred removing writeable permissions for %s. '
                'Error information is: command %s failed with returncode '
                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
                                           format_exc()), level=ERROR)
class NoReadWriteForOther(BaseFileAudit):
    """Ensures that the files found under the base path are not readable or
    writable by anyone other than the owner or the group.
    """
    def __init__(self, paths):
        super(NoReadWriteForOther, self).__init__(paths)

    def is_compliant(self, path):
        """Check that no file under ``path`` grants read/write to 'other'.

        :param path: path handed to find(1) for the scan.
        :returns: True when compliant, False on findings or scan error.
        """
        try:
            cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o',
                   '-perm', '-o+w', '-type', 'f']
            output = check_output(cmd).strip()

            # The find above here will find any files which have read or
            # write permissions for other, meaning there is too broad of access
            # to read/write the file. As such, the path is compliant if there's
            # no output.
            if output:
                return False

            return True
        except CalledProcessError as e:
            log('Error occurred while finding files which are readable or '
                'writable to the world in %s. '
                'Command output is: %s.' % (path, e.output), level=ERROR)
            # BUGFIX: previously fell off the end and implicitly returned
            # None on error; treat a failed scan as non-compliant, like the
            # sibling audits do.
            return False

    def comply(self, path):
        """Remove read/write permission for 'other' recursively."""
        try:
            check_output(['chmod', '-R', 'o-rw', path])
        except CalledProcessError as e:
            # BUGFIX: log() was previously called without a level; mark this
            # as an error consistently with the other audits.
            log('Error occurred attempting to change modes of files under '
                'path %s. Output of command is: %s' % (path, e.output),
                level=ERROR)
class NoSUIDSGIDAudit(BaseFileAudit):
    """Audits that specified files do not have SUID/SGID bits set."""

    def __init__(self, paths, *args, **kwargs):
        super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs)

    def is_compliant(self, path):
        """Return True unless ``path`` has the setuid or setgid bit set."""
        stat = self._get_stat(path)
        if (stat.st_mode & (S_ISGID | S_ISUID)) != 0:
            return False

        return True

    def comply(self, path):
        """Strip the suid/sgid bits from ``path``."""
        try:
            log('Removing suid/sgid from %s.' % path, level=DEBUG)
            check_output(['chmod', '-s', path])
        except CalledProcessError as e:
            # BUGFIX: traceback.format_exc() takes an optional traceback
            # limit, not an exception object; passing ``e`` broke the
            # error handler (TypeError on Python 3).
            log('Error occurred removing suid/sgid from %s.'
                'Error information is: command %s failed with returncode '
                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
                                           format_exc()), level=ERROR)
class TemplatedFile(BaseFileAudit):
    """The TemplatedFileAudit audits the contents of a templated file.

    This audit renders a file from a template, sets the appropriate file
    permissions, then generates a hashsum with which to check the content
    changed.
    """
    def __init__(self, path, context, template_dir, mode, user='root',
                 group='root', service_actions=None, **kwargs):
        """
        :param path: path of the rendered file to audit.
        :param context: callable returning the template context (invoked as
                        ``self.context()`` in comply()).
        :param template_dir: directory containing the source template.
        :param mode: permission bits to enforce on the rendered file.
        :param user: owner to enforce on the rendered file.
        :param group: group to enforce on the rendered file.
        :param service_actions: optional list of dicts of the form
                                {'service': <name>, 'actions': [<action>...]}
                                run after the file is (re-)written.
        """
        self.context = context
        self.user = user
        self.group = group
        self.mode = mode
        self.template_dir = template_dir
        self.service_actions = service_actions
        # always_comply=True: the file is re-rendered on every audit pass,
        # not only when a prior check flagged a mismatch.
        super(TemplatedFile, self).__init__(paths=path, always_comply=True,
                                            **kwargs)

    def is_compliant(self, path):
        """Determines if the templated file is compliant.

        A templated file is only compliant if it has not changed (as
        determined by its sha256 hashsum) AND its file permissions are set
        appropriately.

        :param path: the path to check compliance.
        """
        same_templates = self.templates_match(path)
        same_content = self.contents_match(path)
        same_permissions = self.permissions_match(path)

        if same_content and same_permissions and same_templates:
            return True

        return False

    def run_service_actions(self):
        """Run any actions on services requested."""
        if not self.service_actions:
            return

        for svc_action in self.service_actions:
            name = svc_action['service']
            actions = svc_action['actions']
            log("Running service '%s' actions '%s'" % (name, actions),
                level=DEBUG)
            for action in actions:
                cmd = ['service', name, action]
                try:
                    check_call(cmd)
                except CalledProcessError as exc:
                    # Best-effort: a failed service action is logged but does
                    # not abort the remaining actions.
                    log("Service name='%s' action='%s' failed - %s" %
                        (name, action, exc), level=WARNING)

    def comply(self, path):
        """Ensures the contents and the permissions of the file.

        :param path: the path to correct
        """
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        self.pre_write()
        render_and_write(self.template_dir, path, self.context())
        utils.ensure_permissions(path, self.user, self.group, self.mode)
        self.run_service_actions()
        # Persist the freshly-rendered content hash so later
        # contents_match() calls compare against what was just written.
        self.save_checksum(path)
        self.post_write()

    def pre_write(self):
        """Invoked prior to writing the template."""
        pass

    def post_write(self):
        """Invoked after writing the template."""
        pass

    def templates_match(self, path):
        """Determines if the template files are the same.

        The template file equality is determined by the hashsum of the
        template files themselves. If there is no hashsum, then the content
        cannot be sure to be the same so treat it as if they changed.
        Otherwise, return whether or not the hashsums are the same.

        :param path: the path to check
        :returns: boolean
        """
        template_path = get_template_path(self.template_dir, path)
        key = 'hardening:template:%s' % template_path
        template_checksum = file_hash(template_path)
        kv = unitdata.kv()
        stored_tmplt_checksum = kv.get(key)
        if not stored_tmplt_checksum:
            # First sighting of this template: record its checksum.
            kv.set(key, template_checksum)
            kv.flush()
            log('Saved template checksum for %s.' % template_path,
                level=DEBUG)
            # Since we don't have a template checksum, then assume it doesn't
            # match and return that the template is different.
            return False
        elif stored_tmplt_checksum != template_checksum:
            # Template changed on disk: update the stored checksum and
            # report a mismatch so the file is re-rendered.
            kv.set(key, template_checksum)
            kv.flush()
            log('Updated template checksum for %s.' % template_path,
                level=DEBUG)
            return False

        # Here the template hasn't changed based upon the calculated
        # checksum of the template and what was previously stored.
        return True

    def contents_match(self, path):
        """Determines if the file content is the same.

        This is determined by comparing hashsum of the file contents and
        the saved hashsum. If there is no hashsum, then the content cannot
        be sure to be the same so treat them as if they are not the same.
        Otherwise, return True if the hashsums are the same, False if they
        are not the same.

        :param path: the file to check.
        """
        checksum = file_hash(path)

        kv = unitdata.kv()
        stored_checksum = kv.get('hardening:%s' % path)
        if not stored_checksum:
            # If the checksum hasn't been generated, return False to ensure
            # the file is written and the checksum stored.
            log('Checksum for %s has not been calculated.' % path, level=DEBUG)
            return False
        elif stored_checksum != checksum:
            log('Checksum mismatch for %s.' % path, level=DEBUG)
            return False

        return True

    def permissions_match(self, path):
        """Determines if the file owner and permissions match.

        :param path: the path to check.
        """
        audit = FilePermissionAudit(path, self.user, self.group, self.mode)
        return audit.is_compliant(path)

    def save_checksum(self, path):
        """Calculates and saves the checksum for the path specified.

        :param path: the path of the file to save the checksum.
        """
        checksum = file_hash(path)
        kv = unitdata.kv()
        kv.set('hardening:%s' % path, checksum)
        kv.flush()
class DeletedFile(BaseFileAudit):
    """Audit to ensure that a file is deleted."""

    def __init__(self, paths):
        super(DeletedFile, self).__init__(paths)

    def is_compliant(self, path):
        """Compliant when ``path`` no longer exists on disk."""
        return os.path.exists(path) is False

    def comply(self, path):
        """Delete ``path`` to bring it into compliance."""
        os.remove(path)
class FileContentAudit(BaseFileAudit):
    """Audit the contents of a file against expected regex matches."""

    def __init__(self, paths, cases, **kwargs):
        # Regexes expected to match the file contents.
        self.pass_cases = cases.get('pass', [])
        # Regexes expected to NOT match the file contents.
        self.fail_cases = cases.get('fail', [])
        super(FileContentAudit, self).__init__(paths, **kwargs)

    def is_compliant(self, path):
        """
        Given a set of content matching cases i.e. tuple(regex, bool) where
        bool value denotes whether or not regex is expected to match, check
        that all cases match as expected with the contents of the file.
        Cases can be expected to pass or fail.

        :param path: Path of file to check.
        :returns: Boolean value representing whether or not all cases are
                  found to be compliant.
        """
        log("Auditing contents of file '%s'" % (path), level=DEBUG)
        with open(path, 'r') as fd:
            contents = fd.read()

        # Fold both case lists into (pattern, should_match) pairs so a
        # single loop can evaluate them.
        expectations = ([(p, True) for p in self.pass_cases] +
                        [(p, False) for p in self.fail_cases])

        matches = 0
        for pattern, should_match in expectations:
            compiled = re.compile(pattern, flags=re.MULTILINE)
            found = re.search(compiled, contents) is not None
            if found == should_match:
                matches += 1
            elif should_match:
                log("Pattern '%s' was expected to pass but instead it failed"
                    % (pattern), level=WARNING)
            else:
                log("Pattern '%s' was expected to fail but instead it passed"
                    % (pattern), level=WARNING)

        total = len(expectations)
        log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
        return matches == total

    def comply(self, *args, **kwargs):
        """NOOP since we just issue warnings. This is to avoid the
        NotImplementedError.
        """
        log("Not applying any compliance criteria, only checks.", level=INFO)

View File

@ -0,0 +1,16 @@
# NOTE: this file contains the default configuration for the 'apache' hardening
# code. If you want to override any settings you must add them to a file
# called hardening.yaml in the root directory of your charm using the
# name 'apache' as the root key followed by any of the following with new
# values.
common:
apache_dir: '/etc/apache2'
hardening:
traceenable: 'off'
allowed_http_methods: "GET POST"
modules_to_disable: [ cgi, cgid ]
servertokens: 'Prod'
honor_cipher_order: 'on'
cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES'

View File

@ -0,0 +1,12 @@
# NOTE: this schema must contain all valid keys from its associated defaults
# file. It is used to validate user-provided overrides.
common:
apache_dir:
traceenable:
hardening:
allowed_http_methods:
modules_to_disable:
servertokens:
honor_cipher_order:
cipher_suite:

View File

@ -0,0 +1,38 @@
# NOTE: this file contains the default configuration for the 'mysql' hardening
# code. If you want to override any settings you must add them to a file
# called hardening.yaml in the root directory of your charm using the
# name 'mysql' as the root key followed by any of the following with new
# values.
hardening:
mysql-conf: /etc/mysql/my.cnf
hardening-conf: /etc/mysql/conf.d/hardening.cnf
security:
# @see http://www.symantec.com/connect/articles/securing-mysql-step-step
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot
chroot: None
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create
safe-user-create: 1
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth
secure-auth: 1
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links
skip-symbolic-links: 1
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database
skip-show-database: True
# @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile
local-infile: 0
# @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs
allow-suspicious-udfs: 0
# @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges
automatic-sp-privileges: 0
# @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv
secure-file-priv: /tmp

View File

@ -0,0 +1,15 @@
# NOTE: this schema must contain all valid keys from its associated defaults
# file. It is used to validate user-provided overrides.
hardening:
mysql-conf:
hardening-conf:
security:
chroot:
safe-user-create:
secure-auth:
skip-symbolic-links:
skip-show-database:
local-infile:
allow-suspicious-udfs:
automatic-sp-privileges:
secure-file-priv:

View File

@ -0,0 +1,68 @@
# NOTE: this file contains the default configuration for the 'os' hardening
# code. If you want to override any settings you must add them to a file
# called hardening.yaml in the root directory of your charm using the
# name 'os' as the root key followed by any of the following with new
# values.
general:
desktop_enable: False # (type:boolean)
environment:
extra_user_paths: []
umask: 027
root_path: /
auth:
pw_max_age: 60
# discourage password cycling
pw_min_age: 7
retries: 5
lockout_time: 600
timeout: 60
allow_homeless: False # (type:boolean)
pam_passwdqc_enable: True # (type:boolean)
pam_passwdqc_options: 'min=disabled,disabled,16,12,8'
root_ttys:
console
tty1
tty2
tty3
tty4
tty5
tty6
uid_min: 1000
gid_min: 1000
sys_uid_min: 100
sys_uid_max: 999
sys_gid_min: 100
sys_gid_max: 999
chfn_restrict:
security:
users_allow: []
suid_sgid_enforce: True # (type:boolean)
# user-defined blacklist and whitelist
suid_sgid_blacklist: []
suid_sgid_whitelist: []
# if this is True, remove any suid/sgid bits from files that were not in the whitelist
suid_sgid_dry_run_on_unknown: False # (type:boolean)
suid_sgid_remove_from_unknown: False # (type:boolean)
# remove packages with known issues
packages_clean: True # (type:boolean)
packages_list:
xinetd
inetd
ypserv
telnet-server
rsh-server
rsync
kernel_enable_module_loading: True # (type:boolean)
kernel_enable_core_dump: False # (type:boolean)
ssh_tmout: 300
sysctl:
kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128
kernel_enable_sysrq: False # (type:boolean)
forwarding: False # (type:boolean)
ipv6_enable: False # (type:boolean)
arp_restricted: True # (type:boolean)

View File

@ -0,0 +1,43 @@
# NOTE: this schema must contain all valid keys from its associated defaults
# file. It is used to validate user-provided overrides.
general:
desktop_enable:
environment:
extra_user_paths:
umask:
root_path:
auth:
pw_max_age:
pw_min_age:
retries:
lockout_time:
timeout:
allow_homeless:
pam_passwdqc_enable:
pam_passwdqc_options:
root_ttys:
uid_min:
gid_min:
sys_uid_min:
sys_uid_max:
sys_gid_min:
sys_gid_max:
chfn_restrict:
security:
users_allow:
suid_sgid_enforce:
suid_sgid_blacklist:
suid_sgid_whitelist:
suid_sgid_dry_run_on_unknown:
suid_sgid_remove_from_unknown:
packages_clean:
packages_list:
kernel_enable_module_loading:
kernel_enable_core_dump:
ssh_tmout:
sysctl:
kernel_secure_sysrq:
kernel_enable_sysrq:
forwarding:
ipv6_enable:
arp_restricted:

View File

@ -0,0 +1,49 @@
# NOTE: this file contains the default configuration for the 'ssh' hardening
# code. If you want to override any settings you must add them to a file
# called hardening.yaml in the root directory of your charm using the
# name 'ssh' as the root key followed by any of the following with new
# values.
common:
service_name: 'ssh'
network_ipv6_enable: False # (type:boolean)
ports: [22]
remote_hosts: []
client:
package: 'openssh-client'
cbc_required: False # (type:boolean)
weak_hmac: False # (type:boolean)
weak_kex: False # (type:boolean)
roaming: False
password_authentication: 'no'
server:
host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key',
'/etc/ssh/ssh_host_ecdsa_key']
cbc_required: False # (type:boolean)
weak_hmac: False # (type:boolean)
weak_kex: False # (type:boolean)
allow_root_with_key: False # (type:boolean)
allow_tcp_forwarding: 'no'
allow_agent_forwarding: 'no'
allow_x11_forwarding: 'no'
use_privilege_separation: 'sandbox'
listen_to: ['0.0.0.0']
use_pam: 'no'
package: 'openssh-server'
password_authentication: 'no'
alive_interval: '600'
alive_count: '3'
sftp_enable: False # (type:boolean)
sftp_group: 'sftponly'
sftp_chroot: '/home/%u'
deny_users: []
allow_users: []
deny_groups: []
allow_groups: []
print_motd: 'no'
print_last_log: 'no'
use_dns: 'no'
max_auth_tries: 2
max_sessions: 10

Some files were not shown because too many files have changed in this diff Show More