watchdog.sh

#!/bin/bash
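# On INT/TERM exit cleanly; on exit, kill the whole process group so all background agents stop with the script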
trap "exit" INT TERM
trap "kill 0" EXIT
# Prepare
BACKGROUND_TASKS=()
if [[ "${USE_WATCHDOG}" =~ ^([nN][oO]|[nN])+$ ]]; then
  echo -e "$(date) - USE_WATCHDOG=n, skipping watchdog..."
  sleep 365d
  exec $(readlink -f "$0")
fi
# Check functions write the name of their corresponding container into this pipe
if [[ ! -p /tmp/com_pipe ]]; then
  mkfifo /tmp/com_pipe
fi
# Common functions
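# progress <service> <total> <current> [<diff>]: compute a health percentage and push it to the WATCHDOG_LOG list in Redis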
progress() {
  SERVICE=${1}
  TOTAL=${2}
  CURRENT=${3}
  DIFF=${4}
  [[ -z ${DIFF} ]] && DIFF=0
  [[ -z ${TOTAL} || -z ${CURRENT} ]] && return
  [[ ${CURRENT} -gt ${TOTAL} ]] && return
  [[ ${CURRENT} -lt 0 ]] && CURRENT=0
  PERCENT=$(( 200 * ${CURRENT} / ${TOTAL} % 2 + 100 * ${CURRENT} / ${TOTAL} ))
  redis-cli -h redis LPUSH WATCHDOG_LOG "{\"time\":\"$(date +%s)\",\"service\":\"${SERVICE}\",\"lvl\":\"${PERCENT}\",\"hpnow\":\"${CURRENT}\",\"hptotal\":\"${TOTAL}\",\"hpdiff\":\"${DIFF}\"}" > /dev/null
  log_msg "${SERVICE} health level: ${PERCENT}% (${CURRENT}/${TOTAL}), health trend: ${DIFF}" no_redis
}
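# log_msg <message> [no_redis]: print to stdout and, unless no_redis is given, push to the WATCHDOG_LOG list (trimmed to 10000 entries)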
log_msg() {
  if [[ ${2} != "no_redis" ]]; then
    redis-cli -h redis LPUSH WATCHDOG_LOG "{\"time\":\"$(date +%s)\",\"message\":\"$(printf '%s' "${1}" | \
      tr '%&;$"_[]{}-\r\n' ' ')\"}" > /dev/null
  fi
  redis-cli -h redis LTRIM WATCHDOG_LOG 0 9999 > /dev/null
  echo $(date) $(printf '%s\n' "${1}")
}
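# mail_error <recipient> <service>: look up the recipient's MX and send a plain-text notification via smtp-cli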
function mail_error() {
  [[ -z ${1} ]] && return 1
  [[ -z ${2} ]] && return 2
  RCPT_DOMAIN=$(echo ${1} | awk -F @ '{print $NF}')
  RCPT_MX=$(dig +short ${RCPT_DOMAIN} mx | sort -n | awk '{print $2; exit}')
  if [[ -z ${RCPT_MX} ]]; then
    log_msg "Cannot determine MX for ${1}, skipping email notification..."
    return 1
  fi
  ./smtp-cli --missing-modules-ok \
    --subject="Watchdog: ${2} service hit the error rate limit" \
    --body-plain="Service was restarted, please check your mailcow installation." \
    --to=${1} \
    --from="watchdog@${MAILCOW_HOSTNAME}" \
    --server="${RCPT_MX}" \
    --hello-host=${MAILCOW_HOSTNAME}
  log_msg "Sent notification email to ${1}"
}
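# get_container_ip <service>: resolve a compose service name to its container IP via dockerapi, falling back to 240.0.0.0 after 5 attempts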
get_container_ip() {
  # ${1} is container
  CONTAINER_ID=
  CONTAINER_IP=
  LOOP_C=1
  until [[ ${CONTAINER_IP} =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]] || [[ ${LOOP_C} -gt 5 ]]; do
    sleep 1
    CONTAINER_ID=$(curl --silent http://dockerapi:8080/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"${1}\")) | .id")
    if [[ ! -z ${CONTAINER_ID} ]]; then
      CONTAINER_IP=$(curl --silent http://dockerapi:8080/containers/${CONTAINER_ID}/json | jq -r '.NetworkSettings.Networks[].IPAddress')
    fi
    LOOP_C=$((LOOP_C + 1))
  done
  [[ ${LOOP_C} -gt 5 ]] && echo 240.0.0.0 || echo ${CONTAINER_IP}
}
# Check functions
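# Each check below loops until err_count reaches THRESHOLD: probe exit codes are added to err_count,
# a fully clean pass decreases it by one, progress() reports the remaining health and the function
# returns 1 once the threshold is hit. USR1 (sent after a restart or when dockerapi recovers)
# reduces the count by two.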
nginx_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=16
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip nginx-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_ping -4 -H ${host_ip} -w 2000,10% -c 4000,100% -p2 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u / -p 8081 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Nginx" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}
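# MySQL/MariaDB: connection check plus a simple query against information_schema on port 3306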
mysql_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=12
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip mysql-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_mysql -H ${host_ip} -P 3306 -u ${DBUSER} -p ${DBPASS} -d ${DBNAME} 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_mysql_query -H ${host_ip} -P 3306 -u ${DBUSER} -p ${DBPASS} -d ${DBNAME} -q "SELECT COUNT(*) FROM information_schema.tables" 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "MySQL/MariaDB" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}
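# SOGo: fetch the default theme CSS on port 9192 and the SOGo login page on port 20000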
sogo_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=20
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip sogo-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u /WebServerResources/css/theme-default.css -p 9192 -R md-default-theme 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u /SOGo.index/ -p 20000 -R "SOGo\sGroupware" 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "SOGo" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}
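# Postfix: send a test message to null@localhost and check STARTTLS, both on port 589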
postfix_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=16
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip postfix-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 589 -f watchdog -C "RCPT TO:null@localhost" -C DATA -C . -R 250 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 589 -S 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Postfix" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}
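# Dovecot: LMTP on 24, IMAPS on 993, IMAP on 143, a Dovecot service banner on TCP 10001 and ManageSieve on 4190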
dovecot_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=24
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip dovecot-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 24 -f "watchdog" -C "RCPT TO:<watchdog@invalid>" -L -R "User doesn't exist" 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_imap -4 -H ${host_ip} -p 993 -S -e "OK " 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_imap -4 -H ${host_ip} -p 143 -e "OK " 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 10001 -e "VERSION" 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 4190 -e "Dovecot ready" 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Dovecot" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}
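# PHP-FPM: FastCGI connect on port 9000 (errors counted twice) plus an ICMP ping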
phpfpm_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=10
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip php-fpm-mailcow)
    err_c_cur=${err_count}
    cgi-fcgi -bind -connect ${host_ip}:9000 | grep "Content-type" 1>&2; err_count=$(( ${err_count} + ($? * 2)))
    /usr/lib/nagios/plugins/check_ping -4 -H ${host_ip} -w 2000,10% -c 4000,100% -p2 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "PHP-FPM" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}
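# Rspamd: POST an empty test message to /scan on port 11333 and expect required_score 9999, plus an ICMP ping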
rspamd_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=10
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip rspamd-mailcow)
    err_c_cur=${err_count}
    SCORE=$(curl --silent ${host_ip}:11333/scan -d '
To: null@localhost
From: watchdog@localhost
Empty
' | jq -rc .required_score)
    if [[ ${SCORE} != "9999" ]]; then
      echo "Rspamd settings check failed" 1>&2
      err_count=$(( ${err_count} + 1))
    else
      echo "Rspamd settings check succeeded" 1>&2
    fi
    /usr/lib/nagios/plugins/check_ping -4 -H ${host_ip} -w 2000,10% -c 4000,100% -p2 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Rspamd" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}
# Create watchdog agents
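# Each agent runs its check in a background subshell; when the check returns (threshold hit),
# the agent logs the event, optionally notifies WATCHDOG_NOTIFY_EMAIL and writes the container
# name to /tmp/com_pipe for the restart loop below.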
(
while true; do
  if ! nginx_checks; then
    log_msg "Nginx hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "nginx-mailcow"
    echo nginx-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)
(
while true; do
  if ! mysql_checks; then
    log_msg "MySQL hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "mysql-mailcow"
    echo mysql-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)
(
while true; do
  if ! phpfpm_checks; then
    log_msg "PHP-FPM hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "php-fpm-mailcow"
    echo php-fpm-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)
(
while true; do
  if ! sogo_checks; then
    log_msg "SOGo hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "sogo-mailcow"
    echo sogo-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)
(
while true; do
  if ! postfix_checks; then
    log_msg "Postfix hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "postfix-mailcow"
    echo postfix-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)
(
while true; do
  if ! dovecot_checks; then
    log_msg "Dovecot hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "dovecot-mailcow"
    echo dovecot-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)
(
while true; do
  if ! rspamd_checks; then
    log_msg "Rspamd hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "rspamd-mailcow"
    echo rspamd-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)
# Monitor watchdog agents, stop the script when an agent fails and wait for respawn by Docker (restart:always:n)
(
while true; do
  for bg_task in ${BACKGROUND_TASKS[*]}; do
    if ! kill -0 ${bg_task} 1>&2; then
      log_msg "Worker ${bg_task} died, stopping watchdog and waiting for respawn..."
      kill -TERM 1
    fi
    sleep 10
  done
done
) &
# Monitor dockerapi
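# While dockerapi is unreachable, pause all agents with SIGSTOP; once it is back, resume them
# with SIGCONT and send USR1 so each check forgives two errors.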
(
while true; do
  while nc -z dockerapi 8080; do
    sleep 3
  done
  log_msg "Cannot find dockerapi-mailcow, waiting to recover..."
  kill -STOP ${BACKGROUND_TASKS[*]}
  until nc -z dockerapi 8080; do
    sleep 3
  done
  kill -CONT ${BACKGROUND_TASKS[*]}
  kill -USR1 ${BACKGROUND_TASKS[*]}
done
) &
# Restart container when threshold limit reached
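# Read a container name from the pipe, pause the agents, ask dockerapi to restart the container,
# give it 30 seconds to settle, then resume the agents and send USR1 to forgive two errors.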
while true; do
  CONTAINER_ID=
  read com_pipe_answer </tmp/com_pipe
  if [[ ${com_pipe_answer} =~ .+-mailcow ]]; then
    kill -STOP ${BACKGROUND_TASKS[*]}
    sleep 3
    CONTAINER_ID=$(curl --silent http://dockerapi:8080/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"${com_pipe_answer}\")) | .id")
    if [[ ! -z ${CONTAINER_ID} ]]; then
      log_msg "Sending restart command to ${CONTAINER_ID}..."
      curl --silent -XPOST http://dockerapi:8080/containers/${CONTAINER_ID}/restart
    fi
    log_msg "Wait for restarted container to settle and continue watching..."
    sleep 30s
    kill -CONT ${BACKGROUND_TASKS[*]}
    kill -USR1 ${BACKGROUND_TASKS[*]}
  fi
done