watchdog.sh

#!/bin/bash
trap "exit" INT TERM
trap "kill 0" EXIT

# Prepare
BACKGROUND_TASKS=()

if [[ "${USE_WATCHDOG}" =~ ^([nN][oO]|[nN])+$ ]]; then
  echo -e "$(date) - USE_WATCHDOG=n, skipping watchdog..."
  sleep 365d
  exec "$(readlink -f "$0")"
fi

# Checks write their corresponding container name into this pipe
if [[ ! -p /tmp/com_pipe ]]; then
  mkfifo /tmp/com_pipe
fi
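
# Expected environment (assumed to be provided by the mailcow compose setup, e.g. via
# mailcow.conf): USE_WATCHDOG, WATCHDOG_NOTIFY_EMAIL (optional), MAILCOW_HOSTNAME,
# LOG_LINES, DBUSER, DBPASS and DBNAME. All of them are referenced below.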

# Common functions
progress() {
  SERVICE=${1}
  TOTAL=${2}
  CURRENT=${3}
  DIFF=${4}
  [[ -z ${DIFF} ]] && DIFF=0
  [[ -z ${TOTAL} || -z ${CURRENT} ]] && return
  [[ ${CURRENT} -gt ${TOTAL} ]] && return
  [[ ${CURRENT} -lt 0 ]] && CURRENT=0
  PERCENT=$(( 200 * ${CURRENT} / ${TOTAL} % 2 + 100 * ${CURRENT} / ${TOTAL} ))
  redis-cli -h redis LPUSH WATCHDOG_LOG "{\"time\":\"$(date +%s)\",\"service\":\"${SERVICE}\",\"lvl\":\"${PERCENT}\",\"hpnow\":\"${CURRENT}\",\"hptotal\":\"${TOTAL}\",\"hpdiff\":\"${DIFF}\"}" > /dev/null
  log_msg "${SERVICE} health level: ${PERCENT}% (${CURRENT}/${TOTAL}), health trend: ${DIFF}" no_redis
}
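
# Note on the PERCENT formula above: plain integer division would always round down.
# The extra "200 * CURRENT / TOTAL % 2" term contributes 1 exactly when the true
# percentage has a fractional part of .5 or more, so the result is rounded to the
# nearest percent. Example: CURRENT=3, TOTAL=8 -> 600/8 % 2 + 300/8 = 1 + 37 = 38.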

log_msg() {
  if [[ ${2} != "no_redis" ]]; then
    redis-cli -h redis LPUSH WATCHDOG_LOG "{\"time\":\"$(date +%s)\",\"message\":\"$(printf '%s' "${1}" | \
      tr '%&;$"_[]{}-\r\n' ' ')\"}" > /dev/null
  fi
  redis-cli -h redis LTRIM WATCHDOG_LOG 0 ${LOG_LINES} > /dev/null
  echo $(date) $(printf '%s\n' "${1}")
}
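
# mail_error sends a notification mail straight to the MX of the recipient domain.
# Arguments: ${1} = recipient address, ${2} = name of the affected service.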

function mail_error() {
  [[ -z ${1} ]] && return 1
  [[ -z ${2} ]] && return 2
  RCPT_DOMAIN=$(echo ${1} | awk -F@ '{print $NF}')
  RCPT_MX=$(dig +short ${RCPT_DOMAIN} mx | sort -n | awk '{print $2; exit}')
  if [[ -z ${RCPT_MX} ]]; then
    log_msg "Cannot determine MX for ${1}, skipping email notification..."
    return 1
  fi
  ./smtp-cli --missing-modules-ok \
    --subject="Watchdog: ${2} service hit the error rate limit" \
    --body-plain="Service was restarted, please check your mailcow installation." \
    --to=${1} \
    --from="watchdog@${MAILCOW_HOSTNAME}" \
    --server="${RCPT_MX}" \
    --hello-host=${MAILCOW_HOSTNAME}
  log_msg "Sent notification email to ${1}"
}
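
# Resolve a compose service name to its container IP via the dockerapi container.
# Retries up to 5 times and returns the reserved address 240.0.0.0 as a sentinel if no
# valid IPv4 address could be determined, so the following checks fail cleanly.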

get_container_ip() {
  # ${1} is container
  CONTAINER_ID=
  CONTAINER_IP=
  LOOP_C=1
  until [[ ${CONTAINER_IP} =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]] || [[ ${LOOP_C} -gt 5 ]]; do
    sleep 1
    CONTAINER_ID=$(curl --silent http://dockerapi:8080/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"${1}\")) | .id")
    if [[ ! -z ${CONTAINER_ID} ]]; then
      CONTAINER_IP=$(curl --silent http://dockerapi:8080/containers/${CONTAINER_ID}/json | jq -r '.NetworkSettings.Networks[].IPAddress')
    fi
    LOOP_C=$((LOOP_C + 1))
  done
  [[ ${LOOP_C} -gt 5 ]] && echo 240.0.0.0 || echo ${CONTAINER_IP}
}
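
# The check functions below share one pattern: probe the service, add the probe exit
# codes to err_count and return 1 once err_count reaches THRESHOLD. A clean round heals
# the counter by 1, and a USR1 signal (sent after a container restart or after dockerapi
# recovers) forgives 2 errors so an agent does not trip again right away.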

# Check functions
nginx_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=16
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip nginx-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_ping -4 -H ${host_ip} -w 2000,10% -c 4000,100% -p2 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u / -p 8081 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Nginx" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}

mysql_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=12
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip mysql-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_mysql -H ${host_ip} -P 3306 -u ${DBUSER} -p ${DBPASS} -d ${DBNAME} 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_mysql_query -H ${host_ip} -P 3306 -u ${DBUSER} -p ${DBPASS} -d ${DBNAME} -q "SELECT COUNT(*) FROM information_schema.tables" 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "MySQL/MariaDB" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}

sogo_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=20
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip sogo-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u /WebServerResources/css/theme-default.css -p 9192 -R md-default-theme 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u /SOGo.index/ -p 20000 -R "SOGo\sGroupware" 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "SOGo" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}

postfix_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=16
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip postfix-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 589 -f "watchdog@invalid" -C "RCPT TO:null@localhost" -C DATA -C . -R 250 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 589 -S 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Postfix" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}

dovecot_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=24
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip dovecot-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 24 -f "watchdog@invalid" -C "RCPT TO:<watchdog@invalid>" -L -R "User doesn't exist" 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_imap -4 -H ${host_ip} -p 993 -S -e "OK " 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_imap -4 -H ${host_ip} -p 143 -e "OK " 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 10001 -e "VERSION" 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 4190 -e "Dovecot ready" 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Dovecot" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}
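
# The PHP-FPM agent probes FastCGI on ports 9000 and 9001; a failed cgi-fcgi | grep
# probe adds 2 to err_count (grep exit code * 2), while a failed ping adds the plugin
# exit code as usual.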

phpfpm_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=10
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip php-fpm-mailcow)
    err_c_cur=${err_count}
    cgi-fcgi -bind -connect ${host_ip}:9000 | grep "Content-type" 1>&2; err_count=$(( ${err_count} + ($? * 2)))
    cgi-fcgi -bind -connect ${host_ip}:9001 | grep "Content-type" 1>&2; err_count=$(( ${err_count} + ($? * 2)))
    /usr/lib/nagios/plugins/check_ping -4 -H ${host_ip} -w 2000,10% -c 4000,100% -p2 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "PHP-FPM" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}
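
# The Rspamd agent scans a dummy message through the local worker socket and expects a
# required_score of 9999 in the reply (presumably enforced by a watchdog-specific
# settings rule); any other value counts as a failed settings check.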

rspamd_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=10
  # Reduce error count by 2 after restarting an unhealthy container
  trap '[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))' USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    host_ip=$(get_container_ip rspamd-mailcow)
    err_c_cur=${err_count}
    SCORE=$(/usr/bin/curl -s --data-binary @- --unix-socket /rspamd-sock/rspamd.sock http://rspamd/scan -d '
To: null@localhost
From: watchdog@localhost
Empty
' | jq -rc .required_score)
    if [[ ${SCORE} != "9999" ]]; then
      echo "Rspamd settings check failed" 1>&2
      err_count=$(( ${err_count} + 1))
    else
      echo "Rspamd settings check succeeded" 1>&2
    fi
    /usr/lib/nagios/plugins/check_ping -4 -H ${host_ip} -w 2000,10% -c 4000,100% -p2 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Rspamd" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    diff_c=0
    sleep $(( ( RANDOM % 30 ) + 10 ))
  done
  return 1
}
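
# Each agent below runs one check function in a background subshell. When the check
# function returns non-zero (error limit reached), the agent logs the event, optionally
# notifies WATCHDOG_NOTIFY_EMAIL and writes the affected container name to /tmp/com_pipe
# for the restart loop at the bottom of this script. Every subshell PID is collected in
# BACKGROUND_TASKS so the monitors can signal all agents at once.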

# Create watchdog agents
(
while true; do
  if ! nginx_checks; then
    log_msg "Nginx hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "nginx-mailcow"
    echo nginx-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)

(
while true; do
  if ! mysql_checks; then
    log_msg "MySQL hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "mysql-mailcow"
    echo mysql-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)

(
while true; do
  if ! phpfpm_checks; then
    log_msg "PHP-FPM hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "php-fpm-mailcow"
    echo php-fpm-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)

(
while true; do
  if ! sogo_checks; then
    log_msg "SOGo hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "sogo-mailcow"
    echo sogo-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)

(
while true; do
  if ! postfix_checks; then
    log_msg "Postfix hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "postfix-mailcow"
    echo postfix-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)

(
while true; do
  if ! dovecot_checks; then
    log_msg "Dovecot hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "dovecot-mailcow"
    echo dovecot-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)

(
while true; do
  if ! rspamd_checks; then
    log_msg "Rspamd hit error limit"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${WATCHDOG_NOTIFY_EMAIL}" "rspamd-mailcow"
    echo rspamd-mailcow > /tmp/com_pipe
  fi
done
) &
BACKGROUND_TASKS+=($!)

# Monitor watchdog agents, stop the script when an agent dies and wait for a respawn by Docker (restart: always)
(
while true; do
  for bg_task in ${BACKGROUND_TASKS[*]}; do
    if ! kill -0 ${bg_task} 1>&2; then
      log_msg "Worker ${bg_task} died, stopping watchdog and waiting for respawn..."
      kill -TERM 1
    fi
    sleep 10
  done
done
) &
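
# While dockerapi is unreachable, all agents are paused with SIGSTOP so they do not rack
# up errors against containers whose IPs can no longer be resolved; once dockerapi
# answers again they are resumed with SIGCONT and sent USR1 to forgive 2 errors.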

# Monitor dockerapi
(
while true; do
  while nc -z dockerapi 8080; do
    sleep 3
  done
  log_msg "Cannot find dockerapi-mailcow, waiting to recover..."
  kill -STOP ${BACKGROUND_TASKS[*]}
  until nc -z dockerapi 8080; do
    sleep 3
  done
  kill -CONT ${BACKGROUND_TASKS[*]}
  kill -USR1 ${BACKGROUND_TASKS[*]}
done
) &
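
# Restart protocol: a service name read from /tmp/com_pipe pauses all agents, the
# matching container ID is looked up via dockerapi and a restart is requested with a
# POST to /containers/<id>/restart; after 30 seconds the agents are resumed and sent
# USR1 so the restarted service gets a grace period before new errors are counted.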

# Restart container when threshold limit reached
while true; do
  CONTAINER_ID=
  read com_pipe_answer </tmp/com_pipe
  if [[ ${com_pipe_answer} =~ .+-mailcow ]]; then
    kill -STOP ${BACKGROUND_TASKS[*]}
    sleep 3
    CONTAINER_ID=$(curl --silent http://dockerapi:8080/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"${com_pipe_answer}\")) | .id")
    if [[ ! -z ${CONTAINER_ID} ]]; then
      log_msg "Sending restart command to ${CONTAINER_ID}..."
      curl --silent -XPOST http://dockerapi:8080/containers/${CONTAINER_ID}/restart
    fi
    log_msg "Wait for restarted container to settle and continue watching..."
    sleep 30s
    kill -CONT ${BACKGROUND_TASKS[*]}
    kill -USR1 ${BACKGROUND_TASKS[*]}
  fi
done
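
# Manual test (an assumption, not part of the original flow): from inside the watchdog
# container, writing a service name into the pipe should exercise the same restart path:
#   echo nginx-mailcow > /tmp/com_pipe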