#!/bin/bash
trap "exit" INT TERM
trap "kill 0" EXIT
# Prepare
BACKGROUND_TASKS=()
echo "Waiting for containers to settle..."
sleep 30
if [[ "${USE_WATCHDOG}" =~ ^([nN][oO]|[nN])+$ ]]; then
  echo -e "$(date) - USE_WATCHDOG=n, skipping watchdog..."
  sleep 365d
  exec $(readlink -f "$0")
fi
# Check functions write the name of their corresponding container into this pipe
if [[ ! -p /tmp/com_pipe ]]; then
  mkfifo /tmp/com_pipe
fi
# Wait for containers
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
  echo "Waiting for SQL..."
  sleep 2
done
# Do not attempt to write to slave
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
else
  REDIS_CMDLINE="redis-cli -h redis -p 6379"
fi
until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
  echo "Waiting for Redis..."
  sleep 2
done
${REDIS_CMDLINE} DEL F2B_RES > /dev/null
# Common functions
get_ipv6(){
  local IPV6=
  local IPV6_SRCS=
  local TRY=
  IPV6_SRCS[0]="ip6.korves.net"
  IPV6_SRCS[1]="ip6.mailcow.email"
  until [[ ! -z ${IPV6} ]] || [[ ${TRY} -ge 10 ]]; do
    IPV6=$(curl --connect-timeout 3 -m 10 -L6s ${IPV6_SRCS[$RANDOM % ${#IPV6_SRCS[@]} ]} | grep "^\([0-9a-fA-F]\{0,4\}:\)\{1,7\}[0-9a-fA-F]\{0,4\}$")
    [[ ! -z ${TRY} ]] && sleep 1
    TRY=$((TRY+1))
  done
  echo ${IPV6}
}
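# array_diff <result> <array1> <array2> stores every element of <array1> that is not
# present in <array2> in <result>; used by fail2ban_checks to detect newly added bans.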
array_diff() {
  # https://stackoverflow.com/questions/2312762, Alex Offshore
  eval local ARR1=\(\"\${$2[@]}\"\)
  eval local ARR2=\(\"\${$3[@]}\"\)
  local IFS=$'\n'
  mapfile -t $1 < <(comm -23 <(echo "${ARR1[*]}" | sort) <(echo "${ARR2[*]}" | sort))
}
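# progress <service> <total> <current> <diff> pushes a health entry to the WATCHDOG_LOG
# list in Redis and logs the remaining health as a percentage; it returns 10 once the
# health level has dropped to 0, signalling a dead service to the caller.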
progress() {
  SERVICE=${1}
  TOTAL=${2}
  CURRENT=${3}
  DIFF=${4}
  [[ -z ${DIFF} ]] && DIFF=0
  [[ -z ${TOTAL} || -z ${CURRENT} ]] && return
  [[ ${CURRENT} -gt ${TOTAL} ]] && return
  [[ ${CURRENT} -lt 0 ]] && CURRENT=0
  PERCENT=$(( 200 * ${CURRENT} / ${TOTAL} % 2 + 100 * ${CURRENT} / ${TOTAL} ))
  ${REDIS_CMDLINE} LPUSH WATCHDOG_LOG "{\"time\":\"$(date +%s)\",\"service\":\"${SERVICE}\",\"lvl\":\"${PERCENT}\",\"hpnow\":\"${CURRENT}\",\"hptotal\":\"${TOTAL}\",\"hpdiff\":\"${DIFF}\"}" > /dev/null
  log_msg "${SERVICE} health level: ${PERCENT}% (${CURRENT}/${TOTAL}), health trend: ${DIFF}" no_redis
  # Return 10 to indicate a dead service
  [ ${CURRENT} -le 0 ] && return 10
}
log_msg() {
  if [[ ${2} != "no_redis" ]]; then
    ${REDIS_CMDLINE} LPUSH WATCHDOG_LOG "{\"time\":\"$(date +%s)\",\"message\":\"$(printf '%s' "${1}" | \
      tr '\r\n%&;$"_[]{}-' ' ')\"}" > /dev/null
  fi
  echo $(date) $(printf '%s\n' "${1}")
}
function mail_error() {
  [[ -z ${1} ]] && return 1
  # If exists, body will be the content of "/tmp/${1}", even if ${2} is set
  [[ -z ${2} ]] && BODY="Service was restarted on $(date), please check your mailcow installation." || BODY="$(date) - ${2}"
  WATCHDOG_NOTIFY_EMAIL=$(echo "${WATCHDOG_NOTIFY_EMAIL}" | sed 's/"//;s|"$||')
  # Some exceptions for subject and body formats
  if [[ ${1} == "fail2ban" ]]; then
    SUBJECT="${BODY}"
    BODY="Please see netfilter-mailcow for more details and triggered rules."
  else
    SUBJECT="Watchdog ALERT: ${1}"
  fi
  IFS=',' read -r -a MAIL_RCPTS <<< "${WATCHDOG_NOTIFY_EMAIL}"
  for rcpt in "${MAIL_RCPTS[@]}"; do
    RCPT_DOMAIN=
    #RCPT_MX=
    RCPT_DOMAIN=$(echo ${rcpt} | awk -F @ {'print $NF'})
    # Latest smtp-cli looks up mx via dns
    #RCPT_MX=$(dig +short ${RCPT_DOMAIN} mx | sort -n | awk '{print $2; exit}')
    #if [[ -z ${RCPT_MX} ]]; then
    #  log_msg "Cannot determine MX for ${rcpt}, skipping email notification..."
    #  return 1
    #fi
    [ -f "/tmp/${1}" ] && BODY="/tmp/${1}"
    timeout 10s ./smtp-cli --missing-modules-ok \
      --charset=UTF-8 \
      --subject="${SUBJECT}" \
      --body-plain="${BODY}" \
      --add-header="X-Priority: 1" \
      --to=${rcpt} \
      --from="watchdog@${MAILCOW_HOSTNAME}" \
      --hello-host=${MAILCOW_HOSTNAME} \
      --ipv4
      #--server="${RCPT_MX}"
    log_msg "Sent notification email to ${rcpt}"
  done
}
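# get_container_ip <service> resolves a container's IPv4 address either via DNS or via
# the dockerapi container (IP_BY_DOCKER_API=1), accepting only addresses inside
# ${IPV4_NETWORK}; after five failed attempts it prints the placeholder 240.0.0.0.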
get_container_ip() {
  # ${1} is container
  CONTAINER_ID=()
  CONTAINER_IPS=()
  CONTAINER_IP=
  LOOP_C=1
  until [[ ${CONTAINER_IP} =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]] || [[ ${LOOP_C} -gt 5 ]]; do
    if [ ${IP_BY_DOCKER_API} -eq 0 ]; then
      CONTAINER_IP=$(dig a "${1}" +short)
    else
      sleep 0.5
      # get long container id for exact match
      CONTAINER_ID=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], id: .Id}" | jq -rc "select( .name | tostring == \"${1}\") | .id"))
      # returned id can have multiple elements (if scaled), shuffle for random test
      CONTAINER_ID=($(printf "%s\n" "${CONTAINER_ID[@]}" | shuf))
      if [[ ! -z ${CONTAINER_ID} ]]; then
        for matched_container in "${CONTAINER_ID[@]}"; do
          CONTAINER_IPS=($(curl --silent --insecure https://dockerapi/containers/${matched_container}/json | jq -r '.NetworkSettings.Networks[].IPAddress'))
          for ip_match in "${CONTAINER_IPS[@]}"; do
            # grep will do nothing if one of these vars is empty
            [[ -z ${ip_match} ]] && continue
            [[ -z ${IPV4_NETWORK} ]] && continue
            # only return ips that are part of our network
            if ! grep -q ${IPV4_NETWORK} <(echo ${ip_match}); then
              continue
            else
              CONTAINER_IP=${ip_match}
              break
            fi
          done
          [[ ! -z ${CONTAINER_IP} ]] && break
        done
      fi
    fi
    LOOP_C=$((LOOP_C + 1))
  done
  [[ ${LOOP_C} -gt 5 ]] && echo 240.0.0.0 || echo ${CONTAINER_IP}
}
# One-time check
if grep -qi "$(echo ${IPV6_NETWORK} | cut -d: -f1-3)" <<< "$(ip a s)"; then
  if [[ -z "$(get_ipv6)" ]]; then
    mail_error "ipv6-config" "enable_ipv6 is true in docker-compose.yml, but an IPv6 link could not be established. Please verify your IPv6 connection."
  fi
fi
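# All *_checks functions below share the same pattern: probe the service, accumulate
# failures in err_count, report the remaining health via progress() and sleep between
# runs (shorter when the service is considered dead). A USR1 trap lowers err_count by 2
# after a container restart; the function returns 1 only once err_count reaches the
# service's threshold.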
external_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${EXTERNAL_CHECKS_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  GUID=$(mysql -u${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'GUID'" -BN)
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    err_c_cur=${err_count}
    CHECK_REPONSE="$(curl --connect-timeout 3 -m 10 -4 -s https://checks.mailcow.email -X POST -dguid=${GUID} 2> /dev/null)"
    if [[ ! -z "${CHECK_REPONSE}" ]] && [[ "$(echo ${CHECK_REPONSE} | jq -r .response)" == "critical" ]]; then
      echo ${CHECK_REPONSE} | jq -r .out > /tmp/external_checks
      err_count=$(( ${err_count} + 1 ))
    fi
    CHECK_REPONSE6="$(curl --connect-timeout 3 -m 10 -6 -s https://checks.mailcow.email -X POST -dguid=${GUID} 2> /dev/null)"
    if [[ ! -z "${CHECK_REPONSE6}" ]] && [[ "$(echo ${CHECK_REPONSE6} | jq -r .response)" == "critical" ]]; then
      echo ${CHECK_REPONSE6} | jq -r .out > /tmp/external_checks
      err_count=$(( ${err_count} + 1 ))
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "External checks" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 60
    else
      diff_c=0
      sleep $(( ( RANDOM % 20 ) + 120 ))
    fi
  done
  return 1
}
nginx_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${NGINX_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/nginx-mailcow; echo "$(tail -50 /tmp/nginx-mailcow)" > /tmp/nginx-mailcow
    host_ip=$(get_container_ip nginx-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u / -p 8081 2>> /tmp/nginx-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Nginx" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
unbound_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${UNBOUND_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/unbound-mailcow; echo "$(tail -50 /tmp/unbound-mailcow)" > /tmp/unbound-mailcow
    host_ip=$(get_container_ip unbound-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_dns -s ${host_ip} -H stackoverflow.com 2>> /tmp/unbound-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    DNSSEC=$(dig com +dnssec | egrep 'flags:.+ad')
    if [[ -z ${DNSSEC} ]]; then
      echo "DNSSEC failure" 2>> /tmp/unbound-mailcow 1>&2
      err_count=$(( ${err_count} + 1))
    else
      echo "DNSSEC check succeeded" 2>> /tmp/unbound-mailcow 1>&2
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Unbound" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
redis_checks() {
  # A check for the local redis container
  err_count=0
  diff_c=0
  THRESHOLD=${REDIS_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/redis-mailcow; echo "$(tail -50 /tmp/redis-mailcow)" > /tmp/redis-mailcow
    host_ip=$(get_container_ip redis-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_tcp -4 -H redis-mailcow -p 6379 -E -s "PING\n" -q "QUIT" -e "PONG" 2>> /tmp/redis-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Redis" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
mysql_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${MYSQL_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/mysql-mailcow; echo "$(tail -50 /tmp/mysql-mailcow)" > /tmp/mysql-mailcow
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_mysql -s /var/run/mysqld/mysqld.sock -u ${DBUSER} -p ${DBPASS} -d ${DBNAME} 2>> /tmp/mysql-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_mysql_query -s /var/run/mysqld/mysqld.sock -u ${DBUSER} -p ${DBPASS} -d ${DBNAME} -q "SELECT COUNT(*) FROM information_schema.tables" 2>> /tmp/mysql-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "MySQL/MariaDB" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
mysql_repl_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${MYSQL_REPLICATION_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/mysql_repl_checks; echo "$(tail -50 /tmp/mysql_repl_checks)" > /tmp/mysql_repl_checks
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_mysql_slavestatus.sh -S /var/run/mysqld/mysqld.sock -u root -p ${DBROOT} 2>> /tmp/mysql_repl_checks 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "MySQL/MariaDB replication" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 60
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
sogo_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${SOGO_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/sogo-mailcow; echo "$(tail -50 /tmp/sogo-mailcow)" > /tmp/sogo-mailcow
    host_ip=$(get_container_ip sogo-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u /SOGo.index/ -p 20000 -R "SOGo\.MainUI" 2>> /tmp/sogo-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "SOGo" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
postfix_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${POSTFIX_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/postfix-mailcow; echo "$(tail -50 /tmp/postfix-mailcow)" > /tmp/postfix-mailcow
    host_ip=$(get_container_ip postfix-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 589 -f "watchdog@invalid" -C "RCPT TO:watchdog@localhost" -C DATA -C . -R 250 2>> /tmp/postfix-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 589 -S 2>> /tmp/postfix-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Postfix" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
clamd_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${CLAMD_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/clamd-mailcow; echo "$(tail -50 /tmp/clamd-mailcow)" > /tmp/clamd-mailcow
    host_ip=$(get_container_ip clamd-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_clamd -4 -H ${host_ip} 2>> /tmp/clamd-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Clamd" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 120 ) + 20 ))
    fi
  done
  return 1
}
dovecot_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${DOVECOT_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/dovecot-mailcow; echo "$(tail -50 /tmp/dovecot-mailcow)" > /tmp/dovecot-mailcow
    host_ip=$(get_container_ip dovecot-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 24 -f "watchdog@invalid" -C "RCPT TO:<watchdog@invalid>" -L -R "User doesn't exist" 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_imap -4 -H ${host_ip} -p 993 -S -e "OK " 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_imap -4 -H ${host_ip} -p 143 -e "OK " 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 10001 -e "VERSION" 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 4190 -e "Dovecot ready" 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Dovecot" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
dovecot_repl_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${DOVECOT_REPL_THRESHOLD}
  D_REPL_STATUS=$(redis-cli --raw -h redis GET DOVECOT_REPL_HEALTH)
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    err_c_cur=${err_count}
    D_REPL_STATUS=$(redis-cli --raw -h redis GET DOVECOT_REPL_HEALTH)
    if [[ "${D_REPL_STATUS}" != "1" ]]; then
      err_count=$(( ${err_count} + 1 ))
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Dovecot replication" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 60
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
phpfpm_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${PHPFPM_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/php-fpm-mailcow; echo "$(tail -50 /tmp/php-fpm-mailcow)" > /tmp/php-fpm-mailcow
    host_ip=$(get_container_ip php-fpm-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_tcp -H ${host_ip} -p 9001 2>> /tmp/php-fpm-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_tcp -H ${host_ip} -p 9002 2>> /tmp/php-fpm-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "PHP-FPM" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
ratelimit_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${RATELIMIT_THRESHOLD}
  RL_LOG_STATUS=$(redis-cli -h redis LRANGE RL_LOG 0 0 | jq .qid)
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    err_c_cur=${err_count}
    RL_LOG_STATUS_PREV=${RL_LOG_STATUS}
    RL_LOG_STATUS=$(redis-cli -h redis LRANGE RL_LOG 0 0 | jq .qid)
    if [[ ${RL_LOG_STATUS_PREV} != ${RL_LOG_STATUS} ]]; then
      err_count=$(( ${err_count} + 1 ))
      echo 'Last 10 applied ratelimits (may overlap with previous reports).' > /tmp/ratelimit
      echo 'Full ratelimit buckets can be emptied by deleting the ratelimit hash from within mailcow UI (see /debug -> Protocols -> Ratelimit):' >> /tmp/ratelimit
      echo >> /tmp/ratelimit
      redis-cli --raw -h redis LRANGE RL_LOG 0 10 | jq . >> /tmp/ratelimit
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Ratelimit" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
mailq_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${MAILQ_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/mail_queue_status; echo "$(tail -50 /tmp/mail_queue_status)" > /tmp/mail_queue_status
    MAILQ_LOG_STATUS=$(find /var/spool/postfix/deferred -type f | wc -l)
    echo "Mail queue contains ${MAILQ_LOG_STATUS} items (critical limit is ${MAILQ_CRIT}) at $(date)" >> /tmp/mail_queue_status
    err_c_cur=${err_count}
    if [ ${MAILQ_LOG_STATUS} -ge ${MAILQ_CRIT} ]; then
      err_count=$(( ${err_count} + 1 ))
      echo "Mail queue contains ${MAILQ_LOG_STATUS} items (critical limit is ${MAILQ_CRIT}) at $(date)" >> /tmp/mail_queue_status
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Mail queue" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 60
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
fail2ban_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${FAIL2BAN_THRESHOLD}
  F2B_LOG_STATUS=($(${REDIS_CMDLINE} --raw HKEYS F2B_ACTIVE_BANS))
  F2B_RES=
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    err_c_cur=${err_count}
    F2B_LOG_STATUS_PREV=(${F2B_LOG_STATUS[@]})
    F2B_LOG_STATUS=($(${REDIS_CMDLINE} --raw HKEYS F2B_ACTIVE_BANS))
    array_diff F2B_RES F2B_LOG_STATUS F2B_LOG_STATUS_PREV
    if [[ ! -z "${F2B_RES}" ]]; then
      err_count=$(( ${err_count} + 1 ))
      echo -n "${F2B_RES[@]}" | tr -cd "[a-fA-F0-9.:/] " | timeout 3s ${REDIS_CMDLINE} -x SET F2B_RES > /dev/null
      if [ $? -ne 0 ]; then
        ${REDIS_CMDLINE} -x DEL F2B_RES
      fi
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Fail2ban" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
acme_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${ACME_THRESHOLD}
  ACME_LOG_STATUS=$(redis-cli -h redis GET ACME_FAIL_TIME)
  if [[ -z "${ACME_LOG_STATUS}" ]]; then
    ${REDIS_CMDLINE} SET ACME_FAIL_TIME 0
    ACME_LOG_STATUS=0
  fi
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    err_c_cur=${err_count}
    ACME_LOG_STATUS_PREV=${ACME_LOG_STATUS}
    ACME_LC=0
    until [[ ! -z ${ACME_LOG_STATUS} ]] || [ ${ACME_LC} -ge 3 ]; do
      ACME_LOG_STATUS=$(redis-cli -h redis GET ACME_FAIL_TIME 2> /dev/null)
      sleep 3
      ACME_LC=$((ACME_LC+1))
    done
    if [[ ${ACME_LOG_STATUS_PREV} != ${ACME_LOG_STATUS} ]]; then
      err_count=$(( ${err_count} + 1 ))
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "ACME" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
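# ipv6nat_checks counts an error when ipv6nat-mailcow was started less than 30 seconds
# after the most recently started sibling container, so the main loop can restart it
# (without sending a notification).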
ipv6nat_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${IPV6NAT_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    err_c_cur=${err_count}
    CONTAINERS=$(curl --silent --insecure https://dockerapi/containers/json)
    IPV6NAT_CONTAINER_ID=$(echo ${CONTAINERS} | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"ipv6nat-mailcow\")) | .id")
    if [[ ! -z ${IPV6NAT_CONTAINER_ID} ]]; then
      LATEST_STARTED="$(echo ${CONTAINERS} | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], StartedAt: .State.StartedAt}" | jq -rc "select( .name | tostring | contains(\"ipv6nat-mailcow\") | not)" | jq -rc .StartedAt | xargs -n1 date +%s -d | sort | tail -n1)"
      LATEST_IPV6NAT="$(echo ${CONTAINERS} | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], StartedAt: .State.StartedAt}" | jq -rc "select( .name | tostring | contains(\"ipv6nat-mailcow\"))" | jq -rc .StartedAt | xargs -n1 date +%s -d | sort | tail -n1)"
      DIFFERENCE_START_TIME=$(expr ${LATEST_IPV6NAT} - ${LATEST_STARTED} 2>/dev/null)
      if [[ "${DIFFERENCE_START_TIME}" -lt 30 ]]; then
        err_count=$(( ${err_count} + 1 ))
      fi
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "IPv6 NAT" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 30
    else
      diff_c=0
      sleep 300
    fi
  done
  return 1
}
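# rspamd_checks feeds a minimal test message to Rspamd via its socket and expects the
# reported required_score to be 9999; any other value counts as a failed settings check.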
rspamd_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${RSPAMD_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/rspamd-mailcow; echo "$(tail -50 /tmp/rspamd-mailcow)" > /tmp/rspamd-mailcow
    host_ip=$(get_container_ip rspamd-mailcow)
    err_c_cur=${err_count}
    SCORE=$(echo 'To: null@localhost
From: watchdog@localhost
Empty
' | /usr/bin/curl -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/scan | jq -rc .default.required_score)
    if [[ ${SCORE} != "9999" ]]; then
      echo "Rspamd settings check failed" 2>> /tmp/rspamd-mailcow 1>&2
      err_count=$(( ${err_count} + 1))
    else
      echo "Rspamd settings check succeeded" 2>> /tmp/rspamd-mailcow 1>&2
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Rspamd" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
olefy_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${OLEFY_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/olefy-mailcow; echo "$(tail -50 /tmp/olefy-mailcow)" > /tmp/olefy-mailcow
    host_ip=$(get_container_ip olefy-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 10055 -s "PING\n" 2>> /tmp/olefy-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Olefy" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}
# Notify about start
if [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]]; then
  mail_error "watchdog-mailcow" "Watchdog started monitoring mailcow."
fi
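# Each watchdog agent below runs its check function in an endless subshell loop. When a
# check function returns (its threshold was reached), the agent writes the affected
# service or check name to /tmp/com_pipe, which the main loop at the end of this script
# reads to decide on restarts and notifications.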
# Create watchdog agents
(
  while true; do
    if ! nginx_checks; then
      log_msg "Nginx hit error limit"
      echo nginx-mailcow > /tmp/com_pipe
    fi
  done
) &
PID=$!
echo "Spawned nginx_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})
if [[ ${WATCHDOG_EXTERNAL_CHECKS} =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  (
    while true; do
      if ! external_checks; then
        log_msg "External checks hit error limit"
        echo external_checks > /tmp/com_pipe
      fi
    done
  ) &
  PID=$!
  echo "Spawned external_checks with PID ${PID}"
  BACKGROUND_TASKS+=(${PID})
fi
if [[ ${WATCHDOG_MYSQL_REPLICATION_CHECKS} =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  (
    while true; do
      if ! mysql_repl_checks; then
        log_msg "MySQL replication check hit error limit"
        echo mysql_repl_checks > /tmp/com_pipe
      fi
    done
  ) &
  PID=$!
  echo "Spawned mysql_repl_checks with PID ${PID}"
  BACKGROUND_TASKS+=(${PID})
fi
(
  while true; do
    if ! mysql_checks; then
      log_msg "MySQL hit error limit"
      echo mysql-mailcow > /tmp/com_pipe
    fi
  done
) &
PID=$!
echo "Spawned mysql_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})
(
  while true; do
    if ! redis_checks; then
      log_msg "Local Redis hit error limit"
      echo redis-mailcow > /tmp/com_pipe
    fi
  done
) &
PID=$!
echo "Spawned redis_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})
(
  while true; do
    if ! phpfpm_checks; then
      log_msg "PHP-FPM hit error limit"
      echo php-fpm-mailcow > /tmp/com_pipe
    fi
  done
) &
PID=$!
echo "Spawned phpfpm_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})
  759. if [[ "${SKIP_SOGO}" =~ ^([nN][oO]|[nN])+$ ]]; then
  760. (
  761. while true; do
  762. if ! sogo_checks; then
  763. log_msg "SOGo hit error limit"
  764. echo sogo-mailcow > /tmp/com_pipe
  765. fi
  766. done
  767. ) &
  768. PID=$!
  769. echo "Spawned sogo_checks with PID ${PID}"
  770. BACKGROUND_TASKS+=(${PID})
  771. fi
  772. if [ ${CHECK_UNBOUND} -eq 1 ]; then
  773. (
  774. while true; do
  775. if ! unbound_checks; then
  776. log_msg "Unbound hit error limit"
  777. echo unbound-mailcow > /tmp/com_pipe
  778. fi
  779. done
  780. ) &
  781. PID=$!
  782. echo "Spawned unbound_checks with PID ${PID}"
  783. BACKGROUND_TASKS+=(${PID})
  784. fi
  785. if [[ "${SKIP_CLAMD}" =~ ^([nN][oO]|[nN])+$ ]]; then
  786. (
  787. while true; do
  788. if ! clamd_checks; then
  789. log_msg "Clamd hit error limit"
  790. echo clamd-mailcow > /tmp/com_pipe
  791. fi
  792. done
  793. ) &
  794. PID=$!
  795. echo "Spawned clamd_checks with PID ${PID}"
  796. BACKGROUND_TASKS+=(${PID})
  797. fi
  798. (
  799. while true; do
  800. if ! postfix_checks; then
  801. log_msg "Postfix hit error limit"
  802. echo postfix-mailcow > /tmp/com_pipe
  803. fi
  804. done
  805. ) &
  806. PID=$!
  807. echo "Spawned postfix_checks with PID ${PID}"
  808. BACKGROUND_TASKS+=(${PID})
  809. (
  810. while true; do
  811. if ! mailq_checks; then
  812. log_msg "Mail queue hit error limit"
  813. echo mail_queue_status > /tmp/com_pipe
  814. fi
  815. done
  816. ) &
  817. PID=$!
  818. echo "Spawned mailq_checks with PID ${PID}"
  819. BACKGROUND_TASKS+=(${PID})
  820. (
  821. while true; do
  822. if ! dovecot_checks; then
  823. log_msg "Dovecot hit error limit"
  824. echo dovecot-mailcow > /tmp/com_pipe
  825. fi
  826. done
  827. ) &
  828. PID=$!
  829. echo "Spawned dovecot_checks with PID ${PID}"
  830. BACKGROUND_TASKS+=(${PID})
(
  while true; do
    if ! dovecot_repl_checks; then
      log_msg "Dovecot replication hit error limit"
      echo dovecot_repl_checks > /tmp/com_pipe
    fi
  done
) &
PID=$!
echo "Spawned dovecot_repl_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})
(
  while true; do
    if ! rspamd_checks; then
      log_msg "Rspamd hit error limit"
      echo rspamd-mailcow > /tmp/com_pipe
    fi
  done
) &
PID=$!
echo "Spawned rspamd_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})
(
  while true; do
    if ! ratelimit_checks; then
      log_msg "Ratelimit hit error limit"
      echo ratelimit > /tmp/com_pipe
    fi
  done
) &
PID=$!
echo "Spawned ratelimit_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})
(
  while true; do
    if ! fail2ban_checks; then
      log_msg "Fail2ban hit error limit"
      echo fail2ban > /tmp/com_pipe
    fi
  done
) &
PID=$!
echo "Spawned fail2ban_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})
(
  while true; do
    if ! olefy_checks; then
      log_msg "Olefy hit error limit"
      echo olefy-mailcow > /tmp/com_pipe
    fi
  done
) &
PID=$!
echo "Spawned olefy_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})
(
  while true; do
    if ! acme_checks; then
      log_msg "ACME client hit error limit"
      echo acme-mailcow > /tmp/com_pipe
    fi
  done
) &
PID=$!
echo "Spawned acme_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})
(
  while true; do
    if ! ipv6nat_checks; then
      log_msg "IPv6 NAT warning: ipv6nat-mailcow container was not started at least 30s after siblings (not an error)"
      echo ipv6nat-mailcow > /tmp/com_pipe
    fi
  done
) &
PID=$!
echo "Spawned ipv6nat_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})
# Monitor watchdog agents, stop the script when an agent fails and wait for respawn by Docker (restart:always:n)
(
  while true; do
    for bg_task in ${BACKGROUND_TASKS[*]}; do
      if ! kill -0 ${bg_task} 1>&2; then
        log_msg "Worker ${bg_task} died, stopping watchdog and waiting for respawn..."
        kill -TERM 1
      fi
      sleep 10
    done
  done
) &
# Monitor dockerapi
(
  while true; do
    while nc -z dockerapi 443; do
      sleep 3
    done
    log_msg "Cannot find dockerapi-mailcow, waiting to recover..."
    kill -STOP ${BACKGROUND_TASKS[*]}
    until nc -z dockerapi 443; do
      sleep 3
    done
    kill -CONT ${BACKGROUND_TASKS[*]}
    kill -USR1 ${BACKGROUND_TASKS[*]}
  done
) &
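# While handling an event for a *-mailcow container, the loop below suspends all agents
# (SIGSTOP), acts, then resumes them (SIGCONT) and sends SIGUSR1 so each agent's trap
# can lower its error count after the restart.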
# Actions when threshold limit is reached
while true; do
  CONTAINER_ID=
  HAS_INITDB=
  read com_pipe_answer </tmp/com_pipe
  if [ -s "/tmp/${com_pipe_answer}" ]; then
    cat "/tmp/${com_pipe_answer}"
  fi
  if [[ ${com_pipe_answer} == "ratelimit" ]]; then
    log_msg "At least one ratelimit was applied"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
  elif [[ ${com_pipe_answer} == "mail_queue_status" ]]; then
    log_msg "Mail queue status is critical"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
  elif [[ ${com_pipe_answer} == "external_checks" ]]; then
    log_msg "Your mailcow is an open relay!"
    # Define $2 to override message text, else print service was restarted at ...
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please stop mailcow now and check your network configuration!"
  elif [[ ${com_pipe_answer} == "mysql_repl_checks" ]]; then
    log_msg "MySQL replication is not working properly"
    # Define $2 to override message text, else print service was restarted at ...
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check the SQL replication status"
  elif [[ ${com_pipe_answer} == "dovecot_repl_checks" ]]; then
    log_msg "Dovecot replication is not working properly"
    # Define $2 to override message text, else print service was restarted at ...
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check the Dovecot replicator status"
  elif [[ ${com_pipe_answer} == "acme-mailcow" ]]; then
    log_msg "acme-mailcow did not complete successfully"
    # Define $2 to override message text, else print service was restarted at ...
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check acme-mailcow for further information."
  elif [[ ${com_pipe_answer} == "fail2ban" ]]; then
    F2B_RES=($(timeout 4s ${REDIS_CMDLINE} --raw GET F2B_RES 2> /dev/null))
    if [[ ! -z "${F2B_RES}" ]]; then
      ${REDIS_CMDLINE} DEL F2B_RES > /dev/null
      host=
      for host in "${F2B_RES[@]}"; do
        log_msg "Banned ${host}"
        rm /tmp/fail2ban 2> /dev/null
        timeout 2s whois "${host}" > /tmp/fail2ban
        [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && [[ ${WATCHDOG_NOTIFY_BAN} =~ ^([yY][eE][sS]|[yY])+$ ]] && mail_error "${com_pipe_answer}" "IP ban: ${host}"
      done
    fi
  elif [[ ${com_pipe_answer} =~ .+-mailcow ]]; then
    kill -STOP ${BACKGROUND_TASKS[*]}
    sleep 10
    CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"${com_pipe_answer}\")) | .id")
    if [[ ! -z ${CONTAINER_ID} ]]; then
      if [[ "${com_pipe_answer}" == "php-fpm-mailcow" ]]; then
        HAS_INITDB=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/top | jq '.msg.Processes[] | contains(["php -c /usr/local/etc/php -f /web/inc/init_db.inc.php"])' | grep true)
      fi
      S_RUNNING=$(($(date +%s) - $(curl --silent --insecure https://dockerapi/containers/${CONTAINER_ID}/json | jq .State.StartedAt | xargs -n1 date +%s -d)))
      if [ ${S_RUNNING} -lt 360 ]; then
        log_msg "Container is running for less than 360 seconds, skipping action..."
      elif [[ ! -z ${HAS_INITDB} ]]; then
        log_msg "Database is being initialized by php-fpm-mailcow, not restarting but delaying checks for a minute..."
        sleep 60
      else
        log_msg "Sending restart command to ${CONTAINER_ID}..."
        curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/restart
        if [[ ${com_pipe_answer} != "ipv6nat-mailcow" ]]; then
          [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
        fi
        log_msg "Wait for restarted container to settle and continue watching..."
        sleep 35
      fi
    fi
    kill -CONT ${BACKGROUND_TASKS[*]}
    sleep 1
    kill -USR1 ${BACKGROUND_TASKS[*]}
  fi
done