watchdog.sh

#!/bin/bash
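# Exit cleanly on INT/TERM; the EXIT trap kills the whole process group (kill 0)
# so none of the background check agents spawned below outlive the watchdog.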
trap "exit" INT TERM
trap "kill 0" EXIT

# Prepare
BACKGROUND_TASKS=()
echo "Waiting for containers to settle..."
sleep 30

if [[ "${USE_WATCHDOG}" =~ ^([nN][oO]|[nN])+$ ]]; then
  echo -e "$(date) - USE_WATCHDOG=n, skipping watchdog..."
  sleep 365d
  exec $(readlink -f "$0")
fi

# Checks write their corresponding container name into this pipe
if [[ ! -p /tmp/com_pipe ]]; then
  mkfifo /tmp/com_pipe
fi

# Wait for containers
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
  echo "Waiting for SQL..."
  sleep 2
done

# Do not attempt to write to slave
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
else
  REDIS_CMDLINE="redis-cli -h redis -p 6379"
fi

until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
  echo "Waiting for Redis..."
  sleep 2
done

${REDIS_CMDLINE} DEL F2B_RES > /dev/null
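# F2B_RES holds the most recently detected Fail2ban bans; it is cleared here,
# repopulated by fail2ban_checks and consumed by the dispatcher loop at the bottom.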
# Common functions
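# get_ipv6 asks one of two public services (picked at random) for this host's
# IPv6 address, retrying up to 10 times, and prints the address or nothing.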
get_ipv6(){
  local IPV6=
  local IPV6_SRCS=
  local TRY=
  IPV6_SRCS[0]="ip6.mailcow.email"
  IPV6_SRCS[1]="ip6.nevondo.com"
  until [[ ! -z ${IPV6} ]] || [[ ${TRY} -ge 10 ]]; do
    IPV6=$(curl --connect-timeout 3 -m 10 -L6s ${IPV6_SRCS[$RANDOM % ${#IPV6_SRCS[@]} ]} | grep "^\([0-9a-fA-F]\{0,4\}:\)\{1,7\}[0-9a-fA-F]\{0,4\}$")
    [[ ! -z ${TRY} ]] && sleep 1
    TRY=$((TRY+1))
  done
  echo ${IPV6}
}

array_diff() {
  # https://stackoverflow.com/questions/2312762, Alex Offshore
  eval local ARR1=\(\"\${$2[@]}\"\)
  eval local ARR2=\(\"\${$3[@]}\"\)
  local IFS=$'\n'
  mapfile -t $1 < <(comm -23 <(echo "${ARR1[*]}" | sort) <(echo "${ARR2[*]}" | sort))
}
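# progress <service> <total> <current> <diff> pushes a JSON health entry to the
# WATCHDOG_LOG list in Redis and returns 10 once the health level reaches zero.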
progress() {
  SERVICE=${1}
  TOTAL=${2}
  CURRENT=${3}
  DIFF=${4}
  [[ -z ${DIFF} ]] && DIFF=0
  [[ -z ${TOTAL} || -z ${CURRENT} ]] && return
  [[ ${CURRENT} -gt ${TOTAL} ]] && return
  [[ ${CURRENT} -lt 0 ]] && CURRENT=0
  PERCENT=$(( 200 * ${CURRENT} / ${TOTAL} % 2 + 100 * ${CURRENT} / ${TOTAL} ))
  ${REDIS_CMDLINE} LPUSH WATCHDOG_LOG "{\"time\":\"$(date +%s)\",\"service\":\"${SERVICE}\",\"lvl\":\"${PERCENT}\",\"hpnow\":\"${CURRENT}\",\"hptotal\":\"${TOTAL}\",\"hpdiff\":\"${DIFF}\"}" > /dev/null
  log_msg "${SERVICE} health level: ${PERCENT}% (${CURRENT}/${TOTAL}), health trend: ${DIFF}" no_redis
  # Return 10 to indicate a dead service
  [ ${CURRENT} -le 0 ] && return 10
}

log_msg() {
  if [[ ${2} != "no_redis" ]]; then
    ${REDIS_CMDLINE} LPUSH WATCHDOG_LOG "{\"time\":\"$(date +%s)\",\"message\":\"$(printf '%s' "${1}" | \
      tr '\r\n%&;$"_[]{}-' ' ')\"}" > /dev/null
  fi
  echo $(date) $(printf '%s\n' "${1}")
}
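# mail_error <service> [body] [throttle_seconds] notifies every address in
# WATCHDOG_NOTIFY_EMAIL via smtp-cli. If /tmp/<service> exists, its content is
# used as the mail body; an optional throttle key in Redis rate-limits repeat mails.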
function mail_error() {
  THROTTLE=
  [[ -z ${1} ]] && return 1
  # If it exists, the body will be the content of "/tmp/${1}", even if ${2} is set
  [[ -z ${2} ]] && BODY="Service was restarted on $(date), please check your mailcow installation." || BODY="$(date) - ${2}"
  # If set, mail will be throttled by the given number of seconds
  [[ ! -z ${3} ]] && THROTTLE=${3}
  if [[ ! -z ${THROTTLE} ]]; then
    TTL_LEFT="$(${REDIS_CMDLINE} TTL THROTTLE_${1} 2> /dev/null)"
    if [[ "${TTL_LEFT}" == "-2" ]]; then
      # Delay key not found, setting a delay key now
      ${REDIS_CMDLINE} SET THROTTLE_${1} 1 EX ${THROTTLE}
    else
      log_msg "Not sending notification email now, blocked for ${TTL_LEFT} seconds..."
      return 1
    fi
  fi
  WATCHDOG_NOTIFY_EMAIL=$(echo "${WATCHDOG_NOTIFY_EMAIL}" | sed 's/"//;s|"$||')
  # Some exceptions for subject and body formats
  if [[ ${1} == "fail2ban" ]]; then
    SUBJECT="${BODY}"
    BODY="Please see netfilter-mailcow for more details and triggered rules."
  else
    SUBJECT="${WATCHDOG_SUBJECT}: ${1}"
  fi
  IFS=',' read -r -a MAIL_RCPTS <<< "${WATCHDOG_NOTIFY_EMAIL}"
  for rcpt in "${MAIL_RCPTS[@]}"; do
    RCPT_DOMAIN=
    #RCPT_MX=
    RCPT_DOMAIN=$(echo ${rcpt} | awk -F @ {'print $NF'})
    # Latest smtp-cli looks up the MX via DNS
    #RCPT_MX=$(dig +short ${RCPT_DOMAIN} mx | sort -n | awk '{print $2; exit}')
    #if [[ -z ${RCPT_MX} ]]; then
    #  log_msg "Cannot determine MX for ${rcpt}, skipping email notification..."
    #  return 1
    #fi
    [ -f "/tmp/${1}" ] && BODY="/tmp/${1}"
    timeout 10s ./smtp-cli --missing-modules-ok \
      --charset=UTF-8 \
      --subject="${SUBJECT}" \
      --body-plain="${BODY}" \
      --add-header="X-Priority: 1" \
      --to=${rcpt} \
      --from="watchdog@${MAILCOW_HOSTNAME}" \
      --hello-host=${MAILCOW_HOSTNAME} \
      --ipv4
      #--server="${RCPT_MX}"
    log_msg "Sent notification email to ${rcpt}"
  done
}
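# get_container_ip <service> resolves the container's IPv4 address either via DNS
# or via the dockerapi container (IP_BY_DOCKER_API=1), only accepting addresses
# inside IPV4_NETWORK; after five failed attempts it prints the sentinel 240.0.0.0.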
get_container_ip() {
  # ${1} is the container (service) name
  CONTAINER_ID=()
  CONTAINER_IPS=()
  CONTAINER_IP=
  LOOP_C=1
  until [[ ${CONTAINER_IP} =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]] || [[ ${LOOP_C} -gt 5 ]]; do
    if [ ${IP_BY_DOCKER_API} -eq 0 ]; then
      CONTAINER_IP=$(dig a "${1}" +short)
    else
      sleep 0.5
      # get the long container id for an exact match
      CONTAINER_ID=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring == \"${1}\") | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id"))
      # the returned id can have multiple elements (if scaled), shuffle to test a random one
      CONTAINER_ID=($(printf "%s\n" "${CONTAINER_ID[@]}" | shuf))
      if [[ ! -z ${CONTAINER_ID} ]]; then
        for matched_container in "${CONTAINER_ID[@]}"; do
          CONTAINER_IPS=($(curl --silent --insecure https://dockerapi/containers/${matched_container}/json | jq -r '.NetworkSettings.Networks[].IPAddress'))
          for ip_match in "${CONTAINER_IPS[@]}"; do
            # grep will do nothing if one of these vars is empty
            [[ -z ${ip_match} ]] && continue
            [[ -z ${IPV4_NETWORK} ]] && continue
            # only return IPs that are part of our network
            if ! grep -q ${IPV4_NETWORK} <(echo ${ip_match}); then
              continue
            else
              CONTAINER_IP=${ip_match}
              break
            fi
          done
          [[ ! -z ${CONTAINER_IP} ]] && break
        done
      fi
    fi
    LOOP_C=$((LOOP_C + 1))
  done
  [[ ${LOOP_C} -gt 5 ]] && echo 240.0.0.0 || echo ${CONTAINER_IP}
}
# One-time check
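# Only probe external IPv6 connectivity if an address from IPV6_NETWORK is
# actually assigned to an interface on this host.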
if grep -qi "$(echo ${IPV6_NETWORK} | cut -d: -f1-3)" <<< "$(ip a s)"; then
  if [[ -z "$(get_ipv6)" ]]; then
    mail_error "ipv6-config" "enable_ipv6 is true in docker-compose.yml, but an IPv6 link could not be established. Please verify your IPv6 connection."
  fi
fi
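# Each *_checks function below loops until err_count reaches its THRESHOLD:
# every failed probe raises err_count, every clean pass lowers it again, and
# progress() reports the resulting health level. A USR1 signal (sent after a
# container restart) reduces err_count by 2. Once the threshold is hit, the
# function returns 1 so its supervisor loop can report the service via /tmp/com_pipe.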
external_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${EXTERNAL_CHECKS_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  GUID=$(mysql -u${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'GUID'" -BN)
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    err_c_cur=${err_count}
    CHECK_REPONSE="$(curl --connect-timeout 3 -m 10 -4 -s https://checks.mailcow.email -X POST -dguid=${GUID} 2> /dev/null)"
    if [[ ! -z "${CHECK_REPONSE}" ]] && [[ "$(echo ${CHECK_REPONSE} | jq -r .response)" == "critical" ]]; then
      echo ${CHECK_REPONSE} | jq -r .out > /tmp/external_checks
      err_count=$(( ${err_count} + 1 ))
    fi
    CHECK_REPONSE6="$(curl --connect-timeout 3 -m 10 -6 -s https://checks.mailcow.email -X POST -dguid=${GUID} 2> /dev/null)"
    if [[ ! -z "${CHECK_REPONSE6}" ]] && [[ "$(echo ${CHECK_REPONSE6} | jq -r .response)" == "critical" ]]; then
      echo ${CHECK_REPONSE6} | jq -r .out > /tmp/external_checks
      err_count=$(( ${err_count} + 1 ))
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "External checks" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 60
    else
      diff_c=0
      sleep $(( ( RANDOM % 20 ) + 1800 ))
    fi
  done
  return 1
}
nginx_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${NGINX_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/nginx-mailcow; echo "$(tail -50 /tmp/nginx-mailcow)" > /tmp/nginx-mailcow
    host_ip=$(get_container_ip nginx-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u / -p 8081 2>> /tmp/nginx-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Nginx" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

unbound_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${UNBOUND_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/unbound-mailcow; echo "$(tail -50 /tmp/unbound-mailcow)" > /tmp/unbound-mailcow
    host_ip=$(get_container_ip unbound-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_dns -s ${host_ip} -H stackoverflow.com 2>> /tmp/unbound-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    DNSSEC=$(dig com +dnssec | egrep 'flags:.+ad')
    if [[ -z ${DNSSEC} ]]; then
      echo "DNSSEC failure" 2>> /tmp/unbound-mailcow 1>&2
      err_count=$(( ${err_count} + 1))
    else
      echo "DNSSEC check succeeded" 2>> /tmp/unbound-mailcow 1>&2
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Unbound" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

redis_checks() {
  # A check for the local redis container
  err_count=0
  diff_c=0
  THRESHOLD=${REDIS_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/redis-mailcow; echo "$(tail -50 /tmp/redis-mailcow)" > /tmp/redis-mailcow
    host_ip=$(get_container_ip redis-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_tcp -4 -H redis-mailcow -p 6379 -E -s "PING\n" -q "QUIT" -e "PONG" 2>> /tmp/redis-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Redis" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

mysql_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${MYSQL_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/mysql-mailcow; echo "$(tail -50 /tmp/mysql-mailcow)" > /tmp/mysql-mailcow
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_mysql -s /var/run/mysqld/mysqld.sock -u ${DBUSER} -p ${DBPASS} -d ${DBNAME} 2>> /tmp/mysql-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_mysql_query -s /var/run/mysqld/mysqld.sock -u ${DBUSER} -p ${DBPASS} -d ${DBNAME} -q "SELECT COUNT(*) FROM information_schema.tables" 2>> /tmp/mysql-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "MySQL/MariaDB" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

mysql_repl_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${MYSQL_REPLICATION_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/mysql_repl_checks; echo "$(tail -50 /tmp/mysql_repl_checks)" > /tmp/mysql_repl_checks
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_mysql_slavestatus.sh -S /var/run/mysqld/mysqld.sock -u root -p ${DBROOT} 2>> /tmp/mysql_repl_checks 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "MySQL/MariaDB replication" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 60
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

sogo_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${SOGO_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/sogo-mailcow; echo "$(tail -50 /tmp/sogo-mailcow)" > /tmp/sogo-mailcow
    host_ip=$(get_container_ip sogo-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u /SOGo.index/ -p 20000 2>> /tmp/sogo-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "SOGo" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

postfix_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${POSTFIX_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/postfix-mailcow; echo "$(tail -50 /tmp/postfix-mailcow)" > /tmp/postfix-mailcow
    host_ip=$(get_container_ip postfix-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 589 -f "watchdog@invalid" -C "RCPT TO:watchdog@localhost" -C DATA -C . -R 250 2>> /tmp/postfix-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 589 -S 2>> /tmp/postfix-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Postfix" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

clamd_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${CLAMD_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/clamd-mailcow; echo "$(tail -50 /tmp/clamd-mailcow)" > /tmp/clamd-mailcow
    host_ip=$(get_container_ip clamd-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_clamd -4 -H ${host_ip} 2>> /tmp/clamd-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Clamd" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 120 ) + 20 ))
    fi
  done
  return 1
}

dovecot_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${DOVECOT_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/dovecot-mailcow; echo "$(tail -50 /tmp/dovecot-mailcow)" > /tmp/dovecot-mailcow
    host_ip=$(get_container_ip dovecot-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_smtp -4 -H ${host_ip} -p 24 -f "watchdog@invalid" -C "RCPT TO:<watchdog@invalid>" -L -R "User doesn't exist" 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_imap -4 -H ${host_ip} -p 993 -S -e "OK " 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_imap -4 -H ${host_ip} -p 143 -e "OK " 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 10001 -e "VERSION" 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 4190 -e "Dovecot ready" 2>> /tmp/dovecot-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Dovecot" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

dovecot_repl_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${DOVECOT_REPL_THRESHOLD}
  D_REPL_STATUS=$(redis-cli --raw -h redis GET DOVECOT_REPL_HEALTH)
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    err_c_cur=${err_count}
    D_REPL_STATUS=$(redis-cli --raw -h redis GET DOVECOT_REPL_HEALTH)
    if [[ "${D_REPL_STATUS}" != "1" ]]; then
      err_count=$(( ${err_count} + 1 ))
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Dovecot replication" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 60
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

cert_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=7
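  # The -D 7 option below makes check_smtp/check_imap warn when the presented
  # certificate expires in less than 7 days, which raises err_count.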
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/certcheck; echo "$(tail -50 /tmp/certcheck)" > /tmp/certcheck
    host_ip_postfix=$(get_container_ip postfix)
    host_ip_dovecot=$(get_container_ip dovecot)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_smtp -H ${host_ip_postfix} -p 589 -4 -S -D 7 2>> /tmp/certcheck 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_imap -H ${host_ip_dovecot} -p 993 -4 -S -D 7 2>> /tmp/certcheck 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Primary certificate expiry check" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    # Always sleep 5 minutes, mail notifications are limited
    sleep 300
  done
  return 1
}

phpfpm_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${PHPFPM_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/php-fpm-mailcow; echo "$(tail -50 /tmp/php-fpm-mailcow)" > /tmp/php-fpm-mailcow
    host_ip=$(get_container_ip php-fpm-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_tcp -H ${host_ip} -p 9001 2>> /tmp/php-fpm-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    /usr/lib/nagios/plugins/check_tcp -H ${host_ip} -p 9002 2>> /tmp/php-fpm-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "PHP-FPM" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

ratelimit_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${RATELIMIT_THRESHOLD}
  RL_LOG_STATUS=$(redis-cli -h redis LRANGE RL_LOG 0 0 | jq .qid)
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    err_c_cur=${err_count}
    RL_LOG_STATUS_PREV=${RL_LOG_STATUS}
    RL_LOG_STATUS=$(redis-cli -h redis LRANGE RL_LOG 0 0 | jq .qid)
    if [[ ${RL_LOG_STATUS_PREV} != ${RL_LOG_STATUS} ]]; then
      err_count=$(( ${err_count} + 1 ))
      echo 'Last 10 applied ratelimits (may overlap with previous reports).' > /tmp/ratelimit
      echo 'Full ratelimit buckets can be emptied by deleting the ratelimit hash from within mailcow UI (see /debug -> Protocols -> Ratelimit):' >> /tmp/ratelimit
      echo >> /tmp/ratelimit
      redis-cli --raw -h redis LRANGE RL_LOG 0 10 | jq . >> /tmp/ratelimit
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Ratelimit" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

mailq_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${MAILQ_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/mail_queue_status; echo "$(tail -50 /tmp/mail_queue_status)" > /tmp/mail_queue_status
    MAILQ_LOG_STATUS=$(find /var/spool/postfix/deferred -type f | wc -l)
    echo "Mail queue contains ${MAILQ_LOG_STATUS} items (critical limit is ${MAILQ_CRIT}) at $(date)" >> /tmp/mail_queue_status
    err_c_cur=${err_count}
    if [ ${MAILQ_LOG_STATUS} -ge ${MAILQ_CRIT} ]; then
      err_count=$(( ${err_count} + 1 ))
      echo "Mail queue contains ${MAILQ_LOG_STATUS} items (critical limit is ${MAILQ_CRIT}) at $(date)" >> /tmp/mail_queue_status
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Mail queue" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 60
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

fail2ban_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${FAIL2BAN_THRESHOLD}
  F2B_LOG_STATUS=($(${REDIS_CMDLINE} --raw HKEYS F2B_ACTIVE_BANS))
  F2B_RES=
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    err_c_cur=${err_count}
    F2B_LOG_STATUS_PREV=(${F2B_LOG_STATUS[@]})
    F2B_LOG_STATUS=($(${REDIS_CMDLINE} --raw HKEYS F2B_ACTIVE_BANS))
    array_diff F2B_RES F2B_LOG_STATUS F2B_LOG_STATUS_PREV
    if [[ ! -z "${F2B_RES}" ]]; then
      err_count=$(( ${err_count} + 1 ))
      echo -n "${F2B_RES[@]}" | tr -cd "[a-fA-F0-9.:/] " | timeout 3s ${REDIS_CMDLINE} -x SET F2B_RES > /dev/null
      if [ $? -ne 0 ]; then
        ${REDIS_CMDLINE} -x DEL F2B_RES
      fi
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Fail2ban" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

acme_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${ACME_THRESHOLD}
  ACME_LOG_STATUS=$(redis-cli -h redis GET ACME_FAIL_TIME)
  if [[ -z "${ACME_LOG_STATUS}" ]]; then
    ${REDIS_CMDLINE} SET ACME_FAIL_TIME 0
    ACME_LOG_STATUS=0
  fi
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    err_c_cur=${err_count}
    ACME_LOG_STATUS_PREV=${ACME_LOG_STATUS}
    ACME_LC=0
    until [[ ! -z ${ACME_LOG_STATUS} ]] || [ ${ACME_LC} -ge 3 ]; do
      ACME_LOG_STATUS=$(redis-cli -h redis GET ACME_FAIL_TIME 2> /dev/null)
      sleep 3
      ACME_LC=$((ACME_LC+1))
    done
    if [[ ${ACME_LOG_STATUS_PREV} != ${ACME_LOG_STATUS} ]]; then
      err_count=$(( ${err_count} + 1 ))
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "ACME" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

rspamd_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${RSPAMD_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/rspamd-mailcow; echo "$(tail -50 /tmp/rspamd-mailcow)" > /tmp/rspamd-mailcow
    host_ip=$(get_container_ip rspamd-mailcow)
    err_c_cur=${err_count}
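    # Scan a minimal dummy message through the local rspamd socket and check that
    # the returned required_score is the expected 9999 for the watchdog sender.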
    SCORE=$(echo 'To: null@localhost
From: watchdog@localhost
Empty
' | /usr/bin/curl --max-time 10 -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/scan | jq -rc .default.required_score)
    if [[ ${SCORE} != "9999" ]]; then
      echo "Rspamd settings check failed, score returned: ${SCORE}" 2>> /tmp/rspamd-mailcow 1>&2
      err_count=$(( ${err_count} + 1))
    else
      echo "Rspamd settings check succeeded, score returned: ${SCORE}" 2>> /tmp/rspamd-mailcow 1>&2
    fi
    # A dirty hack until a PING PONG event is implemented in the worker proxy
    # We expect an empty response, not a timeout
    if [ "$(curl -s --max-time 10 ${host_ip}:9900 2> /dev/null ; echo $?)" == "28" ]; then
      echo "Milter check failed" 2>> /tmp/rspamd-mailcow 1>&2; err_count=$(( ${err_count} + 1 ));
    else
      echo "Milter check succeeded" 2>> /tmp/rspamd-mailcow 1>&2
    fi
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Rspamd" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

olefy_checks() {
  err_count=0
  diff_c=0
  THRESHOLD=${OLEFY_THRESHOLD}
  # Reduce error count by 2 after restarting an unhealthy container
  trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
  while [ ${err_count} -lt ${THRESHOLD} ]; do
    touch /tmp/olefy-mailcow; echo "$(tail -50 /tmp/olefy-mailcow)" > /tmp/olefy-mailcow
    host_ip=$(get_container_ip olefy-mailcow)
    err_c_cur=${err_count}
    /usr/lib/nagios/plugins/check_tcp -4 -H ${host_ip} -p 10055 -s "PING\n" 2>> /tmp/olefy-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
    [ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
    [ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
    progress "Olefy" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
    if [[ $? == 10 ]]; then
      diff_c=0
      sleep 1
    else
      diff_c=0
      sleep $(( ( RANDOM % 60 ) + 20 ))
    fi
  done
  return 1
}

# Notify about start
if [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]]; then
  mail_error "watchdog-mailcow" "Watchdog started monitoring mailcow."
fi

# Create watchdog agents
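# Every agent below runs its check function in a background subshell; when a check
# returns (threshold reached), the loop writes the affected service name into
# /tmp/com_pipe, where the dispatcher at the end of this script picks it up.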
(
while true; do
  if ! nginx_checks; then
    log_msg "Nginx hit error limit"
    echo nginx-mailcow > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned nginx_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

if [[ ${WATCHDOG_EXTERNAL_CHECKS} =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  (
  while true; do
    if ! external_checks; then
      log_msg "External checks hit error limit"
      echo external_checks > /tmp/com_pipe
    fi
  done
  ) &
  PID=$!
  echo "Spawned external_checks with PID ${PID}"
  BACKGROUND_TASKS+=(${PID})
fi

if [[ ${WATCHDOG_MYSQL_REPLICATION_CHECKS} =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  (
  while true; do
    if ! mysql_repl_checks; then
      log_msg "MySQL replication check hit error limit"
      echo mysql_repl_checks > /tmp/com_pipe
    fi
  done
  ) &
  PID=$!
  echo "Spawned mysql_repl_checks with PID ${PID}"
  BACKGROUND_TASKS+=(${PID})
fi

(
while true; do
  if ! mysql_checks; then
    log_msg "MySQL hit error limit"
    echo mysql-mailcow > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned mysql_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

(
while true; do
  if ! redis_checks; then
    log_msg "Local Redis hit error limit"
    echo redis-mailcow > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned redis_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

(
while true; do
  if ! phpfpm_checks; then
    log_msg "PHP-FPM hit error limit"
    echo php-fpm-mailcow > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned phpfpm_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

if [[ "${SKIP_SOGO}" =~ ^([nN][oO]|[nN])+$ ]]; then
  (
  while true; do
    if ! sogo_checks; then
      log_msg "SOGo hit error limit"
      echo sogo-mailcow > /tmp/com_pipe
    fi
  done
  ) &
  PID=$!
  echo "Spawned sogo_checks with PID ${PID}"
  BACKGROUND_TASKS+=(${PID})
fi

if [ ${CHECK_UNBOUND} -eq 1 ]; then
  (
  while true; do
    if ! unbound_checks; then
      log_msg "Unbound hit error limit"
      echo unbound-mailcow > /tmp/com_pipe
    fi
  done
  ) &
  PID=$!
  echo "Spawned unbound_checks with PID ${PID}"
  BACKGROUND_TASKS+=(${PID})
fi

if [[ "${SKIP_CLAMD}" =~ ^([nN][oO]|[nN])+$ ]]; then
  (
  while true; do
    if ! clamd_checks; then
      log_msg "Clamd hit error limit"
      echo clamd-mailcow > /tmp/com_pipe
    fi
  done
  ) &
  PID=$!
  echo "Spawned clamd_checks with PID ${PID}"
  BACKGROUND_TASKS+=(${PID})
fi

(
while true; do
  if ! postfix_checks; then
    log_msg "Postfix hit error limit"
    echo postfix-mailcow > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned postfix_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

(
while true; do
  if ! mailq_checks; then
    log_msg "Mail queue hit error limit"
    echo mail_queue_status > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned mailq_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

(
while true; do
  if ! dovecot_checks; then
    log_msg "Dovecot hit error limit"
    echo dovecot-mailcow > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned dovecot_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

(
while true; do
  if ! dovecot_repl_checks; then
    log_msg "Dovecot replication hit error limit"
    echo dovecot_repl_checks > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned dovecot_repl_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

(
while true; do
  if ! rspamd_checks; then
    log_msg "Rspamd hit error limit"
    echo rspamd-mailcow > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned rspamd_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

(
while true; do
  if ! ratelimit_checks; then
    log_msg "Ratelimit hit error limit"
    echo ratelimit > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned ratelimit_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

(
while true; do
  if ! fail2ban_checks; then
    log_msg "Fail2ban hit error limit"
    echo fail2ban > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned fail2ban_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

(
while true; do
  if ! cert_checks; then
    log_msg "Cert check hit error limit"
    echo certcheck > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned cert_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

(
while true; do
  if ! olefy_checks; then
    log_msg "Olefy hit error limit"
    echo olefy-mailcow > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned olefy_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

(
while true; do
  if ! acme_checks; then
    log_msg "ACME client hit error limit"
    echo acme-mailcow > /tmp/com_pipe
  fi
done
) &
PID=$!
echo "Spawned acme_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})
# Monitor watchdog agents, stop the script when an agent fails and wait for respawn by Docker (restart: always)
(
while true; do
  for bg_task in ${BACKGROUND_TASKS[*]}; do
    if ! kill -0 ${bg_task} 1>&2; then
      log_msg "Worker ${bg_task} died, stopping watchdog and waiting for respawn..."
      kill -TERM 1
    fi
    sleep 10
  done
done
) &

# Monitor dockerapi
(
while true; do
  while nc -z dockerapi 443; do
    sleep 3
  done
  log_msg "Cannot find dockerapi-mailcow, waiting to recover..."
  kill -STOP ${BACKGROUND_TASKS[*]}
  until nc -z dockerapi 443; do
    sleep 3
  done
  kill -CONT ${BACKGROUND_TASKS[*]}
  kill -USR1 ${BACKGROUND_TASKS[*]}
done
) &

# Actions when threshold limit is reached
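# The dispatcher below blocks on /tmp/com_pipe. Informational services (ratelimit,
# mail queue, replication, certificates, fail2ban, ACME) only trigger notifications;
# other names ending in -mailcow additionally get their container restarted via
# dockerapi, unless the container is younger than 360 seconds or init_db is running.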
while true; do
  CONTAINER_ID=
  HAS_INITDB=
  read com_pipe_answer </tmp/com_pipe
  if [ -s "/tmp/${com_pipe_answer}" ]; then
    cat "/tmp/${com_pipe_answer}"
  fi
  if [[ ${com_pipe_answer} == "ratelimit" ]]; then
    log_msg "At least one ratelimit was applied"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
  elif [[ ${com_pipe_answer} == "mail_queue_status" ]]; then
    log_msg "Mail queue status is critical"
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
  elif [[ ${com_pipe_answer} == "external_checks" ]]; then
    log_msg "Your mailcow is an open relay!"
    # Define $2 to override message text, else print service was restarted at ...
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please stop mailcow now and check your network configuration!"
  elif [[ ${com_pipe_answer} == "mysql_repl_checks" ]]; then
    log_msg "MySQL replication is not working properly"
    # Define $2 to override message text, else print service was restarted at ...
    # One mail per 10 minutes
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check the SQL replication status" 600
  elif [[ ${com_pipe_answer} == "dovecot_repl_checks" ]]; then
    log_msg "Dovecot replication is not working properly"
    # Define $2 to override message text, else print service was restarted at ...
    # One mail per 10 minutes
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check the Dovecot replicator status" 600
  elif [[ ${com_pipe_answer} == "certcheck" ]]; then
    log_msg "Certificates are about to expire"
    # Define $2 to override message text, else print service was restarted at ...
    # Only mail once a day
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please renew your certificate" 86400
  elif [[ ${com_pipe_answer} == "acme-mailcow" ]]; then
    log_msg "acme-mailcow did not complete successfully"
    # Define $2 to override message text, else print service was restarted at ...
    [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check acme-mailcow for further information."
  elif [[ ${com_pipe_answer} == "fail2ban" ]]; then
    F2B_RES=($(timeout 4s ${REDIS_CMDLINE} --raw GET F2B_RES 2> /dev/null))
    if [[ ! -z "${F2B_RES}" ]]; then
      ${REDIS_CMDLINE} DEL F2B_RES > /dev/null
      host=
      for host in "${F2B_RES[@]}"; do
        log_msg "Banned ${host}"
        rm /tmp/fail2ban 2> /dev/null
        timeout 2s whois "${host}" > /tmp/fail2ban
        [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && [[ ${WATCHDOG_NOTIFY_BAN} =~ ^([yY][eE][sS]|[yY])+$ ]] && mail_error "${com_pipe_answer}" "IP ban: ${host}"
      done
    fi
  elif [[ ${com_pipe_answer} =~ .+-mailcow ]]; then
    kill -STOP ${BACKGROUND_TASKS[*]}
    sleep 10
    CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"${com_pipe_answer}\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
    if [[ ! -z ${CONTAINER_ID} ]]; then
      if [[ "${com_pipe_answer}" == "php-fpm-mailcow" ]]; then
        HAS_INITDB=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/top | jq '.msg.Processes[] | contains(["php -c /usr/local/etc/php -f /web/inc/init_db.inc.php"])' | grep true)
      fi
      S_RUNNING=$(($(date +%s) - $(curl --silent --insecure https://dockerapi/containers/${CONTAINER_ID}/json | jq .State.StartedAt | xargs -n1 date +%s -d)))
      if [ ${S_RUNNING} -lt 360 ]; then
        log_msg "Container is running for less than 360 seconds, skipping action..."
      elif [[ ! -z ${HAS_INITDB} ]]; then
        log_msg "Database is being initialized by php-fpm-mailcow, not restarting but delaying checks for a minute..."
        sleep 60
      else
        log_msg "Sending restart command to ${CONTAINER_ID}..."
        curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/restart
        [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}"
        log_msg "Wait for restarted container to settle and continue watching..."
        sleep 35
      fi
    fi
    kill -CONT ${BACKGROUND_TASKS[*]}
    sleep 1
    kill -USR1 ${BACKGROUND_TASKS[*]}
  fi
done