  1. #!/usr/bin/env bash
  2. #shellcheck disable=SC2001
  3. # netdata
  4. # real-time performance and health monitoring, done right!
  5. # (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
  6. # SPDX-License-Identifier: GPL-3.0-or-later
  7. #
  8. # Script to find a better name for cgroups
  9. #
  10. export PATH="${PATH}:/sbin:/usr/sbin:/usr/local/sbin"
  11. export LC_ALL=C
  12. # -----------------------------------------------------------------------------
  13. PROGRAM_NAME="$(basename "${0}")"
  14. logdate() {
  15. date "+%Y-%m-%d %H:%M:%S"
  16. }
  17. log() {
  18. local status="${1}"
  19. shift
  20. echo >&2 "$(logdate): ${PROGRAM_NAME}: ${status}: ${*}"
  21. }
  22. warning() {
  23. log WARNING "${@}"
  24. }
  25. error() {
  26. log ERROR "${@}"
  27. }
  28. info() {
  29. log INFO "${@}"
  30. }
  31. fatal() {
  32. log FATAL "${@}"
  33. exit 1
  34. }
  35. function parse_docker_like_inspect_output() {
  36. local output="${1}"
  37. eval "$(grep -E "^(NOMAD_NAMESPACE|NOMAD_JOB_NAME|NOMAD_TASK_NAME|NOMAD_SHORT_ALLOC_ID|CONT_NAME|IMAGE_NAME)=" <<<"$output")"
  38. if [ -n "$NOMAD_NAMESPACE" ] && [ -n "$NOMAD_JOB_NAME" ] && [ -n "$NOMAD_TASK_NAME" ] && [ -n "$NOMAD_SHORT_ALLOC_ID" ]; then
  39. NAME="${NOMAD_NAMESPACE}-${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}-${NOMAD_SHORT_ALLOC_ID}"
  40. else
  41. NAME=$(echo "${CONT_NAME}" | sed 's|^/||')
  42. fi
  43. if [ -n "${IMAGE_NAME}" ]; then
  44. LABELS="image=\"${IMAGE_NAME}\""
  45. fi
  46. }
  47. function docker_like_get_name_command() {
  48. local command="${1}"
  49. local id="${2}"
  50. info "Running command: ${command} inspect --format='{{range .Config.Env}}{{println .}}{{end}}CONT_NAME={{ .Name}}' \"${id}\""
  51. if OUTPUT="$(${command} inspect --format='{{range .Config.Env}}{{println .}}{{end}}CONT_NAME={{ .Name}}{{println}}IMAGE_NAME={{ .Config.Image}}' "${id}")" &&
  52. [ -n "$OUTPUT" ]; then
  53. parse_docker_like_inspect_output "$OUTPUT"
  54. fi
  55. return 0
  56. }
# Resolve a container name by querying a Docker-compatible HTTP API directly
# (Docker or Podman), over a unix socket or a TCP endpoint.
#
# Arguments:
#   $1 - NAME of the variable that holds the API endpoint (e.g. DOCKER_HOST);
#        resolved via indirect expansion.
#   $2 - container id
# Globals written: JSON, OUTPUT and (via the parser) NAME, LABELS.
# Returns 1 when no endpoint is configured or jq is missing; 0 otherwise.
function docker_like_get_name_api() {
local host_var="${1}"
local host="${!host_var}"
local path="/containers/${2}/json"
if [ -z "${host}" ]; then
warning "No ${host_var} is set"
return 1
fi
# jq is needed to turn the inspect JSON into the KEY=VALUE lines the parser expects.
if ! command -v jq >/dev/null 2>&1; then
warning "Can't find jq command line tool. jq is required for netdata to retrieve container name using ${host} API, falling back to docker ps"
return 1
fi
# A socket path means local daemon; anything else is treated as a URL prefix.
if [ -S "${host}" ]; then
info "Running API command: curl --unix-socket \"${host}\" http://localhost${path}"
JSON=$(curl -sS --unix-socket "${host}" "http://localhost${path}")
else
info "Running API command: curl \"${host}${path}\""
JSON=$(curl -sS "${host}${path}")
fi
# Reformat the JSON into the same KEY=VALUE lines `docker inspect` would emit.
if OUTPUT=$(echo "${JSON}" | jq -r '.Config.Env[],"CONT_NAME=\(.Name)","IMAGE_NAME=\(.Config.Image)"') && [ -n "$OUTPUT" ]; then
parse_docker_like_inspect_output "$OUTPUT"
fi
return 0
}
  81. # get_lbl_val returns the value for the label with the given name.
  82. # Returns "null" string if the label doesn't exist.
  83. # Expected labels format: 'name="value",...'.
  84. function get_lbl_val() {
  85. local labels want_name
  86. labels="${1}"
  87. want_name="${2}"
  88. IFS=, read -ra labels <<< "$labels"
  89. local lname lval
  90. for l in "${labels[@]}"; do
  91. IFS="=" read -r lname lval <<< "$l"
  92. if [ "$want_name" = "$lname" ] && [ -n "$lval" ]; then
  93. echo "${lval:1:-1}" # trim "
  94. return 0
  95. fi
  96. done
  97. echo "null"
  98. return 1
  99. }
  100. function add_lbl_prefix() {
  101. local orig_labels prefix
  102. orig_labels="${1}"
  103. prefix="${2}"
  104. IFS=, read -ra labels <<< "$orig_labels"
  105. local new_labels
  106. for l in "${labels[@]}"; do
  107. new_labels+="${prefix}${l},"
  108. done
  109. echo "${new_labels:0:-1}" # trim last ','
  110. }
  111. function remove_lbl() {
  112. local orig_labels lbl_name
  113. orig_labels="${1}"
  114. lbl_name="${2}"
  115. IFS=, read -ra labels <<< "$orig_labels"
  116. local new_labels
  117. for l in "${labels[@]}"; do
  118. IFS="=" read -r lname lval <<< "$l"
  119. [ "$lbl_name" != "$lname" ] && new_labels+="${l},"
  120. done
  121. echo "${new_labels:0:-1}" # trim last ','
  122. }
# k8s_is_pause_container returns success (0) when the cgroup belongs to a k8s
# "pause" (sandbox/infra) container. Such containers only hold the pod's
# namespaces and are not worth charting individually.
#
# Arguments:
#   $1 - cgroup path relative to the cgroup filesystem root
function k8s_is_pause_container() {
  local cgroup_path="${1}"
  local file
  # cgroups v1 keeps per-controller hierarchies (probe via cpuacct);
  # otherwise assume the unified (v2) hierarchy.
  if [ -d "${NETDATA_HOST_PREFIX}/sys/fs/cgroup/cpuacct" ]; then
    file="${NETDATA_HOST_PREFIX}/sys/fs/cgroup/cpuacct/$cgroup_path/cgroup.procs"
  else
    file="${NETDATA_HOST_PREFIX}/sys/fs/cgroup/$cgroup_path/cgroup.procs"
  fi
  [ ! -f "$file" ] && return 1
  local procs
  IFS= read -rd' ' procs 2>/dev/null <"$file"
  #shellcheck disable=SC2206
  procs=($procs)
  # A pause container runs exactly one process.
  [ "${#procs[@]}" -ne 1 ] && return 1
  # The process name 'pause' identifies the sandbox container.
  IFS= read -r comm 2>/dev/null <"/proc/${procs[0]}/comm"
  [ "$comm" == "pause" ]
  return
}
  141. function k8s_gcp_get_cluster_name() {
  142. local header url id loc name
  143. header="Metadata-Flavor: Google"
  144. url="http://metadata/computeMetadata/v1"
  145. if id=$(curl --fail -s -m 3 --noproxy "*" -H "$header" "$url/project/project-id") &&
  146. loc=$(curl --fail -s -m 3 --noproxy "*" -H "$header" "$url/instance/attributes/cluster-location") &&
  147. name=$(curl --fail -s -m 3 --noproxy "*" -H "$header" "$url/instance/attributes/cluster-name") &&
  148. [ -n "$id" ] && [ -n "$loc" ] && [ -n "$name" ]; then
  149. echo "gke_${id}_${loc}_${name}"
  150. return 0
  151. fi
  152. return 1
  153. }
# k8s_get_kubepod_name resolves */kubepods/* cgroup name.
# pod level cgroup name format: 'pod_<namespace>_<pod_name>'
# container level cgroup name format: 'cntr_<namespace>_<pod_name>_<container_name>'
#
# Prints the resolved name (optionally followed by a space and a label list)
# to stdout. Return codes: 0 - resolved; 1 - permanent failure; 2 - transient
# failure (metadata may be stale, caller should retry); 3 - skip this cgroup
# (e.g. pause container or unparsable id).
function k8s_get_kubepod_name() {
  # GKE /sys/fs/cgroup/*/ (cri=docker, cgroups=v1):
  # |-- kubepods
  # | |-- burstable
  # | | |-- pod98cee708-023b-11eb-933d-42010a800193
  # | | | |-- 922161c98e6ea450bf665226cdc64ca2aa3e889934c2cff0aec4325f8f78ac03
  # | `-- pode314bbac-d577-11ea-a171-42010a80013b
  # | |-- 7d505356b04507de7b710016d540b2759483ed5f9136bb01a80872b08f771930
  #
  # GKE /sys/fs/cgroup/*/ (cri=containerd, cgroups=v1):
  # |-- kubepods.slice
  # | |-- kubepods-besteffort.slice
  # | | |-- kubepods-besteffort-pode1465238_4518_4c21_832f_fd9f87033dad.slice
  # | | | |-- cri-containerd-66be9b2efdf4d85288c319b8c1a2f50d2439b5617e36f45d9d0d0be1381113be.scope
  # | `-- kubepods-pod91f5b561_369f_4103_8015_66391059996a.slice
  # | |-- cri-containerd-24c53b774a586f06abc058619b47f71d9d869ac50c92898adbd199106fd0aaeb.scope
  #
  # GKE /sys/fs/cgroup/*/ (cri=crio, cgroups=v1):
  # |-- kubepods.slice
  # | |-- kubepods-besteffort.slice
  # | | |-- kubepods-besteffort-podad412dfe_3589_4056_965a_592356172968.slice
  # | | | |-- crio-77b019312fd9825828b70214b2c94da69c30621af2a7ee06f8beace4bc9439e5.scope
  #
  # Minikube (v1.8.2) /sys/fs/cgroup/*/ (cri=docker, cgroups=v1):
  # |-- kubepods.slice
  # | |-- kubepods-besteffort.slice
  # | | |-- kubepods-besteffort-pod10fb5647_c724_400c_b9cc_0e6eae3110e7.slice
  # | | | |-- docker-36e5eb5056dfdf6dbb75c0c44a1ecf23217fe2c50d606209d8130fcbb19fb5a7.scope
  #
  # kind v0.14.0
  # |-- kubelet.slice
  # | |-- kubelet-kubepods.slice
  # | | |-- kubelet-kubepods-besteffort.slice
  # | | | |-- kubelet-kubepods-besteffort-pod7881ed9e_c63e_4425_b5e0_ac55a08ae939.slice
  # | | | | |-- cri-containerd-00c7939458bffc416bb03451526e9fde13301d6654cfeadf5b4964a7fb5be1a9.scope
  #
  # NOTE: cgroups plugin
  # - uses '_' to join dir names (so it is <parent>_<child>_<child>_...)
  # - replaces '.' with '-'
  local fn="${FUNCNAME[0]}"
  local cgroup_path="${1}"
  local id="${2}"
  if [[ ! $id =~ ^.*kubepods.* ]]; then
    warning "${fn}: '${id}' is not kubepod cgroup."
    return 1
  fi
  # Strip systemd unit suffixes so all CRI/cgroup layouts look alike.
  local clean_id="$id"
  clean_id=${clean_id//.slice/}
  clean_id=${clean_id//.scope/}
  local name pod_uid cntr_id
  if [[ $clean_id == "kubepods" ]]; then
    name="$clean_id"
  elif [[ $clean_id =~ .+(besteffort|burstable|guaranteed)$ ]]; then
    # kubepods_<QOS_CLASS>
    # kubepods_kubepods-<QOS_CLASS>
    name=${clean_id//-/_}
    name=${name/#kubepods_kubepods/kubepods}
  elif [[ $clean_id =~ .+pod[a-f0-9_-]+_(docker|crio|cri-containerd)-([a-f0-9]+)$ ]]; then
    # ...pod<POD_UID>_(docker|crio|cri-containerd)-<CONTAINER_ID> (POD_UID w/ "_")
    cntr_id=${BASH_REMATCH[2]}
  elif [[ $clean_id =~ .+pod[a-f0-9-]+_([a-f0-9]+)$ ]]; then
    # ...pod<POD_UID>_<CONTAINER_ID>
    cntr_id=${BASH_REMATCH[1]}
  elif [[ $clean_id =~ .+pod([a-f0-9_-]+)$ ]]; then
    # ...pod<POD_UID> (POD_UID w/ and w/o "_")
    pod_uid=${BASH_REMATCH[1]}
    pod_uid=${pod_uid//_/-}
  fi
  # Root/QoS-level cgroups need no API lookup.
  if [ -n "$name" ]; then
    echo "$name"
    return 0
  fi
  if [ -z "$pod_uid" ] && [ -z "$cntr_id" ]; then
    warning "${fn}: can't extract pod_uid or container_id from the cgroup '$id'."
    return 3
  fi
  [ -n "$pod_uid" ] && info "${fn}: cgroup '$id' is a pod(uid:$pod_uid)"
  [ -n "$cntr_id" ] && info "${fn}: cgroup '$id' is a container(id:$cntr_id)"
  # Pause (sandbox) containers are deliberately skipped.
  if [ -n "$cntr_id" ] && k8s_is_pause_container "$cgroup_path"; then
    return 3
  fi
  if ! command -v jq > /dev/null 2>&1; then
    warning "${fn}: 'jq' command not available."
    return 1
  fi
  # Cache files: pod metadata is expensive to fetch, so results are reused
  # across invocations of this script.
  local tmp_kube_cluster_name="${TMPDIR:-"/tmp"}/netdata-cgroups-k8s-cluster-name"
  local tmp_kube_system_ns_uid_file="${TMPDIR:-"/tmp"}/netdata-cgroups-kubesystem-uid"
  local tmp_kube_containers_file="${TMPDIR:-"/tmp"}/netdata-cgroups-containers"
  local kube_cluster_name
  local kube_system_uid
  local labels
  if [ -n "$cntr_id" ] &&
    [ -f "$tmp_kube_cluster_name" ] &&
    [ -f "$tmp_kube_system_ns_uid_file" ] &&
    [ -f "$tmp_kube_containers_file" ] &&
    labels=$(grep "$cntr_id" "$tmp_kube_containers_file" 2>/dev/null); then
    # Cache hit: everything needed is available locally.
    IFS= read -r kube_system_uid 2>/dev/null <"$tmp_kube_system_ns_uid_file"
    IFS= read -r kube_cluster_name 2>/dev/null <"$tmp_kube_cluster_name"
  else
    IFS= read -r kube_system_uid 2>/dev/null <"$tmp_kube_system_ns_uid_file"
    IFS= read -r kube_cluster_name 2>/dev/null <"$tmp_kube_cluster_name"
    # Cluster name is only resolvable on GKE; otherwise fall back to "unknown".
    [ -z "$kube_cluster_name" ] && ! kube_cluster_name=$(k8s_gcp_get_cluster_name) && kube_cluster_name="unknown"
    local kube_system_ns
    local pods
    if [ -n "${KUBERNETES_SERVICE_HOST}" ] && [ -n "${KUBERNETES_PORT_443_TCP_PORT}" ]; then
      # Running inside the cluster: talk to the API server using the
      # service account token.
      local token header host url
      token="$(</var/run/secrets/kubernetes.io/serviceaccount/token)"
      header="Authorization: Bearer $token"
      host="$KUBERNETES_SERVICE_HOST:$KUBERNETES_PORT_443_TCP_PORT"
      # The kube-system namespace uid doubles as a stable cluster id.
      if [ -z "$kube_system_uid" ]; then
        url="https://$host/api/v1/namespaces/kube-system"
        # FIX: check HTTP response code
        if ! kube_system_ns=$(curl --fail -sSk -H "$header" "$url" 2>&1); then
          warning "${fn}: error on curl '${url}': ${kube_system_ns}."
        fi
      fi
      local url
      if [ -n "${USE_KUBELET_FOR_PODS_METADATA}" ]; then
        # Query the node-local kubelet instead of the API server.
        url="${KUBELET_URL:-https://localhost:10250}/pods"
      else
        url="https://$host/api/v1/pods"
        # Restrict the answer to this node's pods when the node name is known.
        [ -n "$MY_NODE_NAME" ] && url+="?fieldSelector=spec.nodeName==$MY_NODE_NAME"
      fi
      # FIX: check HTTP response code
      if ! pods=$(curl --fail -sSk -H "$header" "$url" 2>&1); then
        warning "${fn}: error on curl '${url}': ${pods}."
        return 1
      fi
    elif ps -C kubelet >/dev/null 2>&1 && command -v kubectl >/dev/null 2>&1; then
      # On a node but outside the cluster: fall back to kubectl.
      # NOTE(review): KUBE_CONFIG is used by this first kubectl call BEFORE the
      # default is assigned two statements below - confirm whether the default
      # should apply here as well.
      if [ -z "$kube_system_uid" ]; then
        if ! kube_system_ns=$(kubectl --kubeconfig="$KUBE_CONFIG" get namespaces kube-system -o json 2>&1); then
          warning "${fn}: error on 'kubectl': ${kube_system_ns}."
        fi
      fi
      [[ -z ${KUBE_CONFIG+x} ]] && KUBE_CONFIG="/etc/kubernetes/admin.conf"
      if ! pods=$(kubectl --kubeconfig="$KUBE_CONFIG" get pods --all-namespaces -o json 2>&1); then
        warning "${fn}: error on 'kubectl': ${pods}."
        return 1
      fi
    else
      warning "${fn}: not inside the k8s cluster and 'kubectl' command not available."
      return 1
    fi
    if [ -n "$kube_system_ns" ] && ! kube_system_uid=$(jq -r '.metadata.uid' <<<"$kube_system_ns" 2>&1); then
      warning "${fn}: error on 'jq' parse kube_system_ns: ${kube_system_uid}."
    fi
    # Build one 'key="value",...' line per container out of the pod list.
    local jq_filter
    jq_filter+='.items[] | "'
    jq_filter+='namespace=\"\(.metadata.namespace)\",'
    jq_filter+='pod_name=\"\(.metadata.name)\",'
    jq_filter+='pod_uid=\"\(.metadata.uid)\",'
    #jq_filter+='\(.metadata.labels | to_entries | map("pod_label_"+.key+"=\""+.value+"\"") | join(",") | if length > 0 then .+"," else . end)'
    jq_filter+='\((.metadata.ownerReferences[]? | select(.controller==true) | "controller_kind=\""+.kind+"\",controller_name=\""+.name+"\",") // "")'
    jq_filter+='node_name=\"\(.spec.nodeName)\",'
    jq_filter+='" + '
    jq_filter+='(.status.containerStatuses[]? | "'
    jq_filter+='container_name=\"\(.name)\",'
    jq_filter+='container_id=\"\(.containerID)\"'
    jq_filter+='") | '
    jq_filter+='sub("(docker|cri-o|containerd)://";"")' # containerID: docker://a346da9bc0e3eaba6b295f64ac16e02f2190db2cef570835706a9e7a36e2c722
    local containers
    if ! containers=$(jq -r "${jq_filter}" <<<"$pods" 2>&1); then
      warning "${fn}: error on 'jq' parse pods: ${containers}."
      return 1
    fi
    # Refresh the caches for subsequent invocations (best effort).
    [ -n "$kube_cluster_name" ] && echo "$kube_cluster_name" >"$tmp_kube_cluster_name" 2>/dev/null
    [ -n "$kube_system_ns" ] && [ -n "$kube_system_uid" ] && echo "$kube_system_uid" >"$tmp_kube_system_ns_uid_file" 2>/dev/null
    echo "$containers" >"$tmp_kube_containers_file" 2>/dev/null
  fi
  # Derive the QoS class from the cgroup id itself.
  local qos_class
  if [[ $clean_id =~ .+(besteffort|burstable) ]]; then
    qos_class="${BASH_REMATCH[1]}"
  else
    qos_class="guaranteed"
  fi
  # available labels:
  # namespace, pod_name, pod_uid, container_name, container_id, node_name
  if [ -n "$cntr_id" ]; then
    if [ -n "$labels" ] || labels=$(grep "$cntr_id" <<< "$containers" 2> /dev/null); then
      labels+=',kind="container"'
      labels+=",qos_class=\"$qos_class\""
      [ -n "$kube_system_uid" ] && [ "$kube_system_uid" != "null" ] && labels+=",cluster_id=\"$kube_system_uid\""
      [ -n "$kube_cluster_name" ] && [ "$kube_cluster_name" != "unknown" ] && labels+=",cluster_name=\"$kube_cluster_name\""
      name="cntr"
      name+="_$(get_lbl_val "$labels" namespace)"
      name+="_$(get_lbl_val "$labels" pod_name)"
      name+="_$(get_lbl_val "$labels" container_name)"
      labels=$(remove_lbl "$labels" "container_id")
      labels=$(remove_lbl "$labels" "pod_uid")
      labels=$(add_lbl_prefix "$labels" "k8s_")
      name+=" $labels"
    else
      # Container not found in the (possibly stale) metadata: ask to retry.
      return 2
    fi
  elif [ -n "$pod_uid" ]; then
    if labels=$(grep "$pod_uid" -m 1 <<< "$containers" 2> /dev/null); then
      # Drop per-container labels: this is a pod-level cgroup.
      labels="${labels%%,container_*}"
      labels+=',kind="pod"'
      labels+=",qos_class=\"$qos_class\""
      [ -n "$kube_system_uid" ] && [ "$kube_system_uid" != "null" ] && labels+=",cluster_id=\"$kube_system_uid\""
      [ -n "$kube_cluster_name" ] && [ "$kube_cluster_name" != "unknown" ] && labels+=",cluster_name=\"$kube_cluster_name\""
      name="pod"
      name+="_$(get_lbl_val "$labels" namespace)"
      name+="_$(get_lbl_val "$labels" pod_name)"
      labels=$(remove_lbl "$labels" "pod_uid")
      labels=$(add_lbl_prefix "$labels" "k8s_")
      name+=" $labels"
    else
      # Pod not found in the (possibly stale) metadata: ask to retry.
      return 2
    fi
  fi
  # jq filter nonexistent field and nonexistent label value is 'null'
  if [[ $name =~ _null(_|$) ]]; then
    warning "${fn}: invalid name: $name (cgroup '$id')"
    if [ -n "${USE_KUBELET_FOR_PODS_METADATA}" ]; then
      # local data is cached and may not contain the correct id
      return 2
    fi
    return 1
  fi
  echo "$name"
  [ -n "$name" ]
  return
}
  381. function k8s_get_name() {
  382. local fn="${FUNCNAME[0]}"
  383. local cgroup_path="${1}"
  384. local id="${2}"
  385. local kubepod_name=""
  386. kubepod_name=$(k8s_get_kubepod_name "$cgroup_path" "$id")
  387. case "$?" in
  388. 0)
  389. kubepod_name="k8s_${kubepod_name}"
  390. local name labels
  391. name=${kubepod_name%% *}
  392. labels=${kubepod_name#* }
  393. if [ "$name" != "$labels" ]; then
  394. info "${fn}: cgroup '${id}' has chart name '${name}', labels '${labels}"
  395. NAME="$name"
  396. LABELS="$labels"
  397. else
  398. info "${fn}: cgroup '${id}' has chart name '${NAME}'"
  399. NAME="$name"
  400. fi
  401. EXIT_CODE=$EXIT_SUCCESS
  402. ;;
  403. 1)
  404. NAME="k8s_${id}"
  405. warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${NAME} and enabling it."
  406. EXIT_CODE=$EXIT_SUCCESS
  407. ;;
  408. 2)
  409. NAME="k8s_${id}"
  410. warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${NAME} and asking for retry."
  411. EXIT_CODE=$EXIT_RETRY
  412. ;;
  413. *)
  414. NAME="k8s_${id}"
  415. warning "${fn}: cannot find the name of cgroup with id '${id}'. Setting name to ${NAME} and disabling it."
  416. EXIT_CODE=$EXIT_DISABLE
  417. ;;
  418. esac
  419. }
  420. function docker_get_name() {
  421. local id="${1}"
  422. # See https://github.com/netdata/netdata/pull/13523 for details
  423. if command -v snap >/dev/null 2>&1 && snap list docker >/dev/null 2>&1; then
  424. docker_like_get_name_api DOCKER_HOST "${id}"
  425. elif hash docker 2> /dev/null; then
  426. docker_like_get_name_command docker "${id}"
  427. else
  428. docker_like_get_name_api DOCKER_HOST "${id}" || docker_like_get_name_command podman "${id}"
  429. fi
  430. if [ -z "${NAME}" ]; then
  431. warning "cannot find the name of docker container '${id}'"
  432. EXIT_CODE=$EXIT_RETRY
  433. NAME="${id:0:12}"
  434. else
  435. info "docker container '${id}' is named '${NAME}'"
  436. fi
  437. }
  438. function docker_validate_id() {
  439. local id="${1}"
  440. if [ -n "${id}" ] && { [ ${#id} -eq 64 ] || [ ${#id} -eq 12 ]; }; then
  441. docker_get_name "${id}"
  442. else
  443. error "a docker id cannot be extracted from docker cgroup '${CGROUP}'."
  444. fi
  445. }
  446. function podman_get_name() {
  447. local id="${1}"
  448. # for Podman, prefer using the API if we can, as netdata will not normally have access
  449. # to other users' containers, so they will not be visible when running `podman ps`
  450. docker_like_get_name_api PODMAN_HOST "${id}" || docker_like_get_name_command podman "${id}"
  451. if [ -z "${NAME}" ]; then
  452. warning "cannot find the name of podman container '${id}'"
  453. EXIT_CODE=$EXIT_RETRY
  454. NAME="${id:0:12}"
  455. else
  456. info "podman container '${id}' is named '${NAME}'"
  457. fi
  458. }
  459. function podman_validate_id() {
  460. local id="${1}"
  461. if [ -n "${id}" ] && [ ${#id} -eq 64 ]; then
  462. podman_get_name "${id}"
  463. else
  464. error "a podman id cannot be extracted from docker cgroup '${CGROUP}'."
  465. fi
  466. }
  467. # -----------------------------------------------------------------------------
  468. DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
  469. PODMAN_HOST="${PODMAN_HOST:=/run/podman/podman.sock}"
  470. CGROUP_PATH="${1}" # the path as it is (e.g. '/docker/efcf4c409')
  471. CGROUP="${2}" # the modified path (e.g. 'docker_efcf4c409')
  472. EXIT_SUCCESS=0
  473. EXIT_RETRY=2
  474. EXIT_DISABLE=3
  475. EXIT_CODE=$EXIT_SUCCESS
  476. NAME=
  477. LABELS=
  478. # -----------------------------------------------------------------------------
  479. if [ -z "${CGROUP}" ]; then
  480. fatal "called without a cgroup name. Nothing to do."
  481. fi
  482. if [ -z "${NAME}" ]; then
  483. if [[ ${CGROUP} =~ ^.*kubepods.* ]]; then
  484. k8s_get_name "${CGROUP_PATH}" "${CGROUP}"
  485. fi
  486. fi
# Fall back to pattern-matching the cgroup id against known container/VM
# runtimes; each branch extracts the runtime-specific id and resolves a name.
if [ -z "${NAME}" ]; then
  if [[ ${CGROUP} =~ ^.*docker[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
    # docker containers
    #shellcheck disable=SC1117
    DOCKERID="$(echo "${CGROUP}" | sed "s|^.*docker[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
    docker_validate_id "${DOCKERID}"
  elif [[ ${CGROUP} =~ ^.*ecs[-_/\.][a-fA-F0-9]+[-_\.]?.*$ ]]; then
    # ECS
    #shellcheck disable=SC1117
    DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ecs[-_/].*[-_/]\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
    docker_validate_id "${DOCKERID}"
  elif [[ ${CGROUP} =~ system.slice_containerd.service_cpuset_[a-fA-F0-9]+[-_\.]?.*$ ]]; then
    # docker containers under containerd
    #shellcheck disable=SC1117
    DOCKERID="$(echo "${CGROUP}" | sed "s|^.*ystem.slice_containerd.service_cpuset_\([a-fA-F0-9]\+\)[-_\.]\?.*$|\1|")"
    docker_validate_id "${DOCKERID}"
  elif [[ ${CGROUP} =~ ^.*libpod-[a-fA-F0-9]+.*$ ]]; then
    # Podman
    PODMANID="$(echo "${CGROUP}" | sed "s|^.*libpod-\([a-fA-F0-9]\+\).*$|\1|")"
    podman_validate_id "${PODMANID}"
  elif [[ ${CGROUP} =~ machine.slice[_/].*\.service ]]; then
    # systemd-nspawn
    NAME="$(echo "${CGROUP}" | sed 's/.*machine.slice[_\/]\(.*\)\.service/\1/g')"
  elif [[ ${CGROUP} =~ machine.slice_machine.*-lxc ]]; then
    # libvirtd / lxc containers
    # machine.slice machine-lxc/x2d969/x2dhubud0xians01.scope => lxc/hubud0xians01
    # machine.slice_machine-lxc/x2d969/x2dhubud0xians01.scope/libvirt_init.scope => lxc/hubud0xians01/libvirt_init
    NAME="lxc/$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-lxc//; s/[\/_]x2d[[:digit:]]*//; s/[\/_]x2d//g; s/\.scope//g')"
  elif [[ ${CGROUP} =~ machine.slice_machine.*-qemu ]]; then
    # libvirtd / qemu virtual machines
    # machine.slice_machine-qemu_x2d1_x2dopnsense.scope => qemu_opnsense
    NAME="qemu_$(echo "${CGROUP}" | sed 's/machine.slice_machine.*-qemu//; s/[\/_]x2d[[:digit:]]*//; s/[\/_]x2d//g; s/\.scope//g')"
  elif [[ ${CGROUP} =~ machine_.*\.libvirt-qemu ]]; then
    # libvirtd / qemu virtual machines
    NAME="qemu_$(echo "${CGROUP}" | sed 's/^machine_//; s/\.libvirt-qemu$//; s/-/_/;')"
  elif [[ ${CGROUP} =~ qemu.slice_([0-9]+).scope && -d /etc/pve ]]; then
    # Proxmox VMs
    # Read the VM name from the Proxmox per-VM config file.
    FILENAME="/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf"
    if [[ -f $FILENAME && -r $FILENAME ]]; then
      NAME="qemu_$(grep -e '^name: ' "/etc/pve/qemu-server/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*name\s*:\s*(.*)?$|\1|p')"
    else
      error "proxmox config file missing ${FILENAME} or netdata does not have read access.  Please ensure netdata is a member of www-data group."
    fi
  elif [[ ${CGROUP} =~ lxc_([0-9]+) && -d /etc/pve ]]; then
    # Proxmox Containers (LXC)
    # Read the container hostname from the Proxmox per-CT config file.
    FILENAME="/etc/pve/lxc/${BASH_REMATCH[1]}.conf"
    if [[ -f ${FILENAME} && -r ${FILENAME} ]]; then
      NAME=$(grep -e '^hostname: ' "/etc/pve/lxc/${BASH_REMATCH[1]}.conf" | head -1 | sed -rn 's|\s*hostname\s*:\s*(.*)?$|\1|p')
    else
      error "proxmox config file missing ${FILENAME} or netdata does not have read access.  Please ensure netdata is a member of www-data group."
    fi
  elif [[ ${CGROUP} =~ lxc.payload.* ]]; then
    # LXC 4.0
    NAME="$(echo "${CGROUP}" | sed 's/lxc\.payload\.\(.*\)/\1/g')"
  fi
  # Nothing matched: fall back to the raw cgroup id, capped at 100 characters.
  [ -z "${NAME}" ] && NAME="${CGROUP}"
  [ ${#NAME} -gt 100 ] && NAME="${NAME:0:100}"
fi
# Chart names cannot contain spaces; replace them with underscores.
NAME="${NAME// /_}"
info "cgroup '${CGROUP}' is called '${NAME}', labels '${LABELS}'"
# Emit "<name> [labels]" on stdout for the cgroups plugin to consume.
if [ -n "$LABELS" ]; then
  echo "${NAME} ${LABELS}"
else
  echo "${NAME}"
fi
# EXIT_SUCCESS / EXIT_RETRY / EXIT_DISABLE, as set by the resolvers above.
exit ${EXIT_CODE}