# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 0.000153025
go_gc_duration_seconds{quantile="0.25"} 0.000331136
go_gc_duration_seconds{quantile="0.5"} 0.000374854
go_gc_duration_seconds{quantile="0.75"} 0.000435137
go_gc_duration_seconds{quantile="1"} 0.005733993
go_gc_duration_seconds_sum 1.255256347
go_gc_duration_seconds_count 2800
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 367
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.15.2"} 1
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes 1.04656604e+09
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
# TYPE go_memstats_alloc_bytes_total counter
go_memstats_alloc_bytes_total 5.8061197716e+11
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
# TYPE go_memstats_buck_hash_sys_bytes gauge
go_memstats_buck_hash_sys_bytes 2.868518e+06
# HELP go_memstats_frees_total Total number of frees.
# TYPE go_memstats_frees_total counter
go_memstats_frees_total 3.522421189e+09
# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
# TYPE go_memstats_gc_cpu_fraction gauge
go_memstats_gc_cpu_fraction 0.001122391805759586
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
# TYPE go_memstats_gc_sys_bytes gauge
go_memstats_gc_sys_bytes 7.9063816e+07
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
# TYPE go_memstats_heap_alloc_bytes gauge
go_memstats_heap_alloc_bytes 1.04656604e+09
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
# TYPE go_memstats_heap_idle_bytes gauge
go_memstats_heap_idle_bytes 6.04454912e+08
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
# TYPE go_memstats_heap_inuse_bytes gauge
go_memstats_heap_inuse_bytes 1.130446848e+09
# HELP go_memstats_heap_objects Number of allocated objects.
# TYPE go_memstats_heap_objects gauge
go_memstats_heap_objects 8.052075e+06
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
# TYPE go_memstats_heap_released_bytes gauge
go_memstats_heap_released_bytes 5.1027968e+08
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
# TYPE go_memstats_heap_sys_bytes gauge
go_memstats_heap_sys_bytes 1.73490176e+09
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
go_memstats_last_gc_time_seconds 1.7697485371060493e+09
# HELP go_memstats_lookups_total Total number of pointer lookups.
# TYPE go_memstats_lookups_total counter
go_memstats_lookups_total 0
# HELP go_memstats_mallocs_total Total number of mallocs.
# TYPE go_memstats_mallocs_total counter
go_memstats_mallocs_total 3.530473264e+09
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
# TYPE go_memstats_mcache_inuse_bytes gauge
go_memstats_mcache_inuse_bytes 13888
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
# TYPE go_memstats_mcache_sys_bytes gauge
go_memstats_mcache_sys_bytes 16384
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
# TYPE go_memstats_mspan_inuse_bytes gauge
go_memstats_mspan_inuse_bytes 1.2732592e+07
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
# TYPE go_memstats_mspan_sys_bytes gauge
go_memstats_mspan_sys_bytes 2.5411584e+07
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
# TYPE go_memstats_next_gc_bytes gauge
go_memstats_next_gc_bytes 1.542288176e+09
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
# TYPE go_memstats_other_sys_bytes gauge
go_memstats_other_sys_bytes 3.858842e+06
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
# TYPE go_memstats_stack_inuse_bytes gauge
go_memstats_stack_inuse_bytes 9.928704e+06
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
# TYPE go_memstats_stack_sys_bytes gauge
go_memstats_stack_sys_bytes 9.928704e+06
# HELP go_memstats_sys_bytes Number of bytes obtained from system.
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes 1.856049608e+09
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
go_threads 14
# HELP net_conntrack_dialer_conn_attempted_total Total number of connections attempted by the given dialer a given name.
# TYPE net_conntrack_dialer_conn_attempted_total counter
net_conntrack_dialer_conn_attempted_total{dialer_name="alertmanager"} 2
net_conntrack_dialer_conn_attempted_total{dialer_name="default"} 0
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-apiservers"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-nodes"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-nodes-cadvisor"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-pods"} 3646
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-pods-slow"} 0
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-service-endpoints"} 15
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-service-endpoints-slow"} 0
net_conntrack_dialer_conn_attempted_total{dialer_name="kubernetes-services"} 0
net_conntrack_dialer_conn_attempted_total{dialer_name="opsmx_ssd_metrics"} 120713
net_conntrack_dialer_conn_attempted_total{dialer_name="otel-collector-redica-instance"} 76
net_conntrack_dialer_conn_attempted_total{dialer_name="prometheus"} 1
net_conntrack_dialer_conn_attempted_total{dialer_name="prometheus-pushgateway"} 0
# HELP net_conntrack_dialer_conn_closed_total Total number of connections closed which originated from the dialer of a given name.
# TYPE net_conntrack_dialer_conn_closed_total counter
net_conntrack_dialer_conn_closed_total{dialer_name="alertmanager"} 1
net_conntrack_dialer_conn_closed_total{dialer_name="default"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-apiservers"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-nodes"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-nodes-cadvisor"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-pods"} 3637
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-pods-slow"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-service-endpoints"} 3
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-service-endpoints-slow"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="kubernetes-services"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="opsmx_ssd_metrics"} 112150
net_conntrack_dialer_conn_closed_total{dialer_name="otel-collector-redica-instance"} 75
net_conntrack_dialer_conn_closed_total{dialer_name="prometheus"} 0
net_conntrack_dialer_conn_closed_total{dialer_name="prometheus-pushgateway"} 0
# HELP net_conntrack_dialer_conn_established_total Total number of connections successfully established by the given dialer a given name.
# TYPE net_conntrack_dialer_conn_established_total counter
net_conntrack_dialer_conn_established_total{dialer_name="alertmanager"} 2
net_conntrack_dialer_conn_established_total{dialer_name="default"} 0
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-apiservers"} 1
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-nodes"} 1
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-nodes-cadvisor"} 1
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-pods"} 3645
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-pods-slow"} 0
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-service-endpoints"} 15
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-service-endpoints-slow"} 0
net_conntrack_dialer_conn_established_total{dialer_name="kubernetes-services"} 0
net_conntrack_dialer_conn_established_total{dialer_name="opsmx_ssd_metrics"} 112190
net_conntrack_dialer_conn_established_total{dialer_name="otel-collector-redica-instance"} 76
net_conntrack_dialer_conn_established_total{dialer_name="prometheus"} 1
net_conntrack_dialer_conn_established_total{dialer_name="prometheus-pushgateway"} 0
# HELP net_conntrack_dialer_conn_failed_total Total number of connections failed to dial by the dialer a given name.
# TYPE net_conntrack_dialer_conn_failed_total counter
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="alertmanager",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="default",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-apiservers",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-apiservers",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-apiservers",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-apiservers",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes-cadvisor",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes-cadvisor",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes-cadvisor",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-nodes-cadvisor",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-pods",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-pods",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-pods",reason="timeout"} 1
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-pods",reason="unknown"} 1
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-pods-slow",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-pods-slow",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-pods-slow",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-pods-slow",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-service-endpoints",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-service-endpoints",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-service-endpoints",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-service-endpoints",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-service-endpoints-slow",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-service-endpoints-slow",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-service-endpoints-slow",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-service-endpoints-slow",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-services",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-services",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-services",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="kubernetes-services",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="opsmx_ssd_metrics",reason="refused"} 8521
net_conntrack_dialer_conn_failed_total{dialer_name="opsmx_ssd_metrics",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="opsmx_ssd_metrics",reason="timeout"} 2
net_conntrack_dialer_conn_failed_total{dialer_name="opsmx_ssd_metrics",reason="unknown"} 8523
net_conntrack_dialer_conn_failed_total{dialer_name="otel-collector-redica-instance",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="otel-collector-redica-instance",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="otel-collector-redica-instance",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="otel-collector-redica-instance",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus",reason="unknown"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus-pushgateway",reason="refused"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus-pushgateway",reason="resolution"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus-pushgateway",reason="timeout"} 0
net_conntrack_dialer_conn_failed_total{dialer_name="prometheus-pushgateway",reason="unknown"} 0
# HELP net_conntrack_listener_conn_accepted_total Total number of connections opened to the listener of a given name.
# TYPE net_conntrack_listener_conn_accepted_total counter
net_conntrack_listener_conn_accepted_total{listener_name="http"} 59053
# HELP net_conntrack_listener_conn_closed_total Total number of connections closed that were made to the listener of a given name.
# TYPE net_conntrack_listener_conn_closed_total counter
net_conntrack_listener_conn_closed_total{listener_name="http"} 59050
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 14217.36
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1.048576e+06
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 97
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 1.762111488e+09
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.76953059767e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 4.179976192e+09
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
process_virtual_memory_max_bytes -1
# HELP prometheus_api_remote_read_queries The current number of remote read queries being executed or waiting.
# TYPE prometheus_api_remote_read_queries gauge
prometheus_api_remote_read_queries 0
# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built.
# TYPE prometheus_build_info gauge
prometheus_build_info{branch="HEAD",goversion="go1.15.2",revision="e83ef207b6c2398919b69cd87d2693cfc2fb4127",version="2.21.0"} 1
# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge
prometheus_config_last_reload_success_timestamp_seconds 1.769602246073451e+09
# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful.
# TYPE prometheus_config_last_reload_successful gauge
prometheus_config_last_reload_successful 1
# HELP prometheus_engine_queries The current number of queries being executed or waiting.
# TYPE prometheus_engine_queries gauge
prometheus_engine_queries 0
# HELP prometheus_engine_queries_concurrent_max The max number of concurrent queries.
# TYPE prometheus_engine_queries_concurrent_max gauge
prometheus_engine_queries_concurrent_max 20
# HELP prometheus_engine_query_duration_seconds Query timings
# TYPE prometheus_engine_query_duration_seconds summary
prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.5"} 0.00050271
prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.9"} 0.007894246
prometheus_engine_query_duration_seconds{slice="inner_eval",quantile="0.99"} 0.009130462
prometheus_engine_query_duration_seconds_sum{slice="inner_eval"} 65.28584907000041
prometheus_engine_query_duration_seconds_count{slice="inner_eval"} 25572
prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.5"} 0.00021285
prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.9"} 0.002714157
prometheus_engine_query_duration_seconds{slice="prepare_time",quantile="0.99"} 0.004152343
prometheus_engine_query_duration_seconds_sum{slice="prepare_time"} 21.58000196899972
prometheus_engine_query_duration_seconds_count{slice="prepare_time"} 25572
prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.5"} 5.4036e-05
prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.9"} 7.3088e-05
prometheus_engine_query_duration_seconds{slice="queue_time",quantile="0.99"} 0.000129753
prometheus_engine_query_duration_seconds_sum{slice="queue_time"} 1.408751559000001
prometheus_engine_query_duration_seconds_count{slice="queue_time"} 25572
prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.5"} NaN
prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.9"} NaN
prometheus_engine_query_duration_seconds{slice="result_sort",quantile="0.99"} NaN
prometheus_engine_query_duration_seconds_sum{slice="result_sort"} 0.0029595049999999994
prometheus_engine_query_duration_seconds_count{slice="result_sort"} 128
# HELP prometheus_engine_query_log_enabled State of the query log.
# TYPE prometheus_engine_query_log_enabled gauge
prometheus_engine_query_log_enabled 0
# HELP prometheus_engine_query_log_failures_total The number of query log failures.
# TYPE prometheus_engine_query_log_failures_total counter
prometheus_engine_query_log_failures_total 0
# HELP prometheus_http_request_duration_seconds Histogram of latencies for HTTP requests.
# TYPE prometheus_http_request_duration_seconds histogram
prometheus_http_request_duration_seconds_bucket{handler="/",le="0.1"} 35
prometheus_http_request_duration_seconds_bucket{handler="/",le="0.2"} 35
prometheus_http_request_duration_seconds_bucket{handler="/",le="0.4"} 35
prometheus_http_request_duration_seconds_bucket{handler="/",le="1"} 35
prometheus_http_request_duration_seconds_bucket{handler="/",le="3"} 35
prometheus_http_request_duration_seconds_bucket{handler="/",le="8"} 35
prometheus_http_request_duration_seconds_bucket{handler="/",le="20"} 35
prometheus_http_request_duration_seconds_bucket{handler="/",le="60"} 35
prometheus_http_request_duration_seconds_bucket{handler="/",le="120"} 35
prometheus_http_request_duration_seconds_bucket{handler="/",le="+Inf"} 35
prometheus_http_request_duration_seconds_sum{handler="/"} 0.0025013609999999997
prometheus_http_request_duration_seconds_count{handler="/"} 35
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.1"} 14535
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.2"} 14535
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="0.4"} 14535
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="1"} 14535
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="3"} 14535
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="8"} 14535
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="20"} 14535
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="60"} 14535
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="120"} 14535
prometheus_http_request_duration_seconds_bucket{handler="/-/healthy",le="+Inf"} 14535
prometheus_http_request_duration_seconds_sum{handler="/-/healthy"} 0.253867226
prometheus_http_request_duration_seconds_count{handler="/-/healthy"} 14535
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.1"} 43606
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.2"} 43606
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="0.4"} 43606
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="1"} 43606
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="3"} 43606
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="8"} 43606
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="20"} 43606
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="60"} 43606
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="120"} 43606
prometheus_http_request_duration_seconds_bucket{handler="/-/ready",le="+Inf"} 43606
prometheus_http_request_duration_seconds_sum{handler="/-/ready"} 0.7510753080000043
prometheus_http_request_duration_seconds_count{handler="/-/ready"} 43606
prometheus_http_request_duration_seconds_bucket{handler="/-/reload",le="0.1"} 1
prometheus_http_request_duration_seconds_bucket{handler="/-/reload",le="0.2"} 1
prometheus_http_request_duration_seconds_bucket{handler="/-/reload",le="0.4"} 1
prometheus_http_request_duration_seconds_bucket{handler="/-/reload",le="1"} 1
prometheus_http_request_duration_seconds_bucket{handler="/-/reload",le="3"} 1
prometheus_http_request_duration_seconds_bucket{handler="/-/reload",le="8"} 1
prometheus_http_request_duration_seconds_bucket{handler="/-/reload",le="20"} 1
prometheus_http_request_duration_seconds_bucket{handler="/-/reload",le="60"} 1
prometheus_http_request_duration_seconds_bucket{handler="/-/reload",le="120"} 1 prometheus_http_request_duration_seconds_bucket{handler="/-/reload",le="+Inf"} 1 prometheus_http_request_duration_seconds_sum{handler="/-/reload"} 0.070045698 prometheus_http_request_duration_seconds_count{handler="/-/reload"} 1 prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.1"} 2 prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.2"} 2 prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="0.4"} 2 prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="1"} 2 prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="3"} 2 prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="8"} 2 prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="20"} 2 prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="60"} 2 prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="120"} 2 prometheus_http_request_duration_seconds_bucket{handler="/alerts",le="+Inf"} 2 prometheus_http_request_duration_seconds_sum{handler="/alerts"} 0.017021176 prometheus_http_request_duration_seconds_count{handler="/alerts"} 2 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.1"} 4 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.2"} 4 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="0.4"} 4 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="1"} 4 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="3"} 4 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="8"} 4 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="20"} 4 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="60"} 4 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="120"} 4 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 4 prometheus_http_request_duration_seconds_sum{handler="/api/v1/label/:name/values"} 0.042473822999999994 prometheus_http_request_duration_seconds_count{handler="/api/v1/label/:name/values"} 4 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.1"} 5 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.2"} 5 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="0.4"} 5 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="1"} 5 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="3"} 5 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="8"} 5 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="20"} 5 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="60"} 5 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="120"} 5 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query",le="+Inf"} 5 prometheus_http_request_duration_seconds_sum{handler="/api/v1/query"} 0.028498053 prometheus_http_request_duration_seconds_count{handler="/api/v1/query"} 5 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.1"} 128 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.2"} 128 
prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="0.4"} 128 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="1"} 128 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="3"} 128 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="8"} 128 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="20"} 128 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="60"} 128 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="120"} 128 prometheus_http_request_duration_seconds_bucket{handler="/api/v1/query_range",le="+Inf"} 128 prometheus_http_request_duration_seconds_sum{handler="/api/v1/query_range"} 1.018730458 prometheus_http_request_duration_seconds_count{handler="/api/v1/query_range"} 128 prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.1"} 3 prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.2"} 3 prometheus_http_request_duration_seconds_bucket{handler="/config",le="0.4"} 3 prometheus_http_request_duration_seconds_bucket{handler="/config",le="1"} 3 prometheus_http_request_duration_seconds_bucket{handler="/config",le="3"} 3 prometheus_http_request_duration_seconds_bucket{handler="/config",le="8"} 3 prometheus_http_request_duration_seconds_bucket{handler="/config",le="20"} 3 prometheus_http_request_duration_seconds_bucket{handler="/config",le="60"} 3 prometheus_http_request_duration_seconds_bucket{handler="/config",le="120"} 3 prometheus_http_request_duration_seconds_bucket{handler="/config",le="+Inf"} 3 prometheus_http_request_duration_seconds_sum{handler="/config"} 0.020543 prometheus_http_request_duration_seconds_count{handler="/config"} 3 prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.1"} 1 prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.2"} 1 prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="0.4"} 1 prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="1"} 1 prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="3"} 1 prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="8"} 1 prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="20"} 1 prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="60"} 1 prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="120"} 1 prometheus_http_request_duration_seconds_bucket{handler="/debug/*subpath",le="+Inf"} 1 prometheus_http_request_duration_seconds_sum{handler="/debug/*subpath"} 0.000107354 prometheus_http_request_duration_seconds_count{handler="/debug/*subpath"} 1 prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.1"} 3 prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.2"} 3 prometheus_http_request_duration_seconds_bucket{handler="/flags",le="0.4"} 3 prometheus_http_request_duration_seconds_bucket{handler="/flags",le="1"} 3 prometheus_http_request_duration_seconds_bucket{handler="/flags",le="3"} 3 prometheus_http_request_duration_seconds_bucket{handler="/flags",le="8"} 3 prometheus_http_request_duration_seconds_bucket{handler="/flags",le="20"} 3 prometheus_http_request_duration_seconds_bucket{handler="/flags",le="60"} 3 prometheus_http_request_duration_seconds_bucket{handler="/flags",le="120"} 3 
prometheus_http_request_duration_seconds_bucket{handler="/flags",le="+Inf"} 3 prometheus_http_request_duration_seconds_sum{handler="/flags"} 0.005377598999999999 prometheus_http_request_duration_seconds_count{handler="/flags"} 3 prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.1"} 38 prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.2"} 38 prometheus_http_request_duration_seconds_bucket{handler="/graph",le="0.4"} 38 prometheus_http_request_duration_seconds_bucket{handler="/graph",le="1"} 38 prometheus_http_request_duration_seconds_bucket{handler="/graph",le="3"} 38 prometheus_http_request_duration_seconds_bucket{handler="/graph",le="8"} 38 prometheus_http_request_duration_seconds_bucket{handler="/graph",le="20"} 38 prometheus_http_request_duration_seconds_bucket{handler="/graph",le="60"} 38 prometheus_http_request_duration_seconds_bucket{handler="/graph",le="120"} 38 prometheus_http_request_duration_seconds_bucket{handler="/graph",le="+Inf"} 38 prometheus_http_request_duration_seconds_sum{handler="/graph"} 0.090049831 prometheus_http_request_duration_seconds_count{handler="/graph"} 38 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.1"} 3634 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.2"} 3634 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.4"} 3634 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="1"} 3634 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="3"} 3634 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="8"} 3634 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="20"} 3634 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="60"} 3634 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="120"} 3634 prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="+Inf"} 3634 prometheus_http_request_duration_seconds_sum{handler="/metrics"} 26.100980509999996 prometheus_http_request_duration_seconds_count{handler="/metrics"} 3634 prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.1"} 3 prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.2"} 3 prometheus_http_request_duration_seconds_bucket{handler="/rules",le="0.4"} 3 prometheus_http_request_duration_seconds_bucket{handler="/rules",le="1"} 3 prometheus_http_request_duration_seconds_bucket{handler="/rules",le="3"} 3 prometheus_http_request_duration_seconds_bucket{handler="/rules",le="8"} 3 prometheus_http_request_duration_seconds_bucket{handler="/rules",le="20"} 3 prometheus_http_request_duration_seconds_bucket{handler="/rules",le="60"} 3 prometheus_http_request_duration_seconds_bucket{handler="/rules",le="120"} 3 prometheus_http_request_duration_seconds_bucket{handler="/rules",le="+Inf"} 3 prometheus_http_request_duration_seconds_sum{handler="/rules"} 0.031983938 prometheus_http_request_duration_seconds_count{handler="/rules"} 3 prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.1"} 0 prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.2"} 0 prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="0.4"} 1 prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="1"} 2 prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="3"} 2 prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="8"} 2 
prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="20"} 2 prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="60"} 2 prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="120"} 2 prometheus_http_request_duration_seconds_bucket{handler="/service-discovery",le="+Inf"} 2 prometheus_http_request_duration_seconds_sum{handler="/service-discovery"} 0.7340854729999999 prometheus_http_request_duration_seconds_count{handler="/service-discovery"} 2 prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="0.1"} 163 prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="0.2"} 163 prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="0.4"} 163 prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="1"} 163 prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="3"} 163 prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="8"} 163 prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="20"} 163 prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="60"} 163 prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="120"} 163 prometheus_http_request_duration_seconds_bucket{handler="/static/*filepath",le="+Inf"} 163 prometheus_http_request_duration_seconds_sum{handler="/static/*filepath"} 0.1725231150000001 prometheus_http_request_duration_seconds_count{handler="/static/*filepath"} 163 prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.1"} 3 prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.2"} 3 prometheus_http_request_duration_seconds_bucket{handler="/status",le="0.4"} 3 prometheus_http_request_duration_seconds_bucket{handler="/status",le="1"} 3 prometheus_http_request_duration_seconds_bucket{handler="/status",le="3"} 3 prometheus_http_request_duration_seconds_bucket{handler="/status",le="8"} 3 prometheus_http_request_duration_seconds_bucket{handler="/status",le="20"} 3 prometheus_http_request_duration_seconds_bucket{handler="/status",le="60"} 3 prometheus_http_request_duration_seconds_bucket{handler="/status",le="120"} 3 prometheus_http_request_duration_seconds_bucket{handler="/status",le="+Inf"} 3 prometheus_http_request_duration_seconds_sum{handler="/status"} 0.10734580299999999 prometheus_http_request_duration_seconds_count{handler="/status"} 3 prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.1"} 3 prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.2"} 4 prometheus_http_request_duration_seconds_bucket{handler="/targets",le="0.4"} 5 prometheus_http_request_duration_seconds_bucket{handler="/targets",le="1"} 5 prometheus_http_request_duration_seconds_bucket{handler="/targets",le="3"} 5 prometheus_http_request_duration_seconds_bucket{handler="/targets",le="8"} 5 prometheus_http_request_duration_seconds_bucket{handler="/targets",le="20"} 5 prometheus_http_request_duration_seconds_bucket{handler="/targets",le="60"} 5 prometheus_http_request_duration_seconds_bucket{handler="/targets",le="120"} 5 prometheus_http_request_duration_seconds_bucket{handler="/targets",le="+Inf"} 5 prometheus_http_request_duration_seconds_sum{handler="/targets"} 0.591396736 prometheus_http_request_duration_seconds_count{handler="/targets"} 5 # HELP prometheus_http_requests_total Counter of HTTP requests. 
# TYPE prometheus_http_requests_total counter
prometheus_http_requests_total{code="200",handler="/-/healthy"} 14535
prometheus_http_requests_total{code="200",handler="/-/ready"} 43606
prometheus_http_requests_total{code="200",handler="/-/reload"} 1
prometheus_http_requests_total{code="200",handler="/alerts"} 2
prometheus_http_requests_total{code="200",handler="/api/v1/label/:name/values"} 4
prometheus_http_requests_total{code="200",handler="/api/v1/query"} 5
prometheus_http_requests_total{code="200",handler="/api/v1/query_range"} 128
prometheus_http_requests_total{code="200",handler="/config"} 3
prometheus_http_requests_total{code="200",handler="/flags"} 3
prometheus_http_requests_total{code="200",handler="/graph"} 38
prometheus_http_requests_total{code="200",handler="/metrics"} 3634
prometheus_http_requests_total{code="200",handler="/rules"} 3
prometheus_http_requests_total{code="200",handler="/service-discovery"} 2
prometheus_http_requests_total{code="200",handler="/static/*filepath"} 155
prometheus_http_requests_total{code="200",handler="/status"} 3
prometheus_http_requests_total{code="200",handler="/targets"} 5
prometheus_http_requests_total{code="302",handler="/"} 35
prometheus_http_requests_total{code="404",handler="/debug/*subpath"} 1
prometheus_http_requests_total{code="404",handler="/static/*filepath"} 8
# HELP prometheus_http_response_size_bytes Histogram of response size for HTTP requests.
# TYPE prometheus_http_response_size_bytes histogram
prometheus_http_response_size_bytes_bucket{handler="/",le="100"} 35
prometheus_http_response_size_bytes_bucket{handler="/",le="1000"} 35
prometheus_http_response_size_bytes_bucket{handler="/",le="10000"} 35
prometheus_http_response_size_bytes_bucket{handler="/",le="100000"} 35
prometheus_http_response_size_bytes_bucket{handler="/",le="1e+06"} 35
prometheus_http_response_size_bytes_bucket{handler="/",le="1e+07"} 35
prometheus_http_response_size_bytes_bucket{handler="/",le="1e+08"} 35
prometheus_http_response_size_bytes_bucket{handler="/",le="1e+09"} 35
prometheus_http_response_size_bytes_bucket{handler="/",le="+Inf"} 35
prometheus_http_response_size_bytes_sum{handler="/"} 1015
prometheus_http_response_size_bytes_count{handler="/"} 35
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100"} 14535
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1000"} 14535
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="10000"} 14535
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="100000"} 14535
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+06"} 14535
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+07"} 14535
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+08"} 14535
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="1e+09"} 14535
prometheus_http_response_size_bytes_bucket{handler="/-/healthy",le="+Inf"} 14535
prometheus_http_response_size_bytes_sum{handler="/-/healthy"} 334305
prometheus_http_response_size_bytes_count{handler="/-/healthy"} 14535
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100"} 43606
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1000"} 43606
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="10000"} 43606
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="100000"} 43606
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+06"} 43606
prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+07"} 43606 prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+08"} 43606 prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="1e+09"} 43606 prometheus_http_response_size_bytes_bucket{handler="/-/ready",le="+Inf"} 43606 prometheus_http_response_size_bytes_sum{handler="/-/ready"} 915726 prometheus_http_response_size_bytes_count{handler="/-/ready"} 43606 prometheus_http_response_size_bytes_bucket{handler="/-/reload",le="100"} 1 prometheus_http_response_size_bytes_bucket{handler="/-/reload",le="1000"} 1 prometheus_http_response_size_bytes_bucket{handler="/-/reload",le="10000"} 1 prometheus_http_response_size_bytes_bucket{handler="/-/reload",le="100000"} 1 prometheus_http_response_size_bytes_bucket{handler="/-/reload",le="1e+06"} 1 prometheus_http_response_size_bytes_bucket{handler="/-/reload",le="1e+07"} 1 prometheus_http_response_size_bytes_bucket{handler="/-/reload",le="1e+08"} 1 prometheus_http_response_size_bytes_bucket{handler="/-/reload",le="1e+09"} 1 prometheus_http_response_size_bytes_bucket{handler="/-/reload",le="+Inf"} 1 prometheus_http_response_size_bytes_sum{handler="/-/reload"} 0 prometheus_http_response_size_bytes_count{handler="/-/reload"} 1 prometheus_http_response_size_bytes_bucket{handler="/alerts",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/alerts",le="10000"} 0 prometheus_http_response_size_bytes_bucket{handler="/alerts",le="100000"} 2 prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+06"} 2 prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+07"} 2 prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+08"} 2 prometheus_http_response_size_bytes_bucket{handler="/alerts",le="1e+09"} 2 prometheus_http_response_size_bytes_bucket{handler="/alerts",le="+Inf"} 2 prometheus_http_response_size_bytes_sum{handler="/alerts"} 32995 prometheus_http_response_size_bytes_count{handler="/alerts"} 2 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="10000"} 4 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="100000"} 4 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+06"} 4 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+07"} 4 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+08"} 4 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="1e+09"} 4 prometheus_http_response_size_bytes_bucket{handler="/api/v1/label/:name/values",le="+Inf"} 4 prometheus_http_response_size_bytes_sum{handler="/api/v1/label/:name/values"} 36992 prometheus_http_response_size_bytes_count{handler="/api/v1/label/:name/values"} 4 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1000"} 4 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="10000"} 5 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="100000"} 5 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+06"} 5 
prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+07"} 5 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+08"} 5 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="1e+09"} 5 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query",le="+Inf"} 5 prometheus_http_response_size_bytes_sum{handler="/api/v1/query"} 3146 prometheus_http_response_size_bytes_count{handler="/api/v1/query"} 5 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100"} 30 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1000"} 127 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="10000"} 128 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="100000"} 128 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+06"} 128 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+07"} 128 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+08"} 128 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="1e+09"} 128 prometheus_http_response_size_bytes_bucket{handler="/api/v1/query_range",le="+Inf"} 128 prometheus_http_response_size_bytes_sum{handler="/api/v1/query_range"} 33362 prometheus_http_response_size_bytes_count{handler="/api/v1/query_range"} 128 prometheus_http_response_size_bytes_bucket{handler="/config",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/config",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/config",le="10000"} 0 prometheus_http_response_size_bytes_bucket{handler="/config",le="100000"} 3 prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+06"} 3 prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+07"} 3 prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+08"} 3 prometheus_http_response_size_bytes_bucket{handler="/config",le="1e+09"} 3 prometheus_http_response_size_bytes_bucket{handler="/config",le="+Inf"} 3 prometheus_http_response_size_bytes_sum{handler="/config"} 49770 prometheus_http_response_size_bytes_count{handler="/config"} 3 prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="100"} 1 prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1000"} 1 prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="10000"} 1 prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="100000"} 1 prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+06"} 1 prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+07"} 1 prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+08"} 1 prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="1e+09"} 1 prometheus_http_response_size_bytes_bucket{handler="/debug/*subpath",le="+Inf"} 1 prometheus_http_response_size_bytes_sum{handler="/debug/*subpath"} 19 prometheus_http_response_size_bytes_count{handler="/debug/*subpath"} 1 prometheus_http_response_size_bytes_bucket{handler="/flags",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/flags",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/flags",le="10000"} 3 prometheus_http_response_size_bytes_bucket{handler="/flags",le="100000"} 3 prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+06"} 3 prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+07"} 3 
prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+08"} 3 prometheus_http_response_size_bytes_bucket{handler="/flags",le="1e+09"} 3 prometheus_http_response_size_bytes_bucket{handler="/flags",le="+Inf"} 3 prometheus_http_response_size_bytes_sum{handler="/flags"} 24816 prometheus_http_response_size_bytes_count{handler="/flags"} 3 prometheus_http_response_size_bytes_bucket{handler="/graph",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/graph",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/graph",le="10000"} 38 prometheus_http_response_size_bytes_bucket{handler="/graph",le="100000"} 38 prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+06"} 38 prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+07"} 38 prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+08"} 38 prometheus_http_response_size_bytes_bucket{handler="/graph",le="1e+09"} 38 prometheus_http_response_size_bytes_bucket{handler="/graph",le="+Inf"} 38 prometheus_http_response_size_bytes_sum{handler="/graph"} 227012 prometheus_http_response_size_bytes_count{handler="/graph"} 38 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="10000"} 1 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="100000"} 3634 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+06"} 3634 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+07"} 3634 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+08"} 3634 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="1e+09"} 3634 prometheus_http_response_size_bytes_bucket{handler="/metrics",le="+Inf"} 3634 prometheus_http_response_size_bytes_sum{handler="/metrics"} 4.3285772e+07 prometheus_http_response_size_bytes_count{handler="/metrics"} 3634 prometheus_http_response_size_bytes_bucket{handler="/rules",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/rules",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/rules",le="10000"} 0 prometheus_http_response_size_bytes_bucket{handler="/rules",le="100000"} 3 prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+06"} 3 prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+07"} 3 prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+08"} 3 prometheus_http_response_size_bytes_bucket{handler="/rules",le="1e+09"} 3 prometheus_http_response_size_bytes_bucket{handler="/rules",le="+Inf"} 3 prometheus_http_response_size_bytes_sum{handler="/rules"} 45619 prometheus_http_response_size_bytes_count{handler="/rules"} 3 prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="10000"} 0 prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="100000"} 0 prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+06"} 0 prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+07"} 2 prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+08"} 2 prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="1e+09"} 2 prometheus_http_response_size_bytes_bucket{handler="/service-discovery",le="+Inf"} 2 
prometheus_http_response_size_bytes_sum{handler="/service-discovery"} 1.0970008e+07 prometheus_http_response_size_bytes_count{handler="/service-discovery"} 2 prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="100"} 8 prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1000"} 18 prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="10000"} 49 prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="100000"} 142 prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1e+06"} 163 prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1e+07"} 163 prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1e+08"} 163 prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="1e+09"} 163 prometheus_http_response_size_bytes_bucket{handler="/static/*filepath",le="+Inf"} 163 prometheus_http_response_size_bytes_sum{handler="/static/*filepath"} 7.358507e+06 prometheus_http_response_size_bytes_count{handler="/static/*filepath"} 163 prometheus_http_response_size_bytes_bucket{handler="/status",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/status",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/status",le="10000"} 0 prometheus_http_response_size_bytes_bucket{handler="/status",le="100000"} 3 prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+06"} 3 prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+07"} 3 prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+08"} 3 prometheus_http_response_size_bytes_bucket{handler="/status",le="1e+09"} 3 prometheus_http_response_size_bytes_bucket{handler="/status",le="+Inf"} 3 prometheus_http_response_size_bytes_sum{handler="/status"} 37936 prometheus_http_response_size_bytes_count{handler="/status"} 3 prometheus_http_response_size_bytes_bucket{handler="/targets",le="100"} 0 prometheus_http_response_size_bytes_bucket{handler="/targets",le="1000"} 0 prometheus_http_response_size_bytes_bucket{handler="/targets",le="10000"} 0 prometheus_http_response_size_bytes_bucket{handler="/targets",le="100000"} 0 prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+06"} 5 prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+07"} 5 prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+08"} 5 prometheus_http_response_size_bytes_bucket{handler="/targets",le="1e+09"} 5 prometheus_http_response_size_bytes_bucket{handler="/targets",le="+Inf"} 5 prometheus_http_response_size_bytes_sum{handler="/targets"} 3.183661e+06 prometheus_http_response_size_bytes_count{handler="/targets"} 5 # HELP prometheus_notifications_alertmanagers_discovered The number of alertmanagers discovered and active. # TYPE prometheus_notifications_alertmanagers_discovered gauge prometheus_notifications_alertmanagers_discovered 1 # HELP prometheus_notifications_dropped_total Total number of alerts dropped due to errors when sending to Alertmanager. # TYPE prometheus_notifications_dropped_total counter prometheus_notifications_dropped_total 0 # HELP prometheus_notifications_errors_total Total number of errors sending alert notifications. # TYPE prometheus_notifications_errors_total counter prometheus_notifications_errors_total{alertmanager="http://10.152.0.16:9093/api/v1/alerts"} 0 # HELP prometheus_notifications_latency_seconds Latency quantiles for sending alert notifications. 
# TYPE prometheus_notifications_latency_seconds summary
prometheus_notifications_latency_seconds{alertmanager="http://10.152.0.16:9093/api/v1/alerts",quantile="0.5"} 0.001816453
prometheus_notifications_latency_seconds{alertmanager="http://10.152.0.16:9093/api/v1/alerts",quantile="0.9"} 0.002014435
prometheus_notifications_latency_seconds{alertmanager="http://10.152.0.16:9093/api/v1/alerts",quantile="0.99"} 0.002151425
prometheus_notifications_latency_seconds_sum{alertmanager="http://10.152.0.16:9093/api/v1/alerts"} 8.43952242799999
prometheus_notifications_latency_seconds_count{alertmanager="http://10.152.0.16:9093/api/v1/alerts"} 4446
# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
# TYPE prometheus_notifications_queue_capacity gauge
prometheus_notifications_queue_capacity 10000
# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
# TYPE prometheus_notifications_queue_length gauge
prometheus_notifications_queue_length 0
# HELP prometheus_notifications_sent_total Total number of alerts sent.
# TYPE prometheus_notifications_sent_total counter
prometheus_notifications_sent_total{alertmanager="http://10.152.0.16:9093/api/v1/alerts"} 6271
# HELP prometheus_remote_storage_highest_timestamp_in_seconds Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.
# TYPE prometheus_remote_storage_highest_timestamp_in_seconds gauge
prometheus_remote_storage_highest_timestamp_in_seconds 1.769748656e+09
# HELP prometheus_remote_storage_samples_in_total Samples in to remote storage, compare to samples out for queue managers.
# TYPE prometheus_remote_storage_samples_in_total counter
prometheus_remote_storage_samples_in_total 7.69139103e+08
# HELP prometheus_remote_storage_string_interner_zero_reference_releases_total The number of times release has been called for strings that are not interned.
# TYPE prometheus_remote_storage_string_interner_zero_reference_releases_total counter
prometheus_remote_storage_string_interner_zero_reference_releases_total 0
# HELP prometheus_rule_evaluation_duration_seconds The duration for a rule to execute.
# TYPE prometheus_rule_evaluation_duration_seconds summary
prometheus_rule_evaluation_duration_seconds{quantile="0.5"} 0.001047796
prometheus_rule_evaluation_duration_seconds{quantile="0.9"} 0.011033069
prometheus_rule_evaluation_duration_seconds{quantile="0.99"} 0.013708484
prometheus_rule_evaluation_duration_seconds_sum 97.98050813099928
prometheus_rule_evaluation_duration_seconds_count 25439
# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
# TYPE prometheus_rule_evaluation_failures_total counter
prometheus_rule_evaluation_failures_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_cpu_usage_is_high"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_memory_usage_is_high"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_cpu_greater_than_80"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_disk_space_too_low"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_down"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_memory_left_lessser_than_10"} 0
prometheus_rule_evaluation_failures_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;prometheus-job-down"} 0
# HELP prometheus_rule_evaluations_total The total number of rule evaluations.
# TYPE prometheus_rule_evaluations_total counter
prometheus_rule_evaluations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_cpu_usage_is_high"} 3634
prometheus_rule_evaluations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_memory_usage_is_high"} 3634
prometheus_rule_evaluations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_cpu_greater_than_80"} 3635
prometheus_rule_evaluations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_disk_space_too_low"} 3634
prometheus_rule_evaluations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_down"} 3634
prometheus_rule_evaluations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_memory_left_lessser_than_10"} 3634
prometheus_rule_evaluations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;prometheus-job-down"} 3634
# HELP prometheus_rule_group_duration_seconds The duration of rule group evaluations.
# TYPE prometheus_rule_group_duration_seconds summary
prometheus_rule_group_duration_seconds{quantile="0.01"} 0.000349719
prometheus_rule_group_duration_seconds{quantile="0.05"} 0.000382412
prometheus_rule_group_duration_seconds{quantile="0.5"} 0.001062713
prometheus_rule_group_duration_seconds{quantile="0.9"} 0.011044688
prometheus_rule_group_duration_seconds{quantile="0.99"} 0.013722381
prometheus_rule_group_duration_seconds_sum 98.35031455400029
prometheus_rule_group_duration_seconds_count 25439
# HELP prometheus_rule_group_interval_seconds The interval of a rule group.
# TYPE prometheus_rule_group_interval_seconds gauge
prometheus_rule_group_interval_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_cpu_usage_is_high"} 60
prometheus_rule_group_interval_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_memory_usage_is_high"} 60
prometheus_rule_group_interval_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_cpu_greater_than_80"} 60
prometheus_rule_group_interval_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_disk_space_too_low"} 60
prometheus_rule_group_interval_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_down"} 60
prometheus_rule_group_interval_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_memory_left_lessser_than_10"} 60
prometheus_rule_group_interval_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;prometheus-job-down"} 60
# HELP prometheus_rule_group_iterations_missed_total The total number of rule group evaluations missed due to slow rule group evaluation.
# TYPE prometheus_rule_group_iterations_missed_total counter
prometheus_rule_group_iterations_missed_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_cpu_usage_is_high"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_memory_usage_is_high"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_cpu_greater_than_80"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_disk_space_too_low"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_down"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_memory_left_lessser_than_10"} 0
prometheus_rule_group_iterations_missed_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;prometheus-job-down"} 0
# HELP prometheus_rule_group_iterations_total The total number of scheduled rule group evaluations, whether executed or missed.
# TYPE prometheus_rule_group_iterations_total counter
prometheus_rule_group_iterations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_cpu_usage_is_high"} 3634
prometheus_rule_group_iterations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_memory_usage_is_high"} 3634
prometheus_rule_group_iterations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_cpu_greater_than_80"} 3635
prometheus_rule_group_iterations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_disk_space_too_low"} 3634
prometheus_rule_group_iterations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_down"} 3634
prometheus_rule_group_iterations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_memory_left_lessser_than_10"} 3634
prometheus_rule_group_iterations_total{rule_group="/etc/alerts.d/node_alerting_rules.yml;prometheus-job-down"} 3634
# HELP prometheus_rule_group_last_duration_seconds The duration of the last rule group evaluation.
# TYPE prometheus_rule_group_last_duration_seconds gauge
prometheus_rule_group_last_duration_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_cpu_usage_is_high"} 0.006347199
prometheus_rule_group_last_duration_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_memory_usage_is_high"} 0.013245088
prometheus_rule_group_last_duration_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_cpu_greater_than_80"} 0.001668483
prometheus_rule_group_last_duration_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_disk_space_too_low"} 0.001062713
prometheus_rule_group_last_duration_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_down"} 0.000382412
prometheus_rule_group_last_duration_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_memory_left_lessser_than_10"} 0.00084365
prometheus_rule_group_last_duration_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;prometheus-job-down"} 0.000396331
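# Example: comparing each group's last evaluation duration to its interval
# flags groups at risk of missing iterations; a sketch (0.8 is an assumed
# threshold, not taken from this data):
#   prometheus_rule_group_last_duration_seconds / prometheus_rule_group_interval_seconds > 0.8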
# HELP prometheus_rule_group_last_evaluation_timestamp_seconds The timestamp of the last rule group evaluation in seconds.
# TYPE prometheus_rule_group_last_evaluation_timestamp_seconds gauge
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_cpu_usage_is_high"} 1.7697486398773065e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_memory_usage_is_high"} 1.769748629309238e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_cpu_greater_than_80"} 1.7697486546314485e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_disk_space_too_low"} 1.769748599497184e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_down"} 1.7697486450602791e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_memory_left_lessser_than_10"} 1.7697486343457263e+09
prometheus_rule_group_last_evaluation_timestamp_seconds{rule_group="/etc/alerts.d/node_alerting_rules.yml;prometheus-job-down"} 1.7697486030687842e+09
# HELP prometheus_rule_group_rules The number of rules.
# TYPE prometheus_rule_group_rules gauge
prometheus_rule_group_rules{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_cpu_usage_is_high"} 1
prometheus_rule_group_rules{rule_group="/etc/alerts.d/node_alerting_rules.yml;container_memory_usage_is_high"} 1
prometheus_rule_group_rules{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_cpu_greater_than_80"} 1
prometheus_rule_group_rules{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_disk_space_too_low"} 1
prometheus_rule_group_rules{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_down"} 1
prometheus_rule_group_rules{rule_group="/etc/alerts.d/node_alerting_rules.yml;node_memory_left_lessser_than_10"} 1
prometheus_rule_group_rules{rule_group="/etc/alerts.d/node_alerting_rules.yml;prometheus-job-down"} 1
# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds.
# TYPE prometheus_sd_consul_rpc_duration_seconds summary
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures.
# TYPE prometheus_sd_consul_rpc_failures_total counter
prometheus_sd_consul_rpc_failures_total 0
# HELP prometheus_sd_discovered_targets Current number of discovered targets.
# TYPE prometheus_sd_discovered_targets gauge
prometheus_sd_discovered_targets{config="config-0",name="notify"} 275
prometheus_sd_discovered_targets{config="kubernetes-apiservers",name="scrape"} 162
prometheus_sd_discovered_targets{config="kubernetes-nodes",name="scrape"} 6
prometheus_sd_discovered_targets{config="kubernetes-nodes-cadvisor",name="scrape"} 6
prometheus_sd_discovered_targets{config="kubernetes-pods",name="scrape"} 275
prometheus_sd_discovered_targets{config="kubernetes-pods-slow",name="scrape"} 275
prometheus_sd_discovered_targets{config="kubernetes-service-endpoints",name="scrape"} 162
prometheus_sd_discovered_targets{config="kubernetes-service-endpoints-slow",name="scrape"} 162
prometheus_sd_discovered_targets{config="kubernetes-services",name="scrape"} 310
prometheus_sd_discovered_targets{config="opsmx_ssd_metrics",name="scrape"} 162
prometheus_sd_discovered_targets{config="otel-collector-redica-instance",name="scrape"} 1
prometheus_sd_discovered_targets{config="prometheus",name="scrape"} 1
prometheus_sd_discovered_targets{config="prometheus-pushgateway",name="scrape"} 310
# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures.
# TYPE prometheus_sd_dns_lookup_failures_total counter
prometheus_sd_dns_lookup_failures_total 0
# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups.
# TYPE prometheus_sd_dns_lookups_total counter
prometheus_sd_dns_lookups_total 0
# HELP prometheus_sd_failed_configs Current number of service discovery configurations that failed to load.
# TYPE prometheus_sd_failed_configs gauge
prometheus_sd_failed_configs{name="notify"} 0
prometheus_sd_failed_configs{name="scrape"} 0
# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors.
# TYPE prometheus_sd_file_read_errors_total counter
prometheus_sd_file_read_errors_total 0
# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds.
# TYPE prometheus_sd_file_scan_duration_seconds summary
prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
prometheus_sd_file_scan_duration_seconds_sum 0
prometheus_sd_file_scan_duration_seconds_count 0
# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled.
# TYPE prometheus_sd_kubernetes_events_total counter
prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 397
prometheus_sd_kubernetes_events_total{event="add",role="endpointslice"} 0
prometheus_sd_kubernetes_events_total{event="add",role="ingress"} 0
prometheus_sd_kubernetes_events_total{event="add",role="node"} 12
prometheus_sd_kubernetes_events_total{event="add",role="pod"} 708
prometheus_sd_kubernetes_events_total{event="add",role="service"} 790
prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 5
prometheus_sd_kubernetes_events_total{event="delete",role="endpointslice"} 0
prometheus_sd_kubernetes_events_total{event="delete",role="ingress"} 0
prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 160
prometheus_sd_kubernetes_events_total{event="delete",role="service"} 10
prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 71316
prometheus_sd_kubernetes_events_total{event="update",role="endpointslice"} 0
prometheus_sd_kubernetes_events_total{event="update",role="ingress"} 0
prometheus_sd_kubernetes_events_total{event="update",role="node"} 10827
prometheus_sd_kubernetes_events_total{event="update",role="pod"} 99038
prometheus_sd_kubernetes_events_total{event="update",role="service"} 141570
# HELP prometheus_sd_kubernetes_http_request_duration_seconds Summary of latencies for HTTP requests to the Kubernetes API by endpoint.
# TYPE prometheus_sd_kubernetes_http_request_duration_seconds summary
prometheus_sd_kubernetes_http_request_duration_seconds_sum{endpoint="/%7Bprefix%7D"} 0.660161879
prometheus_sd_kubernetes_http_request_duration_seconds_count{endpoint="/%7Bprefix%7D"} 14
# HELP prometheus_sd_kubernetes_http_request_total Total number of HTTP requests to the Kubernetes API by status code.
# TYPE prometheus_sd_kubernetes_http_request_total counter
prometheus_sd_kubernetes_http_request_total{status_code="200"} 3433
# HELP prometheus_sd_kubernetes_workqueue_depth Current depth of the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_depth gauge
prometheus_sd_kubernetes_workqueue_depth{queue_name="endpoints"} 0
prometheus_sd_kubernetes_workqueue_depth{queue_name="node"} 0
prometheus_sd_kubernetes_workqueue_depth{queue_name="pod"} 0
prometheus_sd_kubernetes_workqueue_depth{queue_name="service"} 0
# HELP prometheus_sd_kubernetes_workqueue_items_total Total number of items added to the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_items_total counter
prometheus_sd_kubernetes_workqueue_items_total{queue_name="endpoints"} 84992
prometheus_sd_kubernetes_workqueue_items_total{queue_name="node"} 10839
prometheus_sd_kubernetes_workqueue_items_total{queue_name="pod"} 99906
prometheus_sd_kubernetes_workqueue_items_total{queue_name="service"} 71185
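# Example: the per-role event counters above are monotonic, so object churn is
# usually read as a rate; a minimal sketch:
#   sum by (role, event) (rate(prometheus_sd_kubernetes_events_total[5m]))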
# HELP prometheus_sd_kubernetes_workqueue_latency_seconds How long an item stays in the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_latency_seconds summary
prometheus_sd_kubernetes_workqueue_latency_seconds_sum{queue_name="endpoints"} 976.2935442589944
prometheus_sd_kubernetes_workqueue_latency_seconds_count{queue_name="endpoints"} 84992
prometheus_sd_kubernetes_workqueue_latency_seconds_sum{queue_name="node"} 2.066929573000002
prometheus_sd_kubernetes_workqueue_latency_seconds_count{queue_name="node"} 10839
prometheus_sd_kubernetes_workqueue_latency_seconds_sum{queue_name="pod"} 314.5750873720019
prometheus_sd_kubernetes_workqueue_latency_seconds_count{queue_name="pod"} 99906
prometheus_sd_kubernetes_workqueue_latency_seconds_sum{queue_name="service"} 321.8374057350009
prometheus_sd_kubernetes_workqueue_latency_seconds_count{queue_name="service"} 71185
# HELP prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds Duration of the longest running processor in the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds gauge
prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds{queue_name="endpoints"} 0
prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds{queue_name="node"} 0
prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds{queue_name="pod"} 0
prometheus_sd_kubernetes_workqueue_longest_running_processor_seconds{queue_name="service"} 0
# HELP prometheus_sd_kubernetes_workqueue_unfinished_work_seconds How long an item has remained unfinished in the work queue.
# TYPE prometheus_sd_kubernetes_workqueue_unfinished_work_seconds gauge
prometheus_sd_kubernetes_workqueue_unfinished_work_seconds{queue_name="endpoints"} 0
prometheus_sd_kubernetes_workqueue_unfinished_work_seconds{queue_name="node"} 0
prometheus_sd_kubernetes_workqueue_unfinished_work_seconds{queue_name="pod"} 0
prometheus_sd_kubernetes_workqueue_unfinished_work_seconds{queue_name="service"} 0
# HELP prometheus_sd_kubernetes_workqueue_work_duration_seconds How long processing an item from the work queue takes.
# TYPE prometheus_sd_kubernetes_workqueue_work_duration_seconds summary
prometheus_sd_kubernetes_workqueue_work_duration_seconds_sum{queue_name="endpoints"} 8.626390022999898
prometheus_sd_kubernetes_workqueue_work_duration_seconds_count{queue_name="endpoints"} 84992
prometheus_sd_kubernetes_workqueue_work_duration_seconds_sum{queue_name="node"} 2.0368067250000044
prometheus_sd_kubernetes_workqueue_work_duration_seconds_count{queue_name="node"} 10839
prometheus_sd_kubernetes_workqueue_work_duration_seconds_sum{queue_name="pod"} 4.694798913999943
prometheus_sd_kubernetes_workqueue_work_duration_seconds_count{queue_name="pod"} 99906
prometheus_sd_kubernetes_workqueue_work_duration_seconds_sum{queue_name="service"} 3.490097273000003
prometheus_sd_kubernetes_workqueue_work_duration_seconds_count{queue_name="service"} 71185
# HELP prometheus_sd_received_updates_total Total number of update events received from the SD providers.
# TYPE prometheus_sd_received_updates_total counter
prometheus_sd_received_updates_total{name="notify"} 49953
prometheus_sd_received_updates_total{name="scrape"} 216977
# HELP prometheus_sd_updates_total Total number of update events sent to the SD consumers.
# TYPE prometheus_sd_updates_total counter
prometheus_sd_updates_total{name="notify"} 500
prometheus_sd_updates_total{name="scrape"} 8145
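# Example: summary-typed metrics that expose only _sum and _count (as the
# workqueue metrics above do) still yield a sliding-window average per queue:
#   rate(prometheus_sd_kubernetes_workqueue_latency_seconds_sum[5m]) / rate(prometheus_sd_kubernetes_workqueue_latency_seconds_count[5m])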
# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
# TYPE prometheus_target_interval_length_seconds summary
prometheus_target_interval_length_seconds{interval="1m0s",quantile="0.01"} 59.999627587
prometheus_target_interval_length_seconds{interval="1m0s",quantile="0.05"} 59.999908498
prometheus_target_interval_length_seconds{interval="1m0s",quantile="0.5"} 60.000015503
prometheus_target_interval_length_seconds{interval="1m0s",quantile="0.9"} 60.000090799
prometheus_target_interval_length_seconds{interval="1m0s",quantile="0.99"} 60.000486577
prometheus_target_interval_length_seconds_sum{interval="1m0s"} 2.436552716291379e+07
prometheus_target_interval_length_seconds_count{interval="1m0s"} 406092
prometheus_target_interval_length_seconds{interval="5s",quantile="0.01"} 4.999849816
prometheus_target_interval_length_seconds{interval="5s",quantile="0.05"} 4.99988617
prometheus_target_interval_length_seconds{interval="5s",quantile="0.5"} 5.000016367
prometheus_target_interval_length_seconds{interval="5s",quantile="0.9"} 5.000101658
prometheus_target_interval_length_seconds{interval="5s",quantile="0.99"} 5.000166243
prometheus_target_interval_length_seconds_sum{interval="5s"} 218030.7650299883
prometheus_target_interval_length_seconds_count{interval="5s"} 43606
# HELP prometheus_target_metadata_cache_bytes The number of bytes that are currently used for storing metric metadata in the cache
# TYPE prometheus_target_metadata_cache_bytes gauge
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-apiservers"} 40809
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-nodes"} 211197
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-nodes-cadvisor"} 21594
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-pods"} 28778
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-pods-slow"} 0
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-service-endpoints"} 94637
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-service-endpoints-slow"} 0
prometheus_target_metadata_cache_bytes{scrape_job="kubernetes-services"} 0
prometheus_target_metadata_cache_bytes{scrape_job="opsmx_ssd_metrics"} 0
prometheus_target_metadata_cache_bytes{scrape_job="otel-collector-redica-instance"} 517
prometheus_target_metadata_cache_bytes{scrape_job="prometheus"} 10024
prometheus_target_metadata_cache_bytes{scrape_job="prometheus-pushgateway"} 0
# HELP prometheus_target_metadata_cache_entries Total number of metric metadata entries in the cache
# TYPE prometheus_target_metadata_cache_entries gauge
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-apiservers"} 298
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-nodes"} 1495
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-nodes-cadvisor"} 432
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-pods"} 612
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-pods-slow"} 0
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-service-endpoints"} 2129
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-service-endpoints-slow"} 0
prometheus_target_metadata_cache_entries{scrape_job="kubernetes-services"} 0
prometheus_target_metadata_cache_entries{scrape_job="opsmx_ssd_metrics"} 0
prometheus_target_metadata_cache_entries{scrape_job="otel-collector-redica-instance"} 13
prometheus_target_metadata_cache_entries{scrape_job="prometheus"} 173
prometheus_target_metadata_cache_entries{scrape_job="prometheus-pushgateway"} 0
# HELP prometheus_target_reload_length_seconds Actual interval to reload the scrape pool with a given configuration.
# TYPE prometheus_target_reload_length_seconds summary
prometheus_target_reload_length_seconds{interval="5s",quantile="0.01"} NaN
prometheus_target_reload_length_seconds{interval="5s",quantile="0.05"} NaN
prometheus_target_reload_length_seconds{interval="5s",quantile="0.5"} NaN
prometheus_target_reload_length_seconds{interval="5s",quantile="0.9"} NaN
prometheus_target_reload_length_seconds{interval="5s",quantile="0.99"} NaN
prometheus_target_reload_length_seconds_sum{interval="5s"} 0.002460574
prometheus_target_reload_length_seconds_count{interval="5s"} 1
# HELP prometheus_target_scrape_pool_exceeded_target_limit_total Total number of times scrape pools hit the target limit, during sync or config reload.
# TYPE prometheus_target_scrape_pool_exceeded_target_limit_total counter
prometheus_target_scrape_pool_exceeded_target_limit_total 0
# HELP prometheus_target_scrape_pool_reloads_failed_total Total number of failed scrape pool reloads.
# TYPE prometheus_target_scrape_pool_reloads_failed_total counter
prometheus_target_scrape_pool_reloads_failed_total 0
# HELP prometheus_target_scrape_pool_reloads_total Total number of scrape pool reloads.
# TYPE prometheus_target_scrape_pool_reloads_total counter
prometheus_target_scrape_pool_reloads_total 1
# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool.
# TYPE prometheus_target_scrape_pool_sync_total counter
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-apiservers"} 8145
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-nodes"} 8145
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-nodes-cadvisor"} 8145
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-pods"} 8145
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-pods-slow"} 8145
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-service-endpoints"} 8145
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-service-endpoints-slow"} 8145
prometheus_target_scrape_pool_sync_total{scrape_job="kubernetes-services"} 8145
prometheus_target_scrape_pool_sync_total{scrape_job="opsmx_ssd_metrics"} 8145
prometheus_target_scrape_pool_sync_total{scrape_job="otel-collector-redica-instance"} 8145
prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 8145
prometheus_target_scrape_pool_sync_total{scrape_job="prometheus-pushgateway"} 8145
# HELP prometheus_target_scrape_pool_target_limit Maximum number of targets allowed in this scrape pool.
# TYPE prometheus_target_scrape_pool_target_limit gauge
prometheus_target_scrape_pool_target_limit{scrape_job="otel-collector-redica-instance"} 0
# HELP prometheus_target_scrape_pool_targets Current number of targets in this scrape pool.
# TYPE prometheus_target_scrape_pool_targets gauge
prometheus_target_scrape_pool_targets{scrape_job="kubernetes-apiservers"} 1
prometheus_target_scrape_pool_targets{scrape_job="kubernetes-nodes"} 6
prometheus_target_scrape_pool_targets{scrape_job="kubernetes-nodes-cadvisor"} 6
prometheus_target_scrape_pool_targets{scrape_job="kubernetes-pods"} 9
prometheus_target_scrape_pool_targets{scrape_job="kubernetes-pods-slow"} 0
prometheus_target_scrape_pool_targets{scrape_job="kubernetes-service-endpoints"} 12
prometheus_target_scrape_pool_targets{scrape_job="kubernetes-service-endpoints-slow"} 0
prometheus_target_scrape_pool_targets{scrape_job="kubernetes-services"} 0
prometheus_target_scrape_pool_targets{scrape_job="opsmx_ssd_metrics"} 67
prometheus_target_scrape_pool_targets{scrape_job="otel-collector-redica-instance"} 1
prometheus_target_scrape_pool_targets{scrape_job="prometheus"} 1
prometheus_target_scrape_pool_targets{scrape_job="prometheus-pushgateway"} 0
# HELP prometheus_target_scrape_pools_failed_total Total number of scrape pool creations that failed.
# TYPE prometheus_target_scrape_pools_failed_total counter
prometheus_target_scrape_pools_failed_total 0
# HELP prometheus_target_scrape_pools_total Total number of scrape pool creation attempts.
# TYPE prometheus_target_scrape_pools_total counter
prometheus_target_scrape_pools_total 12
# HELP prometheus_target_scrapes_cache_flush_forced_total How many times a scrape cache was flushed due to getting big while scrapes are failing.
# TYPE prometheus_target_scrapes_cache_flush_forced_total counter
prometheus_target_scrapes_cache_flush_forced_total 0
# HELP prometheus_target_scrapes_exceeded_sample_limit_total Total number of scrapes that hit the sample limit and were rejected.
# TYPE prometheus_target_scrapes_exceeded_sample_limit_total counter
prometheus_target_scrapes_exceeded_sample_limit_total 0
# HELP prometheus_target_scrapes_sample_duplicate_timestamp_total Total number of samples rejected due to duplicate timestamps but different values
# TYPE prometheus_target_scrapes_sample_duplicate_timestamp_total counter
prometheus_target_scrapes_sample_duplicate_timestamp_total 0
# HELP prometheus_target_scrapes_sample_out_of_bounds_total Total number of samples rejected due to timestamp falling outside of the time bounds
# TYPE prometheus_target_scrapes_sample_out_of_bounds_total counter
prometheus_target_scrapes_sample_out_of_bounds_total 0
# HELP prometheus_target_scrapes_sample_out_of_order_total Total number of samples rejected due to not being out of the expected order
# TYPE prometheus_target_scrapes_sample_out_of_order_total counter
prometheus_target_scrapes_sample_out_of_order_total 0
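# Example: the rejection counters above all read zero here; a sketch of a
# watchdog for silent sample drops (expression assumed, not part of this
# configuration):
#   rate(prometheus_target_scrapes_sample_out_of_order_total[5m]) + rate(prometheus_target_scrapes_exceeded_sample_limit_total[5m]) > 0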
# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool.
# TYPE prometheus_target_sync_length_seconds summary
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.01"} 0.005956153
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.05"} 0.005956153
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.5"} 0.006383141
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.9"} 0.015356174
prometheus_target_sync_length_seconds{scrape_job="kubernetes-apiservers",quantile="0.99"} 0.015786221
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-apiservers"} 77.49214214100037
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-apiservers"} 8145
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.01"} 0.001697971
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.05"} 0.001697971
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.5"} 0.001780676
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.9"} 0.002183131
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes",quantile="0.99"} 0.002282601
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-nodes"} 18.36740536500007
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-nodes"} 8145
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes-cadvisor",quantile="0.01"} 0.001579547
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes-cadvisor",quantile="0.05"} 0.001579547
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes-cadvisor",quantile="0.5"} 0.001722152
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes-cadvisor",quantile="0.9"} 0.002288396
prometheus_target_sync_length_seconds{scrape_job="kubernetes-nodes-cadvisor",quantile="0.99"} 0.002433099
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-nodes-cadvisor"} 18.148784746999937
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-nodes-cadvisor"} 8145
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods",quantile="0.01"} 0.007559212
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods",quantile="0.05"} 0.007559212
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods",quantile="0.5"} 0.009550908
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods",quantile="0.9"} 0.0145156
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods",quantile="0.99"} 0.014553992
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-pods"} 104.91948812199983
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-pods"} 8145
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods-slow",quantile="0.01"} 0.005461758
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods-slow",quantile="0.05"} 0.005461758
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods-slow",quantile="0.5"} 0.005842419
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods-slow",quantile="0.9"} 0.01224861
prometheus_target_sync_length_seconds{scrape_job="kubernetes-pods-slow",quantile="0.99"} 0.014017989
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-pods-slow"} 68.01474132099995
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-pods-slow"} 8145
prometheus_target_sync_length_seconds{scrape_job="kubernetes-service-endpoints",quantile="0.01"} 0.008764608
prometheus_target_sync_length_seconds{scrape_job="kubernetes-service-endpoints",quantile="0.05"} 0.008764608
prometheus_target_sync_length_seconds{scrape_job="kubernetes-service-endpoints",quantile="0.5"} 0.011212441
prometheus_target_sync_length_seconds{scrape_job="kubernetes-service-endpoints",quantile="0.9"} 0.016001595
prometheus_target_sync_length_seconds{scrape_job="kubernetes-service-endpoints",quantile="0.99"} 0.018266589
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-service-endpoints"} 118.40373077800017
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-service-endpoints"} 8145
prometheus_target_sync_length_seconds{scrape_job="kubernetes-service-endpoints-slow",quantile="0.01"} 0.005976623
prometheus_target_sync_length_seconds{scrape_job="kubernetes-service-endpoints-slow",quantile="0.05"} 0.005976623
prometheus_target_sync_length_seconds{scrape_job="kubernetes-service-endpoints-slow",quantile="0.5"} 0.006572145
prometheus_target_sync_length_seconds{scrape_job="kubernetes-service-endpoints-slow",quantile="0.9"} 0.011624261
prometheus_target_sync_length_seconds{scrape_job="kubernetes-service-endpoints-slow",quantile="0.99"} 0.014388752
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-service-endpoints-slow"} 75.52850920300006
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-service-endpoints-slow"} 8145
prometheus_target_sync_length_seconds{scrape_job="kubernetes-services",quantile="0.01"} 0.005283705
prometheus_target_sync_length_seconds{scrape_job="kubernetes-services",quantile="0.05"} 0.005283705
prometheus_target_sync_length_seconds{scrape_job="kubernetes-services",quantile="0.5"} 0.006785767
prometheus_target_sync_length_seconds{scrape_job="kubernetes-services",quantile="0.9"} 0.010681034
prometheus_target_sync_length_seconds{scrape_job="kubernetes-services",quantile="0.99"} 0.011024964
prometheus_target_sync_length_seconds_sum{scrape_job="kubernetes-services"} 71.91300998699998
prometheus_target_sync_length_seconds_count{scrape_job="kubernetes-services"} 8145
prometheus_target_sync_length_seconds{scrape_job="opsmx_ssd_metrics",quantile="0.01"} 0.008787259
prometheus_target_sync_length_seconds{scrape_job="opsmx_ssd_metrics",quantile="0.05"} 0.008787259
prometheus_target_sync_length_seconds{scrape_job="opsmx_ssd_metrics",quantile="0.5"} 0.010742684
prometheus_target_sync_length_seconds{scrape_job="opsmx_ssd_metrics",quantile="0.9"} 0.018371719
prometheus_target_sync_length_seconds{scrape_job="opsmx_ssd_metrics",quantile="0.99"} 0.019058928
prometheus_target_sync_length_seconds_sum{scrape_job="opsmx_ssd_metrics"} 140.92694763500083
prometheus_target_sync_length_seconds_count{scrape_job="opsmx_ssd_metrics"} 8145
prometheus_target_sync_length_seconds{scrape_job="otel-collector-redica-instance",quantile="0.01"} 2.6823e-05
prometheus_target_sync_length_seconds{scrape_job="otel-collector-redica-instance",quantile="0.05"} 2.6823e-05
prometheus_target_sync_length_seconds{scrape_job="otel-collector-redica-instance",quantile="0.5"} 5.8198e-05
prometheus_target_sync_length_seconds{scrape_job="otel-collector-redica-instance",quantile="0.9"} 0.000140668
prometheus_target_sync_length_seconds{scrape_job="otel-collector-redica-instance",quantile="0.99"} 0.000164362
prometheus_target_sync_length_seconds_sum{scrape_job="otel-collector-redica-instance"} 0.6444272610000006
prometheus_target_sync_length_seconds_count{scrape_job="otel-collector-redica-instance"} 8145
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 1.9139e-05
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 1.9139e-05
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 5.3551e-05
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.000101751
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.000109652
prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.5908366869999998
prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 8145
prometheus_target_sync_length_seconds{scrape_job="prometheus-pushgateway",quantile="0.01"} 0.006189245
prometheus_target_sync_length_seconds{scrape_job="prometheus-pushgateway",quantile="0.05"} 0.006189245
prometheus_target_sync_length_seconds{scrape_job="prometheus-pushgateway",quantile="0.5"} 0.006675843
prometheus_target_sync_length_seconds{scrape_job="prometheus-pushgateway",quantile="0.9"} 0.010304571
prometheus_target_sync_length_seconds{scrape_job="prometheus-pushgateway",quantile="0.99"} 0.012983375
prometheus_target_sync_length_seconds_sum{scrape_job="prometheus-pushgateway"} 69.17653688499992
prometheus_target_sync_length_seconds_count{scrape_job="prometheus-pushgateway"} 8145
# HELP prometheus_template_text_expansion_failures_total The total number of template text expansion failures.
# TYPE prometheus_template_text_expansion_failures_total counter
prometheus_template_text_expansion_failures_total 0
# HELP prometheus_template_text_expansions_total The total number of template text expansions.
# TYPE prometheus_template_text_expansions_total counter
prometheus_template_text_expansions_total 22314
# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines.
# TYPE prometheus_treecache_watcher_goroutines gauge
prometheus_treecache_watcher_goroutines 0
# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures.
# TYPE prometheus_treecache_zookeeper_failures_total counter
prometheus_treecache_zookeeper_failures_total 0
# HELP prometheus_tsdb_blocks_loaded Number of currently loaded data blocks
# TYPE prometheus_tsdb_blocks_loaded gauge
prometheus_tsdb_blocks_loaded 6
# HELP prometheus_tsdb_checkpoint_creations_failed_total Total number of checkpoint creations that failed.
# TYPE prometheus_tsdb_checkpoint_creations_failed_total counter
prometheus_tsdb_checkpoint_creations_failed_total 0
# HELP prometheus_tsdb_checkpoint_creations_total Total number of checkpoint creations attempted.
# TYPE prometheus_tsdb_checkpoint_creations_total counter
prometheus_tsdb_checkpoint_creations_total 249
# HELP prometheus_tsdb_checkpoint_deletions_failed_total Total number of checkpoint deletions that failed.
# TYPE prometheus_tsdb_checkpoint_deletions_failed_total counter
prometheus_tsdb_checkpoint_deletions_failed_total 0
# HELP prometheus_tsdb_checkpoint_deletions_total Total number of checkpoint deletions attempted.
# TYPE prometheus_tsdb_checkpoint_deletions_total counter
prometheus_tsdb_checkpoint_deletions_total 249
# HELP prometheus_tsdb_compaction_chunk_range_seconds Final time range of chunks on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_range_seconds histogram
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="100"} 1284
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="400"} 1284
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1600"} 1284
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6400"} 1322
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="25600"} 1415
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="102400"} 2651
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="409600"} 11452
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="1.6384e+06"} 311411
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="6.5536e+06"} 595956
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="2.62144e+07"} 6.428263e+06
prometheus_tsdb_compaction_chunk_range_seconds_bucket{le="+Inf"} 6.428263e+06
prometheus_tsdb_compaction_chunk_range_seconds_sum 4.251569849571e+13
prometheus_tsdb_compaction_chunk_range_seconds_count 6.428263e+06
# HELP prometheus_tsdb_compaction_chunk_samples Final number of samples on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_samples histogram
prometheus_tsdb_compaction_chunk_samples_bucket{le="4"} 2703
prometheus_tsdb_compaction_chunk_samples_bucket{le="6"} 3504
prometheus_tsdb_compaction_chunk_samples_bucket{le="9"} 4455
prometheus_tsdb_compaction_chunk_samples_bucket{le="13.5"} 5712
prometheus_tsdb_compaction_chunk_samples_bucket{le="20.25"} 8581
prometheus_tsdb_compaction_chunk_samples_bucket{le="30.375"} 12620
prometheus_tsdb_compaction_chunk_samples_bucket{le="45.5625"} 237437
prometheus_tsdb_compaction_chunk_samples_bucket{le="68.34375"} 296334
prometheus_tsdb_compaction_chunk_samples_bucket{le="102.515625"} 303243
prometheus_tsdb_compaction_chunk_samples_bucket{le="153.7734375"} 6.427052e+06
prometheus_tsdb_compaction_chunk_samples_bucket{le="230.66015625"} 6.428169e+06
prometheus_tsdb_compaction_chunk_samples_bucket{le="345.990234375"} 6.428243e+06
prometheus_tsdb_compaction_chunk_samples_bucket{le="+Inf"} 6.428263e+06
prometheus_tsdb_compaction_chunk_samples_sum 7.47396271e+08
prometheus_tsdb_compaction_chunk_samples_count 6.428263e+06
# HELP prometheus_tsdb_compaction_chunk_size_bytes Final size of chunks on their first compaction
# TYPE prometheus_tsdb_compaction_chunk_size_bytes histogram
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="32"} 6382
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="48"} 78048
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="72"} 2.4312e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="108"} 3.273422e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="162"} 4.131342e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="243"} 4.962621e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="364.5"} 6.10051e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="546.75"} 6.271828e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="820.125"} 6.35953e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1230.1875"} 6.428253e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1845.28125"} 6.428263e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="2767.921875"} 6.428263e+06
prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="+Inf"} 6.428263e+06
prometheus_tsdb_compaction_chunk_size_bytes_sum 1.024570474e+09
prometheus_tsdb_compaction_chunk_size_bytes_count 6.428263e+06
# HELP prometheus_tsdb_compaction_duration_seconds Duration of compaction runs
# TYPE prometheus_tsdb_compaction_duration_seconds histogram
prometheus_tsdb_compaction_duration_seconds_bucket{le="1"} 0
prometheus_tsdb_compaction_duration_seconds_bucket{le="2"} 439
prometheus_tsdb_compaction_duration_seconds_bucket{le="4"} 458
prometheus_tsdb_compaction_duration_seconds_bucket{le="8"} 458
prometheus_tsdb_compaction_duration_seconds_bucket{le="16"} 477
prometheus_tsdb_compaction_duration_seconds_bucket{le="32"} 501
prometheus_tsdb_compaction_duration_seconds_bucket{le="64"} 501
prometheus_tsdb_compaction_duration_seconds_bucket{le="128"} 501
prometheus_tsdb_compaction_duration_seconds_bucket{le="256"} 501
prometheus_tsdb_compaction_duration_seconds_bucket{le="512"} 501
prometheus_tsdb_compaction_duration_seconds_bucket{le="+Inf"} 501
prometheus_tsdb_compaction_duration_seconds_sum 1448.8642990880003
prometheus_tsdb_compaction_duration_seconds_count 501
# HELP prometheus_tsdb_compaction_populating_block Set to 1 when a block is currently being written to the disk.
# TYPE prometheus_tsdb_compaction_populating_block gauge
prometheus_tsdb_compaction_populating_block 0
# HELP prometheus_tsdb_compactions_failed_total Total number of compactions that failed for the partition.
# TYPE prometheus_tsdb_compactions_failed_total counter
prometheus_tsdb_compactions_failed_total 0
# HELP prometheus_tsdb_compactions_skipped_total Total number of skipped compactions due to disabled auto compaction.
# TYPE prometheus_tsdb_compactions_skipped_total counter
prometheus_tsdb_compactions_skipped_total 0
# HELP prometheus_tsdb_compactions_total Total number of compactions that were executed for the partition.
# TYPE prometheus_tsdb_compactions_total counter
prometheus_tsdb_compactions_total 501
# HELP prometheus_tsdb_compactions_triggered_total Total number of triggered compactions for the partition.
# TYPE prometheus_tsdb_compactions_triggered_total counter
prometheus_tsdb_compactions_triggered_total 3626
# HELP prometheus_tsdb_head_active_appenders Number of currently active appender transactions
# TYPE prometheus_tsdb_head_active_appenders gauge
prometheus_tsdb_head_active_appenders 0
# HELP prometheus_tsdb_head_chunks Total number of chunks in the head block.
# TYPE prometheus_tsdb_head_chunks gauge
prometheus_tsdb_head_chunks 386700
# HELP prometheus_tsdb_head_chunks_created_total Total number of chunks created in the head
# TYPE prometheus_tsdb_head_chunks_created_total counter
prometheus_tsdb_head_chunks_created_total 6.814963e+06
# HELP prometheus_tsdb_head_chunks_removed_total Total number of chunks removed in the head
# TYPE prometheus_tsdb_head_chunks_removed_total counter
prometheus_tsdb_head_chunks_removed_total 6.428263e+06
# HELP prometheus_tsdb_head_gc_duration_seconds Runtime of garbage collection in the head block.
# TYPE prometheus_tsdb_head_gc_duration_seconds summary
prometheus_tsdb_head_gc_duration_seconds_sum 121.08688920399999
prometheus_tsdb_head_gc_duration_seconds_count 488
# HELP prometheus_tsdb_head_max_time Maximum timestamp of the head block. The unit is decided by the library consumer.
# TYPE prometheus_tsdb_head_max_time gauge
prometheus_tsdb_head_max_time 1.769748656446e+12
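# Example: compaction run durations are a classic histogram, so tail latency
# can be sketched the same way as any other bucket family:
#   histogram_quantile(0.9, rate(prometheus_tsdb_compaction_duration_seconds_bucket[1h]))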
# HELP prometheus_tsdb_head_max_time_seconds Maximum timestamp of the head block.
# TYPE prometheus_tsdb_head_max_time_seconds gauge
prometheus_tsdb_head_max_time_seconds 1.769748656e+09
# HELP prometheus_tsdb_head_min_time Minimum time bound of the head block. The unit is decided by the library consumer.
# TYPE prometheus_tsdb_head_min_time gauge
prometheus_tsdb_head_min_time 1.7697384e+12
# HELP prometheus_tsdb_head_min_time_seconds Minimum time bound of the head block.
# TYPE prometheus_tsdb_head_min_time_seconds gauge
prometheus_tsdb_head_min_time_seconds 1.7697384e+09
# HELP prometheus_tsdb_head_samples_appended_total Total number of appended samples.
# TYPE prometheus_tsdb_head_samples_appended_total counter
prometheus_tsdb_head_samples_appended_total 7.69139103e+08
# HELP prometheus_tsdb_head_series Total number of series in the head block.
# TYPE prometheus_tsdb_head_series gauge
prometheus_tsdb_head_series 186750
# HELP prometheus_tsdb_head_series_created_total Total number of series created in the head
# TYPE prometheus_tsdb_head_series_created_total counter
prometheus_tsdb_head_series_created_total 257934
# HELP prometheus_tsdb_head_series_not_found_total Total number of requests for series that were not found.
# TYPE prometheus_tsdb_head_series_not_found_total counter
prometheus_tsdb_head_series_not_found_total 0
# HELP prometheus_tsdb_head_series_removed_total Total number of series removed in the head
# TYPE prometheus_tsdb_head_series_removed_total counter
prometheus_tsdb_head_series_removed_total 71184
# HELP prometheus_tsdb_head_truncations_failed_total Total number of head truncations that failed.
# TYPE prometheus_tsdb_head_truncations_failed_total counter
prometheus_tsdb_head_truncations_failed_total 0
# HELP prometheus_tsdb_head_truncations_total Total number of head truncations attempted.
# TYPE prometheus_tsdb_head_truncations_total counter
prometheus_tsdb_head_truncations_total 488
# HELP prometheus_tsdb_isolation_high_watermark The highest TSDB append ID that has been given out.
# TYPE prometheus_tsdb_isolation_high_watermark gauge
prometheus_tsdb_isolation_high_watermark 492106
# HELP prometheus_tsdb_isolation_low_watermark The lowest TSDB append ID that is still referenced.
# TYPE prometheus_tsdb_isolation_low_watermark gauge
prometheus_tsdb_isolation_low_watermark 492106
# HELP prometheus_tsdb_lowest_timestamp Lowest timestamp value stored in the database. The unit is decided by the library consumer.
# TYPE prometheus_tsdb_lowest_timestamp gauge
prometheus_tsdb_lowest_timestamp 1.7695224e+12
# HELP prometheus_tsdb_lowest_timestamp_seconds Lowest timestamp value stored in the database.
# TYPE prometheus_tsdb_lowest_timestamp_seconds gauge
prometheus_tsdb_lowest_timestamp_seconds 1.7695224e+09
# HELP prometheus_tsdb_mmap_chunk_corruptions_total Total number of memory-mapped chunk corruptions.
# TYPE prometheus_tsdb_mmap_chunk_corruptions_total counter
prometheus_tsdb_mmap_chunk_corruptions_total 0
# HELP prometheus_tsdb_out_of_bound_samples_total Total number of out of bound samples ingestion failed attempts.
# TYPE prometheus_tsdb_out_of_bound_samples_total counter
prometheus_tsdb_out_of_bound_samples_total 0
# HELP prometheus_tsdb_out_of_order_samples_total Total number of out of order samples ingestion failed attempts.
# TYPE prometheus_tsdb_out_of_order_samples_total counter
prometheus_tsdb_out_of_order_samples_total 0
# HELP prometheus_tsdb_reloads_failures_total Number of times the database failed to reload block data from disk.
# TYPE prometheus_tsdb_reloads_failures_total counter
prometheus_tsdb_reloads_failures_total 0
# HELP prometheus_tsdb_reloads_total Number of times the database reloaded block data from disk.
# TYPE prometheus_tsdb_reloads_total counter
prometheus_tsdb_reloads_total 502
# HELP prometheus_tsdb_retention_limit_bytes Max number of bytes to be retained in the tsdb blocks, configured 0 means disabled
# TYPE prometheus_tsdb_retention_limit_bytes gauge
prometheus_tsdb_retention_limit_bytes 0
# HELP prometheus_tsdb_size_retentions_total The number of times that blocks were deleted because the maximum number of bytes was exceeded.
# TYPE prometheus_tsdb_size_retentions_total counter
prometheus_tsdb_size_retentions_total 0
# HELP prometheus_tsdb_storage_blocks_bytes The number of bytes that are currently used for local storage by all blocks.
# TYPE prometheus_tsdb_storage_blocks_bytes gauge
prometheus_tsdb_storage_blocks_bytes 1.342815035e+09
# HELP prometheus_tsdb_symbol_table_size_bytes Size of symbol table on disk (in bytes)
# TYPE prometheus_tsdb_symbol_table_size_bytes gauge
prometheus_tsdb_symbol_table_size_bytes 9392
# HELP prometheus_tsdb_time_retentions_total The number of times that blocks were deleted because the maximum time limit was exceeded.
# TYPE prometheus_tsdb_time_retentions_total counter
prometheus_tsdb_time_retentions_total 1
# HELP prometheus_tsdb_tombstone_cleanup_seconds The time taken to recompact blocks to remove tombstones.
# TYPE prometheus_tsdb_tombstone_cleanup_seconds histogram
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.005"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.01"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.025"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.05"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.1"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.25"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="0.5"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="1"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="2.5"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="5"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="10"} 0
prometheus_tsdb_tombstone_cleanup_seconds_bucket{le="+Inf"} 0
prometheus_tsdb_tombstone_cleanup_seconds_sum 0
prometheus_tsdb_tombstone_cleanup_seconds_count 0
# HELP prometheus_tsdb_vertical_compactions_total Total number of compactions done on overlapping blocks.
# TYPE prometheus_tsdb_vertical_compactions_total counter
prometheus_tsdb_vertical_compactions_total 0
# HELP prometheus_tsdb_wal_completed_pages_total Total number of completed pages.
# TYPE prometheus_tsdb_wal_completed_pages_total counter
prometheus_tsdb_wal_completed_pages_total 127096
# HELP prometheus_tsdb_wal_corruptions_total Total number of WAL corruptions.
# TYPE prometheus_tsdb_wal_corruptions_total counter
prometheus_tsdb_wal_corruptions_total 0
# HELP prometheus_tsdb_wal_fsync_duration_seconds Duration of WAL fsync.
# TYPE prometheus_tsdb_wal_fsync_duration_seconds summary
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.5"} NaN
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.9"} NaN
prometheus_tsdb_wal_fsync_duration_seconds{quantile="0.99"} NaN
prometheus_tsdb_wal_fsync_duration_seconds_sum 4.099716436999999
prometheus_tsdb_wal_fsync_duration_seconds_count 498
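# Note: the NaN quantiles above indicate no fsync was observed inside the
# summary's sliding time window; _sum and _count still give a long-run
# average, sketched as:
#   prometheus_tsdb_wal_fsync_duration_seconds_sum / prometheus_tsdb_wal_fsync_duration_seconds_count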
# HELP prometheus_tsdb_wal_page_flushes_total Total number of page flushes.
# TYPE prometheus_tsdb_wal_page_flushes_total counter
prometheus_tsdb_wal_page_flushes_total 581880
# HELP prometheus_tsdb_wal_segment_current WAL segment index that TSDB is currently writing to.
# TYPE prometheus_tsdb_wal_segment_current gauge
prometheus_tsdb_wal_segment_current 6285
# HELP prometheus_tsdb_wal_truncate_duration_seconds Duration of WAL truncation.
# TYPE prometheus_tsdb_wal_truncate_duration_seconds summary
prometheus_tsdb_wal_truncate_duration_seconds_sum 1145.9813914170002
prometheus_tsdb_wal_truncate_duration_seconds_count 249
# HELP prometheus_tsdb_wal_truncations_failed_total Total number of WAL truncations that failed.
# TYPE prometheus_tsdb_wal_truncations_failed_total counter
prometheus_tsdb_wal_truncations_failed_total 0
# HELP prometheus_tsdb_wal_truncations_total Total number of WAL truncations attempted.
# TYPE prometheus_tsdb_wal_truncations_total counter
prometheus_tsdb_wal_truncations_total 249
# HELP prometheus_tsdb_wal_writes_failed_total Total number of WAL writes that failed.
# TYPE prometheus_tsdb_wal_writes_failed_total counter
prometheus_tsdb_wal_writes_failed_total 0
# HELP prometheus_web_federation_errors_total Total number of errors that occurred while sending federation responses.
# TYPE prometheus_web_federation_errors_total counter
prometheus_web_federation_errors_total 0
# HELP prometheus_web_federation_warnings_total Total number of warnings that occurred while sending federation responses.
# TYPE prometheus_web_federation_warnings_total counter
prometheus_web_federation_warnings_total 0
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 3634
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
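# Example: promhttp_metric_handler_requests_total counts scrapes of this
# /metrics endpoint by status code; a quick sketch of scrape traffic:
#   rate(promhttp_metric_handler_requests_total{code="200"}[5m])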