affinity: {}
| (object) Pod affinity |
allowedMetricsEndpoints[0]: /metrics
| (string) |
annotations: {}
| (object) Annotations to be added to the deployment |
config:
  global:
    scrape_interval: 10s
  scrape_configs:
    - job_name: vmagent
      static_configs:
        - targets:
            - localhost:8429
    - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      job_name: kubernetes-apiservers
      kubernetes_sd_configs:
        - role: endpoints
      relabel_configs:
        - action: keep
          regex: default;kubernetes;https
          source_labels:
            - __meta_kubernetes_namespace
            - __meta_kubernetes_service_name
            - __meta_kubernetes_endpoint_port_name
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        insecure_skip_verify: true
    - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      job_name: kubernetes-nodes
      kubernetes_sd_configs:
        - role: node
      relabel_configs:
        - action: labelmap
          regex: __meta_kubernetes_node_label_(.+)
        - replacement: kubernetes.default.svc:443
          target_label: __address__
        - regex: (.+)
          replacement: /api/v1/nodes/$1/proxy/metrics
          source_labels:
            - __meta_kubernetes_node_name
          target_label: __metrics_path__
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        insecure_skip_verify: true
    - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      honor_timestamps: false
      job_name: kubernetes-nodes-cadvisor
      kubernetes_sd_configs:
        - role: node
      relabel_configs:
        - action: labelmap
          regex: __meta_kubernetes_node_label_(.+)
        - replacement: kubernetes.default.svc:443
          target_label: __address__
        - regex: (.+)
          replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor
          source_labels:
            - __meta_kubernetes_node_name
          target_label: __metrics_path__
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        insecure_skip_verify: true
    - job_name: kubernetes-service-endpoints
      kubernetes_sd_configs:
        - role: endpointslices
      relabel_configs:
        - action: drop
          regex: true
          source_labels:
            - __meta_kubernetes_pod_container_init
        - action: keep_if_equal
          source_labels:
            - __meta_kubernetes_service_annotation_prometheus_io_port
            - __meta_kubernetes_pod_container_port_number
        - action: keep
          regex: true
          source_labels:
            - __meta_kubernetes_service_annotation_prometheus_io_scrape
        - action: replace
          regex: (https?)
          source_labels:
            - __meta_kubernetes_service_annotation_prometheus_io_scheme
          target_label: __scheme__
        - action: replace
          regex: (.+)
          source_labels:
            - __meta_kubernetes_service_annotation_prometheus_io_path
          target_label: __metrics_path__
        - action: replace
          regex: ([^:]+)(?::\d+)?;(\d+)
          replacement: $1:$2
          source_labels:
            - __address__
            - __meta_kubernetes_service_annotation_prometheus_io_port
          target_label: __address__
        - action: labelmap
          regex: __meta_kubernetes_service_label_(.+)
        - source_labels:
            - __meta_kubernetes_pod_name
          target_label: pod
        - source_labels:
            - __meta_kubernetes_pod_container_name
          target_label: container
        - source_labels:
            - __meta_kubernetes_namespace
          target_label: namespace
        - source_labels:
            - __meta_kubernetes_service_name
          target_label: service
        - replacement: ${1}
          source_labels:
            - __meta_kubernetes_service_name
          target_label: job
        - action: replace
          source_labels:
            - __meta_kubernetes_pod_node_name
          target_label: node
    - job_name: kubernetes-service-endpoints-slow
      kubernetes_sd_configs:
        - role: endpointslices
      relabel_configs:
        - action: drop
          regex: true
          source_labels:
            - __meta_kubernetes_pod_container_init
        - action: keep_if_equal
          source_labels:
            - __meta_kubernetes_service_annotation_prometheus_io_port
            - __meta_kubernetes_pod_container_port_number
        - action: keep
          regex: true
          source_labels:
            - __meta_kubernetes_service_annotation_prometheus_io_scrape_slow
        - action: replace
          regex: (https?)
          source_labels:
            - __meta_kubernetes_service_annotation_prometheus_io_scheme
          target_label: __scheme__
        - action: replace
          regex: (.+)
          source_labels:
            - __meta_kubernetes_service_annotation_prometheus_io_path
          target_label: __metrics_path__
        - action: replace
          regex: ([^:]+)(?::\d+)?;(\d+)
          replacement: $1:$2
          source_labels:
            - __address__
            - __meta_kubernetes_service_annotation_prometheus_io_port
          target_label: __address__
        - action: labelmap
          regex: __meta_kubernetes_service_label_(.+)
        - source_labels:
            - __meta_kubernetes_pod_name
          target_label: pod
        - source_labels:
            - __meta_kubernetes_pod_container_name
          target_label: container
        - source_labels:
            - __meta_kubernetes_namespace
          target_label: namespace
        - source_labels:
            - __meta_kubernetes_service_name
          target_label: service
        - replacement: ${1}
          source_labels:
            - __meta_kubernetes_service_name
          target_label: job
        - action: replace
          source_labels:
            - __meta_kubernetes_pod_node_name
          target_label: node
      scrape_interval: 5m
      scrape_timeout: 30s
    - job_name: kubernetes-services
      kubernetes_sd_configs:
        - role: service
      metrics_path: /probe
      params:
        module:
          - http_2xx
      relabel_configs:
        - action: keep
          regex: true
          source_labels:
            - __meta_kubernetes_service_annotation_prometheus_io_probe
        - source_labels:
            - __address__
          target_label: __param_target
        - replacement: blackbox
          target_label: __address__
        - source_labels:
            - __param_target
          target_label: instance
        - action: labelmap
          regex: __meta_kubernetes_service_label_(.+)
        - source_labels:
            - __meta_kubernetes_namespace
          target_label: namespace
        - source_labels:
            - __meta_kubernetes_service_name
          target_label: service
    - job_name: kubernetes-pods
      kubernetes_sd_configs:
        - role: pod
      relabel_configs:
        - action: drop
          regex: true
          source_labels:
            - __meta_kubernetes_pod_container_init
        - action: keep_if_equal
          source_labels:
            - __meta_kubernetes_pod_annotation_prometheus_io_port
            - __meta_kubernetes_pod_container_port_number
        - action: keep
          regex: true
          source_labels:
            - __meta_kubernetes_pod_annotation_prometheus_io_scrape
        - action: replace
          regex: (.+)
          source_labels:
            - __meta_kubernetes_pod_annotation_prometheus_io_path
          target_label: __metrics_path__
        - action: replace
          regex: ([^:]+)(?::\d+)?;(\d+)
          replacement: $1:$2
          source_labels:
            - __address__
            - __meta_kubernetes_pod_annotation_prometheus_io_port
          target_label: __address__
        - action: labelmap
          regex: __meta_kubernetes_pod_label_(.+)
        - source_labels:
            - __meta_kubernetes_pod_name
          target_label: pod
        - source_labels:
            - __meta_kubernetes_pod_container_name
          target_label: container
        - source_labels:
            - __meta_kubernetes_namespace
          target_label: namespace
        - action: replace
          source_labels:
            - __meta_kubernetes_pod_node_name
          target_label: node
| (object) VMAgent scrape configuration |
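The default kubernetes-pods and kubernetes-service-endpoints jobs above discover targets through prometheus.io/* annotations (the relabel rules reference them as __meta_kubernetes_*_annotation_prometheus_io_*). A minimal sketch of pod annotations that this default config reacts to; the port and path values are illustrative:

```yaml
# Illustrative pod metadata, assuming the default .config above is used unchanged.
metadata:
  annotations:
    prometheus.io/scrape: "true"    # required by the keep rule on ..._prometheus_io_scrape
    prometheus.io/port: "8080"      # example port; rewritten into __address__
    prometheus.io/path: "/metrics"  # example path; rewritten into __metrics_path__
```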
configMap: ""
| (string) VMAgent scraping configuration: uses an existing ConfigMap if specified, otherwise the .config values are used |
containerWorkingDir: /
| (string) Container working directory |
daemonSet:
  spec: {}
| (object) K8s DaemonSet specific variables |
deployment:
  spec:
    strategy: {}
| (object) K8s Deployment specific variables |
deployment.spec.strategy: {}
| (object) Deployment strategy. Check here for details |
emptyDir: {}
| (object) Empty dir configuration for the case when persistence is disabled |
env: []
| (list) Additional environment variables (e.g. secret tokens, flags). Check here for more details. |
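A minimal sketch of passing extra environment variables, including one sourced from an existing Secret; the variable and Secret names are hypothetical:

```yaml
env:
  - name: LOG_LEVEL                  # hypothetical plain variable
    value: "INFO"
  - name: REMOTE_WRITE_PASSWORD      # hypothetical; read from a pre-existing Secret
    valueFrom:
      secretKeyRef:
        name: my-remote-write-secret
        key: password
```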
envFrom: []
| (list) Specify alternative source for env variables |
fullnameOverride: ""
| (string) Override resources fullname |
global.cluster.dnsDomain: cluster.local.
| (string) K8s cluster domain suffix, used for building storage pods’ FQDN. Details are here |
global.compatibility:
  openshift:
    adaptSecurityContext: auto
| (object) OpenShift security context compatibility configuration |
global.image.registry: ""
| (string) Image registry, that can be shared across multiple helm charts |
global.imagePullSecrets: []
| (list) Image pull secrets, that can be shared across multiple helm charts |
horizontalPodAutoscaling:
  enabled: false
  maxReplicas: 10
  metrics: []
  minReplicas: 1
| (object) Horizontal Pod Autoscaling. Note that it is not intended to be used for vmagents that perform scraping. To scale scraping vmagents, check here |
horizontalPodAutoscaling.enabled: false
| (bool) Use HPA for vmagent |
horizontalPodAutoscaling.maxReplicas: 10
| (int) Maximum replicas for HPA to use to scale vmagent |
horizontalPodAutoscaling.metrics: []
| (list) Metrics for HPA to use to scale vmagent |
horizontalPodAutoscaling.minReplicas: 1
| (int) Minimum replicas for HPA to use to scale vmagent |
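A sketch of enabling the HPA with a CPU utilization target, assuming the metrics entries are passed through as standard autoscaling/v2 MetricSpec objects:

```yaml
horizontalPodAutoscaling:
  enabled: true
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70   # example target
```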
image.pullPolicy: IfNotPresent
| (string) Image pull policy |
image.registry: ""
| (string) Image registry |
image.repository: victoriametrics/vmagent
| (string) Image repository |
image.tag: ""
| (string) Image tag, set to Chart.AppVersion by default |
image.variant: ""
| (string) Variant of the image to use. e.g. enterprise, scratch |
imagePullSecrets: []
| (list) Image pull secrets |
ingress.annotations: {}
| (object) Ingress annotations |
ingress.enabled: false
| (bool) Enable deployment of ingress for agent |
ingress.hosts:
  - name: vmagent.local
    path:
      - /
    port: http
| (list) Array of host objects |
ingress.ingressClassName: ""
| (string) Ingress controller class name |
ingress.pathType: Prefix
| (string) Ingress path type |
ingress.tls: []
| (list) Array of TLS objects |
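A sketch of exposing the agent through an ingress; the host, class name, and TLS secret are examples only:

```yaml
ingress:
  enabled: true
  ingressClassName: nginx            # example controller class
  hosts:
    - name: vmagent.example.com      # example host
      path:
        - /
      port: http
  tls:
    - secretName: vmagent-tls        # hypothetical TLS secret
      hosts:
        - vmagent.example.com
```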
initContainers: []
| (list) Init containers for vmagent |
license:
key: ""
secret:
key: ""
name: ""
| (object) Enterprise license key configuration for VictoriaMetrics enterprise. Required only for VictoriaMetrics enterprise. Check docs here. For more information, visit the site. Request a trial license here. Supported starting from VictoriaMetrics v1.94.0 |
license.key: ""
| (string) License key |
license.secret:
key: ""
name: ""
| (object) Use existing secret with license key |
license.secret.key: ""
| (string) Key in secret with license key |
license.secret.name: ""
| (string) Existing secret name |
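A sketch of supplying an enterprise license from an existing Secret instead of an inline key; the Secret name and key are hypothetical:

```yaml
license:
  secret:
    name: vm-license     # hypothetical existing Secret
    key: license.key     # key inside that Secret holding the license
```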
lifecycle: {}
| (object) Specify pod lifecycle |
mode: deployment
| (string) VMAgent mode: daemonSet, deployment, statefulSet |
nameOverride: ""
| (string) Override chart name |
nodeSelector: {}
| (object) Pod’s node selector. Details are here |
persistentVolume.accessModes:
  - ReadWriteOnce
| (list) Array of access modes. Must match those of existing PV or dynamic provisioner. Details are here |
persistentVolume.annotations: {}
| (object) Persistent volume annotations |
persistentVolume.enabled: false
| (bool) Create/use Persistent Volume Claim for server component. Empty dir if false |
persistentVolume.existingClaim: ""
| (string) Existing Claim name. If defined, PVC must be created manually before volume will be bound |
persistentVolume.matchLabels: {}
| (object) Bind Persistent Volume by labels. Must match all labels of targeted PV. |
persistentVolume.size: 10Gi
| (string) Size of the volume. Should be calculated based on the metrics you send and the retention policy you set. |
persistentVolume.storageClassName: ""
| (string) StorageClass to use for the persistent volume. Requires persistentVolume.enabled: true. If defined, the PVC is created automatically |
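A sketch of enabling persistence with an explicit size and StorageClass; the values are examples:

```yaml
persistentVolume:
  enabled: true
  size: 20Gi                  # example size
  storageClassName: standard  # example StorageClass
  accessModes:
    - ReadWriteOnce
```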
podAnnotations: {}
| (object) Annotations to be added to pod |
podDisruptionBudget:
  enabled: false
  labels: {}
| (object) See kubectl explain poddisruptionbudget.spec for more or check official documentation |
podLabels: {}
| (object) Extra labels for Pods only |
podSecurityContext:
  enabled: true
| (object) Security context to be added to pod |
priorityClassName: ""
| (string) Priority class to be assigned to the pod(s) |
probe.liveness:
  initialDelaySeconds: 5
  periodSeconds: 15
  tcpSocket: {}
  timeoutSeconds: 5
| (object) Liveness probe |
probe.readiness:
  httpGet: {}
  initialDelaySeconds: 5
  periodSeconds: 15
| (object) Readiness probe |
probe.startup: {}
| (object) Startup probe |
rbac.annotations: {}
| (object) Role/RoleBinding annotations |
rbac.create: true
| (bool) Enables Role/RoleBinding creation |
rbac.namespaced: false
| (bool) If true and rbac.create is enabled, a Role/RoleBinding is deployed instead of a ClusterRole/ClusterRoleBinding |
remoteWrite: []
| (list) Generates remoteWrite.* flags and ConfigMaps with value content for values that are of type list of map. Each item should contain a url param to pass validation. |
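A minimal sketch of two remote write targets; each item carries the required url param, and the endpoints shown are hypothetical:

```yaml
remoteWrite:
  - url: http://vmsingle-victoria-metrics-single-server:8428/api/v1/write   # hypothetical single-node endpoint
  - url: http://vminsert.example.svc:8480/insert/0/prometheus/api/v1/write  # hypothetical cluster vminsert endpoint
```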
replicaCount: 1
| (int) Replica count |
resources: {}
| (object) Resource object. Details are here |
schedulerName: ""
| (string) Use an alternate scheduler, e.g. “stork”. Check details here |
securityContext:
  enabled: true
| (object) Security context to be added to pod’s containers |
service.annotations: {}
| (object) Service annotations |
service.clusterIP: ""
| (string) Service ClusterIP |
service.enabled: false
| (bool) Enable agent service |
service.externalIPs: []
| (list) Service external IPs. Check here for details |
service.externalTrafficPolicy: ""
| (string) Service external traffic policy. Check here for details |
service.healthCheckNodePort: ""
| (string) Health check node port for a service. Check here for details |
service.ipFamilies: []
| (list) List of service IP families. Check here for details. |
service.ipFamilyPolicy: ""
| (string) Service IP family policy. Check here for details. |
service.loadBalancerIP: ""
| (string) Service load balancer IP |
service.loadBalancerSourceRanges: []
| (list) Load balancer source range |
service.servicePort: 8429
| (int) Service port |
service.targetPort: http
| (string) Target port |
service.type: ClusterIP
| (string) Service type |
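A sketch of enabling the agent service with its default port (the service is disabled by default):

```yaml
service:
  enabled: true
  type: ClusterIP
  servicePort: 8429
```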
serviceAccount.annotations: {}
| (object) Annotations to add to the service account |
serviceAccount.automountToken: true
| (bool) Mount API token to pod directly |
serviceAccount.create: true
| (bool) Specifies whether a service account should be created |
serviceAccount.name: null
| (string) The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
serviceMonitor.annotations: {}
| (object) Service Monitor annotations |
serviceMonitor.basicAuth: {}
| (object) Basic auth params for Service Monitor |
serviceMonitor.enabled: false
| (bool) Enable deployment of Service Monitor for server component. This is a Prometheus operator object |
serviceMonitor.metricRelabelings: []
| (list) Service Monitor metricRelabelings |
serviceMonitor.relabelings: []
| (list) Service Monitor relabelings |
serviceMonitor.targetPort: http
| (string) Service Monitor targetPort |
statefulSet:
  clusterMode: false
  replicationFactor: 1
  spec:
    updateStrategy: {}
| (object) K8s StatefulSet specific variables |
statefulSet.clusterMode: false
| (bool) Create a cluster of vmagents. Check here. Available since v1.77.2 |
statefulSet.replicationFactor: 1
| (int) Replication factor for vmagent in cluster mode |
statefulSet.spec.updateStrategy: {}
| (object) StatefulSet update strategy. Check here for details. |
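A sketch of running vmagent as a StatefulSet in cluster mode so that scrape targets are sharded across replicas; the replica count and replication factor are example values:

```yaml
mode: statefulSet
replicaCount: 2
statefulSet:
  clusterMode: true        # shard scrape targets across replicas
  replicationFactor: 1     # how many replicas scrape the same targets
```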
tolerations: []
| (list) Node tolerations for server scheduling to nodes with taints. Details are here |
topologySpreadConstraints: []
| (list) Pod topologySpreadConstraints |
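A sketch of spreading agent pods across zones; the label selector assumes the chart's standard app.kubernetes.io/name label and should be checked against the rendered pod labels:

```yaml
topologySpreadConstraints:
  - maxSkew: 1
    topologyKey: topology.kubernetes.io/zone
    whenUnsatisfiable: ScheduleAnyway
    labelSelector:
      matchLabels:
        app.kubernetes.io/name: victoria-metrics-agent   # assumed chart label
```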