additionalVictoriaMetricsMap: null
| (string) Provide custom recording or alerting rules to be deployed into the cluster. |
alertmanager.annotations: {}
| (object) Alertmanager annotations |
alertmanager.config:
receivers:
- name: blackhole
route:
receiver: blackhole
| (object) Alertmanager configuration |
alertmanager.enabled: true
| (bool) Create VMAlertmanager CR |
alertmanager.ingress:
annotations: {}
enabled: false
extraPaths: []
hosts:
- alertmanager.domain.com
labels: {}
path: '{{ .Values.alertmanager.spec.routePrefix | default "/" }}'
pathType: Prefix
tls: []
| (object) Alertmanager ingress configuration |
alertmanager.monzoTemplate:
enabled: true
| (object) Better alert templates for slack source |
alertmanager.spec:
configSecret: ""
externalURL: ""
image:
tag: v0.28.1
port: "9093"
replicaCount: 1
routePrefix: /
selectAllByDefault: true
| (object) Full spec for VMAlertmanager CRD. Allowed values described here |
alertmanager.spec.configSecret: ""
| (string) If this one defined, it will be used for alertmanager configuration and config parameter will be ignored |
alertmanager.templateFiles: {}
| (object) Extra alert templates |
alertmanager.useManagedConfig: false
| (bool) Enable storing .Values.alertmanager.config in a VMAlertmanagerConfig CR instead of a k8s Secret.
Note: VMAlertmanagerConfig and plain Alertmanager config structures are not equal.
If you're migrating an existing config, please make sure that .Values.alertmanager.config:
- with useManagedConfig: false has the structure described here.
- with useManagedConfig: true has the structure described here.
|
argocdReleaseOverride: ""
| (string) If this chart is used in ArgoCD with the "releaseName" field, VMServiceScrapes can't select the proper services. For correct operation, set the value 'argocdReleaseOverride=$ARGOCD_APP_NAME' |
coreDns.enabled: true
| (bool) Enable CoreDNS metrics scraping |
coreDns.service.enabled: true
| (bool) Create service for CoreDNS metrics |
coreDns.service.port: 9153
| (int) CoreDNS service port |
coreDns.service.selector:
k8s-app: kube-dns
| (object) CoreDNS service pod selector |
coreDns.service.targetPort: 9153
| (int) CoreDNS service target port |
coreDns.vmScrape:
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
port: http-metrics
jobLabel: jobLabel
namespaceSelector:
matchNames:
- kube-system
| (object) Spec for VMServiceScrape CRD is here |
defaultDashboards.annotations: {}
| (object) |
defaultDashboards.dashboards:
node-exporter-full:
enabled: true
victoriametrics-operator:
enabled: true
victoriametrics-vmalert:
enabled: true
| (object) Create dashboards as a ConfigMap even if the dependency they require is not installed |
defaultDashboards.dashboards.node-exporter-full:
enabled: true
| (object) When using client-side apply in ArgoCD, this dashboard exceeds the annotation size limit and causes k8s issues unless server-side apply is used. See this issue |
defaultDashboards.defaultTimezone: utc
| (string) |
defaultDashboards.enabled: true
| (bool) Enable custom dashboards installation |
defaultDashboards.grafanaOperator.enabled: false
| (bool) Create dashboards as CRDs (requires grafana-operator to be installed) |
defaultDashboards.grafanaOperator.spec.allowCrossNamespaceImport: false
| (bool) |
defaultDashboards.grafanaOperator.spec.instanceSelector.matchLabels.dashboards: grafana
| (string) |
defaultDashboards.labels: {}
| (object) |
defaultDatasources.alertmanager:
datasources:
- access: proxy
jsonData:
implementation: prometheus
name: Alertmanager
perReplica: false
| (object) List of alertmanager datasources. Alertmanager generated url will be added to each datasource in template if alertmanager is enabled |
defaultDatasources.alertmanager.perReplica: false
| (bool) Create per replica alertmanager compatible datasource |
defaultDatasources.grafanaOperator.annotations: {}
| (object) |
defaultDatasources.grafanaOperator.enabled: false
| (bool) Create datasources as CRDs (requires grafana-operator to be installed) |
defaultDatasources.grafanaOperator.spec.allowCrossNamespaceImport: false
| (bool) |
defaultDatasources.grafanaOperator.spec.instanceSelector.matchLabels.dashboards: grafana
| (string) |
defaultDatasources.victoriametrics.datasources:
- access: proxy
isDefault: true
name: VictoriaMetrics
type: prometheus
- access: proxy
isDefault: false
name: VictoriaMetrics (DS)
type: victoriametrics-metrics-datasource
| (list) List of prometheus compatible datasource configurations. VM url will be added to each of them in templates. |
defaultDatasources.victoriametrics.perReplica: false
| (bool) Create per replica prometheus compatible datasource |
defaultRules:
additionalGroupByLabels: []
alerting:
spec:
annotations: {}
labels: {}
annotations: {}
create: true
group:
spec:
params: {}
groups:
alertmanager:
create: true
rules: {}
etcd:
create: true
rules: {}
general:
create: true
rules: {}
k8sContainerCpuLimits:
create: true
rules: {}
k8sContainerCpuRequests:
create: true
rules: {}
k8sContainerCpuUsageSecondsTotal:
create: true
rules: {}
k8sContainerMemoryCache:
create: true
rules: {}
k8sContainerMemoryLimits:
create: true
rules: {}
k8sContainerMemoryRequests:
create: true
rules: {}
k8sContainerMemoryRss:
create: true
rules: {}
k8sContainerMemorySwap:
create: true
rules: {}
k8sContainerMemoryWorkingSetBytes:
create: true
rules: {}
k8sContainerResource:
create: true
rules: {}
k8sPodOwner:
create: true
rules: {}
kubeApiserver:
create: true
rules: {}
kubeApiserverAvailability:
create: true
rules: {}
kubeApiserverBurnrate:
create: true
rules: {}
kubeApiserverHistogram:
create: true
rules: {}
kubeApiserverSlos:
create: true
rules: {}
kubePrometheusGeneral:
create: true
rules: {}
kubePrometheusNodeRecording:
create: true
rules: {}
kubeScheduler:
create: true
rules: {}
kubeStateMetrics:
create: true
rules: {}
kubelet:
create: true
rules: {}
kubernetesApps:
create: true
rules: {}
targetNamespace: .*
kubernetesResources:
create: true
rules: {}
kubernetesStorage:
create: true
rules: {}
targetNamespace: .*
kubernetesSystem:
create: true
rules: {}
kubernetesSystemApiserver:
create: true
rules: {}
kubernetesSystemControllerManager:
create: true
rules: {}
kubernetesSystemKubelet:
create: true
rules: {}
kubernetesSystemScheduler:
create: true
rules: {}
node:
create: true
rules: {}
nodeNetwork:
create: true
rules: {}
vmHealth:
create: true
rules: {}
vmagent:
create: true
rules: {}
vmcluster:
create: true
rules: {}
vmoperator:
create: true
rules: {}
vmsingle:
create: true
rules: {}
labels: {}
recording:
spec:
annotations: {}
labels: {}
rule:
spec:
annotations: {}
labels: {}
rules: {}
runbookUrl: https://runbooks.prometheus-operator.dev/runbooks
| (object) Create default rules for monitoring the cluster |
defaultRules.additionalGroupByLabels: []
| (list) Labels, which are used for grouping results of the queries. Note that these labels are joined with .Values.global.clusterLabel |
defaultRules.alerting:
spec:
annotations: {}
labels: {}
| (object) Common properties for VMRules alerts |
defaultRules.alerting.spec.annotations: {}
| (object) Additional annotations for VMRule alerts |
defaultRules.alerting.spec.labels: {}
| (object) Additional labels for VMRule alerts |
defaultRules.annotations: {}
| (object) Annotations for default rules |
defaultRules.group:
spec:
params: {}
| (object) Common properties for VMRule groups |
defaultRules.group.spec.params: {}
| (object) Optional HTTP URL parameters added to each rule request |
defaultRules.groups:
alertmanager:
create: true
rules: {}
etcd:
create: true
rules: {}
general:
create: true
rules: {}
k8sContainerCpuLimits:
create: true
rules: {}
k8sContainerCpuRequests:
create: true
rules: {}
k8sContainerCpuUsageSecondsTotal:
create: true
rules: {}
k8sContainerMemoryCache:
create: true
rules: {}
k8sContainerMemoryLimits:
create: true
rules: {}
k8sContainerMemoryRequests:
create: true
rules: {}
k8sContainerMemoryRss:
create: true
rules: {}
k8sContainerMemorySwap:
create: true
rules: {}
k8sContainerMemoryWorkingSetBytes:
create: true
rules: {}
k8sContainerResource:
create: true
rules: {}
k8sPodOwner:
create: true
rules: {}
kubeApiserver:
create: true
rules: {}
kubeApiserverAvailability:
create: true
rules: {}
kubeApiserverBurnrate:
create: true
rules: {}
kubeApiserverHistogram:
create: true
rules: {}
kubeApiserverSlos:
create: true
rules: {}
kubePrometheusGeneral:
create: true
rules: {}
kubePrometheusNodeRecording:
create: true
rules: {}
kubeScheduler:
create: true
rules: {}
kubeStateMetrics:
create: true
rules: {}
kubelet:
create: true
rules: {}
kubernetesApps:
create: true
rules: {}
targetNamespace: .*
kubernetesResources:
create: true
rules: {}
kubernetesStorage:
create: true
rules: {}
targetNamespace: .*
kubernetesSystem:
create: true
rules: {}
kubernetesSystemApiserver:
create: true
rules: {}
kubernetesSystemControllerManager:
create: true
rules: {}
kubernetesSystemKubelet:
create: true
rules: {}
kubernetesSystemScheduler:
create: true
rules: {}
node:
create: true
rules: {}
nodeNetwork:
create: true
rules: {}
vmHealth:
create: true
rules: {}
vmagent:
create: true
rules: {}
vmcluster:
create: true
rules: {}
vmoperator:
create: true
rules: {}
vmsingle:
create: true
rules: {}
| (object) Rule group properties |
defaultRules.groups.etcd.rules: {}
| (object) Common properties for all rules in a group |
defaultRules.labels: {}
| (object) Labels for default rules |
defaultRules.recording:
spec:
annotations: {}
labels: {}
| (object) Common properties for VMRules recording rules |
defaultRules.recording.spec.annotations: {}
| (object) Additional annotations for VMRule recording rules |
defaultRules.recording.spec.labels: {}
| (object) Additional labels for VMRule recording rules |
defaultRules.rule:
spec:
annotations: {}
labels: {}
| (object) Common properties for all VMRules |
defaultRules.rule.spec.annotations: {}
| (object) Additional annotations for all VMRules |
defaultRules.rule.spec.labels: {}
| (object) Additional labels for all VMRules |
defaultRules.rules: {}
| (object) Per rule properties |
defaultRules.runbookUrl: https://runbooks.prometheus-operator.dev/runbooks
| (string) Runbook url prefix for default rules |
external.grafana.datasource: VictoriaMetrics
| (string) External Grafana datasource name |
external.grafana.host: ""
| (string) External Grafana host |
external.vm:
read:
url: ""
write:
url: ""
| (object) External VM read and write URLs |
fullnameOverride: ""
| (string) Resource full name override |
global.cluster.dnsDomain: cluster.local.
| (string) K8s cluster domain suffix, used for building storage pods' FQDN. Details are here |
global.clusterLabel: cluster
| (string) Cluster label to use for dashboards and rules |
global.license:
key: ""
keyRef: {}
| (object) Global license configuration |
grafana:
enabled: true
forceDeployDatasource: false
ingress:
annotations: {}
enabled: false
extraPaths: []
hosts:
- grafana.domain.com
labels: {}
path: /
pathType: Prefix
tls: []
sidecar:
dashboards:
defaultFolderName: default
enabled: true
folder: /var/lib/grafana/dashboards
multicluster: false
provider:
name: default
orgid: 1
datasources:
enabled: true
initDatasources: true
label: grafana_datasource
vmScrape:
enabled: true
spec:
endpoints:
- port: '{{ .Values.grafana.service.portName }}'
selector:
matchLabels:
app.kubernetes.io/name: '{{ include "grafana.name" .Subcharts.grafana }}'
| (object) Grafana dependency chart configuration. For possible values refer here |
grafana.forceDeployDatasource: false
| (bool) Create datasource configmap even if grafana deployment has been disabled |
grafana.vmScrape:
enabled: true
spec:
endpoints:
- port: '{{ .Values.grafana.service.portName }}'
selector:
matchLabels:
app.kubernetes.io/name: '{{ include "grafana.name" .Subcharts.grafana }}'
| (object) Grafana VM scrape config |
grafana.vmScrape.spec:
endpoints:
- port: '{{ .Values.grafana.service.portName }}'
selector:
matchLabels:
app.kubernetes.io/name: '{{ include "grafana.name" .Subcharts.grafana }}'
| (object) Scrape configuration for Grafana |
kube-state-metrics:
enabled: true
vmScrape:
enabled: true
spec:
endpoints:
- honorLabels: true
metricRelabelConfigs:
- action: labeldrop
regex: (uid|container_id|image_id)
port: http
jobLabel: app.kubernetes.io/name
selector:
matchLabels:
app.kubernetes.io/instance: '{{ include "vm.release" . }}'
app.kubernetes.io/name: '{{ include "kube-state-metrics.name" (index .Subcharts "kube-state-metrics") }}'
| (object) kube-state-metrics dependency chart configuration. For possible values check here |
kube-state-metrics.vmScrape:
enabled: true
spec:
endpoints:
- honorLabels: true
metricRelabelConfigs:
- action: labeldrop
regex: (uid|container_id|image_id)
port: http
jobLabel: app.kubernetes.io/name
selector:
matchLabels:
app.kubernetes.io/instance: '{{ include "vm.release" . }}'
app.kubernetes.io/name: '{{ include "kube-state-metrics.name" (index .Subcharts "kube-state-metrics") }}'
| (object) Scrape configuration for Kube State Metrics |
kubeApiServer.enabled: true
| (bool) Enable Kube Api Server metrics scraping |
kubeApiServer.vmScrape:
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
port: https
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
serverName: kubernetes
jobLabel: component
namespaceSelector:
matchNames:
- default
selector:
matchLabels:
component: apiserver
provider: kubernetes
| (object) Spec for VMServiceScrape CRD is here |
kubeControllerManager.enabled: true
| (bool) Enable kube controller manager metrics scraping |
kubeControllerManager.endpoints: []
| (list) If your kube controller manager is not deployed as a pod, specify IPs it can be found on |
kubeControllerManager.service.enabled: true
| (bool) Create service for kube controller manager metrics scraping |
kubeControllerManager.service.port: 10257
| (int) Kube controller manager service port |
kubeControllerManager.service.selector:
component: kube-controller-manager
| (object) Kube controller manager service pod selector |
kubeControllerManager.service.targetPort: 10257
| (int) Kube controller manager service target port |
kubeControllerManager.vmScrape:
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
port: http-metrics
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
serverName: kubernetes
jobLabel: jobLabel
namespaceSelector:
matchNames:
- kube-system
| (object) Spec for VMServiceScrape CRD is here |
kubeDns.enabled: false
| (bool) Enable KubeDNS metrics scraping |
kubeDns.service.enabled: false
| (bool) Create Service for KubeDNS metrics |
kubeDns.service.ports:
dnsmasq:
port: 10054
targetPort: 10054
skydns:
port: 10055
targetPort: 10055
| (object) KubeDNS service ports |
kubeDns.service.selector:
k8s-app: kube-dns
| (object) KubeDNS service pods selector |
kubeDns.vmScrape:
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
port: http-metrics-dnsmasq
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
port: http-metrics-skydns
jobLabel: jobLabel
namespaceSelector:
matchNames:
- kube-system
| (object) Spec for VMServiceScrape CRD is here |
kubeEtcd.enabled: true
| (bool) Enable KubeETCD metrics scraping |
kubeEtcd.endpoints: []
| (list) If your etcd is not deployed as a pod, specify IPs it can be found on |
kubeEtcd.service.enabled: true
| (bool) Enable service for ETCD metrics scraping |
kubeEtcd.service.port: 2379
| (int) ETCD service port |
kubeEtcd.service.selector:
component: etcd
| (object) ETCD service pods selector |
kubeEtcd.service.targetPort: 2379
| (int) ETCD service target port |
kubeEtcd.vmScrape:
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
port: http-metrics
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
jobLabel: jobLabel
namespaceSelector:
matchNames:
- kube-system
| (object) Spec for VMServiceScrape CRD is here |
kubeProxy.enabled: false
| (bool) Enable kube proxy metrics scraping |
kubeProxy.endpoints: []
| (list) If your kube proxy is not deployed as a pod, specify IPs it can be found on |
kubeProxy.service.enabled: true
| (bool) Enable service for kube proxy metrics scraping |
kubeProxy.service.port: 10249
| (int) Kube proxy service port |
kubeProxy.service.selector:
k8s-app: kube-proxy
| (object) Kube proxy service pod selector |
kubeProxy.service.targetPort: 10249
| (int) Kube proxy service target port |
kubeProxy.vmScrape:
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
port: http-metrics
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
jobLabel: jobLabel
namespaceSelector:
matchNames:
- kube-system
| (object) Spec for VMServiceScrape CRD is here |
kubeScheduler.enabled: true
| (bool) Enable KubeScheduler metrics scraping |
kubeScheduler.endpoints: []
| (list) If your kube scheduler is not deployed as a pod, specify IPs it can be found on |
kubeScheduler.service.enabled: true
| (bool) Enable service for KubeScheduler metrics scraping |
kubeScheduler.service.port: 10259
| (int) KubeScheduler service port |
kubeScheduler.service.selector:
component: kube-scheduler
| (object) KubeScheduler service pod selector |
kubeScheduler.service.targetPort: 10259
| (int) KubeScheduler service target port |
kubeScheduler.vmScrape:
spec:
endpoints:
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
port: http-metrics
scheme: https
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
jobLabel: jobLabel
namespaceSelector:
matchNames:
- kube-system
| (object) Spec for VMServiceScrape CRD is here |
kubelet:
enabled: true
vmScrape:
kind: VMNodeScrape
spec:
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
honorTimestamps: false
interval: 30s
metricRelabelConfigs:
- action: labeldrop
regex: (uid)
- action: labeldrop
regex: (id|name)
- action: drop
regex: (rest_client_request_duration_seconds_bucket|rest_client_request_duration_seconds_sum|rest_client_request_duration_seconds_count)
source_labels:
- __name__
relabelConfigs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- sourceLabels:
- __metrics_path__
targetLabel: metrics_path
- replacement: kubelet
targetLabel: job
scheme: https
scrapeTimeout: 5s
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
insecureSkipVerify: true
vmScrapes:
cadvisor:
enabled: true
spec:
path: /metrics/cadvisor
kubelet:
spec: {}
probes:
enabled: true
spec:
path: /metrics/probes
resources:
enabled: true
spec:
path: /metrics/resource
| (object) Component scraping the kubelets |
kubelet.vmScrape:
kind: VMNodeScrape
spec:
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
honorLabels: true
honorTimestamps: false
interval: 30s
metricRelabelConfigs:
- action: labeldrop
regex: (uid)
- action: labeldrop
regex: (id|name)
- action: drop
regex: (rest_client_request_duration_seconds_bucket|rest_client_request_duration_seconds_sum|rest_client_request_duration_seconds_count)
source_labels:
- __name__
relabelConfigs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- sourceLabels:
- __metrics_path__
targetLabel: metrics_path
- replacement: kubelet
targetLabel: job
scheme: https
scrapeTimeout: 5s
tlsConfig:
caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
insecureSkipVerify: true
| (object) Spec for VMNodeScrape CRD is here |
kubelet.vmScrapes.cadvisor:
enabled: true
spec:
path: /metrics/cadvisor
| (object) Enable scraping /metrics/cadvisor from kubelet’s service |
kubelet.vmScrapes.probes:
enabled: true
spec:
path: /metrics/probes
| (object) Enable scraping /metrics/probes from kubelet’s service |
kubelet.vmScrapes.resources:
enabled: true
spec:
path: /metrics/resource
| (object) Enable scraping /metrics/resource from kubelet's service |
nameOverride: ""
| (string) Override chart name |
prometheus-node-exporter:
enabled: true
extraArgs:
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|erofs|sysfs|tracefs)$
service:
labels:
jobLabel: node-exporter
vmScrape:
enabled: true
spec:
endpoints:
- metricRelabelConfigs:
- action: drop
regex: /var/lib/kubelet/pods.+
source_labels:
- mountpoint
port: metrics
jobLabel: jobLabel
selector:
matchLabels:
app.kubernetes.io/name: '{{ include "prometheus-node-exporter.name" (index .Subcharts "prometheus-node-exporter") }}'
| (object) prometheus-node-exporter dependency chart configuration. For possible values check here |
prometheus-node-exporter.vmScrape:
enabled: true
spec:
endpoints:
- metricRelabelConfigs:
- action: drop
regex: /var/lib/kubelet/pods.+
source_labels:
- mountpoint
port: metrics
jobLabel: jobLabel
selector:
matchLabels:
app.kubernetes.io/name: '{{ include "prometheus-node-exporter.name" (index .Subcharts "prometheus-node-exporter") }}'
| (object) Node Exporter VM scrape config |
prometheus-node-exporter.vmScrape.spec:
endpoints:
- metricRelabelConfigs:
- action: drop
regex: /var/lib/kubelet/pods.+
source_labels:
- mountpoint
port: metrics
jobLabel: jobLabel
selector:
matchLabels:
app.kubernetes.io/name: '{{ include "prometheus-node-exporter.name" (index .Subcharts "prometheus-node-exporter") }}'
| (object) Scrape configuration for Node Exporter |
tenant: "0"
| (string) Tenant to use for Grafana datasources and remote write |
victoria-metrics-operator:
crds:
cleanup:
enabled: true
image:
pullPolicy: IfNotPresent
repository: bitnami/kubectl
plain: true
enabled: true
operator:
disable_prometheus_converter: false
serviceMonitor:
enabled: true
| (object) VictoriaMetrics Operator dependency chart configuration. More values can be found here. Also checkout here possible ENV variables to configure operator behaviour |
victoria-metrics-operator.operator.disable_prometheus_converter: false
| (bool) By default, operator converts prometheus-operator objects. |
vmagent.additionalRemoteWrites: []
| (list) Remote write configuration of VMAgent, allowed parameters defined in a spec |
vmagent.annotations: {}
| (object) VMAgent annotations |
vmagent.enabled: true
| (bool) Create VMAgent CR |
vmagent.ingress:
annotations: {}
enabled: false
extraPaths: []
hosts:
- vmagent.domain.com
labels: {}
path: ""
pathType: Prefix
tls: []
| (object) VMAgent ingress configuration |
vmagent.spec:
externalLabels: {}
extraArgs:
promscrape.dropOriginalLabels: "true"
promscrape.streamParse: "true"
port: "8429"
scrapeInterval: 20s
selectAllByDefault: true
| (object) Full spec for VMAgent CRD. Allowed values described here |
vmalert.additionalNotifierConfigs: {}
| (object) Allows to configure static notifiers, discover notifiers via Consul and DNS, see specification here. This configuration will be created as separate secret and mounted to VMAlert pod. |
vmalert.annotations: {}
| (object) VMAlert annotations |
vmalert.enabled: true
| (bool) Create VMAlert CR |
vmalert.ingress:
annotations: {}
enabled: false
extraPaths: []
hosts:
- vmalert.domain.com
labels: {}
path: ""
pathType: Prefix
tls: []
| (object) VMAlert ingress config |
vmalert.remoteWriteVMAgent: false
| (bool) Controls whether VMAlert should use VMAgent or VMInsert as a target for remotewrite |
vmalert.spec:
evaluationInterval: 20s
externalLabels: {}
extraArgs:
http.pathPrefix: /
port: "8080"
selectAllByDefault: true
| (object) Full spec for VMAlert CRD. Allowed values described here |
vmalert.templateFiles: {}
| (object) Extra VMAlert annotation templates |
vmauth.annotations: {}
| (object) VMAuth annotations |
vmauth.enabled: false
| (bool) Enable VMAuth CR |
vmauth.spec:
port: "8427"
unauthorizedUserAccessSpec:
disabled: false
discover_backend_ips: true
url_map:
- src_paths:
- '{{ .vm.read.path }}/.*'
url_prefix:
- '{{ urlJoin (omit .vm.read "path") }}/'
- src_paths:
- '{{ .vm.write.path }}/.*'
url_prefix:
- '{{ urlJoin (omit .vm.write "path") }}/'
| (object) Full spec for VMAuth CRD. Allowed values described here It’s possible to use given below predefined variables in spec: * {{ .vm.read }} - parsed vmselect, vmsingle or external.vm.read URL * {{ .vm.write }} - parsed vminsert, vmsingle or external.vm.write URL |
vmauth.spec.unauthorizedUserAccessSpec.disabled: false
| (bool) Flag, that allows to disable default VMAuth unauthorized user access config |
vmcluster.annotations: {}
| (object) VMCluster annotations |
vmcluster.enabled: false
| (bool) Create VMCluster CR |
vmcluster.ingress.insert.annotations: {}
| (object) Ingress annotations |
vmcluster.ingress.insert.enabled: false
| (bool) Enable deployment of ingress for server component |
vmcluster.ingress.insert.hosts: []
| (list) Array of host objects |
vmcluster.ingress.insert.ingressClassName: ""
| (string) Ingress controller class name |
vmcluster.ingress.insert.labels: {}
| (object) Ingress extra labels |
vmcluster.ingress.insert.path: '{{ dig "extraArgs" "http.pathPrefix" "/" .Values.vmcluster.spec.vminsert }}'
| (string) Ingress default path |
vmcluster.ingress.insert.pathType: Prefix
| (string) Ingress path type |
vmcluster.ingress.insert.tls: []
| (list) Array of TLS objects |
vmcluster.ingress.select.annotations: {}
| (object) Ingress annotations |
vmcluster.ingress.select.enabled: false
| (bool) Enable deployment of ingress for server component |
vmcluster.ingress.select.hosts: []
| (list) Array of host objects |
vmcluster.ingress.select.ingressClassName: ""
| (string) Ingress controller class name |
vmcluster.ingress.select.labels: {}
| (object) Ingress extra labels |
vmcluster.ingress.select.path: '{{ dig "extraArgs" "http.pathPrefix" "/" .Values.vmcluster.spec.vmselect }}'
| (string) Ingress default path |
vmcluster.ingress.select.pathType: Prefix
| (string) Ingress path type |
vmcluster.ingress.select.tls: []
| (list) Array of TLS objects |
vmcluster.ingress.storage.annotations: {}
| (object) Ingress annotations |
vmcluster.ingress.storage.enabled: false
| (bool) Enable deployment of ingress for server component |
vmcluster.ingress.storage.hosts: []
| (list) Array of host objects |
vmcluster.ingress.storage.ingressClassName: ""
| (string) Ingress controller class name |
vmcluster.ingress.storage.labels: {}
| (object) Ingress extra labels |
vmcluster.ingress.storage.path: ""
| (string) Ingress default path |
vmcluster.ingress.storage.pathType: Prefix
| (string) Ingress path type |
vmcluster.ingress.storage.tls: []
| (list) Array of TLS objects |
vmcluster.spec:
replicationFactor: 2
retentionPeriod: "1"
vminsert:
enabled: true
extraArgs: {}
port: "8480"
replicaCount: 2
resources: {}
vmselect:
cacheMountPath: /select-cache
enabled: true
extraArgs: {}
port: "8481"
replicaCount: 2
resources: {}
storage:
volumeClaimTemplate:
spec:
resources:
requests:
storage: 2Gi
vmstorage:
replicaCount: 2
resources: {}
storage:
volumeClaimTemplate:
spec:
resources:
requests:
storage: 10Gi
storageDataPath: /vm-data
| (object) Full spec for VMCluster CRD. Allowed values described here |
vmcluster.spec.retentionPeriod: "1"
| (string) Data retention period. Possible units character: h(ours), d(ays), w(eeks), y(ears), if no unit character specified - month. The minimum retention period is 24h. See these docs |
vmcluster.spec.vminsert.enabled: true
| (bool) Set this value to false to disable VMInsert |
vmcluster.spec.vmselect.enabled: true
| (bool) Set this value to false to disable VMSelect |
vmsingle.annotations: {}
| (object) VMSingle annotations |
vmsingle.enabled: true
| (bool) Create VMSingle CR |
vmsingle.ingress.annotations: {}
| (object) Ingress annotations |
vmsingle.ingress.enabled: false
| (bool) Enable deployment of ingress for server component |
vmsingle.ingress.hosts: []
| (list) Array of host objects |
vmsingle.ingress.ingressClassName: ""
| (string) Ingress controller class name |
vmsingle.ingress.labels: {}
| (object) Ingress extra labels |
vmsingle.ingress.path: ""
| (string) Ingress default path |
vmsingle.ingress.pathType: Prefix
| (string) Ingress path type |
vmsingle.ingress.tls: []
| (list) Array of TLS objects |
vmsingle.spec:
extraArgs: {}
port: "8429"
replicaCount: 1
retentionPeriod: "1"
storage:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
| (object) Full spec for VMSingle CRD. Allowed values described here |
vmsingle.spec.retentionPeriod: "1"
| (string) Data retention period. Possible units character: h(ours), d(ays), w(eeks), y(ears), if no unit character specified - month. The minimum retention period is 24h. See these docs |