chart_name (string, 3-30 chars) | templates (sequence) | values (string, 104-39.6k chars)
---|---|---|
datadog | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n\n{{- define \"check-version\" -}}\n{{- if not .Values.agents.image.doNotCheckTag -}}\n{{- $version := .Values.agents.image.tag | toString | trimSuffix \"-jmx\" -}}\n{{- $length := len (split \".\" $version) -}}\n{{- if and (eq $length 1) (eq $version \"6\") -}}\n{{- $version = \"6.19.0\" -}}\n{{- end -}}\n{{- if and (eq $length 1) (eq $version \"7\") -}}\n{{- $version = \"7.19.0\" -}}\n{{- end -}}\n{{- if and (eq $length 1) (eq $version \"latest\") -}}\n{{- $version = \"7.19.0\" -}}\n{{- end -}}\n{{- if not (semverCompare \"^6.19.0-0 || ^7.19.0-0\" $version) -}}\n{{- fail \"This version of the chart requires an agent image 7.19.0 or greater. If you want to force and skip this check, use `--set agents.image.doNotCheckTag=true`\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"datadog.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nAnd depending on the resources the name is completed with an extension.\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"datadog.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"datadog.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn secret name to be used based on provided values.\n*/}}\n{{- define \"datadog.apiSecretName\" -}}\n{{- $fullName := include \"datadog.fullname\" . -}}\n{{- default $fullName .Values.datadog.apiKeyExistingSecret | quote -}}\n{{- end -}}\n\n{{/*\nReturn secret name to be used based on provided values.\n*/}}\n{{- define \"datadog.appKeySecretName\" -}}\n{{- $fullName := printf \"%s-appkey\" (include \"datadog.fullname\" .) -}}\n{{- default $fullName .Values.datadog.appKeyExistingSecret | quote -}}\n{{- end -}}\n\n{{/*\nReturn secret name to be used based on provided values.\n*/}}\n{{- define \"clusterAgent.tokenSecretName\" -}}\n{{- if not .Values.clusterAgent.tokenExistingSecret -}}\n{{- include \"datadog.fullname\" . 
-}}-cluster-agent\n{{- else -}}\n{{- .Values.clusterAgent.tokenExistingSecret -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for RBAC APIs.\n*/}}\n{{- define \"rbac.apiVersion\" -}}\n{{- if semverCompare \"^1.8-0\" .Capabilities.KubeVersion.GitVersion -}}\n\"rbac.authorization.k8s.io/v1\"\n{{- else -}}\n\"rbac.authorization.k8s.io/v1beta1\"\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate os label\n*/}}\n{{- define \"label.os\" -}}\n{{- if semverCompare \"^1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\nkubernetes.io/os\n{{- else -}}\nbeta.kubernetes.io/os\n{{- end -}}\n{{- end -}}\n\n{{/*\nCorrect `clusterAgent.metricsProvider.service.port` if Kubernetes <= 1.15\n*/}}\n{{- define \"clusterAgent.metricsProvider.port\" -}}\n{{- if semverCompare \"^1.15-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- .Values.clusterAgent.metricsProvider.service.port -}}\n{{- else -}}\n443\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the container runtime socket\n*/}}\n{{- define \"datadog.dockerOrCriSocketPath\" -}}\n{{- if eq .Values.targetSystem \"linux\" -}}\n{{- .Values.datadog.dockerSocketPath | default .Values.datadog.criSocketPath | default \"/var/run/docker.sock\" -}}\n{{- end -}}\n{{- if eq .Values.targetSystem \"windows\" -}}\n\\\\.\\pipe\\docker_engine\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn agent config path\n*/}}\n{{- define \"datadog.confPath\" -}}\n{{- if eq .Values.targetSystem \"linux\" -}}\n/etc/datadog-agent\n{{- end -}}\n{{- if eq .Values.targetSystem \"windows\" -}}\nC:/ProgramData/Datadog\n{{- end -}}\n{{- end -}}\n",
"# agent-apiservice.yaml\n{{- if and .Values.clusterAgent.rbac.create .Values.clusterAgent.enabled .Values.clusterAgent.metricsProvider.enabled -}}\napiVersion: apiregistration.k8s.io/v1\nkind: APIService\nmetadata:\n name: v1beta1.external.metrics.k8s.io\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\nspec:\n service:\n name: {{ template \"datadog.fullname\" . }}-cluster-agent-metrics-api\n namespace: {{ .Release.Namespace }}\n{{- if semverCompare \"^1.15-0\" .Capabilities.KubeVersion.GitVersion }}\n port: {{ template \"clusterAgent.metricsProvider.port\" . }}\n{{- end }}\n version: v1beta1\n insecureSkipTLSVerify: true\n group: external.metrics.k8s.io\n groupPriorityMinimum: 100\n versionPriority: 100\n{{- end -}}\n",
"# agent-clusterchecks-deployment.yaml\n{{- if and .Values.clusterAgent.enabled .Values.datadog.clusterChecks.enabled .Values.clusterChecksRunner.enabled -}}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-clusterchecks\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\nspec:\n replicas: {{ .Values.clusterChecksRunner.replicas }}\n strategy:\n{{ toYaml .Values.clusterChecksRunner.strategy | indent 4 }}\n selector:\n matchLabels:\n app: {{ template \"datadog.fullname\" . }}-clusterchecks\n template:\n metadata:\n labels:\n app: {{ template \"datadog.fullname\" . }}-clusterchecks\n name: {{ template \"datadog.fullname\" . }}-clusterchecks\n annotations:\n {{- if .Values.datadog.checksd }}\n checksum/checksd-config: {{ tpl (toYaml .Values.datadog.checksd) . | sha256sum }}\n {{- end }}\n spec:\n {{- if .Values.clusterChecksRunner.rbac.dedicated }}\n serviceAccountName: {{ if .Values.clusterChecksRunner.rbac.create }}{{ template \"datadog.fullname\" . }}-cluster-checks{{ else }}\"{{ .Values.clusterChecksRunner.rbac.serviceAccountName }}\"{{ end }}\n {{- else }}\n serviceAccountName: {{ if .Values.clusterChecksRunner.rbac.create }}{{ template \"datadog.fullname\" . }}{{ else }}\"{{ .Values.clusterChecksRunner.rbac.serviceAccountName }}\"{{ end }}\n {{- end }}\n imagePullSecrets:\n{{ toYaml .Values.clusterChecksRunner.image.pullSecrets | indent 8 }}\n {{- if .Values.clusterChecksRunner.dnsConfig }}\n dnsConfig:\n{{ toYaml .Values.clusterChecksRunner.dnsConfig | indent 8 }}\n {{- end }}\n initContainers:\n - name: init-volume\n image: \"{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}\"\n imagePullPolicy: {{ .Values.agents.image.pullPolicy }}\n command: [\"bash\", \"-c\"]\n args:\n - cp -r /etc/datadog-agent /opt\n volumeMounts:\n - name: config\n mountPath: /opt/datadog-agent\n resources:\n{{ toYaml .Values.agents.containers.initContainers.resources | indent 10 }}\n - name: init-config\n image: \"{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}\"\n imagePullPolicy: {{ .Values.agents.image.pullPolicy }}\n command: [\"bash\", \"-c\"]\n args:\n - for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do bash $script ; done\n volumeMounts:\n - name: config\n mountPath: /etc/datadog-agent\n {{- if .Values.datadog.checksd }}\n - name: checksd\n mountPath: /checks.d\n readOnly: true\n {{- end }}\n resources:\n{{ toYaml .Values.agents.containers.initContainers.resources | indent 10 }}\n containers:\n - name: agent\n image: \"{{ .Values.clusterChecksRunner.image.repository }}:{{ .Values.clusterChecksRunner.image.tag }}\"\n command: [\"bash\", \"-c\"]\n args:\n - rm -rf /etc/datadog-agent/conf.d && touch /etc/datadog-agent/datadog.yaml && exec agent run\n imagePullPolicy: {{ .Values.clusterChecksRunner.image.pullPolicy }}\n env:\n - name: DD_API_KEY\n valueFrom:\n secretKeyRef:\n name: {{ template \"datadog.apiSecretName\" . 
}}\n key: api-key\n - name: KUBERNETES\n value: \"yes\"\n {{- if .Values.datadog.site }}\n - name: DD_SITE\n value: {{ .Values.datadog.site | quote }}\n {{- end }}\n {{- if .Values.datadog.dd_url }}\n - name: DD_DD_URL\n value: {{ .Values.datadog.dd_url | quote }}\n {{- end }}\n {{- if .Values.datadog.logLevel }}\n - name: DD_LOG_LEVEL\n value: {{ .Values.datadog.logLevel | quote }}\n {{- end }}\n - name: DD_EXTRA_CONFIG_PROVIDERS\n value: \"clusterchecks\"\n - name: DD_HEALTH_PORT\n value: \"5555\"\n # Cluster checks\n - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME\n value: {{ template \"datadog.fullname\" . }}-cluster-agent\n - name: DD_CLUSTER_AGENT_AUTH_TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"clusterAgent.tokenSecretName\" . }}\n key: token\n - name: DD_CLUSTER_AGENT_ENABLED\n value: {{ .Values.clusterAgent.enabled | quote }}\n # Safely run alongside the daemonset\n - name: DD_ENABLE_METADATA_COLLECTION\n value: \"false\"\n # Expose CLC stats\n - name: DD_CLC_RUNNER_ENABLED\n value: \"true\"\n - name: DD_CLC_RUNNER_HOST\n valueFrom:\n fieldRef:\n fieldPath: status.podIP\n # Remove unused features\n - name: DD_USE_DOGSTATSD\n value: \"false\"\n - name: DD_PROCESS_AGENT_ENABLED\n value: \"false\"\n - name: DD_LOGS_ENABLED\n value: \"false\"\n - name: DD_APM_ENABLED\n value: \"false\"\n - name: DD_HOSTNAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n{{- if .Values.clusterChecksRunner.env }}\n{{ toYaml .Values.clusterChecksRunner.env | indent 10 }}\n{{- end }}\n resources:\n{{ toYaml .Values.clusterChecksRunner.resources | indent 10 }}\n{{- if .Values.clusterChecksRunner.volumeMounts }}\n volumeMounts:\n - name: config\n mountPath: {{ template \"datadog.confPath\" . }}\n{{ toYaml .Values.clusterChecksRunner.volumeMounts | indent 10 }}\n{{- end }}\n livenessProbe:\n{{ toYaml .Values.clusterChecksRunner.livenessProbe | indent 10 }}\n readinessProbe:\n{{ toYaml .Values.clusterChecksRunner.readinessProbe | indent 10 }}\n volumes:\n{{- if .Values.clusterChecksRunner.volumes }}\n{{ toYaml .Values.clusterChecksRunner.volumes | indent 8 }}\n{{- end }}\n - name: config\n emptyDir: {}\n{{- if .Values.datadog.checksd }}\n - name: checksd\n configMap:\n name: {{ template \"datadog.fullname\" . }}-checksd\n{{- end }}\n affinity:\n{{- if .Values.clusterChecksRunner.affinity }}\n{{ toYaml .Values.clusterChecksRunner.affinity | indent 8 }}\n{{- else }}\n # Ensure we only run one worker per node, to avoid name collisions\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - labelSelector:\n matchLabels:\n app: {{ template \"datadog.fullname\" . }}-clusterchecks\n topologyKey: kubernetes.io/hostname\n{{- end }}\n nodeSelector:\n {{ template \"label.os\" . }}: {{ .Values.targetSystem }}\n {{- if .Values.clusterChecksRunner.nodeSelector }}\n{{ toYaml .Values.clusterChecksRunner.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.clusterChecksRunner.tolerations }}\n tolerations:\n{{ toYaml .Values.clusterChecksRunner.tolerations | indent 8 }}\n {{- end }}\n{{ end }}\n",
"# agent-clusterchecks-pdb.yaml\n{{- if .Values.clusterChecksRunner.createPodDisruptionBudget -}}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-clusterchecks\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\nspec:\n maxUnavailable: 1\n selector:\n matchLabels:\n app: {{ template \"datadog.fullname\" . }}-clusterchecks\n{{- end -}}\n",
"# agent-clusterchecks-rbac.yaml\n{{- if and .Values.clusterChecksRunner.rbac.create .Values.clusterAgent.enabled .Values.datadog.clusterChecks.enabled .Values.clusterChecksRunner.enabled .Values.clusterChecksRunner.rbac.dedicated -}}\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: ClusterRoleBinding\nmetadata:\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n name: {{ template \"datadog.fullname\" . }}-cluster-checks\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"datadog.fullname\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"datadog.fullname\" . }}-cluster-checks\n namespace: {{ .Release.Namespace }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: \"{{ template \"datadog.fullname\" . }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n heritage: {{ .Release.Service | quote }}\n release: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n name: {{ template \"datadog.fullname\" . }}-cluster-checks\n {{- if .Values.clusterChecksRunner.rbac.serviceAccountAnnotations }}\n annotations: {{ toYaml .Values.clusterChecksRunner.rbac.serviceAccountAnnotations | nindent 4 }}\n {{- end }}\n{{- end -}}\n",
"# agent-psp.yaml\n{{- if .Values.agents.podSecurity.podSecurityPolicy.create}}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"datadog.fullname\" . }}\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: {{ join \",\" .Values.agents.podSecurity.seccompProfiles | quote }}\n apparmor.security.beta.kubernetes.io/allowedProfileNames: {{ join \",\" .Values.agents.podSecurity.apparmorProfiles | quote }}\n seccomp.security.alpha.kubernetes.io/defaultProfileName: \"runtime/default\"\n apparmor.security.beta.kubernetes.io/defaultProfileName: \"runtime/default\"\nspec:\n privileged: {{ .Values.agents.podSecurity.privileged }}\n hostNetwork: {{ .Values.agents.useHostNetwork }}\n hostPID: {{ .Values.datadog.dogstatsd.useHostPID }}\n allowedCapabilities: \n{{ toYaml .Values.agents.podSecurity.capabilites | indent 4 }}\n volumes:\n{{ toYaml .Values.agents.podSecurity.volumes | indent 4 }}\n fsGroup:\n rule: RunAsAny\n runAsUser:\n rule: RunAsAny\n seLinux:\n{{ toYaml .Values.agents.podSecurity.securityContext | indent 4 }}\n supplementalGroups:\n rule: RunAsAny\n{{- end }}\n",
"# agent-rbac.yaml\n{{- if and .Values.clusterAgent.enabled .Values.clusterAgent.rbac.create -}}\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: ClusterRole\nmetadata:\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n name: {{ template \"datadog.fullname\" . }}-cluster-agent\nrules:\n- apiGroups:\n - \"\"\n resources:\n - services\n - endpoints\n - pods\n - nodes\n - componentstatuses\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - events\n verbs:\n - get\n - list\n - watch\n - create\n- apiGroups: [\"quota.openshift.io\"]\n resources:\n - clusterresourcequotas\n verbs:\n - get\n - list\n- apiGroups:\n - \"autoscaling\"\n resources:\n - horizontalpodautoscalers\n verbs:\n - list\n - watch\n{{- if .Values.datadog.collectEvents }}\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n resourceNames:\n - datadogtoken # Kubernetes event collection state\n verbs:\n - get\n - update\n{{- end }}\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n resourceNames:\n - datadog-leader-election # Leader election token\n{{- if .Values.clusterAgent.metricsProvider.enabled }}\n - datadog-custom-metrics\n - extension-apiserver-authentication\n{{- end }}\n verbs:\n - get\n - update\n- apiGroups: # To create the leader election token and hpa events\n - \"\"\n resources:\n - configmaps\n - events\n verbs:\n - create\n- nonResourceURLs:\n - \"/version\"\n - \"/healthz\"\n verbs:\n - get\n{{- if and .Values.clusterAgent.metricsProvider.enabled .Values.clusterAgent.metricsProvider.wpaController }}\n- apiGroups:\n - \"datadoghq.com\"\n resources:\n - \"watermarkpodautoscalers\"\n verbs:\n - \"list\"\n - \"get\"\n - \"watch\"\n{{- end }}\n{{- if .Values.datadog.orchestratorExplorer.enabled }}\n- apiGroups: # to get the kube-system namespace UID and generate a cluster ID\n - \"\"\n resources:\n - namespaces\n resourceNames:\n - \"kube-system\"\n verbs:\n - get\n- apiGroups: # To create the cluster-id configmap\n - \"\"\n resources:\n - configmaps\n resourceNames:\n - \"datadog-cluster-id\"\n verbs:\n - create\n - get\n - update\n{{- end }}\n{{- if and .Values.clusterAgent.metricsProvider.enabled .Values.clusterAgent.metricsProvider.useDatadogMetrics }}\n- apiGroups:\n - \"datadoghq.com\"\n resources:\n - \"datadogmetrics\"\n verbs:\n - \"list\"\n - \"create\"\n - \"delete\"\n - \"watch\"\n- apiGroups:\n - \"datadoghq.com\"\n resources:\n - \"datadogmetrics/status\"\n verbs:\n - \"update\"\n{{- end }}\n{{- if .Values.clusterAgent.admissionController.enabled }}\n- apiGroups:\n - admissionregistration.k8s.io\n resources:\n - mutatingwebhookconfigurations\n verbs: [\"get\", \"list\", \"watch\", \"update\", \"create\"]\n- apiGroups: [\"\"]\n resources: [\"secrets\"]\n verbs: [\"get\", \"list\", \"watch\", \"update\", \"create\"]\n- apiGroups: [\"batch\"]\n resources: [\"jobs\", \"cronjobs\"]\n verbs: [\"get\"]\n- apiGroups: [\"apps\"]\n resources: [\"statefulsets\", \"replicasets\", \"deployments\"]\n verbs: [\"get\"]\n{{- end }}\n---\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: ClusterRoleBinding\nmetadata:\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . 
}}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n name: {{ template \"datadog.fullname\" . }}-cluster-agent\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"datadog.fullname\" . }}-cluster-agent\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"datadog.fullname\" . }}-cluster-agent\n namespace: {{ .Release.Namespace }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: \"{{ template \"datadog.fullname\" . }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n heritage: {{ .Release.Service | quote }}\n release: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n name: {{ template \"datadog.fullname\" . }}-cluster-agent\n{{- end }}\n\n{{- if and .Values.clusterAgent.enabled .Values.clusterAgent.rbac.create .Values.clusterAgent.metricsProvider.enabled }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app: \"{{ template \"datadog.fullname\" . }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n name: {{ template \"datadog.fullname\" . }}-cluster-agent:system:auth-delegator\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:auth-delegator\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"datadog.fullname\" . }}-cluster-agent\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# agent-scc.yaml\n{{- if .Values.agents.podSecurity.securityContextConstraints.create }}\nkind: SecurityContextConstraints\napiVersion: security.openshift.io/v1\nmetadata:\n name: {{ template \"datadog.fullname\" . }}\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\nusers:\n- system:serviceaccount:{{ .Release.Namespace }}:{{ template \"datadog.fullname\" . }}\npriority: 10\n# Allow host ports for dsd / trace intake\n+allowHostPorts: {{ or .Values.datadog.dogstatsd.useHostPort .Values.datadog.apm.enabled }}\n# Allow host PID for dogstatsd origin detection\nallowHostPID: {{ .Values.datadog.dogstatsd.useHostPID }}\n# Allow host network for the CRIO check to reach Prometheus through localhost\nallowHostNetwork: {{ .Values.agents.useHostNetwork }}\n# Allow hostPath for docker / process metrics\nvolumes:\n{{ toYaml .Values.agents.podSecurity.volumes | indent 2 }}\n# Use the `spc_t` selinux type to access the\n# docker/cri socket + proc and cgroup stats\nseLinuxContext:\n{{ toYaml .Values.agents.podSecurity.securityContext | indent 2 }}\n# system-probe requires some specific seccomp and capabilities\nseccompProfiles:\n{{ toYaml .Values.agents.podSecurity.seccompProfiles | indent 2 }}\nallowedCapabilities:\n{{ toYaml .Values.agents.podSecurity.capabilites | indent 2 }}\n#\n# The rest is copied from restricted SCC\n#\nallowHostDirVolumePlugin: true\nallowHostIPC: false\nallowPrivilegedContainer: {{ .Values.agents.podSecurity.privileged }}\nallowedFlexVolumes: []\ndefaultAddCapabilities: []\nfsGroup:\n type: MustRunAs\nreadOnlyRootFilesystem: false\nrunAsUser:\n type: RunAsAny\nsupplementalGroups:\n type: RunAsAny\n# If your environment restricts user access to the Docker socket or journald (for logging)\n# create or use an existing group that has access and add the GID to\n# the lines below (also remove the previous line, `type: RunAsAny`)\n# type: MustRunAs\n# ranges:\n# - min: <min-group-ID>\n# - max: <max-group-ID>\nrequiredDropCapabilities: []\n{{- end }}\n",
"# agent-secret.yaml\n{{- if not .Values.clusterAgent.tokenExistingSecret }}\n{{- if .Values.clusterAgent.enabled -}}\n\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-cluster-agent\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\ntype: Opaque\ndata:\n {{ if .Values.clusterAgent.token -}}\n token: {{ .Values.clusterAgent.token | b64enc | quote }}\n {{ else -}}\n token: {{ randAlphaNum 32 | b64enc | quote }}\n {{ end }}\n{{- end }}\n\n{{ end }}",
"# agent-services.yaml\n{{- if .Values.clusterAgent.enabled -}}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-cluster-agent\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\nspec:\n type: ClusterIP\n selector:\n app: {{ template \"datadog.fullname\" . }}-cluster-agent\n ports:\n - port: 5005\n name: agentport\n protocol: TCP\n{{ end }}\n\n{{- if and .Values.clusterAgent.enabled .Values.clusterAgent.metricsProvider.enabled -}}\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-cluster-agent-metrics-api\n labels:\n app: \"{{ template \"datadog.fullname\" . }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\nspec:\n type: {{ .Values.clusterAgent.metricsProvider.service.type }}\n selector:\n app: {{ template \"datadog.fullname\" . }}-cluster-agent\n ports:\n - port: {{ template \"clusterAgent.metricsProvider.port\" . }}\n name: metricsapi\n protocol: TCP\n{{ end }}\n\n{{- if and .Values.clusterAgent.enabled .Values.clusterAgent.admissionController.enabled -}}\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-cluster-agent-admission-controller\n labels:\n app: \"{{ template \"datadog.fullname\" . }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\nspec:\n selector:\n app: {{ template \"datadog.fullname\" . }}-cluster-agent\n ports:\n - port: 443\n targetPort: 8000\n{{ end }}\n",
"# checksd-configmap.yaml\n{{- if .Values.datadog.checksd }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-checksd\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n annotations:\n checksum/checksd-config: {{ tpl (toYaml .Values.datadog.checksd) . | sha256sum }}\ndata:\n{{ tpl (toYaml .Values.datadog.checksd) . | indent 2 }}\n{{- end -}}\n",
"# cluster-agent-confd-configmap.yaml\n{{- if .Values.clusterAgent.confd }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-cluster-agent-confd\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n annotations:\n checksum/confd-config: {{ tpl (toYaml .Values.clusterAgent.confd) . | sha256sum }}\ndata:\n{{ tpl (toYaml .Values.clusterAgent.confd) . | indent 2 }}\n{{- end -}}\n",
"# cluster-agent-config-configmap.yaml\n{{- if .Values.clusterAgent.datadog_cluster_yaml }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-cluster-agent-config\n labels:\n app: \"{{ template \"datadog.fullname\" . }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n annotations:\n checksum/clusteragent-config: {{ tpl (toYaml .Values.clusterAgent.datadog_cluster_yaml) . | sha256sum }}\ndata:\n datadog-cluster.yaml: |\n{{ tpl (toYaml .Values.clusterAgent.datadog_cluster_yaml) . | indent 4 }}\n{{- end }}\n",
"# cluster-agent-deployment.yaml\n{{- if .Values.clusterAgent.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-cluster-agent\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\nspec:\n replicas: {{ .Values.clusterAgent.replicas }}\n strategy:\n{{- if .Values.clusterAgent.strategy }}\n{{ toYaml .Values.clusterAgent.strategy | indent 4 }}\n{{- else }}\n type: RollingUpdate\n rollingUpdate:\n maxSurge: 1\n maxUnavailable: 0\n{{- end }}\n selector:\n matchLabels:\n app: {{ template \"datadog.fullname\" . }}-cluster-agent\n {{- if .Values.clusterAgent.podLabels }}\n{{ toYaml .Values.clusterAgent.podLabels | indent 6 }}\n {{- end }}\n template:\n metadata:\n labels:\n app: {{ template \"datadog.fullname\" . }}-cluster-agent\n {{- if .Values.clusterAgent.podLabels }}\n{{ toYaml .Values.clusterAgent.podLabels | indent 8 }}\n {{- end }}\n name: {{ template \"datadog.fullname\" . }}-cluster-agent\n annotations:\n {{- if .Values.clusterAgent.datadog_cluster_yaml }}\n checksum/clusteragent-config: {{ tpl (toYaml .Values.clusterAgent.datadog_cluster_yaml) . | sha256sum }}\n {{- end }}\n {{- if .Values.clusterAgent.confd }}\n checksum/confd-config: {{ tpl (toYaml .Values.clusterAgent.confd) . | sha256sum }}\n {{- end }}\n ad.datadoghq.com/cluster-agent.check_names: '[\"prometheus\"]'\n ad.datadoghq.com/cluster-agent.init_configs: '[{}]'\n ad.datadoghq.com/cluster-agent.instances: |\n [{\n \"prometheus_url\": \"http://%%host%%:5000/metrics\",\n \"namespace\": \"datadog.cluster_agent\",\n \"metrics\": [\n \"go_goroutines\", \"go_memstats_*\", \"process_*\",\n \"api_requests\",\n \"datadog_requests\", \"external_metrics\", \"rate_limit_queries_*\",\n \"cluster_checks_*\"\n ]\n }]\n {{- if .Values.clusterAgent.podAnnotations }}\n{{ toYaml .Values.clusterAgent.podAnnotations | indent 8 }}\n {{- end }}\n\n spec:\n {{- if .Values.clusterAgent.priorityClassName }}\n priorityClassName: \"{{ .Values.clusterAgent.priorityClassName }}\"\n {{- end }}\n {{- if .Values.clusterAgent.image.pullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.clusterAgent.image.pullSecrets | indent 8 }}\n {{- end }}\n serviceAccountName: {{ if .Values.clusterAgent.rbac.create }}{{ template \"datadog.fullname\" . }}-cluster-agent{{ else }}\"{{ .Values.clusterAgent.rbac.serviceAccountName }}\"{{ end }}\n {{- if .Values.clusterAgent.useHostNetwork }}\n hostNetwork: {{ .Values.clusterAgent.useHostNetwork }}\n dnsPolicy: ClusterFirstWithHostNet\n {{- end }}\n {{- if .Values.clusterAgent.dnsConfig }}\n dnsConfig:\n{{ toYaml .Values.clusterAgent.dnsConfig | indent 8 }}\n {{- end }}\n containers:\n - name: cluster-agent\n image: \"{{ .Values.clusterAgent.image.repository }}:{{ .Values.clusterAgent.image.tag }}\"\n {{- with .Values.clusterAgent.command }}\n command: {{ range . }}\n - {{ . | quote }}\n {{- end }}\n {{- end }}\n imagePullPolicy: {{ .Values.clusterAgent.image.pullPolicy }}\n resources:\n{{ toYaml .Values.clusterAgent.resources | indent 10 }}\n ports:\n - containerPort: 5005\n name: agentport\n protocol: TCP\n {{- if .Values.clusterAgent.metricsProvider.enabled }}\n - containerPort: {{ template \"clusterAgent.metricsProvider.port\" . 
}}\n name: metricsapi\n protocol: TCP\n {{- end }}\n env:\n - name: DD_HEALTH_PORT\n value: {{ .Values.clusterAgent.healthPort | quote }}\n - name: DD_API_KEY\n valueFrom:\n secretKeyRef:\n name: {{ template \"datadog.apiSecretName\" . }}\n key: api-key\n optional: true\n {{- if .Values.clusterAgent.metricsProvider.enabled }}\n - name: DD_APP_KEY\n valueFrom:\n secretKeyRef:\n name: {{ template \"datadog.appKeySecretName\" . }}\n key: app-key\n - name: DD_EXTERNAL_METRICS_PROVIDER_ENABLED\n value: {{ .Values.clusterAgent.metricsProvider.enabled | quote }}\n - name: DD_EXTERNAL_METRICS_PROVIDER_PORT\n value: {{ include \"clusterAgent.metricsProvider.port\" . | quote }}\n - name: DD_EXTERNAL_METRICS_PROVIDER_WPA_CONTROLLER\n value: {{ .Values.clusterAgent.metricsProvider.wpaController | quote }}\n - name: DD_EXTERNAL_METRICS_PROVIDER_USE_DATADOGMETRIC_CRD\n value: {{ .Values.clusterAgent.metricsProvider.useDatadogMetrics | quote }}\n {{- end }}\n {{- if .Values.clusterAgent.admissionController.enabled }}\n - name: DD_ADMISSION_CONTROLLER_ENABLED\n value: {{ .Values.clusterAgent.admissionController.enabled | quote }}\n - name: DD_ADMISSION_CONTROLLER_MUTATE_UNLABELLED\n value: {{ .Values.clusterAgent.admissionController.mutateUnlabelled | quote }}\n - name: DD_ADMISSION_CONTROLLER_SERVICE_NAME\n value: {{ template \"datadog.fullname\" . }}-cluster-agent-admission-controller\n {{- end }}\n {{- if .Values.datadog.clusterChecks.enabled }}\n - name: DD_CLUSTER_CHECKS_ENABLED\n value: {{ .Values.datadog.clusterChecks.enabled | quote }}\n - name: DD_EXTRA_CONFIG_PROVIDERS\n value: \"kube_endpoints kube_services\"\n - name: DD_EXTRA_LISTENERS\n value: \"kube_endpoints kube_services\"\n {{- end }}\n {{- if .Values.datadog.clusterName }}\n {{- if not (regexMatch \"^([a-z]([a-z0-9\\\\-]{0,38}[a-z0-9])?\\\\.)*([a-z]([a-z0-9\\\\-]{0,38}[a-z0-9])?)$\" .Values.datadog.clusterName) }}\n {{- fail \"Your `clusterName` isn’t valid. It must be dot-separated tokens where a token start with a lowercase letter followed by up to 39 lowercase letters, numbers, or hyphens and cannot end with a hyphen.\"}}\n {{- end}}\n - name: DD_CLUSTER_NAME\n value: {{ .Values.datadog.clusterName | quote }}\n {{- end }}\n {{- if .Values.datadog.site }}\n - name: DD_SITE\n value: {{ .Values.datadog.site | quote }}\n {{- end }}\n {{- if .Values.datadog.dd_url }}\n - name: DD_DD_URL\n value: {{ .Values.datadog.dd_url | quote }}\n {{- end }}\n {{- if .Values.datadog.logLevel }}\n - name: DD_LOG_LEVEL\n value: {{ .Values.datadog.logLevel | quote }}\n {{- end }}\n - name: DD_LEADER_ELECTION\n value: {{ default \"true\" .Values.datadog.leaderElection | quote}}\n {{- if .Values.datadog.leaderLeaseDuration }}\n - name: DD_LEADER_LEASE_DURATION\n value: {{ .Values.datadog.leaderLeaseDuration | quote }}\n {{- else if .Values.datadog.clusterChecks.enabled }}\n - name: DD_LEADER_LEASE_DURATION\n value: \"15\"\n {{- end }}\n {{- if .Values.datadog.collectEvents }}\n - name: DD_COLLECT_KUBERNETES_EVENTS\n value: {{ .Values.datadog.collectEvents | quote}}\n {{- end }}\n - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME\n value: {{ template \"datadog.fullname\" . }}-cluster-agent\n - name: DD_CLUSTER_AGENT_AUTH_TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"clusterAgent.tokenSecretName\" . 
}}\n key: token\n - name: DD_KUBE_RESOURCES_NAMESPACE\n value: {{ .Release.Namespace }}\n {{- if .Values.datadog.orchestratorExplorer.enabled }}\n - name: DD_ORCHESTRATOR_EXPLORER_ENABLED\n value: \"true\"\n {{- end }}\n{{- if .Values.clusterAgent.env }}\n{{ toYaml .Values.clusterAgent.env | indent 10 }}\n{{- end }}\n livenessProbe:\n{{ toYaml .Values.clusterAgent.livenessProbe | indent 10 }}\n readinessProbe:\n{{ toYaml .Values.clusterAgent.readinessProbe | indent 10 }}\n volumeMounts:\n{{- if .Values.clusterAgent.volumeMounts }}\n{{ toYaml .Values.clusterAgent.volumeMounts | indent 10 }}\n{{- end }}\n{{- if .Values.clusterAgent.confd }}\n - name: confd\n mountPath: /conf.d\n readOnly: true\n{{- end }}\n{{- if .Values.clusterAgent.datadog_cluster_yaml }}\n - name: cluster-agent-yaml\n mountPath: /etc/datadog-agent/datadog-cluster.yaml\n subPath: datadog-cluster.yaml\n readOnly: true\n{{- end}}\n volumes:\n{{- if .Values.clusterAgent.confd }}\n - name: confd\n configMap:\n name: {{ template \"datadog.fullname\" . }}-cluster-agent-confd\n{{- end }}\n{{- if .Values.clusterAgent.datadog_cluster_yaml }}\n - name: cluster-agent-yaml\n configMap:\n name: {{ template \"datadog.fullname\" . }}-cluster-agent-config\n{{- end}}\n\n{{- if .Values.clusterAgent.volumes }}\n{{ toYaml .Values.clusterAgent.volumes | indent 8 }}\n{{- end }}\n {{- if .Values.clusterAgent.tolerations }}\n tolerations:\n{{ toYaml .Values.clusterAgent.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.clusterAgent.affinity }}\n affinity:\n{{ toYaml .Values.clusterAgent.affinity | indent 8 }}\n {{- end }}\n nodeSelector:\n {{ template \"label.os\" . }}: {{ .Values.targetSystem }}\n {{- if .Values.clusterAgent.nodeSelector }}\n{{ toYaml .Values.clusterAgent.nodeSelector | indent 8 }}\n {{- end }}\n{{ end }}\n",
"# cluster-agent-pdb.yaml\n{{- if .Values.clusterAgent.createPodDisruptionBudget -}}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-cluster-agent\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\nspec:\n minAvailable: 1\n selector:\n matchLabels:\n app: {{ template \"datadog.fullname\" . }}-cluster-agent\n{{- end -}}\n",
"# confd-configmap.yaml\n{{- if (or (.Values.datadog.confd) (.Values.datadog.autoconf)) }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-confd\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n annotations:\n checksum/confd-config: {{ tpl (toYaml .Values.datadog.confd) . | sha256sum }}\n checksum/autoconf-config: {{ tpl (toYaml .Values.datadog.autoconf) . | sha256sum }}\ndata:\n{{/*\nMerge the legacy autoconf dict before so confd static configurations\noverride duplicates\n*/}}\n{{- if .Values.datadog.autoconf }}\n{{ tpl (toYaml .Values.datadog.autoconf) . | indent 2 }}\n{{- end }}\n{{- if .Values.datadog.confd }}\n{{ tpl (toYaml .Values.datadog.confd) . | indent 2 }}\n{{- end }}\n{{- end -}}\n",
"# container-agent.yaml\n{{- define \"container-agent\" -}}\n- name: agent\n image: \"{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}\"\n imagePullPolicy: {{ .Values.agents.image.pullPolicy }}\n command: [\"agent\", \"run\"]\n resources:\n{{ toYaml .Values.agents.containers.agent.resources | indent 4 }}\n ports:\n - containerPort: {{ .Values.datadog.dogstatsd.port }}\n {{- if .Values.datadog.dogstatsd.useHostPort }}\n hostPort: {{ .Values.datadog.dogstatsd.port }}\n {{- end }}\n name: dogstatsdport\n protocol: UDP\n env:\n {{- include \"containers-common-env\" . | nindent 4 }}\n {{- if .Values.datadog.logLevel }}\n - name: DD_LOG_LEVEL\n value: {{ .Values.agents.containers.agent.logLevel | default .Values.datadog.logLevel | quote }}\n {{- end }}\n {{- if .Values.datadog.dogstatsd.port }}\n - name: DD_DOGSTATSD_PORT\n value: {{ .Values.datadog.dogstatsd.port | quote }}\n {{- end }}\n {{- if .Values.datadog.dogstatsd.nonLocalTraffic }}\n - name: DD_DOGSTATSD_NON_LOCAL_TRAFFIC\n value: {{ .Values.datadog.dogstatsd.nonLocalTraffic | quote }}\n {{- end }}\n {{- if .Values.datadog.dogstatsd.originDetection }}\n - name: DD_DOGSTATSD_ORIGIN_DETECTION\n value: {{ .Values.datadog.dogstatsd.originDetection | quote }}\n {{- end }}\n {{- if not .Values.clusterAgent.enabled }}\n {{- if .Values.datadog.leaderElection }}\n - name: DD_LEADER_ELECTION\n value: {{ .Values.datadog.leaderElection | quote}}\n {{- end }}\n {{- if .Values.datadog.leaderLeaseDuration }}\n - name: DD_LEADER_LEASE_DURATION\n value: {{ .Values.datadog.leaderLeaseDuration | quote }}\n {{- end }}\n {{- if .Values.datadog.collectEvents }}\n - name: DD_COLLECT_KUBERNETES_EVENTS\n value: {{.Values.datadog.collectEvents | quote}}\n {{- end }}\n {{- else }}\n - name: DD_CLUSTER_AGENT_ENABLED\n value: {{ .Values.clusterAgent.enabled | quote }}\n - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME\n value: {{ template \"datadog.fullname\" . }}-cluster-agent\n - name: DD_CLUSTER_AGENT_AUTH_TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"clusterAgent.tokenSecretName\" . }}\n key: token\n {{- end }}\n - name: DD_APM_ENABLED\n value: \"false\"\n - name: DD_LOGS_ENABLED\n value: {{ (default false (or .Values.datadog.logs.enabled .Values.datadog.logsEnabled)) | quote}}\n - name: DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL\n value: {{ (default false (or .Values.datadog.logs.containerCollectAll .Values.datadog.logsConfigContainerCollectAll)) | quote}}\n - name: DD_LOGS_CONFIG_K8S_CONTAINER_USE_FILE\n value: {{ .Values.datadog.logs.containerCollectUsingFiles | quote }}\n {{- if not .Values.datadog.livenessProbe }}\n - name: DD_HEALTH_PORT\n value: \"5555\"\n {{- end }}\n {{- if .Values.datadog.dogstatsd.useSocketVolume }}\n - name: DD_DOGSTATSD_SOCKET\n value: {{ .Values.datadog.dogstatsd.socketPath | quote }}\n {{- end }}\n {{- if .Values.datadog.clusterChecks.enabled }}\n {{- if .Values.clusterChecksRunner.enabled }}\n - name: DD_EXTRA_CONFIG_PROVIDERS\n value: \"endpointschecks\"\n {{ else }}\n - name: DD_EXTRA_CONFIG_PROVIDERS\n value: \"clusterchecks endpointschecks\"\n {{- end }}\n {{- end }}\n{{- if .Values.agents.containers.agent.env }}\n{{ toYaml .Values.agents.containers.agent.env | indent 4 }}\n{{- end }}\n volumeMounts:\n - name: config\n mountPath: {{ template \"datadog.confPath\" . 
}}\n {{- if eq .Values.targetSystem \"linux\" }}\n - name: runtimesocketdir\n mountPath: {{ print \"/host/\" (dir (include \"datadog.dockerOrCriSocketPath\" .)) | clean }}\n readOnly: true\n {{- end }}\n {{- if eq .Values.targetSystem \"windows\" }}\n - name: runtimesocket\n mountPath: {{ template \"datadog.dockerOrCriSocketPath\" . }}\n {{- end }}\n {{- if .Values.agents.useConfigMap }}\n - name: {{ template \"datadog.fullname\" . }}-datadog-yaml\n mountPath: {{ template \"datadog.confPath\" . }}/datadog.yaml\n subPath: datadog.yaml\n {{- end }}\n {{- if eq .Values.targetSystem \"linux\" }}\n {{- if .Values.datadog.dogstatsd.useSocketVolume }}\n - name: dsdsocket\n mountPath: {{ (dir .Values.datadog.dogstatsd.socketPath) }}\n {{- end }}\n {{- if .Values.datadog.systemProbe.enabled }}\n - name: sysprobe-socket-dir\n mountPath: /var/run/sysprobe\n readOnly: true\n - name: sysprobe-config\n mountPath: /etc/datadog-agent/system-probe.yaml\n subPath: system-probe.yaml\n {{- end }}\n - name: procdir\n mountPath: /host/proc\n readOnly: true\n - name: cgroups\n mountPath: /host/sys/fs/cgroup\n readOnly: true\n {{- if or .Values.datadog.logs.enabled .Values.datadog.logsEnabled }}\n - name: pointerdir\n mountPath: /opt/datadog-agent/run\n - name: logpodpath\n mountPath: /var/log/pods\n readOnly: true\n {{- if not .Values.datadog.criSocketPath }}\n - name: logdockercontainerpath\n mountPath: /var/lib/docker/containers\n readOnly: true\n {{- end }}\n {{- end }}\n {{- end }}\n {{- if eq .Values.targetSystem \"windows\" }}\n {{- if or .Values.datadog.logs.enabled .Values.datadog.logsEnabled }}\n - name: pointerdir\n mountPath: C:/var/log\n - name: logpodpath\n mountPath: C:/var/log/pods\n readOnly: true\n - name: logdockercontainerpath\n mountPath: C:/ProgramData/docker/containers\n readOnly: true\n {{- end }}\n {{- end }}\n{{- if .Values.agents.volumeMounts }}\n{{ toYaml .Values.agents.volumeMounts | indent 4 }}\n{{- end }}\n livenessProbe:\n{{ toYaml .Values.agents.containers.agent.livenessProbe | indent 4 }}\n readinessProbe:\n{{ toYaml .Values.agents.containers.agent.readinessProbe | indent 4 }}\n{{- end -}}\n",
"# container-process-agent.yaml\n{{- define \"container-process-agent\" -}}\n- name: process-agent\n image: \"{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}\"\n imagePullPolicy: {{ .Values.agents.image.pullPolicy }}\n {{- if eq .Values.targetSystem \"linux\" }}\n command: [\"process-agent\", \"-config={{ template \"datadog.confPath\" . }}/datadog.yaml\"]\n {{- end }}\n {{- if eq .Values.targetSystem \"windows\" }}\n command: [\"process-agent\", \"-foreground\", \"-config={{ template \"datadog.confPath\" . }}/datadog.yaml\"]\n {{- end }}\n resources:\n{{ toYaml .Values.agents.containers.processAgent.resources | indent 4 }}\n env:\n {{- include \"containers-common-env\" . | nindent 4 }}\n {{- if .Values.datadog.processAgent.processCollection }}\n - name: DD_PROCESS_AGENT_ENABLED\n value: \"true\"\n {{- end }}\n - name: DD_LOG_LEVEL\n value: {{ .Values.agents.containers.processAgent.logLevel | default .Values.datadog.logLevel | quote }}\n {{- if .Values.datadog.systemProbe.enabled }}\n - name: DD_SYSTEM_PROBE_ENABLED\n value: {{ .Values.datadog.systemProbe.enabled | quote }}\n {{- end }}\n {{- if .Values.datadog.orchestratorExplorer.enabled }}\n - name: DD_ORCHESTRATOR_EXPLORER_ENABLED\n value: \"true\"\n - name: DD_ORCHESTRATOR_CLUSTER_ID\n valueFrom:\n configMapKeyRef:\n name: datadog-cluster-id\n key: id\n {{- end }}\n{{- if .Values.agents.containers.processAgent.env }}\n{{ toYaml .Values.agents.containers.processAgent.env | indent 4 }}\n{{- end }}\n volumeMounts:\n - name: config\n mountPath: {{ template \"datadog.confPath\" . }}\n {{- if eq .Values.targetSystem \"linux\" }}\n - name: runtimesocketdir\n mountPath: {{ print \"/host/\" (dir (include \"datadog.dockerOrCriSocketPath\" .)) | clean }}\n readOnly: true\n {{- end }}\n {{- if eq .Values.targetSystem \"windows\" }}\n - name: runtimesocket\n mountPath: {{ template \"datadog.dockerOrCriSocketPath\" . }}\n {{- end }}\n {{- if .Values.agents.useConfigMap }}\n - name: {{ template \"datadog.fullname\" . }}-datadog-yaml\n mountPath: {{ template \"datadog.confPath\" . }}/datadog.yaml\n subPath: datadog.yaml\n {{- end }}\n {{- if eq .Values.targetSystem \"linux\" }}\n - name: cgroups\n mountPath: /host/sys/fs/cgroup\n readOnly: true\n - name: passwd\n mountPath: /etc/passwd\n - name: procdir\n mountPath: /host/proc\n readOnly: true\n {{- if .Values.datadog.systemProbe.enabled }}\n - name: sysprobe-socket-dir\n mountPath: /var/run/sysprobe\n readOnly: true\n - name: sysprobe-config\n mountPath: /etc/datadog-agent/system-probe.yaml\n subPath: system-probe.yaml\n {{- end }}\n {{- end }}\n{{- end -}}\n",
"# container-system-probe.yaml\n{{- define \"container-system-probe\" -}}\n- name: system-probe\n image: \"{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}\"\n imagePullPolicy: {{ .Values.agents.image.pullPolicy }}\n securityContext:\n capabilities:\n add: [\"SYS_ADMIN\", \"SYS_RESOURCE\", \"SYS_PTRACE\", \"NET_ADMIN\", \"IPC_LOCK\"]\n command: [\"/opt/datadog-agent/embedded/bin/system-probe\", \"--config=/etc/datadog-agent/system-probe.yaml\"]\n env:\n - name: DD_LOG_LEVEL\n value: {{ .Values.agents.containers.systemProbe.logLevel | default .Values.datadog.logLevel | quote }}\n{{- if .Values.agents.containers.systemProbe.env }}\n{{ toYaml .Values.agents.containers.systemProbe.env | indent 4 }}\n{{- end }}\n resources:\n{{ toYaml .Values.agents.containers.systemProbe.resources | indent 4 }}\n volumeMounts:\n - name: debugfs\n mountPath: /sys/kernel/debug\n - name: sysprobe-config\n mountPath: /etc/datadog-agent\n - name: sysprobe-socket-dir\n mountPath: /var/run/sysprobe\n - name: procdir\n mountPath: /host/proc\n readOnly: true\n - name: modules\n mountPath: /lib/modules\n readOnly: true\n - name: src\n mountPath: /usr/src\n readOnly: true\n{{- end -}}\n",
"# container-trace-agent.yaml\n{{- define \"container-trace-agent\" -}}\n- name: trace-agent\n image: \"{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}\"\n imagePullPolicy: {{ .Values.agents.image.pullPolicy }}\n {{- if eq .Values.targetSystem \"linux\" }}\n command: [\"trace-agent\", \"-config={{ template \"datadog.confPath\" . }}/datadog.yaml\"]\n {{- end }}\n {{- if eq .Values.targetSystem \"windows\" }}\n command: [\"trace-agent\", \"-foreground\", \"-config={{ template \"datadog.confPath\" . }}/datadog.yaml\"]\n {{- end }}\n resources:\n{{ toYaml .Values.agents.containers.traceAgent.resources | indent 4 }}\n ports:\n - containerPort: {{ .Values.datadog.apm.port }}\n hostPort: {{ .Values.datadog.apm.port }}\n name: traceport\n protocol: TCP\n env:\n {{- include \"containers-common-env\" . | nindent 4 }}\n - name: DD_LOG_LEVEL\n value: {{ .Values.agents.containers.traceAgent.logLevel | default .Values.datadog.logLevel | quote }}\n - name: DD_APM_ENABLED\n value: \"true\"\n - name: DD_APM_NON_LOCAL_TRAFFIC\n value: \"true\"\n - name: DD_APM_RECEIVER_PORT\n value: {{ .Values.datadog.apm.port | quote }}\n {{- if .Values.datadog.apm.useSocketVolume }}\n - name: DD_APM_RECEIVER_SOCKET\n value: {{ .Values.datadog.apm.socketPath | quote }}\n {{- end }}\n{{- if .Values.agents.containers.traceAgent.env }}\n{{ toYaml .Values.agents.containers.traceAgent.env | indent 4 }}\n{{- end }}\n volumeMounts:\n - name: config\n mountPath: {{ template \"datadog.confPath\" . }}\n {{- if .Values.agents.useConfigMap }}\n - name: {{ template \"datadog.fullname\" . }}-datadog-yaml\n mountPath: {{ template \"datadog.confPath\" . }}/datadog.yaml\n subPath: datadog.yaml\n {{- end }}\n {{- if eq .Values.targetSystem \"linux\" }}\n - name: runtimesocketdir\n mountPath: {{ print \"/host/\" (dir (include \"datadog.dockerOrCriSocketPath\" .)) | clean }}\n readOnly: true\n {{- end }}\n {{- if eq .Values.targetSystem \"windows\" }}\n - name: runtimesocket\n mountPath: {{ template \"datadog.dockerOrCriSocketPath\" . }}\n {{- end }}\n {{- if .Values.datadog.apm.useSocketVolume }}\n - name: apmsocket\n mountPath: {{ (dir .Values.datadog.apm.socketPath) }}\n {{- end }}\n livenessProbe:\n{{ toYaml .Values.agents.containers.traceAgent.livenessProbe | indent 4 }}\n{{- end -}}\n",
"# containers-common-env.yaml\n# The purpose of this template is to define a minimal set of environment\n# variables required to operate dedicated containers in the daemonset\n{{- define \"containers-common-env\" -}}\n- name: DD_API_KEY\n valueFrom:\n secretKeyRef:\n name: {{ template \"datadog.apiSecretName\" . }}\n key: api-key\n{{- if semverCompare \"^1.7-0\" .Capabilities.KubeVersion.GitVersion }}\n- name: DD_KUBERNETES_KUBELET_HOST\n valueFrom:\n fieldRef:\n fieldPath: status.hostIP\n{{- end }}\n{{- if .Values.datadog.clusterName }}\n{{- if not (regexMatch \"^([a-z]([a-z0-9\\\\-]{0,38}[a-z0-9])?\\\\.)*([a-z]([a-z0-9\\\\-]{0,38}[a-z0-9])?)$\" .Values.datadog.clusterName) }}\n{{- fail \"Your `clusterName` isn’t valid. It must be dot-separated tokens where a token start with a lowercase letter followed by up to 39 lowercase letters, numbers, or hyphens and cannot end with a hyphen.\"}}\n{{- end}}\n- name: DD_CLUSTER_NAME\n value: {{ .Values.datadog.clusterName | quote }}\n{{- end }}\n{{- if .Values.datadog.tags }}\n- name: DD_TAGS\n value: {{ .Values.datadog.tags | join \" \" | quote }}\n{{- end }}\n{{- if .Values.datadog.nodeLabelsAsTags }}\n- name: DD_KUBERNETES_NODE_LABELS_AS_TAGS\n value: '{{ toJson .Values.datadog.nodeLabelsAsTags }}'\n{{- end }}\n{{- if .Values.datadog.podLabelsAsTags }}\n- name: DD_KUBERNETES_POD_LABELS_AS_TAGS\n value: '{{ toJson .Values.datadog.podLabelsAsTags }}'\n{{- end }}\n{{- if .Values.datadog.podAnnotationsAsTags }}\n- name: DD_KUBERNETES_POD_ANNOTATIONS_AS_TAGS\n value: '{{ toJson .Values.datadog.podAnnotationsAsTags }}'\n{{- end }}\n- name: KUBERNETES\n value: \"yes\"\n{{- if .Values.datadog.site }}\n- name: DD_SITE\n value: {{ .Values.datadog.site | quote }}\n{{- end }}\n{{- if .Values.datadog.dd_url }}\n- name: DD_DD_URL\n value: {{ .Values.datadog.dd_url | quote }}\n{{- end }}\n{{- if .Values.datadog.env }}\n{{ toYaml .Values.datadog.env }}\n{{- end }}\n{{- if .Values.datadog.acInclude }}\n- name: DD_AC_INCLUDE\n value: {{ .Values.datadog.acInclude | quote }}\n{{- end }}\n{{- if .Values.datadog.acExclude }}\n- name: DD_AC_EXCLUDE\n value: {{ .Values.datadog.acExclude | quote }}\n{{- end }}\n{{- if .Values.datadog.containerInclude }}\n- name: DD_CONTAINER_INCLUDE\n value: {{ .Values.datadog.containerInclude | quote }}\n{{- end }}\n{{- if .Values.datadog.containerExclude }}\n- name: DD_CONTAINER_EXCLUDE\n value: {{ .Values.datadog.containerExclude | quote }}\n{{- end }}\n{{- if .Values.datadog.containerIncludeMetrics }}\n- name: DD_CONTAINER_INCLUDE_METRICS\n value: {{ .Values.datadog.containerIncludeMetrics | quote }}\n{{- end }}\n{{- if .Values.datadog.containerExcludeMetrics }}\n- name: DD_CONTAINER_EXCLUDE_METRICS\n value: {{ .Values.datadog.containerExcludeMetrics | quote }}\n{{- end }}\n{{- if .Values.datadog.containerIncludeLogs }}\n- name: DD_CONTAINER_INCLUDE_LOGS\n value: {{ .Values.datadog.containerIncludeLogs | quote }}\n{{- end }}\n{{- if .Values.datadog.containerExcludeLogs }}\n- name: DD_CONTAINER_EXCLUDE_LOGS\n value: {{ .Values.datadog.containerExcludeLogs | quote }}\n{{- end }}\n{{- if .Values.datadog.criSocketPath }}\n- name: DD_CRI_SOCKET_PATH\n value: {{ print \"/host/\" .Values.datadog.criSocketPath | clean }}\n{{- else }}\n- name: DOCKER_HOST\n{{- if eq .Values.targetSystem \"linux\" }}\n value: unix://{{ print \"/host/\" (include \"datadog.dockerOrCriSocketPath\" .) | clean }}\n{{- end }}\n{{- if eq .Values.targetSystem \"windows\" }}\n value: npipe://{{ (include \"datadog.dockerOrCriSocketPath\" .) 
| replace \"\\\\\" \"/\" }}\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# containers-init-linux.yaml\n{{- define \"containers-init-linux\" -}}\n- name: init-volume\n image: \"{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}\"\n imagePullPolicy: {{ .Values.agents.image.pullPolicy }}\n command: [\"bash\", \"-c\"]\n args:\n - cp -r /etc/datadog-agent /opt\n volumeMounts:\n - name: config\n mountPath: /opt/datadog-agent\n resources:\n{{ toYaml .Values.agents.containers.initContainers.resources | indent 4 }}\n- name: init-config\n image: \"{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}\"\n imagePullPolicy: {{ .Values.agents.image.pullPolicy }}\n command: [\"bash\", \"-c\"]\n args:\n - for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do bash $script ; done\n volumeMounts:\n - name: config\n mountPath: /etc/datadog-agent\n {{- if (or (.Values.datadog.confd) (.Values.datadog.autoconf)) }}\n - name: confd\n mountPath: /conf.d\n readOnly: true\n {{- end }}\n {{- if .Values.datadog.checksd }}\n - name: checksd\n mountPath: /checks.d\n readOnly: true\n {{- end }}\n - name: procdir\n mountPath: /host/proc\n readOnly: true\n - name: runtimesocketdir\n mountPath: {{ print \"/host/\" (dir (include \"datadog.dockerOrCriSocketPath\" .)) | clean }}\n readOnly: true\n {{- if .Values.datadog.systemProbe.enabled }}\n - name: sysprobe-config\n mountPath: /etc/datadog-agent/system-probe.yaml\n subPath: system-probe.yaml\n {{- end }}\n env:\n {{- include \"containers-common-env\" . | nindent 4 }}\n {{- if and (not .Values.clusterAgent.enabled) .Values.datadog.leaderElection }}\n - name: DD_LEADER_ELECTION\n value: {{ .Values.datadog.leaderElection | quote }}\n {{- end }}\n resources:\n{{ toYaml .Values.agents.containers.initContainers.resources | indent 4 }}\n{{- end -}}\n",
"# containers-init-windows.yaml\n{{- define \"containers-init-windows\" -}}\n- name: init-volume\n image: \"{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}\"\n imagePullPolicy: {{ .Values.agents.image.pullPolicy }}\n command: [\"pwsh\", \"-Command\"]\n args:\n - Copy-Item -Recurse -Force {{ template \"datadog.confPath\" . }} C:/Temp\n volumeMounts:\n - name: config\n mountPath: C:/Temp/Datadog\n resources:\n{{ toYaml .Values.agents.containers.initContainers.resources | indent 4 }}\n- name: init-config\n image: \"{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}\"\n imagePullPolicy: {{ .Values.agents.image.pullPolicy }}\n command: [\"pwsh\", \"-Command\"]\n args:\n - Get-ChildItem 'entrypoint-ps1' | ForEach-Object { & $_.FullName if (-Not $?) { exit 1 } }\n volumeMounts:\n - name: config\n mountPath: {{ template \"datadog.confPath\" . }}\n {{- if (or (.Values.datadog.confd) (.Values.datadog.autoconf)) }}\n - name: confd\n mountPath: C:/conf.d\n readOnly: true\n {{- end }}\n {{- if .Values.datadog.checksd }}\n - name: checksd\n mountPath: C:/checks.d\n readOnly: true\n {{- end }}\n - name: runtimesocket\n mountPath: {{ template \"datadog.dockerOrCriSocketPath\" . }}\n env:\n {{- include \"containers-common-env\" . | nindent 4 }}\n resources:\n{{ toYaml .Values.agents.containers.initContainers.resources | indent 4 }}\n{{- end -}}\n",
"# daemonset-volumes-linux.yaml\n{{- define \"daemonset-volumes-linux\" -}}\n- hostPath:\n path: /proc\n name: procdir\n- hostPath:\n path: /sys/fs/cgroup\n name: cgroups\n{{- if .Values.datadog.dogstatsd.useSocketVolume }}\n- hostPath:\n path: {{ .Values.datadog.dogstatsd.hostSocketPath }}\n type: DirectoryOrCreate\n name: dsdsocket\n{{- end }}\n{{- if .Values.datadog.apm.useSocketVolume }}\n- hostPath:\n path: {{ .Values.datadog.apm.hostSocketPath }}\n type: DirectoryOrCreate\n name: apmsocket\n{{- end }}\n- name: s6-run\n emptyDir: {}\n{{- if (or (.Values.datadog.confd) (.Values.datadog.autoconf)) }}\n- name: confd\n configMap:\n name: {{ template \"datadog.fullname\" . }}-confd\n{{- end }}\n{{- if .Values.datadog.systemProbe.enabled }}\n- name: sysprobe-config\n configMap:\n name: {{ template \"datadog.fullname\" . }}-system-probe-config\n{{- if eq .Values.datadog.systemProbe.seccomp \"localhost/system-probe\" }}\n- name: datadog-agent-security\n configMap:\n name: {{ template \"datadog.fullname\" . }}-security\n- hostPath:\n path: {{ .Values.datadog.systemProbe.seccompRoot }}\n name: seccomp-root\n{{- end }}\n- hostPath:\n path: /sys/kernel/debug\n name: debugfs\n- name: sysprobe-socket-dir\n emptyDir: {}\n- hostPath:\n path: /lib/modules\n name: modules\n- hostPath:\n path: /usr/src\n name: src\n{{- end }}\n{{- if or .Values.datadog.processAgent.enabled .Values.datadog.systemProbe.enabled }}\n- hostPath:\n path: /etc/passwd\n name: passwd\n{{- end }}\n{{- if or .Values.datadog.logs.enabled .Values.datadog.logsEnabled }}\n- hostPath:\n path: \"/var/lib/datadog-agent/logs\"\n name: pointerdir\n- hostPath:\n path: /var/log/pods\n name: logpodpath\n{{- if not .Values.datadog.criSocketPath }}\n- hostPath:\n path: /var/lib/docker/containers\n name: logdockercontainerpath\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# daemonset-volumes-windows.yaml\n{{- define \"daemonset-volumes-windows\" -}}\n{{- if or .Values.datadog.logs.enabled .Values.datadog.logsEnabled }}\n- hostPath:\n path: C:/var/log\n name: pointerdir\n- hostPath:\n path: C:/var/log/pods\n name: logpodpath\n- hostPath:\n path: C:/ProgramData/docker/containers\n name: logdockercontainerpath\n{{- end }}\n{{- end -}}\n",
"# daemonset.yaml\n{{- template \"check-version\" . }}\n{{- if .Values.agents.enabled }}\n{{- if (or (.Values.datadog.apiKeyExistingSecret) (.Values.datadog.apiKey)) }}\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: {{ template \"datadog.fullname\" . }}\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"datadog.fullname\" . }}\n {{- if .Values.agents.podLabels }}\n{{ toYaml .Values.agents.podLabels | indent 6 }}\n {{- end }}\n template:\n metadata:\n labels:\n app: {{ template \"datadog.fullname\" . }}\n {{- if .Values.agents.podLabels }}\n{{ toYaml .Values.agents.podLabels | indent 8 }}\n {{- end }}\n name: {{ template \"datadog.fullname\" . }}\n annotations:\n checksum/autoconf-config: {{ tpl (toYaml .Values.datadog.autoconf) . | sha256sum }}\n checksum/confd-config: {{ tpl (toYaml .Values.datadog.confd) . | sha256sum }}\n checksum/checksd-config: {{ tpl (toYaml .Values.datadog.checksd) . | sha256sum }}\n {{- if .Values.agents.customAgentConfig }}\n checksum/agent-config: {{ tpl (toYaml .Values.agents.customAgentConfig) . | sha256sum }}\n {{- end }}\n {{- if .Values.datadog.systemProbe.enabled }}\n container.apparmor.security.beta.kubernetes.io/system-probe: {{ .Values.datadog.systemProbe.apparmor }}\n container.seccomp.security.alpha.kubernetes.io/system-probe: {{ .Values.datadog.systemProbe.seccomp }}\n {{- end }}\n {{- if .Values.agents.podAnnotations }}\n{{ toYaml .Values.agents.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n {{- if .Values.datadog.securityContext }}\n securityContext:\n{{ toYaml .Values.datadog.securityContext| indent 8 }}\n {{- else if or .Values.agents.podSecurity.podSecurityPolicy.create .Values.agents.podSecurity.securityContextConstraints.create -}}\n {{- if and (.Values.agents.podSecurity.securityContext) .Values.agents.podSecurity.securityContext.seLinuxOptions }}\n securityContext:\n seLinuxOptions:\n{{ toYaml .Values.agents.podSecurity.securityContext.seLinuxOptions | indent 10 }}\n {{- end }}\n {{- end }}\n {{- if .Values.agents.useHostNetwork }}\n hostNetwork: {{ .Values.agents.useHostNetwork }}\n dnsPolicy: ClusterFirstWithHostNet\n {{- end }}\n {{- if .Values.agents.dnsConfig }}\n dnsConfig:\n{{ toYaml .Values.agents.dnsConfig | indent 8 }}\n {{- end }}\n {{- if .Values.datadog.dogstatsd.useHostPID }}\n hostPID: {{ .Values.datadog.dogstatsd.useHostPID }}\n {{- end }}\n {{- if .Values.agents.image.pullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.agents.image.pullSecrets | indent 8 }}\n {{- end }}\n {{- if .Values.agents.priorityClassName }}\n priorityClassName: {{ .Values.agents.priorityClassName }}\n {{- end }}\n containers:\n {{- include \"container-agent\" . | nindent 6 }}\n {{- if .Values.datadog.apm.enabled }}\n {{- include \"container-trace-agent\" . | nindent 6 }}\n {{- end }}\n {{- if .Values.datadog.processAgent.enabled }}\n {{- include \"container-process-agent\" . | nindent 6 }}\n {{- end }}\n {{- if .Values.datadog.systemProbe.enabled }}\n {{- include \"container-system-probe\" . | nindent 6 }}\n {{- end }}\n initContainers:\n {{- if eq .Values.targetSystem \"windows\" }}\n {{ include \"containers-init-windows\" . 
| nindent 6 }}\n {{- end }}\n {{- if eq .Values.targetSystem \"linux\" }}\n {{ include \"containers-init-linux\" . | nindent 6 }}\n {{- end }}\n {{- if and .Values.datadog.systemProbe.enabled (eq .Values.datadog.systemProbe.seccomp \"localhost/system-probe\") }}\n {{ include \"system-probe-init\" . | nindent 6 }}\n {{- end }}\n volumes:\n - name: config\n emptyDir: {}\n {{- if eq .Values.targetSystem \"linux\" }}\n - hostPath:\n path: {{ dir (include \"datadog.dockerOrCriSocketPath\" .) }}\n name: runtimesocketdir\n {{- end }}\n {{- if eq .Values.targetSystem \"windows\" }}\n - hostPath:\n path: {{ template \"datadog.dockerOrCriSocketPath\" . }}\n name: runtimesocket\n {{- end }}\n {{- if .Values.datadog.checksd }}\n - name: checksd\n configMap:\n name: {{ template \"datadog.fullname\" . }}-checksd\n {{- end }}\n {{- if .Values.agents.useConfigMap }}\n - name: {{ template \"datadog.fullname\" . }}-datadog-yaml\n configMap:\n name: {{ template \"datadog.fullname\" . }}-datadog-yaml\n {{- end }}\n {{- if eq .Values.targetSystem \"windows\" }}\n {{ include \"daemonset-volumes-windows\" . | nindent 6 }}\n {{- end }}\n {{- if eq .Values.targetSystem \"linux\" }}\n {{ include \"daemonset-volumes-linux\" . | nindent 6 }}\n {{- end }}\n{{- if .Values.agents.volumes }}\n{{ toYaml .Values.agents.volumes | indent 6 }}\n{{- end }}\n tolerations:\n {{- if eq .Values.targetSystem \"windows\" }}\n - effect: NoSchedule\n key: node.kubernetes.io/os\n value: windows\n operator: Equal\n {{- end }}\n {{- if .Values.agents.tolerations }}\n{{ toYaml .Values.agents.tolerations | indent 6 }}\n {{- end }}\n affinity:\n{{ toYaml .Values.agents.affinity | indent 8 }}\n serviceAccountName: {{ if .Values.agents.rbac.create }}{{ template \"datadog.fullname\" . }}{{ else }}\"{{ .Values.agents.rbac.serviceAccountName }}\"{{ end }}\n nodeSelector:\n {{ template \"label.os\" . }}: {{ .Values.targetSystem }}\n {{- if .Values.agents.nodeSelector }}\n{{ toYaml .Values.agents.nodeSelector | indent 8 }}\n {{- end }}\n updateStrategy:\n{{ toYaml .Values.agents.updateStrategy | indent 4 }}\n{{ end }}\n{{ end }}\n",
"# datadog-yaml-configmap.yaml\n{{- if .Values.agents.useConfigMap }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-datadog-yaml\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n annotations:\n {{- if .Values.agents.customAgentConfig }}\n checksum/agent-config: {{ tpl (toYaml .Values.agents.customAgentConfig) . | sha256sum }}\n {{- end }}\ndata:\n datadog.yaml: |\n {{- if .Values.agents.customAgentConfig }}\n{{ tpl (toYaml .Values.agents.customAgentConfig) . | indent 4 }}\n {{- else }}\n ## Provides autodetected defaults, for kubernetes environments,\n ## please see datadog.yaml.example for all supported options\n\n # Autodiscovery for Kubernetes\n listeners:\n - name: kubelet\n config_providers:\n - name: kubelet\n polling: true\n\n # Enable APM by setting the DD_APM_ENABLED envvar to true, or override this configuration\n apm_config:\n enabled: true\n apm_non_local_traffic: true\n max_memory: 0\n max_cpu_percent: 0\n\n {{- $version := (.Values.agents.image.tag | toString | trimSuffix \"-jmx\") }}\n {{- $length := len (split \".\" $version ) -}} \n {{- if and (eq $length 1) (ge $version \"6\") -}}\n {{- $version := \"6.15\" }} \n {{- end -}}\n {{ if semverCompare \">=6.15\" $version }}\n # Enable java container awareness (agent version >= 6.15)\n jmx_use_container_support: true\n {{ else }}\n # Enable java cgroup memory awareness (agent version < 6.15)\n jmx_use_cgroup_memory_limit: true\n {{ end }}\n {{- end }}\n{{- end }}\n",
"# hpa-rbac.yaml\n{{- if and .Values.clusterAgent.enabled .Values.clusterAgent.rbac.create .Values.clusterAgent.metricsProvider.enabled -}}\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: ClusterRole\nmetadata:\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n name: {{ template \"datadog.fullname\" . }}-cluster-agent-external-metrics-reader\nrules:\n- apiGroups:\n - \"external.metrics.k8s.io\"\n resources:\n - \"*\"\n verbs:\n - list\n - get\n - watch\n---\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: ClusterRoleBinding\nmetadata:\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n name: {{ template \"datadog.fullname\" . }}-cluster-agent-external-metrics-reader\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"datadog.fullname\" . }}-cluster-agent-external-metrics-reader\nsubjects:\n- kind: ServiceAccount\n name: horizontal-pod-autoscaler\n namespace: kube-system\n---\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: RoleBinding\nmetadata:\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n name: \"{{ template \"datadog.fullname\" . }}-cluster-agent\"\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: extension-apiserver-authentication-reader\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"datadog.fullname\" . }}-cluster-agent\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# rbac.yaml\n{{- if .Values.agents.rbac.create -}}\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: ClusterRole\nmetadata:\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n name: {{ template \"datadog.fullname\" . }}\nrules:\n{{- if not .Values.clusterAgent.enabled }}\n- apiGroups:\n - \"\"\n resources:\n - services\n - events\n - endpoints\n - pods\n - nodes\n - componentstatuses\n verbs:\n - get\n - list\n - watch\n- apiGroups: [\"quota.openshift.io\"]\n resources:\n - clusterresourcequotas\n verbs:\n - get\n - list\n{{- if .Values.datadog.collectEvents }}\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n resourceNames:\n - datadogtoken # Kubernetes event collection state\n verbs:\n - get\n - update\n{{- end }}\n{{- if .Values.datadog.leaderElection }}\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n resourceNames:\n - datadog-leader-election # Leader election token\n verbs:\n - get\n - update\n- apiGroups: # To create the leader election token\n - \"\"\n resources:\n - configmaps\n verbs:\n - create\n{{- end }}\n- nonResourceURLs:\n - \"/version\"\n - \"/healthz\"\n verbs:\n - get\n{{- end }}\n- nonResourceURLs:\n - \"/metrics\"\n verbs:\n - get\n- apiGroups: # Kubelet connectivity\n - \"\"\n resources:\n - nodes/metrics\n - nodes/spec\n - nodes/proxy\n - nodes/stats\n verbs:\n - get\n- apiGroups: # leader election check\n - \"\"\n resources:\n - endpoints\n verbs:\n - get\n---\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: ClusterRoleBinding\nmetadata:\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n name: {{ template \"datadog.fullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"datadog.fullname\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"datadog.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n name: {{ template \"datadog.fullname\" . }}\n{{- end -}}\n",
"# secrets.yaml\n# API Key\n{{- if not .Values.datadog.apiKeyExistingSecret }}\n\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"datadog.fullname\" . }}\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\ntype: Opaque\ndata:\n api-key: {{ default \"MISSING\" .Values.datadog.apiKey | b64enc | quote }}\n\n{{- end }}\n\n# APP Key\n{{- if not .Values.datadog.appKeyExistingSecret }}\n{{- if and .Values.clusterAgent.enabled .Values.clusterAgent.metricsProvider.enabled }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"datadog.appKeySecretName\" . }}\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\ntype: Opaque\ndata:\n app-key: {{ default \"MISSING\" .Values.datadog.appKey | b64enc | quote }}\n{{- end }}\n{{- end }}\n",
"# system-probe-configmap.yaml\n{{- if .Values.datadog.systemProbe.enabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-system-probe-config\n namespace: {{ $.Release.Namespace }}\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\ndata:\n system-probe.yaml: |\n system_probe_config:\n enabled: {{ $.Values.datadog.systemProbe.enabled }}\n debug_port: {{ $.Values.datadog.systemProbe.debugPort }}\n sysprobe_socket: /var/run/sysprobe/sysprobe.sock\n enable_conntrack: {{ $.Values.datadog.systemProbe.enableConntrack }}\n bpf_debug: {{ $.Values.datadog.systemProbe.bpfDebug }}\n enable_tcp_queue_length: {{ $.Values.datadog.systemProbe.enableTCPQueueLength }}\n enable_oom_kill: {{ $.Values.datadog.systemProbe.enableOOMKill }}\n collect_dns_stats: {{ $.Values.datadog.systemProbe.collectDNSStats }}\n\n{{- if eq .Values.datadog.systemProbe.seccomp \"localhost/system-probe\" }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"datadog.fullname\" . }}-security\n namespace: {{ $.Release.Namespace }}\n labels:\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n app.kubernetes.io/name: \"{{ template \"datadog.fullname\" . }}\"\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\ndata:\n system-probe-seccomp.json: |\n {\n \"defaultAction\": \"SCMP_ACT_ERRNO\",\n \"syscalls\": [\n {\n \"names\": [\n \"accept4\",\n \"access\",\n \"arch_prctl\",\n \"bind\",\n \"bpf\",\n \"brk\",\n \"capget\",\n \"capset\",\n \"chdir\",\n \"clock_gettime\",\n \"clone\",\n \"close\",\n \"connect\",\n \"copy_file_range\",\n \"creat\",\n \"dup\",\n \"dup2\",\n \"dup3\",\n \"epoll_create\",\n \"epoll_create1\",\n \"epoll_ctl\",\n \"epoll_ctl_old\",\n \"epoll_pwait\",\n \"epoll_wait\",\n \"epoll_wait\",\n \"epoll_wait_old\",\n \"execve\",\n \"execveat\",\n \"exit\",\n \"exit_group\",\n \"fchmod\",\n \"fchmodat\",\n \"fchown\",\n \"fchown32\",\n \"fchownat\",\n \"fcntl\",\n \"fcntl64\",\n \"fstat\",\n \"fstat64\",\n \"fstatfs\",\n \"fsync\",\n \"futex\",\n \"getcwd\",\n \"getdents\",\n \"getdents64\",\n \"getegid\",\n \"geteuid\",\n \"getgid\",\n \"getpeername\",\n \"getpid\",\n \"getppid\",\n \"getpriority\",\n \"getrandom\",\n \"getresgid\",\n \"getresgid32\",\n \"getresuid\",\n \"getresuid32\",\n \"getrlimit\",\n \"getrusage\",\n \"getsid\",\n \"getsockname\",\n \"getsockopt\",\n \"gettid\",\n \"gettimeofday\",\n \"getuid\",\n \"getxattr\",\n \"ioctl\",\n \"ipc\",\n \"listen\",\n \"lseek\",\n \"lstat\",\n \"lstat64\",\n \"madvise\",\n \"mkdir\",\n \"mkdirat\",\n \"mmap\",\n \"mmap2\",\n \"mprotect\",\n \"mremap\",\n \"munmap\",\n \"nanosleep\",\n \"newfstatat\",\n \"open\",\n \"openat\",\n \"pause\",\n \"perf_event_open\",\n \"pipe\",\n \"pipe2\",\n \"poll\",\n \"ppoll\",\n \"prctl\",\n \"pread64\",\n \"prlimit64\",\n \"pselect6\",\n \"read\",\n \"readlink\",\n \"readlinkat\",\n \"recvfrom\",\n \"recvmmsg\",\n \"recvmsg\",\n \"rename\",\n \"restart_syscall\",\n \"rmdir\",\n \"rt_sigaction\",\n \"rt_sigpending\",\n \"rt_sigprocmask\",\n \"rt_sigqueueinfo\",\n \"rt_sigreturn\",\n \"rt_sigsuspend\",\n 
\"rt_sigtimedwait\",\n \"rt_tgsigqueueinfo\",\n \"sched_getaffinity\",\n \"sched_yield\",\n \"seccomp\",\n \"select\",\n \"semtimedop\",\n \"send\",\n \"sendmmsg\",\n \"sendmsg\",\n \"sendto\",\n \"set_robust_list\",\n \"set_tid_address\",\n \"setgid\",\n \"setgid32\",\n \"setgroups\",\n \"setgroups32\",\n \"setns\",\n \"setrlimit\",\n \"setsid\",\n \"setsidaccept4\",\n \"setsockopt\",\n \"setuid\",\n \"setuid32\",\n \"sigaltstack\",\n \"socket\",\n \"socketcall\",\n \"socketpair\",\n \"stat\",\n \"stat64\",\n \"statfs\",\n \"sysinfo\",\n \"umask\",\n \"uname\",\n \"unlink\",\n \"unlinkat\",\n \"wait4\",\n \"waitid\",\n \"waitpid\",\n \"write\"\n ],\n \"action\": \"SCMP_ACT_ALLOW\",\n \"args\": null\n },\n {\n \"names\": [\n \"setns\"\n ],\n \"action\": \"SCMP_ACT_ALLOW\",\n \"args\": [\n {\n \"index\": 1,\n \"value\": 1073741824,\n \"valueTwo\": 0,\n \"op\": \"SCMP_CMP_EQ\"\n }\n ],\n \"comment\": \"\",\n \"includes\": {},\n \"excludes\": {}\n }\n ]\n }\n{{- end }}\n{{- end }}\n",
"# system-probe-init.yaml\n{{- define \"system-probe-init\" -}}\n- name: seccomp-setup\n image: \"{{ .Values.agents.image.repository }}:{{ .Values.agents.image.tag }}\"\n command:\n - cp\n - /etc/config/system-probe-seccomp.json\n - /host/var/lib/kubelet/seccomp/system-probe\n volumeMounts:\n - name: datadog-agent-security\n mountPath: /etc/config\n - name: seccomp-root\n mountPath: /host/var/lib/kubelet/seccomp\n resources:\n{{ toYaml .Values.agents.containers.initContainers.resources | indent 4 }}\n{{- end -}}\n"
] | ## Default values for Datadog Agent
## See Datadog helm documentation to learn more:
## https://docs.datadoghq.com/agent/kubernetes/helm/
## @param nameOverride - string - optional
## Override name of app.
#
nameOverride: # ""
## @param fullnameOverride - string - optional
## Override the fully qualified app name.
#
fullnameOverride: # ""
## @param targetSystem - string - required
## Set the target OS for this deployment
## Possible values: linux, windows
#
targetSystem: "linux"
datadog:
## @param apiKey - string - required
## Set this to your Datadog API key before the Agent runs.
## ref: https://app.datadoghq.com/account/settings#agent/kubernetes
#
apiKey: <DATADOG_API_KEY>
## @param apiKeyExistingSecret - string - optional
## Use existing Secret which stores API key instead of creating a new one.
## If set, this parameter takes precedence over "apiKey".
#
apiKeyExistingSecret: # <DATADOG_API_KEY_SECRET>
## @param appKey - string - optional
## If you are using clusterAgent.metricsProvider.enabled = true, you must set
## a Datadog application key for read access to your metrics.
#
appKey: # <DATADOG_APP_KEY>
## @param appKeyExistingSecret - string - optional
## Use existing Secret which stores APP key instead of creating a new one
## If set, this parameter takes precedence over "appKey".
#
appKeyExistingSecret: # <DATADOG_APP_KEY_SECRET>
## @param securityContext - object - optional
## You can modify the security context used to run the containers by
## modifying the label type below:
#
securityContext: {}
# seLinuxOptions:
# user: "system_u"
# role: "system_r"
# type: "spc_t"
# level: "s0"
## @param clusterName - string - optional
## Set a unique cluster name to allow scoping hosts and Cluster Checks easily
## The name must be unique and must be dot-separated tokens where a token can be up to 40 characters with the following restrictions:
## * Lowercase letters, numbers, and hyphens only.
## * Must start with a letter.
## * Must end with a number or a letter.
## These rules match GKE's cluster-name rules, except that dots are allowed here while GKE forbids them:
## https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.FIELDS.name
#
clusterName: # <CLUSTER_NAME>
## @param site - string - optional - default: 'datadoghq.com'
## The site of the Datadog intake to send Agent data to.
## Set to 'datadoghq.eu' to send data to the EU site.
#
site: # datadoghq.com
## @param dd_url - string - optional - default: 'https://app.datadoghq.com'
## The host of the Datadog intake server to send Agent data to, only set this option
## if you need the Agent to send data to a custom URL.
## Overrides the site setting defined in "site".
#
dd_url: # https://app.datadoghq.com
## @param logLevel - string - required
## Set logging verbosity, valid log levels are:
## trace, debug, info, warn, error, critical, and off
#
logLevel: INFO
## @param kubeStateMetricsEnabled - boolean - required
## If true, deploys the kube-state-metrics deployment.
## ref: https://github.com/kubernetes/charts/tree/master/stable/kube-state-metrics
#
kubeStateMetricsEnabled: true
## @param clusterChecks - object - required
## Enable the Cluster Checks feature on both the cluster-agents and the daemonset
## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/
## Autodiscovery via Kube Service annotations is automatically enabled
#
clusterChecks:
enabled: false
## @param nodeLabelsAsTags - list of key:value strings - optional
## Provide a mapping of Kubernetes Node Labels to Datadog Tags.
#
nodeLabelsAsTags: {}
# beta.kubernetes.io/instance-type: aws-instance-type
# kubernetes.io/role: kube_role
# <KUBERNETES_NODE_LABEL>: <DATADOG_TAG_KEY>
## @param podLabelsAsTags - list of key:value strings - optional
## Provide a mapping of Kubernetes Labels to Datadog Tags.
#
podLabelsAsTags: {}
# app: kube_app
# release: helm_release
# <KUBERNETES_LABEL>: <DATADOG_TAG_KEY>
## @param podAnnotationsAsTags - list of key:value strings - optional
## Provide a mapping of Kubernetes Annotations to Datadog Tags
#
podAnnotationsAsTags: {}
# iam.amazonaws.com/role: kube_iamrole
# <KUBERNETES_ANNOTATIONS>: <DATADOG_TAG_KEY>
## @param tags - list of key:value elements - optional
## List of tags to attach to every metric, event and service check collected by this Agent.
##
## Learn more about tagging: https://docs.datadoghq.com/tagging/
#
tags: []
# - "<KEY_1>:<VALUE_1>"
# - "<KEY_2>:<VALUE_2>"
## @param dogstatsd - object - required
## dogstatsd configuration
## ref: https://docs.datadoghq.com/agent/kubernetes/dogstatsd/
## To emit custom metrics from your Kubernetes application, use DogStatsD.
#
dogstatsd:
## @param port - integer - optional - default: 8125
## Override the Agent DogStatsD port.
## Note: Make sure your client is sending to the same UDP port.
#
port: 8125
## @param originDetection - boolean - optional
## Enable origin detection for container tagging
## https://docs.datadoghq.com/developers/dogstatsd/unix_socket/#using-origin-detection-for-container-tagging
#
originDetection: false
## @param useSocketVolume - boolean - optional
## Enable dogstatsd over Unix Domain Socket
## ref: https://docs.datadoghq.com/developers/dogstatsd/unix_socket/
#
useSocketVolume: false
## @param socketPath - string - optional
## Path to the DogStatsD socket
#
socketPath: /var/run/datadog/dsd.socket
## @param hostSocketPath - string - optional
## host path to the DogStatsD socket
#
hostSocketPath: /var/run/datadog/
## @param useHostPort - boolean - optional
## Sets the hostPort to the same value as the container port. Needs to be used
## for sending custom metrics.
## The ports need to be available on all hosts.
##
## WARNING: Make sure that hosts using this are properly firewalled otherwise
## metrics and traces are accepted from any host able to connect to this host.
#
useHostPort: false
## @param useHostPID - boolean - optional
## Run the agent in the host's PID namespace. This is required for Dogstatsd origin
## detection to work. See https://docs.datadoghq.com/developers/dogstatsd/unix_socket/
#
useHostPID: false
## @param nonLocalTraffic - boolean - optional - default: false
## Enable this to make each node accept non-local statsd traffic.
## ref: https://github.com/DataDog/docker-dd-agent#environment-variables
#
nonLocalTraffic: false
## @param collectEvents - boolean - optional - default: false
## Enable this to start event collection from the Kubernetes API
## ref: https://docs.datadoghq.com/agent/kubernetes/event_collection/
#
collectEvents: false
## @param leaderElection - boolean - optional - default: false
## Enables leader election mechanism for event collection.
#
leaderElection: false
## @param leaderLeaseDuration - integer - optional - default: 60
## Set the lease time for leader election, in seconds.
#
leaderLeaseDuration: # 60
## @param logs - object - required
## Enable logs agent and provide custom configs
#
logs:
## @param enabled - boolean - optional - default: false
## Enable this to activate Datadog Agent log collection.
## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup
#
enabled: false
## @param containerCollectAll - boolean - optional - default: false
## Enable this to allow log collection for all containers.
## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup
#
containerCollectAll: false
## @param containerCollectUsingFiles - boolean - optional - default: true
## Collect logs from files in /var/log/pods instead of using container runtime API.
## It's usually the most efficient way of collecting logs.
## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup
#
containerCollectUsingFiles: true
## @param apm - object - required
## Enable apm agent and provide custom configs
#
apm:
## @param enabled - boolean - optional - default: false
## Set this to true to enable APM and tracing, on port 8126
## ref: https://github.com/DataDog/docker-dd-agent#tracing-from-the-host
#
enabled: false
## @param port - integer - optional - default: 8126
## Override the trace Agent port.
## Note: Make sure your tracing client is sending to the same port.
#
port: 8126
## @param useSocketVolume - boolean - optional
## Enable APM over Unix Domain Socket
## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
#
useSocketVolume: false
## @param socketPath - string - optional
## Path to the trace-agent socket
#
socketPath: /var/run/datadog/apm.socket
## @param hostSocketPath - string - optional
## host path to the trace-agent socket
#
hostSocketPath: /var/run/datadog/
## @param env - list of object - optional
## The dd-agent supports many environment variables
## ref: https://docs.datadoghq.com/agent/docker/?tab=standard#environment-variables
#
env: []
# - name: <ENV_VAR_NAME>
# value: <ENV_VAR_VALUE>
## @param confd - list of objects - optional
## Provide additional check configurations (static and Autodiscovery)
## Each key becomes a file in /conf.d
## ref: https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#optional-volumes
## ref: https://docs.datadoghq.com/agent/autodiscovery/
#
confd: {}
# redisdb.yaml: |-
# init_config:
# instances:
# - host: "name"
# port: "6379"
# kubernetes_state.yaml: |-
# ad_identifiers:
# - kube-state-metrics
# init_config:
# instances:
# - kube_state_url: http://%%host%%:8080/metrics
## @param checksd - list of key:value strings - optional
## Provide additional custom checks as python code
## Each key becomes a file in /checks.d
## ref: https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#optional-volumes
#
checksd: {}
# service.py: |-
## @param dockerSocketPath - string - optional
## Path to the docker socket
#
dockerSocketPath: # /var/run/docker.sock
## @param criSocketPath - string - optional
## Path to the container runtime socket (if different from Docker)
## This is supported starting from agent 6.6.0
#
criSocketPath: # /var/run/containerd/containerd.sock
## @param processAgent - object - required
## Enable process agent and provide custom configs
#
processAgent:
## @param enabled - boolean - required
## Set this to true to enable live process monitoring agent
## Note: /etc/passwd is automatically mounted to allow username resolution.
## ref: https://docs.datadoghq.com/graphing/infrastructure/process/#kubernetes-daemonset
#
enabled: true
## @param processCollection - boolean - required
## Set this to true to enable process collection in process monitoring agent
## Requires processAgent.enabled to be set to true to have any effect
#
processCollection: false
## @param systemProbe - object - required
## Enable systemProbe agent and provide custom configs
#
systemProbe:
## @param enabled - boolean - required
## Set this to true to enable system-probe agent
#
enabled: false
## @param debugPort - integer - required
## Specify the port to expose pprof and expvar for system-probe agent
#
debugPort: 0
## @param enableConntrack - boolean - required
## Enable the system-probe agent to connect to the netlink/conntrack subsystem to add NAT information to connection data
## Ref: http://conntrack-tools.netfilter.org/
#
enableConntrack: true
## @param seccomp - string - required
## Apply an ad-hoc seccomp profile to the system-probe agent to restrict its privileges
## Note that this will break `kubectl exec … -c system-probe -- /bin/bash`
#
seccomp: localhost/system-probe
## @param seccompRoot - string - required
## Specify the seccomp profile root directory
#
seccompRoot: /var/lib/kubelet/seccomp
## @param bpfDebug - boolean - required
## Enable logging for kernel debug
#
bpfDebug: false
## @param apparmor - string - required
## Specify an AppArmor profile for the system-probe container
#
apparmor: unconfined
## @param enableTCPQueueLength - boolean - optional
## Enable the TCP queue length eBPF-based check
#
enableTCPQueueLength: false
## @param enableOOMKill - boolean - optional
## Enable the OOM kill eBPF-based check
#
enableOOMKill: false
## @param collectDNSStats - boolean - optional
## Enable DNS stat collection
#
collectDNSStats: false
orchestratorExplorer:
## @param enabled - boolean - required
## Set this to true to enable the orchestrator explorer.
## This requires processAgent.enabled and clusterAgent.enabled to be set to true
## ref: TODO - add doc link
#
enabled: false
## @param clusterAgent - object - required
## This is the Datadog Cluster Agent implementation that handles cluster-wide
## metrics more cleanly, separates concerns for better rbac, and implements
## the external metrics API so you can autoscale HPAs based on datadog metrics
## ref: https://docs.datadoghq.com/agent/kubernetes/cluster/
#
clusterAgent:
## @param enabled - boolean - required
## Set this to true to enable Datadog Cluster Agent
#
enabled: false
## @param image - object - required
## Define the Datadog Cluster-Agent image to work with.
#
image:
## @param repository - string - required
## Define the repository to use:
#
repository: datadog/cluster-agent
## @param tag - string - required
## Define the Cluster-Agent version to use.
#
tag: 1.7.0
## @param pullPolicy - string - required
## The Kubernetes pull policy.
#
pullPolicy: IfNotPresent
## @param pullSecrets - list of key:value strings - optional
## It is possible to specify docker registry credentials
## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
#
pullSecrets: []
# - name: "<REG_SECRET>"
## @param command - array - optional
## Command to run in the Cluster Agent container
#
command: []
## @param token - string - required
## This needs to be at least 32 characters (a-zA-Z)
## It is a preshared key between the node agents and the cluster agent
## ref:
#
token: ""
## @param replicas - integer - required
## Specify the number of Cluster Agent replicas; if > 1, the Cluster Agent can
## work in HA mode.
#
replicas: 1
## @param rbac - object - required
## Provide Cluster Agent Deployment pod(s) RBAC configuration
rbac:
## @param create - boolean - required
## If true, create & use RBAC resources
#
create: true
## @param serviceAccountName - string - required
## Ignored if clusterAgent.rbac.create is true
#
serviceAccountName: default
## @param metricsProvider - object - required
## Enable the metricsProvider to be able to scale based on metrics in Datadog
#
metricsProvider:
## @param enabled - boolean - required - default: false
## Set this to true to enable Metrics Provider
#
enabled: false
## @param wpaController - boolean - optional
## Enable the informer and controller of the Watermark Pod Autoscaler.
## NOTE: The `WatermarkPodAutoscaler` CRD must be installed beforehand.
#
wpaController: false
## @param useDatadogMetrics - boolean - optional
## Enable usage of DatadogMetric CRD to autoscale on arbitrary Datadog queries
## NOTE: The `DatadogMetric` CRD must be installed beforehand.
#
useDatadogMetrics: false
## Configuration of the service for the cluster-agent metrics server
#
service:
## @param type - string - optional
##
#
type: ClusterIP
## @param port - int - optional
##
port: 8443
## @param env - list of object - optional
## The Cluster-Agent supports many additional environment variables that can
## be passed literally.
## ref: https://docs.datadoghq.com/agent/cluster_agent/commands/#cluster-agent-options
#
env: []
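## For example (illustrative placeholders only, same name/value shape as the datadog.env example above):
# - name: <ENV_VAR_NAME>
# value: <ENV_VAR_VALUE>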
## @param admissionController - object - required
## Enable the admissionController to be able to inject APM/Dogstatsd config
## and standard tags (env, service, version) automatically into your pods
#
admissionController:
enabled: false
## @param mutateUnlabelled - boolean - optional
## Enable injecting config without having the pod label 'admission.datadoghq.com/enabled="true"'
#
mutateUnlabelled: false
## @param confd - list of objects - optional
## Provide additional cluster check configurations
## Each key will become a file in /conf.d
## ref: https://docs.datadoghq.com/agent/autodiscovery/
#
confd: {}
# mysql.yaml: |-
# cluster_check: true
# instances:
# - server: '<EXTERNAL_IP>'
# port: 3306
# user: datadog
# pass: '<YOUR_CHOSEN_PASSWORD>'
## @param resources - object - required
## Datadog cluster-agent resource requests and limits.
#
resources: {}
# requests:
# cpu: 200m
# memory: 256Mi
# limits:
# cpu: 200m
# memory: 256Mi
## @param priorityClassName - string - optional
## Name of the priorityClass to apply to the Cluster Agent
#
priorityClassName: # system-cluster-critical
## @param nodeSelector - object - optional
## Allow the Cluster Agent Deployment to schedule on selected nodes
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
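## For example (illustrative; kubernetes.io/os is a standard node label):
# kubernetes.io/os: linux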
## @param affinity - object - optional
## Allow the Cluster Agent Deployment to schedule using affinity rules
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
#
affinity: {}
## @param healthPort - integer - optional - default: 5555
## Port number used by the cluster-agent to serve the healthz endpoint
healthPort: 5555
## @param livenessProbe - object - required
## Override the agent's liveness probe logic from the default:
## In case of issues with the probe, you can disable it with the
## following values, to allow easier investigating:
#
livenessProbe:
httpGet:
port: 5555
path: /live
scheme: HTTP
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
## @param readinessProbe - object - required
## Override the cluster-agent's readiness probe logic from the default:
#
readinessProbe:
httpGet:
port: 5555
path: /ready
scheme: HTTP
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
## @param strategy - object - required
## Allow the Cluster Agent deployment to perform a rolling update on helm update
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
#
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
## @param podAnnotations - list of key:value strings - optional
## Annotations to add to the cluster-agent's pod(s)
#
podAnnotations: {}
# key: "value"
## @param useHostNetwork - boolean - optional
## Bind ports on the hostNetwork. Useful for CNI networking where hostPort might
## not be supported. The ports need to be available on all hosts. It can be
## used for custom metrics instead of a service endpoint.
##
## WARNING: Make sure that hosts using this are properly firewalled otherwise
## metrics and traces are accepted from any host able to connect to this host.
#
useHostNetwork: # true
## @param dnsConfig - object - optional
## Specify DNS configuration options for the Datadog Cluster Agent containers, e.g. ndots
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
dnsConfig: {}
# options:
# - name: ndots
# value: "1"
## @param volumes - list of objects - optional
## Specify additional volumes to mount in the cluster-agent container
#
volumes: []
# - hostPath:
# path: <HOST_PATH>
# name: <VOLUME_NAME>
## @param volumeMounts - list of objects - optional
## Specify additional volumeMounts for the cluster-agent container
#
volumeMounts: []
# - name: <VOLUME_NAME>
# mountPath: <CONTAINER_PATH>
# readOnly: true
## @param datadog_cluster_yaml - object - optional
## Specify custom contents for the datadog cluster agent config (datadog-cluster.yaml).
#
datadog_cluster_yaml: {}
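## Minimal illustrative sketch only; the keys below are assumed to follow the
## datadog-cluster.yaml config template, double-check them against the Cluster Agent docs:
# cluster_checks:
# enabled: true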
## @param createPodDisruptionBudget - boolean - optional
## Specify the pod disruption budget to apply to the cluster agents
#
createPodDisruptionBudget: false
agents:
## @param enabled - boolean - required
## You should keep Datadog DaemonSet enabled!
## An exceptional case would be needing to run a single Datadog Pod per
## namespace, without re-creating a DaemonSet for every non-default
## namespace install.
## Note: StatsD and DogStatsD work over UDP, so you may not get
## guaranteed delivery of the metrics in a Datadog-per-namespace setup!
#
enabled: true
## @param image - object - required
## Define the Datadog image to work with.
#
image:
## @param repository - string - required
## Define the repository to use:
## use "datadog/agent" for Datadog Agent 7
## use "datadog/dogstatsd" for Standalone Datadog Agent DogStatsD 7
#
repository: datadog/agent
## @param tag - string - required
## Define the Agent version to use.
## Use 7-jmx to enable jmx fetch collection
#
tag: 7.21.1
## @param doNotCheckTag - boolean - optional
## By default, the version passed in agents.image.tag is checked
## for compatibility with the version of the chart.
## This boolean allows you to completely skip this check.
## This is useful, for example, for custom tags that do not
## respect semantic versioning.
#
doNotCheckTag: # false
## @param pullPolicy - string - required
## The Kubernetes pull policy.
#
pullPolicy: IfNotPresent
## @param pullSecrets - list of key:value strings - optional
## It is possible to specify docker registry credentials
## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
#
pullSecrets: []
# - name: "<REG_SECRET>"
## @param rbac - object - required
## Provide Daemonset RBAC configuration
rbac:
## @param create - boolean - required
## If true, create & use RBAC resources
#
create: true
## @param serviceAccountName - string - required
## Ignored if agents.rbac.create is true
#
serviceAccountName: default
## @param podSecurity - object - optional
## Provide Daemonset PodSecurityPolicy configuration
podSecurity:
## @param podSecurityPolicy - object - required
## Provide Daemonset PodSecurityPolicy configuration
podSecurityPolicy:
## @param create - boolean - optional
## If true, create a PodSecurityPolicy resource for Agent pods
#
create: false
## @param securityContextConstraints - object - required
## Provide Daemonset securityContextConstraints configuration
securityContextConstraints:
## @param create - boolean - optional
## If true, create a SecurityContextConstraints resource for Agent pods
#
create: false
## @param securityContext - object - required
## Provide securityContext configuration
#
securityContext:
rule: MustRunAs
seLinuxOptions:
user: system_u
role: system_r
type: spc_t
level: s0
## @param privileged - boolean - optional
## If true, allow running privileged containers
#
privileged: false
## @param capabilites - list - optional
## Allowed capabilities
#
capabilites:
- SYS_ADMIN
- SYS_RESOURCE
- SYS_PTRACE
- NET_ADMIN
- NET_BROADCAST
- IPC_LOCK
## @param volumes - list - optional
## Allowed volume types
#
volumes:
- configMap
- downwardAPI
- emptyDir
- hostPath
- secret
## @param seccompProfiles - list - optional
## Allowed seccomp profiles
#
seccompProfiles:
- "runtime/default"
- "localhost/system-probe"
## @param apparmorProfiles - list - optional
## Allowed apparmor profiles
#
apparmorProfiles:
- "runtime/default"
containers:
agent:
## @param env - list - required
## Additional environment variables for the agent container.
#
env: []
## @param logLevel - string - optional
## Set logging verbosity, valid log levels are:
## trace, debug, info, warn, error, critical, and off.
## If not set, fall back to the value of datadog.logLevel.
#
logLevel: # INFO
## @param resources - object - required
## Resource requests and limits for the agent container.
#
resources: {}
# requests:
# cpu: 200m
# memory: 256Mi
# limits:
# cpu: 200m
# memory: 256Mi
## @param livenessProbe - object - required
## Override the agent's liveness probe logic from the default:
## In case of issues with the probe, you can disable it with the
## following values, to allow easier investigating:
#
livenessProbe:
httpGet:
path: /live
port: 5555
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
## @param readinessProbe - object - required
## Override the agent's readiness probe logic from the default:
## In case of issues with the probe, you can disable it with the
## following values, to allow easier investigating:
#
readinessProbe:
httpGet:
path: /ready
port: 5555
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
processAgent:
## @param env - list - required
## Additional environment variables for the process-agent container.
#
env: []
## @param logLevel - string - optional
## Set logging verbosity, valid log levels are:
## trace, debug, info, warn, error, critical, and off.
## If not set, fall back to the value of datadog.logLevel.
#
logLevel: # INFO
## @param resources - object - required
## Resource requests and limits for the process-agent container.
#
resources: {}
# requests:
# cpu: 100m
# memory: 200Mi
# limits:
# cpu: 100m
# memory: 200Mi
traceAgent:
## @param env - list - required
## Additional environment variables for the trace-agent container.
#
env:
## @param logLevel - string - optional
## Set logging verbosity, valid log levels are:
## trace, debug, info, warn, error, critical, and off.
## If not set, fall back to the value of datadog.logLevel.
#
logLevel: # INFO
## @param resources - object - required
## Resource requests and limits for the trace-agent container.
#
resources: {}
# requests:
# cpu: 100m
# memory: 200Mi
# limits:
# cpu: 100m
# memory: 200Mi
## @param livenessProbe - object - optional
## Override the trace agent's liveness probe logic from the default:
## In case of issues with the probe, you can disable it with the
## following values, to allow easier investigating:
#
livenessProbe:
tcpSocket:
port: 8126
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
systemProbe:
## @param env - list - required
## Additional environment variables for the system-probe container.
#
env: []
## @param logLevel - string - optional
## Set logging verbosity, valid log levels are:
## trace, debug, info, warn, error, critical, and off.
## If not set, fall back to the value of datadog.logLevel.
#
logLevel: # INFO
## @param resources - object - required
## Resource requests and limits for the system-probe container.
#
resources: {}
# requests:
# cpu: 100m
# memory: 200Mi
# limits:
# cpu: 100m
# memory: 200Mi
initContainers:
## @param resources - object - required
## Resource requests and limits for the init containers.
#
resources: {}
# requests:
# cpu: 100m
# memory: 200Mi
# limits:
# cpu: 100m
# memory: 200Mi
## @param volumes - list of objects - optional
## Specify additional volumes to mount in the dd-agent container
#
volumes: []
# - hostPath:
# path: <HOST_PATH>
# name: <VOLUME_NAME>
## @param volumeMounts - list of objects - optional
## Specify additional volumeMounts for the dd-agent container
#
volumeMounts: []
# - name: <VOLUME_NAME>
# mountPath: <CONTAINER_PATH>
# readOnly: true
## @param useHostNetwork - boolean - optional
## Bind ports on the hostNetwork. Useful for CNI networking where hostPort might
## not be supported. The ports need to be available on all hosts. It can be
## used for custom metrics instead of a service endpoint.
##
## WARNING: Make sure that hosts using this are properly firewalled otherwise
## metrics and traces are accepted from any host able to connect to this host.
#
useHostNetwork: false
## @param dnsConfig - object - optional
## Specify DNS configuration options for the Datadog Agent containers, e.g. ndots
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
dnsConfig: {}
# options:
# - name: ndots
# value: "1"
## @param podAnnotations - list of key:value strings - optional
## Annotations to add to the DaemonSet's Pods
#
podAnnotations: {}
# <POD_ANNOTATION>: '[{"key": "<KEY>", "value": "<VALUE>"}]'
## @param tolerations - array - optional
## Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6)
#
tolerations: []
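## For example (illustrative), tolerate the standard control-plane taint so the
## Agent is also scheduled on master nodes:
# - key: node-role.kubernetes.io/master
# effect: NoSchedule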
## @param nodeSelector - object - optional
## Allow the DaemonSet to schedule on selected nodes
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
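## For example (illustrative placeholder label):
# <NODE_LABEL_KEY>: <NODE_LABEL_VALUE>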
## @param affinity - object - optional
## Allow the DaemonSet to schedule using affinity rules
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
#
affinity: {}
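## For example (illustrative), a nodeAffinity rule restricting Agent Pods to nodes
## carrying a given label (placeholder key/value):
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: <NODE_LABEL_KEY>
# operator: In
# values:
# - <NODE_LABEL_VALUE>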
## @param updateStrategy - object - optional
## Allow the DaemonSet to perform a rolling update on helm update
## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
#
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: "10%"
## @param priorityClassName - string - optional
## Sets PriorityClassName if defined.
#
priorityClassName:
## @param podLabels - object - optional
## Sets podLabels if defined.
#
podLabels: {}
## @param useConfigMap - boolean - optional
## Configures a configmap to provide the agent configuration
## Use this in combination with the `agents.customAgentConfig` parameter.
#
useConfigMap: # false
## @param customAgentConfig - object - optional
## Specify custom contents for the datadog agent config (datadog.yaml).
## ref: https://docs.datadoghq.com/agent/guide/agent-configuration-files/?tab=agentv6
## ref: https://github.com/DataDog/datadog-agent/blob/master/pkg/config/config_template.yaml
## Note the `agents.useConfigMap` needs to be set to `true` for this parameter to be taken into account.
#
customAgentConfig: {}
# # Autodiscovery for Kubernetes
# listeners:
# - name: kubelet
# config_providers:
# - name: kubelet
# polling: true
# # needed to support legacy docker label config templates
# - name: docker
# polling: true
#
# # Enable APM by setting the DD_APM_ENABLED envvar to true, or override this configuration
# apm_config:
# enabled: false
# apm_non_local_traffic: true
#
# # Enable java cgroup handling. Only one of those options should be enabled,
# # depending on the agent version you are using along that chart.
#
# # agent version < 6.15
# # jmx_use_cgroup_memory_limit: true
#
# # agent version >= 6.15
# # jmx_use_container_support: true
clusterChecksRunner:
## @param enabled - boolean - required
## If true, deploys an agent dedicated to running the Cluster Checks instead of running them in the DaemonSet's agents.
## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/
#
enabled: false
## @param image - object - required
## Define the Datadog image to work with.
#
image:
## @param repository - string - required
## Define the repository to use:
## use "datadog/agent" for Datadog Agent 7
## use "datadog/dogstatsd" for Standalone Datadog Agent DogStatsD 7
#
repository: datadog/agent
## @param tag - string - required
## Define the Agent version to use.
## Use 7-jmx to enable jmx fetch collection
#
tag: 7.20.2
## @param pullPolicy - string - required
## The Kubernetes pull policy.
#
pullPolicy: IfNotPresent
## @param pullSecrets - list of key:value strings - optional
## It is possible to specify docker registry credentials
## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
#
pullSecrets: []
# - name: "<REG_SECRET>"
## @param createPodDisruptionBudget - boolean - optional
## Specify the pod disruption budget to apply to the cluster checks agents
#
createPodDisruptionBudget: false
## @param rbac - object - required
## Provide Cluster Checks Deployment pods RBAC configuration
rbac:
## @param create - boolean - required
## If true, create & use RBAC resources
#
create: true
## @param dedicated - boolean - required
## If true, use a dedicated RBAC resource for the cluster checks agent(s)
#
dedicated: false
## @param serviceAccountAnnotations - object - required
## Annotations to add to the ServiceAccount if clusterChecksRunner.rbac.dedicated is true
#
serviceAccountAnnotations: {}
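## For example (illustrative), the annotation used by EKS IAM roles for service accounts:
# eks.amazonaws.com/role-arn: arn:aws:iam::<ACCOUNT_ID>:role/<ROLE_NAME>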
## @param serviceAccountName - string - required
## Ignored if clusterChecksRunner.rbac.create is true
#
serviceAccountName: default
## @param replicas - integer - required
## To deploy the cluster checks agent in HA, keep clusterChecksRunner.replicas set to at least 2,
## and increase clusterChecksRunner.replicas according to the number of Cluster Checks.
#
replicas: 2
## @param resources - object - required
## Datadog clusterchecks-agent resource requests and limits.
#
resources: {}
# requests:
# cpu: 200m
# memory: 500Mi
# limits:
# cpu: 200m
# memory: 500Mi
## @param affinity - object - optional
## Allow the ClusterChecks Deployment to schedule using affinity rules.
## By default, ClusterChecks Deployment Pods are forced to run on different Nodes.
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
#
affinity: {}
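## For example (illustrative), a podAntiAffinity rule spreading the runners across nodes;
## the label selector below is an assumed placeholder, match it to your Pods' actual labels:
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchLabels:
# app: <CLUSTER_CHECKS_RUNNER_POD_LABEL>
# topologyKey: kubernetes.io/hostname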
## @param strategy - object - optional
## Allow the ClusterChecks deployment to perform a rolling update on helm update
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
#
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
## @param dnsConfig - object - optional
## Specify DNS configuration options for the cluster checks runner containers, e.g. ndots
## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
dnsConfig: {}
# options:
# - name: ndots
# value: "1"
## @param nodeSelector - object - optional
## Allow the ClusterChecks Deployment to schedule on selected nodes
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## @param tolerations - array - required
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
#
tolerations: []
## @param livenessProbe - object - required
## In case of issues with the probe, you can disable it with the
## following values, to allow easier investigating:
#
# livenessProbe:
# exec:
# command: ["/bin/true"]
#
livenessProbe:
httpGet:
path: /live
port: 5555
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
## @param readinessProbe - object - required
## In case of issues with the probe, you can disable it with the
## following values, to allow easier investigating:
#
# readinessProbe:
# exec:
# command: ["/bin/true"]
#
readinessProbe:
httpGet:
path: /ready
port: 5555
initialDelaySeconds: 15
periodSeconds: 15
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
## @param env - list of object - optional
## The dd-agent supports many environment variables
## ref: https://github.com/DataDog/datadog-agent/tree/master/Dockerfiles/agent#environment-variables
#
env: []
# - name: <ENV_VAR_NAME>
# value: <ENV_VAR_VALUE>
## @param volumes - list of objects - optional
## Specify additional volumes to mount in the cluster checks container
#
volumes: []
# - hostPath:
# path: <HOST_PATH>
# name: <VOLUME_NAME>
## @param volumeMounts - list of objects - optional
## Specify additional volume mounts for the cluster checks container
#
volumeMounts: []
# - name: <VOLUME_NAME>
# mountPath: <CONTAINER_PATH>
# readOnly: true
kube-state-metrics:
rbac:
## @param create - boolean - required
## If true, create & use RBAC resources
#
create: true
serviceAccount:
## @param create - boolean - required
## If true, create a ServiceAccount; requires kube-state-metrics.rbac.create to be true
#
create: true
## @param name - string - required
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the fullname template
#
name:
## @param resources - object - optional
## Resource requests and limits for the kube-state-metrics container.
#
resources: {}
# requests:
# cpu: 200m
# memory: 256Mi
# limits:
# cpu: 200m
# memory: 256Mi
|
gitlab-ce | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"gitlab-ce.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"gitlab-ce.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified postgresql name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"gitlab-ce.postgresql.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"postgresql\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified redis name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"gitlab-ce.redis.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"redis\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"gitlab-ce.fullname\" . }}\n labels:\n app: {{ template \"gitlab-ce.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ndata:\n ## This is used by GitLab Omnibus as the primary means of configuration.\n ## ref: https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-config-template/gitlab.rb.template\n ##\n gitlab_omnibus_config: |\n external_url ENV['EXTERNAL_URL'];\n root_pass = ENV['GITLAB_ROOT_PASSWORD'];\n gitlab_rails['initial_root_password'] = root_pass unless root_pass.to_s == '';\n postgresql['enable'] = false;\n gitlab_rails['db_host'] = ENV['DB_HOST'];\n gitlab_rails['db_password'] = ENV['DB_PASSWORD'];\n gitlab_rails['db_username'] = ENV['DB_USER'];\n gitlab_rails['db_database'] = ENV['DB_DATABASE'];\n redis['enable'] = false;\n gitlab_rails['redis_host'] = ENV['REDIS_HOST'];\n gitlab_rails['redis_password'] = ENV['REDIS_PASSWORD'];\n unicorn['worker_processes'] = 2;\n manage_accounts['enable'] = true;\n manage_storage_directories['manage_etc'] = false;\n gitlab_shell['auth_file'] = '/gitlab-data/ssh/authorized_keys';\n git_data_dir '/gitlab-data/git-data';\n gitlab_rails['shared_path'] = '/gitlab-data/shared';\n gitlab_rails['uploads_directory'] = '/gitlab-data/uploads';\n gitlab_ci['builds_directory'] = '/gitlab-data/builds';\n",
"# data-pvc.yaml\n{{- if .Values.persistence.gitlabData.enabled }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"gitlab-ce.fullname\" . }}-data\n annotations:\n {{- if .Values.persistence.gitlabData.storageClass }}\n volume.beta.kubernetes.io/storage-class: {{ .Values.persistence.gitlabData.storageClass | quote }}\n {{- else }}\n volume.alpha.kubernetes.io/storage-class: default\n {{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.gitlabData.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.gitlabData.size | quote }}\n{{- end }}\n",
"# deployment.yaml\n{{- if default \"\" .Values.externalUrl }}\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"gitlab-ce.fullname\" . }}\n labels:\n app: {{ template \"gitlab-ce.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n replicas: 1\n template:\n metadata:\n labels:\n app: {{ template \"gitlab-ce.fullname\" . }}\n spec:\n containers:\n - name: {{ template \"gitlab-ce.fullname\" . }}\n image: {{ .Values.image }}\n imagePullPolicy: {{ default \"\" .Values.imagePullPolicy | quote }}\n env:\n ## General GitLab Configs\n ##\n # This is a free-form env var that GitLab Omnibus uses to configure\n # everything. We're passing this in from a configmap and pulling some\n # of the values from the env vars defined below. This is done to\n # avoid leaving secrets visible in kubectl.\n - name: GITLAB_OMNIBUS_CONFIG\n valueFrom:\n configMapKeyRef:\n name: {{ template \"gitlab-ce.fullname\" . }}\n key: gitlab_omnibus_config\n - name: GITLAB_ROOT_PASSWORD\n {{- if default \"\" .Values.gitlabRootPassword }}\n valueFrom:\n secretKeyRef:\n name: {{ template \"gitlab-ce.fullname\" . }}\n key: gitlab-root-password\n {{ end }}\n - name: EXTERNAL_URL\n value: {{ default \"\" .Values.externalUrl | quote }}\n ## DB configuration\n ##\n - name: DB_HOST\n value: {{ template \"gitlab-ce.postgresql.fullname\" . }}\n - name: DB_USER\n valueFrom:\n secretKeyRef:\n name: {{ template \"gitlab-ce.fullname\" . }}\n key: db-user\n - name: DB_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"gitlab-ce.fullname\" . }}\n key: db-password\n - name: DB_DATABASE\n value: {{ .Values.postgresql.postgresDatabase | quote }}\n ## Redis configuration\n ##\n - name: REDIS_HOST\n value: {{ template \"gitlab-ce.redis.fullname\" . }}\n - name: REDIS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"gitlab-ce.fullname\" . }}\n key: redis-password\n ports:\n - name: ssh\n containerPort: 22\n - name: http\n containerPort: 80\n - name: https\n containerPort: 443\n livenessProbe:\n httpGet:\n path: /help\n port: {{ .Values.livenessPort }}\n # This pod takes a very long time to start up. Be cautious when\n # lowering this value to avoid Pod death during startup.\n initialDelaySeconds: 200\n timeoutSeconds: 1\n periodSeconds: 10\n successThreshold: 1\n failureThreshold: 10\n readinessProbe:\n httpGet:\n path: /help\n port: {{ .Values.readinessPort }}\n initialDelaySeconds: 30\n timeoutSeconds: 1\n periodSeconds: 10\n successThreshold: 1\n failureThreshold: 3\n volumeMounts:\n - name: gitlab-etc\n mountPath: /etc/gitlab\n - name: gitlab-data\n mountPath: /gitlab-data\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumes:\n - name: gitlab-etc\n {{- if .Values.persistence.gitlabEtc.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"gitlab-ce.fullname\" . }}-etc\n {{- else }}\n emptyDir: {}\n {{- end }}\n - name: gitlab-data\n {{- if .Values.persistence.gitlabData.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"gitlab-ce.fullname\" . }}-data\n {{- else }}\n emptyDir: {}\n {{- end }}\n{{ else }}\n{{ end }}\n",
"# etc-pvc.yaml\n{{- if .Values.persistence.gitlabEtc.enabled }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"gitlab-ce.fullname\" . }}-etc\n annotations:\n {{- if .Values.persistence.gitlabEtc.storageClass }}\n volume.beta.kubernetes.io/storage-class: {{ .Values.persistence.gitlabEtc.storageClass | quote }}\n {{- else }}\n volume.alpha.kubernetes.io/storage-class: default\n {{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.gitlabEtc.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.gitlabEtc.size | quote }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n{{- if .Values.ingress.annotations }}\n annotations:\n{{ toYaml .Values.ingress.annotations | indent 4 }}\n{{- end }}\n name: {{ template \"gitlab-ce.fullname\" . }}\n labels:\n app: {{ template \"gitlab-ce.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n rules:\n - host: {{ .Values.ingress.url | quote }}\n {{- if (not (empty .Values.ingress.tls)) }}\n https:\n paths:\n - path: /\n backend:\n serviceName: {{ template \"gitlab-ce.fullname\" . }}\n servicePort: {{ .Values.httpsPort }}\n {{- else }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ template \"gitlab-ce.fullname\" . }}\n servicePort: {{ .Values.httpPort }}\n {{- end }}\n\n{{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n{{- end -}}\n{{- end }}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"gitlab-ce.fullname\" . }}\n labels:\n app: {{ template \"gitlab-ce.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n {{- if default \"\" .Values.gitlabRootPassword }}\n # Defaulting to a non-sensical value to silence b64enc warning. We'll never\n # actually use this default due to the if statement.\n gitlab-root-password: {{ default \"ignore\" .Values.gitlabRootPassword | b64enc | quote }}\n {{ end }}\n db-user: {{ .Values.postgresql.postgresUser | b64enc | quote }}\n db-password: {{ .Values.postgresql.postgresPassword | b64enc | quote }}\n redis-password: {{ .Values.redis.redisPassword | b64enc | quote }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"gitlab-ce.fullname\" . }}\n labels:\n app: {{ template \"gitlab-ce.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.serviceType }}\n ports:\n - name: ssh\n port: {{ .Values.sshPort | int }}\n targetPort: ssh\n - name: http\n port: {{ .Values.httpPort | int }}\n targetPort: http\n - name: https\n port: {{ .Values.httpsPort | int }}\n targetPort: https\n selector:\n app: {{ template \"gitlab-ce.fullname\" . }}\n"
] | ## GitLab CE image
## ref: https://hub.docker.com/r/gitlab/gitlab-ce/tags/
##
image: gitlab/gitlab-ce:9.4.1-ce.0
## Specify an imagePullPolicy
## 'Always' if imageTag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
# imagePullPolicy:
## The URL (with protocol) that your users will use to reach the install.
## ref: https://docs.gitlab.com/omnibus/settings/configuration.html#configuring-the-external-url-for-gitlab
##
# externalUrl: http://your-domain.com/
## Change the initial default admin password if set. If not set, you'll be
## able to set it when you first visit your install.
##
# gitlabRootPassword: ""
## For minikube, set this to NodePort, elsewhere use LoadBalancer
## ref: http://kubernetes.io/docs/user-guide/services/#publishing-services---service-types
##
serviceType: LoadBalancer
## Ingress configuration options
##
ingress:
annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
enabled: false
tls:
# - secretName: gitlab.cluster.local
# hosts:
# - gitlab.cluster.local
url: gitlab.cluster.local
## Configure external service ports
## ref: http://kubernetes.io/docs/user-guide/services/
sshPort: 22
httpPort: 80
httpsPort: 443
## livenessPort Port of liveness probe endpoint
livenessPort: http
## readinessPort Port of readiness probe endpoint
readinessPort: http
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
## GitLab requires a good deal of resources. We have split out Postgres and
## redis, which helps some. Refer to the guidelines for larger installs.
## ref: https://docs.gitlab.com/ce/install/requirements.html#hardware-requirements
requests:
memory: 1Gi
cpu: 500m
limits:
memory: 2Gi
cpu: 1
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
## ref: https://docs.gitlab.com/ce/install/requirements.html#storage
##
persistence:
## This volume persists generated configuration files, keys, and certs.
##
gitlabEtc:
enabled: true
size: 1Gi
## If defined, volume.beta.kubernetes.io/storage-class: <storageClass>
## Default: volume.alpha.kubernetes.io/storage-class: default
##
# storageClass:
accessMode: ReadWriteOnce
## This volume is used to store git data and other project files.
## ref: https://docs.gitlab.com/omnibus/settings/configuration.html#storing-git-data-in-an-alternative-directory
##
gitlabData:
enabled: true
size: 10Gi
## If defined, volume.beta.kubernetes.io/storage-class: <storageClass>
## Default: volume.alpha.kubernetes.io/storage-class: default
##
# storageClass:
accessMode: ReadWriteOnce
## Configuration values for the postgresql dependency.
## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md
##
postgresql:
# 9.6 is the newest supported version for the GitLab container
imageTag: "9.6"
cpu: 1000m
memory: 1Gi
postgresUser: gitlab
postgresPassword: gitlab
postgresDatabase: gitlab
persistence:
size: 10Gi
## Configuration values for the redis dependency.
## ref: https://github.com/kubernetes/charts/blob/master/stable/redis/README.md
##
redis:
redisPassword: "gitlab"
resources:
requests:
memory: 1Gi
persistence:
size: 10Gi
|
logstash | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"logstash.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"logstash.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"logstash.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"logstash.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"logstash.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# files-config.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"logstash.fullname\" . }}-files\n labels:\n app: {{ template \"logstash.name\" . }}\n chart: {{ template \"logstash.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n{{- range $key, $value := .Values.files }}\n {{ $key }}: |-\n{{ $value | indent 4 }}\n{{- end }}\nbinaryData:\n {{- range $key, $value := .Values.binaryFiles }}\n {{ $key }}: |-\n{{ $value | indent 4 }}\n {{- end }}\n ",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"logstash.fullname\" . -}}\n{{- $ingressPath := .Values.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app: {{ template \"logstash.name\" . }}\n chart: {{ template \"logstash.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: http\n {{- end }}\n{{- end }}\n",
"# patterns-config.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"logstash.fullname\" . }}-patterns\n labels:\n app: {{ template \"logstash.name\" . }}\n chart: {{ template \"logstash.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n{{- range $key, $value := .Values.patterns }}\n {{ $key }}: |-\n{{ $value | indent 4 }}\n{{- end }}\n",
"# pipeline-config.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"logstash.fullname\" . }}-pipeline\n labels:\n app: {{ template \"logstash.name\" . }}\n chart: {{ template \"logstash.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n{{- range $key, $value := .Values.inputs }}\n input_{{ $key }}.conf: |-\n{{ $value | indent 4 }}\n{{- end }}\n\n{{- range $key, $value := .Values.filters }}\n filter_{{ $key }}.conf: |-\n{{ $value | indent 4 }}\n{{- end }}\n\n{{- range $key, $value := .Values.outputs }}\n output_{{ $key }}.conf: |-\n{{ $value | indent 4 }}\n{{- end }}\n",
"# poddisruptionbudget.yaml\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"logstash.fullname\" . }}\n labels:\n app: {{ template \"logstash.name\" . }}\n chart: {{ template \"logstash.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"logstash.name\" . }}\n release: {{ .Release.Name }}\n{{ toYaml .Values.podDisruptionBudget | indent 2 }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"logstash.fullname\" . }}\n labels:\n app: {{ template \"logstash.name\" . }}\n chart: {{ template \"logstash.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n annotations:\n{{- with .Values.service.annotations }}\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n{{- if .Values.service.externalTrafficPolicy }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}\n{{- end }}\n ports:\n {{- range $key, $value := .Values.service.ports }}\n - name: {{ $key }}\n{{ toYaml $value | indent 6 }}\n {{- end }}\n selector:\n app: {{ template \"logstash.name\" . }}\n release: {{ .Release.Name }}\n{{- if eq .Values.service.type \"LoadBalancer\" }}\n{{- if .Values.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n{{- end }}\n{{- if .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n{{- range $cidr := .Values.service.loadBalancerSourceRanges }}\n - {{ $cidr }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- if .Values.service.clusterIP }}\n clusterIP: {{ .Values.service.clusterIP }}\n{{- end }}\n{{ if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePort))) }}\n nodePort: {{ .Values.service.nodePort }}\n{{- end }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"logstash.serviceAccountName\" . }}\n labels:\n app: {{ template \"logstash.name\" . }}\n chart: {{ template \"logstash.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- end -}}\n",
"# servicemonitor.yaml\n{{- if .Values.exporter.serviceMonitor.enabled }}\n---\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ template \"logstash.fullname\" . }}\n {{- if .Values.exporter.serviceMonitor.namespace }}\n namespace: {{ .Values.exporter.serviceMonitor.namespace }}\n {{- end }}\n labels:\n chart: {{ template \"logstash.chart\" . }}\n app: {{ template \"logstash.name\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n {{- if .Values.exporter.serviceMonitor.labels }}\n {{- toYaml .Values.exporter.serviceMonitor.labels | nindent 4 }}\n {{- end }}\nspec:\n endpoints:\n - interval: {{ .Values.exporter.serviceMonitor.interval }}\n {{- if .Values.exporter.serviceMonitor.scrapeTimeout }}\n scrapeTimeout: {{ .Values.exporter.serviceMonitor.scrapeTimeout }}\n {{- end }}\n honorLabels: true\n targetPort: {{ .Values.exporter.logstash.port }}\n path: {{ .Values.exporter.logstash.path }}\n scheme: {{ .Values.exporter.serviceMonitor.scheme }}\n jobLabel: \"{{ .Release.Name }}\"\n selector:\n matchLabels:\n app: {{ template \"logstash.name\" . }}\n release: \"{{ .Release.Name }}\"\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n{{- end }}",
"# statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ template \"logstash.fullname\" . }}\n labels:\n app: {{ template \"logstash.name\" . }}\n chart: {{ template \"logstash.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n serviceName: {{ template \"logstash.fullname\" . }}\n replicas: {{ .Values.replicaCount }}\n podManagementPolicy: {{ .Values.podManagementPolicy }}\n selector:\n matchLabels:\n app: {{ template \"logstash.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"logstash.name\" . }}\n release: {{ .Release.Name }}\n {{- if .Values.podLabels }}\n ## Custom pod labels\n {{- range $key, $value := .Values.podLabels }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\n annotations:\n checksum/patterns: {{ include (print $.Template.BasePath \"/patterns-config.yaml\") . | sha256sum }}\n checksum/templates: {{ include (print $.Template.BasePath \"/files-config.yaml\") . | sha256sum }}\n checksum/pipeline: {{ include (print $.Template.BasePath \"/pipeline-config.yaml\") . | sha256sum }}\n {{- if .Values.podAnnotations }}\n ## Custom pod annotations\n {{- range $key, $value := .Values.podAnnotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\n spec:\n {{- if .Values.priorityClassName }}\n priorityClassName: \"{{ .Values.priorityClassName }}\"\n {{- end }}\n securityContext:\n runAsUser: {{ .Values.securityContext.runAsUser }}\n fsGroup: {{ .Values.securityContext.fsGroup }}\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.image.pullSecrets | indent 8 }}\n {{- end }}\n initContainers:\n{{- if .Values.extraInitContainers }}\n{{ toYaml .Values.extraInitContainers | indent 8 }}\n{{- end }}\n containers:\n{{- if .Values.extraContainers }}\n{{ toYaml .Values.extraContainers | indent 8 }}\n{{- end }}\n ## logstash\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: monitor\n containerPort: {{ .Values.exporter.logstash.target.port }}\n protocol: TCP\n{{ toYaml .Values.ports | indent 12 }}\n livenessProbe:\n{{ toYaml .Values.livenessProbe | indent 12 }}\n readinessProbe:\n{{ toYaml .Values.readinessProbe | indent 12 }}\n {{- if .Values.args }}\n args:\n{{ .Values.args | toYaml | indent 12 }}\n {{- end }}\n env:\n ## Logstash monitoring API host and port env vars\n - name: HTTP_HOST\n value: \"0.0.0.0\"\n - name: HTTP_PORT\n value: {{ .Values.exporter.logstash.target.port | quote }}\n ## Elasticsearch output\n - name: ELASTICSEARCH_HOST\n value: {{ tpl (.Values.elasticsearch.host | toString) $ | quote }}\n - name: ELASTICSEARCH_PORT\n value: {{ .Values.elasticsearch.port | quote }}\n # Logstash Java Options\n - name: LS_JAVA_OPTS\n value: {{ .Values.logstashJavaOpts }}\n ## Additional env vars\n {{- range $key, $value := .Values.config }}\n - name: {{ $key | upper | replace \".\" \"_\" }}\n value: {{ $value | quote }}\n {{- end }}\n {{- if .Values.extraEnv }}\n{{ .Values.extraEnv | toYaml | indent 12 }}\n {{- end }}\n {{- if .Values.envFrom }}\n envFrom:\n{{ toYaml .Values.envFrom | indent 12 }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumeMounts:\n{{ toYaml .Values.volumeMounts | indent 12 }}\n\n{{- if .Values.exporter.logstash.enabled }}\n ## logstash-exporter\n - name: {{ .Chart.Name }}-exporter\n image: \"{{ .Values.exporter.logstash.image.repository }}:{{ 
.Values.exporter.logstash.image.tag }}\"\n imagePullPolicy: {{ .Values.exporter.logstash.image.pullPolicy }}\n command: [\"/bin/sh\", \"-c\"]\n ## Delay start of logstash-exporter to give logstash more time to come online.\n args:\n - >-\n sleep 60;\n exec /logstash_exporter\n --logstash.endpoint=http://localhost:{{ .Values.exporter.logstash.target.port }}\n --web.listen-address=:{{ .Values.exporter.logstash.port }}\n ports:\n - name: ls-exporter\n containerPort: {{ .Values.exporter.logstash.port }}\n protocol: TCP\n livenessProbe:\n{{ toYaml .Values.exporter.logstash.livenessProbe | indent 12 }}\n readinessProbe:\n{{ toYaml .Values.exporter.logstash.readinessProbe | indent 12 }}\n {{- with .Values.exporter.logstash.config }}\n env:\n {{- range $key, $value := . }}\n - name: {{ $key | upper | replace \".\" \"_\" }}\n value: {{ $value | quote }}\n {{- end }}\n {{- end }}\n resources:\n{{ toYaml .Values.exporter.logstash.resources | indent 12 }}\n{{- end }}\n\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n serviceAccountName: {{ template \"logstash.serviceAccountName\" . }}\n terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}\n volumes:\n - name: patterns\n configMap:\n name: {{ template \"logstash.fullname\" . }}-patterns\n - name: files\n configMap:\n name: {{ template \"logstash.fullname\" . }}-files\n - name: pipeline\n configMap:\n name: {{ template \"logstash.fullname\" . }}-pipeline\n {{- with .Values.volumes }}\n{{ toYaml . | indent 8 }}\n {{- end }}\n{{- if not .Values.persistence.enabled }}\n - name: data\n emptyDir: {}\n{{- else }}\n volumeClaimTemplates:\n - metadata:\n name: data\n spec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{- if .Values.persistence.storageClass }}\n {{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n {{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n {{- end }}\n {{- end }}\n{{- end }}\n"
] | replicaCount: 1
podDisruptionBudget:
maxUnavailable: 1
updateStrategy:
type: RollingUpdate
terminationGracePeriodSeconds: 30
image:
repository: docker.elastic.co/logstash/logstash-oss
tag: 7.1.1
pullPolicy: IfNotPresent
## Add secrets manually via kubectl on kubernetes cluster and reference here
# pullSecrets:
# - name: "myKubernetesSecret"
service:
type: ClusterIP
# clusterIP: None
# nodePort:
# Set this to Local to preserve the client source IP. The default strips out the source IP
# externalTrafficPolicy: Local
annotations: {}
## AWS example for use with LoadBalancer service type.
# external-dns.alpha.kubernetes.io/hostname: logstash.cluster.local
# service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
# service.beta.kubernetes.io/aws-load-balancer-internal: "true"
ports:
# syslog-udp:
# port: 1514
# targetPort: syslog-udp
# protocol: UDP
# syslog-tcp:
# port: 1514
# targetPort: syslog-tcp
# protocol: TCP
beats:
port: 5044
targetPort: beats
protocol: TCP
# http:
# port: 8080
# targetPort: http
# protocol: TCP
# loadBalancerIP: 10.0.0.1
# loadBalancerSourceRanges:
# - 192.168.0.1
ports:
# - name: syslog-udp
# containerPort: 1514
# protocol: UDP
# - name: syslog-tcp
# containerPort: 1514
# protocol: TCP
- name: beats
containerPort: 5044
protocol: TCP
# - name: http
# containerPort: 8080
# protocol: TCP
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- logstash.cluster.local
tls: []
# - secretName: logstash-tls
# hosts:
# - logstash.cluster.local
# Set Java options such as heap size
logstashJavaOpts: "-Xmx1g -Xms1g"
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
priorityClassName: ""
nodeSelector: {}
tolerations: []
securityContext:
fsGroup: 1000
runAsUser: 1000
affinity: {}
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# release: logstash
podAnnotations: {}
# iam.amazonaws.com/role: "logstash-role"
# prometheus.io/scrape: "true"
# prometheus.io/path: "/metrics"
# prometheus.io/port: "9198"
podLabels: {}
# team: "developers"
# service: "logstash"
extraEnv: []
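# For example, extra environment variables can be added as name/value pairs
# (placeholders below, not real variables expected by the image):
# extraEnv:
#   - name: <ENV_VAR_NAME>
#     value: <ENV_VAR_VALUE>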
envFrom: []
# - configMapRef:
# name: configMap-name
# - secretRef:
# name: secret-name
extraInitContainers: []
# - name: echo
# image: busybox
# imagePullPolicy: Always
# args:
# - echo
# - hello
podManagementPolicy: OrderedReady
# can be OrderedReady or Parallel
livenessProbe:
httpGet:
path: /
port: monitor
initialDelaySeconds: 20
# periodSeconds: 30
# timeoutSeconds: 30
# failureThreshold: 6
# successThreshold: 1
readinessProbe:
httpGet:
path: /
port: monitor
initialDelaySeconds: 20
# periodSeconds: 30
# timeoutSeconds: 30
# failureThreshold: 6
# successThreshold: 1
persistence:
enabled: true
## logstash data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 2Gi
volumeMounts:
- name: data
mountPath: /usr/share/logstash/data
- name: patterns
mountPath: /usr/share/logstash/patterns
- name: files
mountPath: /usr/share/logstash/files
- name: pipeline
mountPath: /usr/share/logstash/pipeline
volumes: []
# - name: tls
# secret:
# secretName: logstash-tls
# - name: pipeline
# configMap:
# name: logstash-pipeline
# - name: certs
# hostPath:
# path: /tmp
exporter:
logstash:
enabled: false
image:
repository: bonniernews/logstash_exporter
tag: v0.1.2
pullPolicy: IfNotPresent
env: {}
resources: {}
path: /metrics
port: 9198
target:
port: 9600
path: /metrics
livenessProbe:
httpGet:
path: /metrics
port: ls-exporter
periodSeconds: 15
timeoutSeconds: 60
failureThreshold: 8
successThreshold: 1
readinessProbe:
httpGet:
path: /metrics
port: ls-exporter
periodSeconds: 15
timeoutSeconds: 60
failureThreshold: 8
successThreshold: 1
serviceMonitor:
## If true, a ServiceMonitor resource is created for use with the Prometheus Operator
## https://github.com/coreos/prometheus-operator
##
enabled: false
# namespace: monitoring
labels: {}
interval: 10s
scrapeTimeout: 10s
scheme: http
port: metrics
elasticsearch:
host: elasticsearch-client.default.svc.cluster.local
port: 9200
## ref: https://github.com/elastic/logstash-docker/blob/master/build/logstash/env2yaml/env2yaml.go
config:
config.reload.automatic: "true"
path.config: /usr/share/logstash/pipeline
path.data: /usr/share/logstash/data
## ref: https://www.elastic.co/guide/en/logstash/current/persistent-queues.html
queue.checkpoint.writes: 1
queue.drain: "true"
queue.max_bytes: 1gb # disk capacity must be greater than the value of `queue.max_bytes`
queue.type: persisted
## Patterns for filters.
## Each YAML heredoc will become a separate pattern file.
patterns:
# main: |-
# TESTING {"foo":.*}$
## Custom files that can be referenced by plugins.
## Each YAML heredoc will be placed in the logstash home directory under
## the files subdirectory.
files:
# logstash-template.json: |-
# {
# "order": 0,
# "version": 1,
# "index_patterns": [
# "logstash-*"
# ],
# "settings": {
# "index": {
# "refresh_interval": "5s"
# }
# },
# "mappings": {
# "doc": {
# "_meta": {
# "version": "1.0.0"
# },
# "enabled": false
# }
# },
# "aliases": {}
# }
## Custom binary files, encoded as base64 strings, that can be referenced by plugins.
## Each base64-encoded string is decoded and mounted as a file in the logstash home
## directory under the files subdirectory.
binaryFiles: {}
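# For example, a binary GeoIP database mounted for the geoip filter
# (file name illustrative; the value must be the base64-encoded file content):
# binaryFiles:
#   GeoLite2-City.mmdb: "<BASE64_ENCODED_CONTENT>"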
## NOTE: To achieve multiple pipelines with this chart, current best practice
## is to maintain one pipeline per chart release. In this way configuration is
## simplified and pipelines are more isolated from one another.
inputs:
main: |-
input {
# udp {
# port => 1514
# type => syslog
# }
# tcp {
# port => 1514
# type => syslog
# }
beats {
port => 5044
}
# http {
# port => 8080
# }
# kafka {
# ## ref: https://www.elastic.co/guide/en/logstash/current/plugins-inputs-kafka.html
# bootstrap_servers => "kafka-input:9092"
# codec => json { charset => "UTF-8" }
# consumer_threads => 1
# topics => ["source"]
# type => "example"
# }
}
filters:
# main: |-
# filter {
# }
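# A slightly fuller illustrative sketch, parsing syslog-style messages with grok
# and normalising the timestamp (field names and patterns are examples only):
# main: |-
#   filter {
#     grok {
#       match => { "message" => "%{SYSLOGLINE}" }
#     }
#     date {
#       match => [ "timestamp", "MMM dd HH:mm:ss", "MMM  d HH:mm:ss" ]
#     }
#   }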
outputs:
main: |-
output {
# stdout { codec => rubydebug }
elasticsearch {
hosts => ["${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}"]
manage_template => false
index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
}
# kafka {
# ## ref: https://www.elastic.co/guide/en/logstash/current/plugins-outputs-kafka.html
# bootstrap_servers => "kafka-output:9092"
# codec => json { charset => "UTF-8" }
# compression_type => "lz4"
# topic_id => "destination"
# }
}
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
## Additional arguments to pass to the Logstash entrypoint
# args:
# - fizz
|
velero | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"velero.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"velero.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"velero.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use for creating or deleting the velero server\n*/}}\n{{- define \"velero.serverServiceAccount\" -}}\n{{- if .Values.serviceAccount.server.create -}}\n {{ default (printf \"%s-%s\" (include \"velero.fullname\" .) \"server\") .Values.serviceAccount.server.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.server.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name for the credentials secret.\n*/}}\n{{- define \"velero.secretName\" -}}\n{{- if .Values.credentials.existingSecret -}}\n {{- .Values.credentials.existingSecret -}}\n{{- else -}}\n {{- include \"velero.fullname\" . -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the Velero priority class name.\n*/}}\n{{- define \"velero.priorityClassName\" -}}\n{{- if .Values.priorityClassName -}}\n {{- .Values.priorityClassName -}}\n{{- else -}}\n {{- include \"velero.fullname\" . -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the Restic priority class name.\n*/}}\n{{- define \"velero.restic.priorityClassName\" -}}\n{{- if .Values.restic.priorityClassName -}}\n {{- .Values.restic.priorityClassName -}}\n{{- else -}}\n {{- include \"velero.fullname\" . -}}\n{{- end -}}\n{{- end -}}\n",
"# backupstoragelocation.yaml\napiVersion: velero.io/v1\nkind: BackupStorageLocation\nmetadata:\n name: default\n labels:\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" . }}\nspec:\n{{- with .Values.configuration }}\n{{- with .backupStorageLocation }}\n provider: {{ .name }}\n objectStorage:\n bucket: {{ .bucket }}\n {{- with .prefix }}\n prefix: {{ . }}\n {{- end }}\n{{- with .config }}\n config:\n {{- with .region }}\n region: {{ . }}\n {{- end }}\n {{- with .s3ForcePathStyle }}\n s3ForcePathStyle: {{ . | quote }}\n {{- end }}\n {{- with .s3Url }}\n s3Url: {{ . }}\n {{- end }}\n {{- with .kmsKeyId }}\n kmsKeyId: {{ . }}\n {{- end }}\n {{- with .resourceGroup }}\n resourceGroup: {{ . }}\n {{- end }}\n {{- with .storageAccount }}\n storageAccount: {{ . }}\n {{- end }}\n {{- if .publicUrl }}\n {{- with .publicUrl }}\n publicUrl: {{ . }}\n {{- end }}\n {{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# cleanup-crds.yaml\n# This job is meant primarily for cleaning up on CI systems.\n# Using this on production systems, especially those that have multiple releases of Velero, will be destructive.\n{{- if .Values.cleanUpCRDs }}\napiVersion: batch/v1\nkind: Job\nmetadata:\n name: {{ template \"velero.fullname\" . }}-cleanup\n namespace: {{ .Release.Namespace }}\n annotations:\n \"helm.sh/hook\": pre-delete\n \"helm.sh/hook-weight\": \"3\"\n \"helm.sh/hook-delete-policy\": hook-succeeded\n labels:\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" . }}\nspec:\n template:\n metadata:\n name: velero-cleanup\n spec:\n serviceAccountName: {{ include \"velero.serverServiceAccount\" . }}\n containers:\n - name: kubectl\n image: docker.io/bitnami/kubectl:1.14.1\n imagePullPolicy: IfNotPresent\n command:\n - /bin/sh\n - -c\n - >\n kubectl delete restore --all;\n kubectl delete backup --all;\n kubectl delete backupstoragelocation --all;\n kubectl delete volumesnapshotlocation --all;\n kubectl delete podvolumerestore --all;\n kubectl delete crd -l helm.sh/chart={{ include \"velero.chart\" . }}\n restartPolicy: OnFailure\n{{- end }}\n",
"# configmaps.yaml\n{{- range $configMapName, $configMap := .Values.configMaps }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"velero.fullname\" $ }}-{{ $configMapName }}\n labels:\n app.kubernetes.io/name: {{ include \"velero.name\" $ }}\n app.kubernetes.io/instance: {{ $.Release.Name }}\n app.kubernetes.io/managed-by: {{ $.Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" $ }}\n {{- with $configMap.labels }}\n {{- toYaml . | nindent 4 }}\n {{- end }}\ndata:\n {{- toYaml $configMap.data | nindent 2 }}\n---\n{{- end }}\n",
"# crds.yaml\n{{- range $path, $bytes := .Files.Glob \"crds/*.yaml\" }}\n{{ $.Files.Get $path }}\n---\n{{- end }}\n\n",
"# deployment.yaml\n{{- if .Values.configuration.provider -}}\n{{- $provider := .Values.configuration.provider -}}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"velero.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" . }}\nspec:\n replicas: 1\n selector:\n matchLabels:\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" . }}\n {{- if or .Values.podAnnotations .Values.metrics.enabled }}\n annotations:\n {{- with .Values.podAnnotations }}\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.metrics.podAnnotations }}\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- end }}\n spec:\n restartPolicy: Always\n serviceAccountName: {{ include \"velero.serverServiceAccount\" . }}\n {{- if .Values.priorityClassName }}\n priorityClassName: {{ include \"velero.priorityClassName\" . }}\n {{- end }}\n containers:\n - name: velero\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n {{- if .Values.metrics.enabled }}\n ports:\n - name: monitoring\n containerPort: 8085\n {{- end }}\n command:\n - /velero\n args:\n - server\n {{- with .Values.configuration }}\n {{- with .backupSyncPeriod }}\n - --backup-sync-period={{ . }}\n {{- end }}\n {{- with .resticTimeout }}\n - --restic-timeout={{ . }}\n {{- end }}\n {{- if .restoreOnlyMode }}\n - --restore-only\n {{- end }}\n {{- with .restoreResourcePriorities }}\n - --restore-resource-priorities={{ . }}\n {{- end }}\n {{- with .logLevel }}\n - --log-level={{ . }}\n {{- end }}\n {{- with .logFormat }}\n - --log-format={{ . }}\n {{- end }}\n {{- end }}\n {{- if eq $provider \"azure\" }}\n envFrom:\n - secretRef:\n name: {{ include \"velero.secretName\" . }}\n {{- end }}\n {{- with .Values.resources }}\n resources:\n {{- toYaml . | nindent 12 }}\n {{- end }}\n volumeMounts:\n - name: plugins\n mountPath: /plugins\n {{- if .Values.credentials.useSecret }}\n - name: cloud-credentials\n mountPath: /credentials\n - name: scratch\n mountPath: /scratch\n {{- end }}\n {{- if .Values.extraVolumeMounts }}\n {{- toYaml .Values.extraVolumeMounts | nindent 12 }}\n {{- end }}\n env:\n - name: VELERO_SCRATCH_DIR\n value: /scratch\n - name: VELERO_NAMESPACE\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.namespace\n {{- if and .Values.credentials.useSecret (or (eq $provider \"aws\") (or (eq $provider \"gcp\") (eq $provider \"azure\"))) }}\n {{- if eq $provider \"aws\" }}\n - name: AWS_SHARED_CREDENTIALS_FILE\n {{- else if eq $provider \"gcp\"}}\n - name: GOOGLE_APPLICATION_CREDENTIALS\n {{- else }}\n - name: AZURE_CREDENTIALS_FILE\n {{- end }}\n value: /credentials/cloud\n {{- end }}\n {{- with .Values.configuration.extraEnvVars }}\n {{- range $key, $value := . 
}}\n - name: {{ default \"none\" $key }}\n value: {{ default \"none\" $value }}\n {{- end }}\n {{- end }}\n{{- if .Values.initContainers }}\n initContainers:\n {{- toYaml .Values.initContainers | nindent 8 }}\n{{- end }}\n volumes:\n {{- if .Values.credentials.useSecret }}\n - name: cloud-credentials\n secret:\n secretName: {{ include \"velero.secretName\" . }}\n {{- end }}\n - name: plugins\n emptyDir: {}\n - name: scratch\n emptyDir: {}\n {{- if .Values.extraVolumes }}\n {{- toYaml .Values.extraVolumes | nindent 8 }}\n {{- end }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n{{- end -}}\n",
"# rbac.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ include \"velero.fullname\" . }}-server\n labels:\n app.kubernetes.io/component: server\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" . }}\nsubjects:\n - kind: ServiceAccount\n namespace: {{ .Release.Namespace }}\n name: {{ include \"velero.serverServiceAccount\" . }}\nroleRef:\n kind: ClusterRole\n name: cluster-admin\n apiGroup: rbac.authorization.k8s.io\n{{- end }}\n",
"# restic-daemonset.yaml\n{{- if .Values.deployRestic }}\n{{- $provider := .Values.configuration.provider -}}\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: restic\n labels:\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" . }}\nspec:\n selector:\n matchLabels:\n name: restic\n template:\n metadata:\n labels:\n name: restic\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" . }}\n {{- with .Values.podAnnotations }}\n annotations:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n spec:\n {{- if .Values.serviceAccount.server.create }}\n serviceAccountName: {{ include \"velero.serverServiceAccount\" . }}\n {{- end }}\n securityContext:\n runAsUser: 0\n {{- if .Values.restic.priorityClassName }}\n priorityClassName: {{ include \"velero.restic.priorityClassName\" . }}\n {{- end }}\n volumes:\n {{- if and .Values.credentials.useSecret (or (eq $provider \"aws\") (eq $provider \"gcp\")) }}\n - name: cloud-credentials\n secret:\n secretName: {{ include \"velero.secretName\" . }}\n {{- end }}\n - name: host-pods\n hostPath:\n path: {{ .Values.restic.podVolumePath }}\n - name: scratch\n emptyDir: {}\n {{- if .Values.restic.extraVolumes }}\n {{- toYaml .Values.restic.extraVolumes | nindent 8 }}\n {{- end }}\n containers:\n - name: velero\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command:\n - /velero\n args:\n - restic\n - server\n volumeMounts:\n {{- if and .Values.credentials.useSecret (or (eq $provider \"aws\") (eq $provider \"gcp\")) }}\n - name: cloud-credentials\n mountPath: /credentials\n {{- end }}\n - name: host-pods\n mountPath: /host_pods\n mountPropagation: HostToContainer\n - name: scratch\n mountPath: /scratch\n {{- if .Values.restic.extraVolumeMounts }}\n {{- toYaml .Values.restic.extraVolumeMounts | nindent 12 }}\n {{- end }}\n {{- if and .Values.credentials.useSecret (eq $provider \"azure\") }}\n envFrom:\n - secretRef:\n name: {{ include \"velero.secretName\" . }}\n {{- end }}\n env:\n - name: VELERO_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: VELERO_SCRATCH_DIR\n value: /scratch\n {{- if and .Values.credentials.useSecret (or (eq $provider \"aws\") (eq $provider \"gcp\")) }}\n {{- if eq $provider \"aws\" }}\n - name: AWS_SHARED_CREDENTIALS_FILE\n value: /credentials/cloud\n {{- else }}\n - name: GOOGLE_APPLICATION_CREDENTIALS\n value: /credentials/cloud\n {{- end }}\n {{- end }}\n {{- if eq $provider \"minio\" }}\n - name: AWS_SHARED_CREDENTIALS_FILE\n value: /credentials/cloud\n {{- end }}\n securityContext:\n privileged: {{ .Values.restic.privileged }}\n {{- with .Values.restic.resources }}\n resources:\n {{- toYaml . | nindent 12 }}\n {{- end }}\n {{- with .Values.restic.tolerations }}\n tolerations:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n{{- end }}\n",
"# schedule.yaml\n{{- range $scheduleName, $schedule := .Values.schedules }}\napiVersion: velero.io/v1\nkind: Schedule\nmetadata:\n name: {{ include \"velero.fullname\" $ }}-{{ $scheduleName }}\n labels:\n app.kubernetes.io/name: {{ include \"velero.name\" $ }}\n app.kubernetes.io/instance: {{ $.Release.Name }}\n app.kubernetes.io/managed-by: {{ $.Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" $ }}\nspec:\n schedule: {{ $schedule.schedule | quote }}\n{{- with $schedule.template }}\n template:\n {{- toYaml . | nindent 4 }}\n{{- end }}\n---\n{{- end }}\n",
"# secret.yaml\n{{- if and .Values.credentials.useSecret (not .Values.credentials.existingSecret) -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ include \"velero.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" . }}\ntype: Opaque\ndata:\n{{- range $key, $value := .Values.credentials.secretContents }}\n {{ $key }}: {{ $value | b64enc | quote }}\n{{- end }}\n{{- end -}}\n",
"# service.yaml\n{{- if .Values.metrics.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"velero.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" . }}\nspec:\n type: ClusterIP\n ports:\n - name: monitoring\n port: 8085\n targetPort: monitoring\n selector:\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n{{- end }}\n",
"# serviceaccount-server.yaml\n{{- if .Values.serviceAccount.server.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ include \"velero.serverServiceAccount\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" . }}\n{{- end }}\n",
"# servicemonitor.yaml\n{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ include \"velero.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" . }}\n {{- with .Values.metrics.serviceMonitor.additionalLabels }}\n {{- toYaml . | nindent 4 }}\n {{- end }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n endpoints:\n - port: monitoring\n interval: {{ .Values.metrics.scrapeInterval }}\n{{- end }}\n",
"# volumesnapshotlocation.yaml\n{{- if .Values.snapshotsEnabled }}\napiVersion: velero.io/v1\nkind: VolumeSnapshotLocation\nmetadata:\n name: default\n labels:\n app.kubernetes.io/name: {{ include \"velero.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"velero.chart\" . }}\nspec:\n{{- with .Values.configuration }}\n{{- with .volumeSnapshotLocation }}\n provider: {{ .name }}\n{{ with .config }}\n config:\n {{- with .region }}\n region: {{ . }}\n {{- end }}\n {{- with .apitimeout }}\n apiTimeout: {{ . }}\n {{- end }}\n {{- with .resourceGroup }}\n resourceGroup: {{ . }}\n {{- end }}\n {{- with .snapshotLocation }}\n snapshotLocation: {{ . }}\n {{- end}}\n {{- with .project }}\n project: {{ . }}\n {{- end}}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}"
] | ##
## Configuration settings that directly affect the Velero deployment YAML.
##
# Details of the container image to use in the Velero deployment & daemonset (if
# enabling restic). Required.
image:
repository: velero/velero
tag: v1.2.0
pullPolicy: IfNotPresent
# Annotations to add to the Velero deployment's pod template. Optional.
#
# If using kube2iam or kiam, use the following annotation with your AWS_ACCOUNT_ID
# and VELERO_ROLE_NAME filled in:
# iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<VELERO_ROLE_NAME>
podAnnotations: {}
# Resource requests/limits to specify for the Velero deployment. Optional.
resources: {}
# Init containers to add to the Velero deployment's pod spec. At least one plugin provider image is required.
initContainers: []
# - name: velero-plugin-for-aws
# image: velero/velero-plugin-for-aws:v1.0.0
# imagePullPolicy: IfNotPresent
# volumeMounts:
# - mountPath: /target
# name: plugins
# Tolerations to use for the Velero deployment. Optional.
tolerations: []
# Node selector to use for the Velero deployment. Optional.
nodeSelector: {}
# Extra volumes for the Velero deployment. Optional.
extraVolumes: []
# Extra volumeMounts for the Velero deployment. Optional.
extraVolumeMounts: []
# Settings for Velero's prometheus metrics. Enabled by default.
metrics:
enabled: true
scrapeInterval: 30s
# Pod annotations for Prometheus
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8085"
prometheus.io/path: "/metrics"
serviceMonitor:
enabled: false
additionalLabels: {}
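# For example, if your Prometheus Operator only discovers ServiceMonitors
# carrying a specific label (the label below is illustrative):
# additionalLabels:
#   release: prometheus-operator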
##
## End of deployment-related settings.
##
##
## Parameters for the `default` BackupStorageLocation and VolumeSnapshotLocation,
## and additional server settings.
##
configuration:
# Cloud provider being used (e.g. aws, azure, gcp).
provider:
# Parameters for the `default` BackupStorageLocation. See
# https://velero.io/docs/v1.0.0/api-types/backupstoragelocation/
backupStorageLocation:
# Cloud provider where backups should be stored. Usually should
# match `configuration.provider`. Required.
name:
# Bucket to store backups in. Required.
bucket:
# Prefix within bucket under which to store backups. Optional.
prefix:
# Additional provider-specific configuration. See link above
# for details of required/optional fields for your provider.
config: {}
# region:
# s3ForcePathStyle:
# s3Url:
# kmsKeyId:
# resourceGroup:
# storageAccount:
# publicUrl:
# Parameters for the `default` VolumeSnapshotLocation. See
# https://velero.io/docs/v1.0.0/api-types/volumesnapshotlocation/
volumeSnapshotLocation:
# Cloud provider where volume snapshots are being taken. Usually
# should match `configuration.provider`. Required.
name:
# Additional provider-specific configuration. See link above
# for details of required/optional fields for your provider.
config: {}
# region:
# apitimeout:
# resourceGroup:
# snapshotLocation:
# project:
# These are server-level settings passed as CLI flags to the `velero server` command. Velero
# uses default values if they're not passed in, so they only need to be explicitly specified
# here if using a non-default value. The `velero server` default values are shown in the
# comments below.
# --------------------
# `velero server` default: 1m
backupSyncPeriod:
# `velero server` default: 1h
resticTimeout:
# `velero server` default: namespaces,persistentvolumes,persistentvolumeclaims,secrets,configmaps,serviceaccounts,limitranges,pods
restoreResourcePriorities:
# `velero server` default: false
restoreOnlyMode:
# Additional key/value pairs to be used as environment variables, e.g. "AWS_CLUSTER_NAME: 'yourcluster.domain.tld'"
extraEnvVars: {}
# Set log-level for Velero pod. Default: info. Other options: debug, warning, error, fatal, panic.
logLevel:
# Set log-format for Velero pod. Default: text. Other option: json.
logFormat:
##
## End of backup/snapshot location settings.
##
##
## Settings for additional Velero resources.
##
# Whether to create the Velero cluster role binding.
rbac:
create: true
# Information about the Kubernetes service account Velero uses.
serviceAccount:
server:
create: true
name:
# Info about the secret to be used by the Velero deployment, which
# should contain credentials for the cloud provider IAM account you've
# set up for Velero.
credentials:
# Whether a secret should be used as the source of IAM account
# credentials. Set to false if, for example, using kube2iam or
# kiam to provide IAM credentials for the Velero pod.
useSecret: true
# Name of a pre-existing secret (if any) in the Velero namespace
# that should be used to get IAM account credentials. Optional.
existingSecret:
# Data to be stored in the Velero secret, if `useSecret` is
# true and `existingSecret` is empty. This should be the contents
# of your IAM credentials file.
secretContents: {}
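# For example, for AWS this is typically a single `cloud` key holding the
# contents of an IAM credentials file (placeholders below, not real credentials):
# secretContents:
#   cloud: |
#     [default]
#     aws_access_key_id=<AWS_ACCESS_KEY_ID>
#     aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>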
# Whether to create the default VolumeSnapshotLocation; if false, the snapshot feature is disabled
snapshotsEnabled: true
# Whether to deploy the restic daemonset.
deployRestic: false
restic:
podVolumePath: /var/lib/kubelet/pods
privileged: false
# Pod priority class name to use for the Restic daemonset. Optional.
priorityClassName: {}
# Resource requests/limits to specify for the Restic daemonset. Optional.
resources: {}
# Tolerations to use for the Restic daemonset. Optional.
tolerations: []
# Extra volumes for the Restic daemonset. Optional.
extraVolumes: []
# Extra volumeMounts for the Restic daemonset. Optional.
extraVolumeMounts: []
# Backup schedules to create.
# Eg:
# schedules:
# mybackup:
# schedule: "0 0 * * *"
# template:
# ttl: "240h"
# includedNamespaces:
# - foo
schedules: {}
# Velero ConfigMaps.
# Eg:
# configMaps:
# restic-restore-action-config:
# labels:
# velero.io/plugin-config: ""
# velero.io/restic: RestoreItemAction
# data:
# image: gcr.io/heptio-images/velero-restic-restore-help
configMaps: {}
##
## End of additional Velero resource settings.
##
|
owncloud | [
"# _helpers.tpl\n\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"owncloud.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"owncloud.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"owncloud.mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nGet the user defined LoadBalancerIP for this release.\nNote, returns 127.0.0.1 if using ClusterIP.\n*/}}\n{{- define \"owncloud.serviceIP\" -}}\n{{- if eq .Values.service.type \"ClusterIP\" -}}\n127.0.0.1\n{{- else -}}\n{{- .Values.service.loadBalancerIP | default \"\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGets the host to be used for this application.\nIf not using ClusterIP, or if a host or LoadBalancerIP is not defined, the value will be empty.\n*/}}\n{{- define \"owncloud.host\" -}}\n{{- $host := index .Values (printf \"%sHost\" .Chart.Name) | default \"\" -}}\n{{- default (include \"owncloud.serviceIP\" .) $host -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"owncloud.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the proper Owncloud image name\n*/}}\n{{- define \"owncloud.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the metrics image)\n*/}}\n{{- define \"owncloud.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- 
else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"owncloud.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"owncloud.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.owncloud.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.owncloud.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.owncloud.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.owncloud.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.owncloud.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.owncloud.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"owncloud.deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\n{{- if include \"owncloud.host\" . -}}\napiVersion: {{ template \"owncloud.deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"owncloud.fullname\" . }}\n labels:\n app: {{ template \"owncloud.fullname\" . }}\n chart: {{ template \"owncloud.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"owncloud.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n replicas: 1\n{{- if .Values.updateStrategy }}\n strategy: {{ toYaml .Values.updateStrategy | nindent 4 }}\n{{- end }}\n template:\n metadata:\n labels:\n app: {{ template \"owncloud.fullname\" . }}\n chart: {{ template \"owncloud.chart\" . }}\n release: \"{{ .Release.Name }}\"\n{{- if or .Values.podAnnotations .Values.metrics.enabled }}\n annotations:\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n {{- if .Values.metrics.podAnnotations }}\n{{ toYaml .Values.metrics.podAnnotations | indent 8 }}\n {{- end }}\n{{- end }}\n spec:\n{{- include \"owncloud.imagePullSecrets\" . | indent 6 }}\n hostAliases:\n - ip: \"127.0.0.1\"\n hostnames:\n - \"status.localhost\"\n containers:\n - name: {{ template \"owncloud.fullname\" . }}\n image: {{ template \"owncloud.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n - name: ALLOW_EMPTY_PASSWORD\n value: {{ .Values.allowEmptyPassword | quote }}\n {{- if .Values.mariadb.enabled }}\n - name: MARIADB_HOST\n value: {{ template \"owncloud.mariadb.fullname\" . }}\n - name: MARIADB_PORT_NUMBER\n value: \"3306\"\n - name: OWNCLOUD_DATABASE_NAME\n value: {{ .Values.mariadb.db.name | quote }}\n - name: OWNCLOUD_DATABASE_USER\n value: {{ .Values.mariadb.db.user | quote }}\n - name: OWNCLOUD_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"owncloud.mariadb.fullname\" . }}\n key: mariadb-password\n {{- else }}\n - name: MARIADB_HOST\n value: {{ .Values.externalDatabase.host | quote }}\n - name: MARIADB_PORT_NUMBER\n value: {{ .Values.externalDatabase.port | quote }}\n - name: OWNCLOUD_DATABASE_NAME\n value: {{ .Values.externalDatabase.database | quote }}\n - name: OWNCLOUD_DATABASE_USER\n value: {{ .Values.externalDatabase.user | quote }}\n - name: OWNCLOUD_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n key: db-password\n {{- end }}\n{{- $port:=.Values.service.port | toString }}\n - name: OWNCLOUD_HOST\n value: \"{{ include \"owncloud.host\" . }}{{- if ne $port \"80\" }}:{{ .Values.service.port }}{{ end }}\"\n - name: OWNCLOUD_USERNAME\n value: {{ default \"\" .Values.owncloudUsername | quote }}\n - name: OWNCLOUD_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"owncloud.fullname\" . }}\n key: owncloud-password\n - name: OWNCLOUD_EMAIL\n value: {{ default \"\" .Values.owncloudEmail | quote }}\n ports:\n - name: http\n containerPort: 80\n livenessProbe:\n httpGet:\n path: /status.php\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"owncloud.host\" . | quote }}\n initialDelaySeconds: 120\n timeoutSeconds: 5\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /status.php\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"owncloud.host\" . 
| quote }}\n initialDelaySeconds: 30\n timeoutSeconds: 3\n periodSeconds: 5\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n - name: owncloud-data\n mountPath: /bitnami/owncloud\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"owncloud.metrics.image\" . }}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n command: [ '/bin/apache_exporter', '-scrape_uri', 'http://status.localhost:80/server-status/?auto']\n ports:\n - name: metrics\n containerPort: 9117\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 15\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 5\n timeoutSeconds: 1\n resources:\n {{ toYaml .Values.metrics.resources | indent 10 }}\n{{- end }}\n volumes:\n - name: owncloud-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ if .Values.persistence.owncloud.existingClaim }}{{ .Values.persistence.owncloud.existingClaim }}{{- else }}{{ template \"owncloud.fullname\" . }}-owncloud{{- end }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n{{- end -}}\n",
"# externaldb-secrets.yaml\n{{- if not .Values.mariadb.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n labels:\n app: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n db-password: {{ default \"\" .Values.externalDatabase.password | b64enc | quote }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"owncloud.fullname\" . }}\n labels:\n app: {{ template \"owncloud.fullname\" . }}\n chart: {{ template \"owncloud.chart\" . }}\n release: \"{{ $.Release.Name }}\"\n heritage: \"{{ $.Release.Service }}\"\n annotations:\n {{- range .Values.ingress.hosts }}\n {{- if .tls }}\n ingress.kubernetes.io/secure-backends: \"true\"\n {{- end }}\n {{- end }}\n {{- if .Values.ingress.certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: {{ template \"owncloud.fullname\" $ }}\n servicePort: 80\n {{- end }}\n tls:\n{{- range .Values.ingress.hosts }}\n - hosts:\n{{- if .tls }}\n - {{ .name }}\n secretName: {{ .tlsSecret }}\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# owncloud-pvc.yaml\n{{- if .Values.persistence.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"owncloud.fullname\" . }}-owncloud\nspec:\n accessModes:\n - {{ .Values.persistence.owncloud.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.owncloud.size | quote }}\n {{ include \"owncloud.storageClass\" . }}\n{{- end -}}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"owncloud.fullname\" . }}\n labels:\n app: {{ template \"owncloud.fullname\" . }}\n chart: {{ template \"owncloud.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n {{ if .Values.owncloudPassword }}\n owncloud-password: {{ .Values.owncloudPassword | b64enc | quote }}\n {{ else }}\n owncloud-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"owncloud.fullname\" . }}\n labels:\n app: {{ template \"owncloud.fullname\" . }}\n chart: {{ template \"owncloud.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.service.type }}\n {{- if (or (eq .Values.service.type \"LoadBalancer\") (eq .Values.service.type \"NodePort\")) }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}\n {{- end }}\n {{- if eq .Values.service.type \"LoadBalancer\" }}\n loadBalancerIP: {{ default \"\" .Values.service.loadBalancerIP }}\n {{- end }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n targetPort: http\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n selector:\n app: {{ template \"owncloud.fullname\" . }}\n"
] | ## Global Docker image parameters
## Please note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami ownCloud image version
## ref: https://hub.docker.com/r/bitnami/owncloud/tags/
##
image:
registry: docker.io
repository: bitnami/owncloud
tag: 10.4.0-debian-10-r0
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override owncloud.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override owncloud.fullname template
##
# fullnameOverride:
## For Kubernetes v1.4, v1.5 and v1.6, use 'extensions/v1beta1'
## For Kubernetes v1.7, use 'networking.k8s.io/v1'
networkPolicyApiVersion: extensions/v1beta1
## Configure the ingress resource that allows you to access the
## ownCloud installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- name: owncloud.local
## Set this to true in order to enable TLS on the ingress record
## A side effect of this will be that the backend owncloud service will be connected at port 443
tls: false
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: owncloud.local-tls
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: owncloud.local-tls
# key:
# certificate:
## ownCloud host to create application URLs
## ref: https://github.com/bitnami/bitnami-docker-owncloud#configuration
##
# owncloudHost:
## User of the application
## ref: https://github.com/bitnami/bitnami-docker-owncloud#configuration
##
owncloudUsername: user
## Application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/bitnami-docker-owncloud#configuration
##
# owncloudPassword:
## Admin email
## ref: https://github.com/bitnami/bitnami-docker-owncloud#configuration
##
owncloudEmail: [email protected]
## Set to `yes` to allow the container to be started with blank passwords
## ref: https://github.com/bitnami/bitnami-docker-owncloud#environment-variables
allowEmptyPassword: "yes"
##
## External database configuration
##
externalDatabase:
## Database host
host:
## Database port
port: 3306
## Database user
user: bn_owncloud
## Database password
password:
## Database name
database: bitnami_owncloud
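## For reference, a minimal sketch of pointing the chart at an external database from the CLI
## (Helm 3 syntax; the release name, host and password below are placeholders, not chart defaults):
## helm install my-release stable/owncloud \
##   --set mariadb.enabled=false \
##   --set externalDatabase.host=mydb.example.com \
##   --set externalDatabase.password=secretpassword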
##
## MariaDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml
##
mariadb:
## Whether to deploy a mariadb server to satisfy the application's database requirements. To use an external database, set this to false and configure the externalDatabase parameters
enabled: true
## Disable MariaDB replication
replication:
enabled: false
## Create a database and a database user
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run
##
db:
name: bitnami_owncloud
user: bn_owncloud
## If the password is not specified, mariadb will generate a random password
##
# password:
## MariaDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run
##
# rootUser:
# password:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
master:
persistence:
enabled: true
## mariadb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Kubernetes configuration
## For minikube, set this to NodePort, elsewhere use LoadBalancer
##
service:
type: LoadBalancer
# HTTP Port
port: 80
## loadBalancerIP:
##
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
## https: <to set explicitly, choose port between 30000-32767>
nodePorts:
http: ""
https: ""
## Enable client source IP preservation
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
owncloud:
## owncloud data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
accessMode: ReadWriteOnce
size: 8Gi
## Set up update strategy for the ownCloud installation.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
## Example:
# updateStrategy:
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 25%
# maxUnavailable: 25%
updateStrategy:
type: RollingUpdate
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 512Mi
cpu: 300m
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.7.0-debian-10-r39
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter pod Annotation and Labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9117"
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
|
stellar-core | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"stellar-core.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"stellar-core.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"stellar-core.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"stellar-core.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"stellar-core.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"stellar-core.postgresql.fullname\" -}}\n{{- if .Values.postgresql.fullnameOverride -}}\n{{- .Values.postgresql.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default \"postgresql\" .Values.postgresql.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"stellar-core.fullname\" . }}\n labels:\n app: {{ template \"stellar-core.name\" . }}\n chart: {{ template \"stellar-core.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: 1\n strategy:\n type: Recreate\n selector:\n matchLabels:\n app: {{ template \"stellar-core.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"stellar-core.name\" . }}\n release: {{ .Release.Name }}\n spec:\n serviceAccountName: \"{{ template \"stellar-core.serviceAccountName\" . }}\"\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}{{ with .Values.image.flavor }}-{{.}}{{ end }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: peer\n containerPort: 11625\n protocol: TCP\n - name: http\n containerPort: 11626\n protocol: TCP\n env:\n {{- with .Values.existingNodeSeedSecret }}\n - name: NODE_SEED\n valueFrom:\n secretKeyRef:\n name: {{ required \"name of existingNodeSeedSecret is required\" .name | quote }}\n key: {{ required \"key of existingNodeSeedSecret is required\" .key | quote }}\n {{- else }}\n - name: NODE_SEED\n valueFrom:\n secretKeyRef:\n name: {{ template \"stellar-core.fullname\" . }}\n key: nodeSeed\n {{- end }}\n {{- if .Values.postgresql.enabled }}\n - name: DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"stellar-core.postgresql.fullname\" . }}\n key: postgres-password\n - name: DATABASE\n value: postgresql://dbname={{ .Values.postgresql.postgresDatabase }} user={{ .Values.postgresql.postgresUser }} password=$(DATABASE_PASSWORD) host={{ template \"stellar-core.postgresql.fullname\" . }} connect_timeout={{ .Values.postgresqlConnectTimeout }}\n {{- else }}\n {{- with .Values.existingDatabase.passwordSecret }}\n - name: DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ .name | quote }}\n key: {{ .key | quote }}\n {{- end }}\n - name: DATABASE\n value: {{ .Values.existingDatabase.url }}\n {{- end }}\n{{- with .Values.knownPeers }}\n - name: KNOWN_PEERS\n value: \"{{ join \",\" .}}\"\n{{- end }}\n{{- with .Values.preferredPeers }}\n - name: PREFERRED_PEERS\n value: \"{{ join \",\" .}}\"\n{{- end }}\n{{- with .Values.nodeNames }}\n - name: NODE_NAMES\n value: \"{{range $index, $element := . }}{{ if gt $index 0 }},{{ end }}{{ $element.publicKey }} {{ $element.name }}{{ end }}\"\n{{- end }}\n{{- with .Values.quorumSet }}\n - name: QUORUM_SET\n value: {{ . | toJson | quote }}\n{{- end }}\n{{- with .Values.history }}\n - name: HISTORY\n value: {{ . | toJson | quote }}\n{{- end }}\n{{- with .Values.initializeHistoryArchives }}\n - name: INITIALIZE_HISTORY_ARCHIVES\n value: {{ . | quote }}\n{{- end }}\n{{- if .Values.gcloudServiceAccountKey }}\n - name: GCLOUD_SERVICE_ACCOUNT_KEY\n valueFrom:\n secretKeyRef:\n name: {{ template \"stellar-core.fullname\" . }}\n key: gcloudServiceAccountKey\n{{- end }}\n{{- with .Values.nodeIsValidator }}\n - name: NODE_IS_VALIDATOR\n value: {{ . | quote }}\n{{- end }}\n{{- with .Values.networkPassphrase }}\n - name: NETWORK_PASSPHRASE\n value: {{ . | quote }}\n{{- end }}\n{{- with .Values.catchupComplete }}\n - name: CATCHUP_COMPLETE\n value: {{ . | quote }}\n{{- end }}\n{{- with .Values.catchupRecent }}\n - name: CATCHUP_RECENT\n value: {{ . | quote }}\n{{- end }}\n{{- with .Values.maxPeerConnections }}\n - name: MAX_PEER_CONNECTIONS\n value: {{ . 
| quote }}\n{{- end }}\n{{- range $key, $val := .Values.environment }}\n - name: {{ $key }}\n value: {{ $val | quote }}\n{{- end }}\n livenessProbe:\n httpGet:\n path: /info\n port: http\n readinessProbe:\n httpGet:\n path: /info\n port: http\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumeMounts:\n - name: data\n mountPath: {{ .Values.persistence.mountPath }}\n subPath: {{ .Values.persistence.subPath }}\n volumes:\n - name: data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.existingClaim | default (include \"stellar-core.fullname\" .) }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"stellar-core.fullname\" . }}\n labels:\n app: {{ template \"stellar-core.name\" . }}\n chart: {{ template \"stellar-core.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.persistence.annotations }}\n annotations:\n{{ toYaml .Values.persistence.annotations | indent 4 }}\n{{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"stellar-core.fullname\" . }}\n labels:\n app: {{ template \"stellar-core.name\" . }}\n chart: {{ template \"stellar-core.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n{{- if not .Values.existingNodeSeedSecret }}\n nodeSeed: {{ required \"nodeSeed is required if existingNodeSeedSecret is not provided\" .Values.nodeSeed | b64enc }}\n{{- end }}\n{{- with .Values.gcloudServiceAccountKey }}\n gcloudServiceAccountKey: {{ . | b64enc }}\n{{- end }}\n",
"# service-http.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"stellar-core.fullname\" . }}-http\n labels:\n app: {{ template \"stellar-core.name\" . }}\n chart: {{ template \"stellar-core.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.httpService.type }}\n ports:\n - port: {{ .Values.httpService.port }}\n targetPort: http\n protocol: TCP\n name: http\n selector:\n app: {{ template \"stellar-core.name\" . }}\n release: {{ .Release.Name }}\n",
"# service-peer.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"stellar-core.fullname\" . }}-peer\n labels:\n app: {{ template \"stellar-core.name\" . }}\n chart: {{ template \"stellar-core.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.peerService.type }}\n ports:\n - port: {{ .Values.peerService.port }}\n targetPort: peer\n protocol: TCP\n name: peer\n {{- with .Values.peerService.loadBalancerIP }}\n loadBalancerIP: {{ . }}\n {{- end }}\n {{- with .Values.peerService.externalTrafficPolicy }}\n externalTrafficPolicy: {{ . }}\n {{- end }}\n selector:\n app: {{ template \"stellar-core.name\" . }}\n release: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"stellar-core.serviceAccountName\" . }}\n labels:\n app: {{ template \"stellar-core.name\" . }}\n chart: {{ template \"stellar-core.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- end -}}\n"
] | ## NOTE:
## You have to provide a node seed
## * either by specifying nodeSeed directly
## * or by specifying existingNodeSeedSecret that points to an existing secret
## You can generate a node seed by running the following command:
## docker run --rm -it --entrypoint '' satoshipay/stellar-core stellar-core --genseed
## WARNING: make sure to replace this in your configuration or use existingNodeSeedSecret
nodeSeed: SDUFQA7YL3KTWZNKOXX7XXIYU4R5R6JKELMREKHDQOYY2WPUGXFVJN52
# existingNodeSeedSecret:
# name: stellar-core
# key: nodeSeed
nodeIsValidator: true
networkPassphrase: Public Global Stellar Network ; September 2015
catchupComplete: false
catchupRecent: 0
maxPeerConnections: 50
knownPeers:
- stellar1.tempo.eu.com
- au.stellar.ibm.com
- br.stellar.ibm.com
- ca.stellar.ibm.com
- no.stellar.ibm.com
- it.stellar.ibm.com
- hk.stellar.ibm.com
- in.stellar.ibm.com
- uk.stellar.ibm.com
- us.stellar.ibm.com
- stellar0.keybase.io
- stellar1.satoshipay.io
- stellar2.satoshipay.io
- stellar3.satoshipay.io
- core-live-a.stellar.org
- core-live-b.stellar.org
- core-live-c.stellar.org
- ohio-1.stellar.stellarport.io
- ohio-2.stellar.stellarport.io
- validator1.stellar.stronghold.co
- validator2.stellar.stronghold.co
- validator3.stellar.stronghold.co
- validator4.stellar.stronghold.co
- validator5.stellar.stronghold.co
preferredPeers:
- stellar1.tempo.eu.com
- au.stellar.ibm.com
- br.stellar.ibm.com
- ca.stellar.ibm.com
- no.stellar.ibm.com
- it.stellar.ibm.com
- hk.stellar.ibm.com
- in.stellar.ibm.com
- uk.stellar.ibm.com
- us.stellar.ibm.com
- stellar0.keybase.io
- stellar1.satoshipay.io
- stellar2.satoshipay.io
- stellar3.satoshipay.io
- core-live-a.stellar.org
- core-live-b.stellar.org
- core-live-c.stellar.org
- validator1.stellar.stronghold.co
- validator2.stellar.stronghold.co
- validator3.stellar.stronghold.co
- validator4.stellar.stronghold.co
- validator5.stellar.stronghold.co
nodeNames:
- publicKey: GAOO3LWBC4XF6VWRP5ESJ6IBHAISVJMSBTALHOQM2EZG7Q477UWA6L7U
name: eno
- publicKey: GCKWUQGSVO45ZV3QK7POYL7HMFWDKWJVMFVEGUJKCAEVUITUCTQWFSM6
name: ibm_au
- publicKey: GBUJA3Z5TLAKLI5MEH4TETLXJBQVSVW74MNEKP5UUHTP3IMLNSUPOTVA
name: ibm_br
- publicKey: GB2HF2NHRKKFZYFDGD7MUENOYROOEK7SWYV2APYOODP6P7BUJTLILKIL
name: ibm_ca
- publicKey: GDRA72H7JWXAXWJKOONQOPH3JKNSH5MQ6BO5K74C3X6FO2G3OG464BPU
name: ibm_no
- publicKey: GAEEH4TBR7YQQWKJ2FIT57HXZZTMK2BX5LY4POJUYFSEZ7Y2ONHPPTES
name: ibm_it
- publicKey: GBJ7T3BTLX2BP3T5Q4256PUF7JMDAB35LLO32QRDYE67TDDMN7H33GGE
name: ibm_hk
- publicKey: GCH3O5PTCZVR4G65W3B4XDKWI5V677HQB3QO7CW4YPVYDDFBE2GE7G6V
name: ibm_in
- publicKey: GAENPO2XRTTMAJXDWM3E3GAALNLG4HVMKJ4QF525TR25RI42YPEDULOW
name: ibm_uk
- publicKey: GARBCBH4YSHUJLYEPKEPMVYZIJ3ZSQR3QCJ245CWGY64X72JLN4A6RSG
name: ibm_us
- publicKey: GCWJKM4EGTGJUVSWUJDPCQEOEP5LHSOFKSA4HALBTOO4T4H3HCHOM6UX
name: keybase0
- publicKey: GC5SXLNAM3C4NMGK2PXK4R34B5GNZ47FYQ24ZIBFDFOCU6D4KBN4POAE
name: satoshipay1
- publicKey: GBJQUIXUO4XSNPAUT6ODLZUJRV2NPXYASKUBY4G5MYP3M47PCVI55MNT
name: satoshipay2
- publicKey: GAK6Z5UVGUVSEK6PEOCAYJISTT5EJBB34PN3NOLEQG2SUKXRVV2F6HZY
name: satoshipay3
- publicKey: GCGB2S2KGYARPVIA37HYZXVRM2YZUEXA6S33ZU5BUDC6THSB62LZSTYH
name: sdf_watcher1
- publicKey: GCM6QMP3DLRPTAZW2UZPCPX2LF3SXWXKPMP3GKFZBDSF3QZGV2G5QSTK
name: sdf_watcher2
- publicKey: GABMKJM6I25XI4K7U6XWMULOUQIQ27BCTMLS6BYYSOWKTBUXVRJSXHYQ
name: sdf_watcher3
- publicKey: GBB32UXWEXGZUE7H7LUVNNZRT3ZMZ3YH7SP3V5EFBILUVL3NCTSSK3IZ
name: stellarport1
- publicKey: GC5A5WKAPZU5ASNMLNCAMLW7CVHMLJJAKHSZZHE2KWGAJHZ4EW6TQ7PB
name: stellarport2
- publicKey: GDIQKLQVOCD5UD6MUI5D5PTPVX7WTP5TAPP5OBMOLENBBD5KG434KYQ2
name: stronghold1
- publicKey: GA7MREQ7673YDVANF4WBPN7LBQM4BSH4BQUFUTC4YLSSQCQUQTXRVBZN
name: stronghold2
- publicKey: GDHV7FL7JP6LUEWWYUOA4C3QAFQ4LZ6OHFBWFIL6IFIS3AOBI2DHV62F
name: stronghold3
- publicKey: GBGDBLFKR3LORWOI65LVC7ES5OGZ4GHILEHCBVKPW2PMP2OL43F6B2JJ
name: stronghold4
- publicKey: GCBXBCIKCCVUIHAZ5QFWK6CKSX4AESOJ33IQNUE42BP7J66X23TM6WPF
name: stronghold5
- publicKey: GCJCSMSPIWKKPR7WEPIQG63PDF7JGGEENRC33OKVBSPUDIRL6ZZ5M7OO
name: tempo
- publicKey: GD7FVHL2KUTUYNOJFRUUDJPDRO2MAZJ5KP6EBCU6LKXHYGZDUFBNHXQI
name: umbrel
quorumSet:
- threshold_percent: 66
validators:
- $$eno
- $$keybase0
- $$tempo
- $$umbrel
- path: ibm
threshold_percent: 51
validators:
- $$ibm_au
- $$ibm_br
- $$ibm_ca
- $$ibm_no
- $$ibm_it
- $$ibm_hk
- $$ibm_in
- $$ibm_uk
- $$ibm_us
- path: satoshipay
threshold_percent: 51
validators:
- $$satoshipay1
- $$satoshipay2
- $$satoshipay3
- path: sdf
threshold_percent: 51
validators:
- $$sdf_watcher1
- $$sdf_watcher2
- $$sdf_watcher3
- path: stronghold
threshold_percent: 51
validators:
- $$stronghold1
- $$stronghold2
- $$stronghold3
- $$stronghold4
- $$stronghold5
history:
sdf1:
get: "curl -sf http://history.stellar.org/prd/core-live/core_live_001/{0} -o {1}"
sdf2:
get: "curl -sf http://history.stellar.org/prd/core-live/core_live_002/{0} -o {1}"
sdf3:
get: "curl -sf http://history.stellar.org/prd/core-live/core_live_003/{0} -o {1}"
initializeHistoryArchives: false
environment: {}
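## An illustrative shape for extra container environment variables (commented out); the variable
## name below is a hypothetical placeholder, not one interpreted by stellar-core:
# environment:
#   EXAMPLE_VARIABLE: "example-value"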
postgresql:
enabled: true
postgresDatabase: stellar-core
postgresUser: postgres
# options from https://github.com/helm/charts/tree/master/stable/postgresql
# postgresPassword:
postgresqlConnectTimeout: 5
## NOTE:
## existingDatabase is only used if postgresql.enabled is false
existingDatabase:
passwordSecret:
name: postgresql-core
key: password
## NOTE:
## $(DATABASE_PASSWORD) is automatically replaced with the value of the passwordSecret
# url: postgresql://dbname=stellar-core host=postgresql-core password=$(DATABASE_PASSWORD)
image:
repository: satoshipay/stellar-core
tag: '10.0.0-2'
# flavor: aws
# flavor: gcloud
pullPolicy: IfNotPresent
peerService:
type: LoadBalancer
port: 11625
# loadBalancerIP: 35.13.37.42
# externalTrafficPolicy: Local
httpService:
type: ClusterIP
port: 11626
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
## database data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
subPath: "stellar-core"
mountPath: /data
## Annotations for the persistent volume claim
# annotations:
resources:
requests:
cpu: 100m
memory: 512Mi
nodeSelector: {}
tolerations: []
affinity: {}
serviceAccount:
create: true
name:
|
phpmyadmin | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"phpmyadmin.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"phpmyadmin.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"phpmyadmin.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a fully qualified database name if the database is part of the same release than phpmyadmin.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"phpmyadmin.dbfullname\" -}}\n{{- printf \"%s-%s\" .Release.Name .Values.db.chartName | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the proper PHPMyAdmin image name\n*/}}\n{{- define \"phpmyadmin.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the metrics image)\n*/}}\n{{- define \"phpmyadmin.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the 
proper Docker Image Registry Secret Names\n*/}}\n{{- define \"phpmyadmin.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nRenders a value that contains template.\nUsage:\n{{ include \"phpmyadmin.tplValue\" ( dict \"value\" .Values.path.to.the.Value \"context\" $) }}\n*/}}\n{{- define \"phpmyadmin.tplValue\" -}}\n {{- if typeIs \"string\" .value }}\n {{- tpl .value .context }}\n {{- else }}\n {{- tpl (.value | toYaml) .context }}\n {{- end }}\n{{- end -}}\n\n{{/*\nCompile all warnings into a single message, and call fail.\n*/}}\n{{- define \"phpmyadmin.validateValues\" -}}\n{{- $messages := list -}}\n{{- $messages := append $messages (include \"phpmyadmin.validateValues.db.ssl\" .) -}}\n{{- $messages := without $messages \"\" -}}\n{{- $message := join \"\\n\" $messages -}}\n\n{{- if $message -}}\n{{- printf \"\\nVALUES VALIDATION:\\n%s\" $message | fail -}}\n{{- end -}}\n{{- end -}}\n\n{{/* Validate values of phpMyAdmin - must provide a valid database ssl configuration */}}\n{{- define \"phpmyadmin.validateValues.db.ssl\" -}}\n{{- if and .Values.db.enableSsl (empty .Values.db.ssl.clientKey) (empty .Values.db.ssl.clientCertificate) (empty .Values.db.ssl.caCertificate) -}}\nphpMyAdmin: db.ssl\n Invalid database ssl configuration. You enabled SSL for the connection\n between phpMyAdmin and the database but no key/certificates were provided\n (--set db.ssl.clientKey=\"xxxx\", --set db.ssl.clientCertificate=\"yyyy\")\n{{- end -}}\n{{- end -}}\n",
"# certs.yaml\n{{- if .Values.db.enableSsl }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"phpmyadmin.fullname\" . }}-certs\n labels:\n app: {{ template \"phpmyadmin.name\" . }}\n chart: {{ template \"phpmyadmin.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n {{- if not (empty .Values.db.ssl.clientKey) }}\n server_key.pem: {{ .Values.db.ssl.clientKey | b64enc | quote }}\n {{- end }}\n {{- if not (empty .Values.db.ssl.clientCertificate) }}\n server_certificate.pem: {{ .Values.db.ssl.clientCertificate | b64enc | quote }}\n {{- end }}\n {{- if not (empty .Values.db.ssl.caCertificate) }}\n ca_certificate.pem: {{ .Values.db.ssl.caCertificate | b64enc | quote }}\n {{- end }}\n{{- end }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"phpmyadmin.fullname\" . }}\n labels:\n app: {{ template \"phpmyadmin.name\" . }}\n chart: {{ template \"phpmyadmin.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: {{ template \"phpmyadmin.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"phpmyadmin.name\" . }}\n chart: {{ template \"phpmyadmin.chart\" . }}\n release: {{ .Release.Name }}\n {{- if .Values.podLabels }}\n{{ toYaml .Values.podLabels | indent 8 }}\n {{- end }}\n{{- if or .Values.podAnnotations .Values.metrics.enabled }}\n annotations:\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n {{- if .Values.metrics.podAnnotations }}\n{{ toYaml .Values.metrics.podAnnotations | indent 8 }}\n {{- end }}\n{{- end }}\n spec:\n{{- include \"phpmyadmin.imagePullSecrets\" . | indent 6 }}\n {{- with .Values.nodeSelector }}\n nodeSelector: {{ toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity: {{ toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations: {{ toYaml . | nindent 8 }}\n {{- end }}\n hostAliases:\n - ip: \"127.0.0.1\"\n hostnames:\n - \"status.localhost\"\n containers:\n - name: {{ .Chart.Name }}\n image: {{ template \"phpmyadmin.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n - name: DATABASE_PORT_NUMBER\n value: {{ .Values.db.port | quote }}\n {{- if .Values.db.chartName }}\n - name: DATABASE_HOST\n value: \"{{ template \"phpmyadmin.dbfullname\" . }}\"\n {{- else if .Values.db.bundleTestDB }}\n - name: DATABASE_HOST\n value: \"{{ template \"mariadb.fullname\" . }}\"\n {{- else }}\n - name: DATABASE_HOST\n value: {{ .Values.db.host | quote }}\n {{- end }}\n {{- if and (not .Values.db.chartName) (not .Values.db.host) }}\n - name: PHPMYADMIN_ALLOW_NO_PASSWORD\n value: \"true\"\n - name: PHPMYADMIN_ALLOW_ARBITRARY_SERVER\n value: \"true\"\n {{- else }}\n - name: PHPMYADMIN_ALLOW_NO_PASSWORD\n value: \"false\"\n {{- end }}\n - name: DATABASE_ENABLE_SSL\n value: {{ ternary \"yes\" \"no\" .Values.db.enableSsl | quote }}\n {{- if .Values.db.enableSsl }}\n {{- if not (empty .Values.db.ssl.clientKey) }}\n - name: DATABASE_SSL_KEY\n value: \"/db_certs/server_key.pem\"\n {{- end }}\n {{- if not (empty .Values.db.ssl.clientCertificate) }}\n - name: DATABASE_SSL_CERT\n value: \"/db_certs/server_certificate.pem\"\n {{- end }}\n {{- if not (empty .Values.db.ssl.caCertificate) }}\n - name: DATABASE_SSL_CA\n value: \"/db_certs/ca_certificate.pem\"\n {{- end }}\n {{- if .Values.db.ssl.ciphers }}\n - name: DATABASE_SSL_CIPHERS\n values: {{ .Values.db.ssl.ciphers | quote }}\n {{- end }}\n - name: DATABASE_SSL_VERIFY\n value: {{ ternary \"yes\" \"no\" .Values.db.ssl.verify | quote }}\n {{- end }}\n ports:\n - name: http\n containerPort: 80\n protocol: TCP\n - name: https\n containerPort: 443\n protocol: TCP\n {{- if .Values.probesEnabled }}\n livenessProbe:\n httpGet:\n path: /\n port: http\n readinessProbe:\n httpGet:\n path: /\n port: http\n {{- end }}\n {{- if .Values.resources }}\n resources: {{ toYaml .Values.resources | nindent 12 }}\n {{- end }}\n {{- if .Values.db.enableSsl }}\n volumeMounts:\n - name: ssl-certs\n mountPath: /db_certs\n {{- end }}\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"phpmyadmin.metrics.image\" . 
}}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n command: [ '/bin/apache_exporter', '-scrape_uri', 'http://status.localhost:80/server-status/?auto']\n ports:\n - name: metrics\n containerPort: 9117\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 15\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 5\n timeoutSeconds: 1\n {{- if .Values.metrics.resources }}\n resources: {{ toYaml .Values.metrics.resources | nindent 12 }}\n {{- end }}\n{{- end }}\n{{- if .Values.db.enableSsl }}\n volumes:\n - name: ssl-certs\n secret:\n secretName: {{ template \"phpmyadmin.fullname\" . }}-certs\n items:\n {{- if not (empty .Values.db.ssl.clientKey) }}\n - key: server_key.pem\n path: server_key.pem\n {{- end }}\n {{- if not (empty .Values.db.ssl.clientCertificate) }}\n - key: server_certificate.pem\n path: server_certificate.pem\n {{- end }}\n {{- if not (empty .Values.db.ssl.caCertificate) }}\n - key: ca_certificate.pem\n path: ca_certificate.pem\n {{- end }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"phpmyadmin.fullname\" . }}\n labels:\n app: {{ template \"phpmyadmin.name\" . }}\n chart: {{ template \"phpmyadmin.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n annotations:\n {{- if .Values.ingress.certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- if .Values.ingress.rewriteTarget }}\n ingress.kubernetes.io/rewrite-target: /\n nginx.ingress.kubernetes.io/rewrite-target: /\n {{- end }}\n {{- if .Values.ingress.annotations }}\n {{- include \"phpmyadmin.tplValue\" (dict \"value\" .Values.ingress.annotations \"context\" $) | nindent 4 }}\n {{- end }}\nspec:\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: {{ template \"phpmyadmin.fullname\" $ }}\n servicePort: http\n {{- end }}\n tls:\n {{- range .Values.ingress.hosts }}\n {{- if .tls }}\n - hosts:\n {{- if .tlsHosts }}\n {{- range $host := .tlsHosts }}\n - {{ $host }}\n {{- end }}\n {{- else }}\n - {{ .name }}\n {{- end }}\n secretName: {{ .tlsSecret }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"phpmyadmin.fullname\" . }}\n labels:\n app: {{ template \"phpmyadmin.name\" . }}\n chart: {{ template \"phpmyadmin.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: http\n protocol: TCP\n name: http\n selector:\n app: {{ template \"phpmyadmin.name\" . }}\n release: {{ .Release.Name }}\n"
] | ## Global Docker image parameters
## Please note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
## Bitnami phpMyAdmin image version
## ref: https://hub.docker.com/r/bitnami/phpmyadmin/tags/
##
image:
registry: docker.io
repository: bitnami/phpmyadmin
tag: 5.0.1-debian-10-r30
## Specify an imagePullPolicy
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override phpmyadmin.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override phpmyadmin.fullname template
##
# fullnameOverride:
## Kubernetes service configuration
## ref: https://github.com/bitnami/bitnami-docker-phpmyadmin#environment-variables
##
service:
type: ClusterIP
port: 80
db:
## Database port
##
port: 3306
## If you are deploying phpMyAdmin as part of a release and the database is part
## of the release, you can pass a suffix that will be used to find the database
## in releasename-dbSuffix. Please note that this setting takes precedence over db.host
##
# chartName: mariadb
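## For example (an illustrative assumption, not a chart default): with release name "my-release"
## and db.chartName=mariadb, phpMyAdmin would look for the database host "my-release-mariadb".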
## Database Hostname. Ignored when db.chartName is set.
##
# host: foo
## If you want to test phpMyAdmin, you can set it to bundle a MariaDB
## instance
##
bundleTestDB: false
## Whether to enable SSL for the connection between phpMyAdmin and the MySQL server
## (a commented example of the expected certificate shape follows the `verify` setting below)
##
enableSsl: false
ssl:
## Client key file when using SSL
##
clientKey: |-
## Client certificate file when using SSL
##
clientCertificate: |-
## CA file when using SSL
##
caCertificate: |-
## List of allowable ciphers for connections when using SSL
##
# ciphers:
## Enable SSL certificate validation
##
verify: true
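## An illustrative shape for the clientKey/clientCertificate/caCertificate values above
## (commented out; the PEM contents are placeholders):
# clientKey: |-
#   -----BEGIN RSA PRIVATE KEY-----
#   ...
#   -----END RSA PRIVATE KEY-----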
ingress:
## Set this to true to enable ingress record generation
##
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
##
certManager: false
## Set this to true in order to add the corresponding annotations to redirect traffic to /
##
rewriteTarget: true
## Additional Ingress annotations done as key:value pairs
## Example:
## annotations:
## kubernetes.io/ingress.class: nginx
## kubernetes.io/tls-acme: "true"
##
# annotations:
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
##
hosts:
- name: phpmyadmin.local
path: /
## Set this to true in order to enable TLS on the ingress record
tls: false
## Optionally specify the TLS hosts for the ingress record
## Useful when the Ingress controller supports www-redirection
## If not specified, the above host name will be used
# tlsHosts:
# - www.phpmyadmin.local
# - phpmyadmin.local
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: phpmyadmin.local-tls
## Enable liveness and readiness probes
probesEnabled: true
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases the chances that charts run on environments with
# limited resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Pod labels
podLabels: {}
## Pod annotations
podAnnotations: {}
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.7.0-debian-10-r29
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter pod Annotation and Labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9117"
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
|
ethereum | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"ethereum.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"ethereum.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"ethereum.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# bootnode.deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"ethereum.fullname\" . }}-bootnode\n labels:\n app: {{ template \"ethereum.name\" . }}\n chart: {{ template \"ethereum.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: bootnode\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: {{ template \"ethereum.name\" . }}\n release: {{ .Release.Name }}\n component: bootnode\n template:\n metadata:\n labels:\n app: {{ template \"ethereum.name\" . }}\n release: {{ .Release.Name }}\n component: bootnode\n spec:\n containers:\n - name: bootnode\n image: {{ .Values.bootnode.image.repository }}:{{ .Values.bootnode.image.tag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n command: [\"/bin/sh\"]\n args:\n - \"-c\"\n - \"bootnode --nodekey=/etc/bootnode/node.key --verbosity=4\"\n volumeMounts:\n - name: data\n mountPath: /etc/bootnode\n ports:\n - name: discovery\n containerPort: 30301\n protocol: UDP\n - name: bootnode-server\n image: {{ .Values.bootnode.image.repository }}:{{ .Values.bootnode.image.tag }}\n imagePullPolicy: {{.Values.imagePullPolicy}}\n command: [\"/bin/sh\"]\n args:\n - \"-c\"\n - \"while [ 1 ]; do echo -e \\\"HTTP/1.1 200 OK\\n\\nenode://$(bootnode -writeaddress --nodekey=/etc/bootnode/node.key)@$(POD_IP):30301\\\" | nc -l -v -p 80 || break; done;\"\n volumeMounts:\n - name: data\n mountPath: /etc/bootnode\n env:\n - name: POD_IP\n valueFrom:\n fieldRef:\n fieldPath: status.podIP\n ports:\n - containerPort: 80\n initContainers:\n - name: genkey\n image: {{ .Values.bootnode.image.repository }}:{{ .Values.bootnode.image.tag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n command: [\"/bin/sh\"]\n args:\n - \"-c\"\n - \"bootnode --genkey=/etc/bootnode/node.key\"\n volumeMounts:\n - name: data\n mountPath: /etc/bootnode\n volumes:\n - name: data\n emptyDir: {}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# bootnode.service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"ethereum.fullname\" . }}-bootnode\n labels:\n app: {{ template \"ethereum.name\" . }}\n chart: {{ template \"ethereum.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: bootnode\nspec:\n selector:\n app: {{ template \"ethereum.name\" . }}\n release: {{ .Release.Name }}\n component: bootnode\n clusterIP: None\n ports:\n - name: discovery\n port: 30301\n protocol: UDP\n - name: http\n port: 80",
"# ethstats.deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"ethereum.fullname\" . }}-ethstats\n labels:\n app: {{ template \"ethereum.name\" . }}\n chart: {{ template \"ethereum.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: ethstats\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: {{ template \"ethereum.name\" . }}\n release: {{ .Release.Name }}\n component: ethstats\n template:\n metadata:\n labels:\n app: {{ template \"ethereum.name\" . }}\n release: {{ .Release.Name }}\n component: ethstats\n spec:\n containers:\n - name: ethstats\n image: {{ .Values.ethstats.image.repository }}:{{ .Values.ethstats.image.tag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n ports:\n - name: http\n containerPort: 3000\n env:\n - name: WS_SECRET\n valueFrom:\n secretKeyRef:\n name: {{ template \"ethereum.fullname\" . }}-ethstats\n key: WS_SECRET\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# ethstats.secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"ethereum.fullname\" . }}-ethstats\n labels:\n app: {{ template \"ethereum.name\" . }}\n chart: {{ template \"ethereum.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n WS_SECRET: {{ .Values.ethstats.webSocketSecret | b64enc | quote }}",
"# ethstats.service.yaml\nkind: Service\napiVersion: v1\nmetadata:\n name: {{ template \"ethereum.fullname\" . }}-ethstats\n labels:\n app: {{ template \"ethereum.name\" . }}\n chart: {{ template \"ethereum.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: ethstats\nspec:\n selector:\n app: {{ template \"ethereum.name\" . }}\n release: {{ .Release.Name }}\n component: ethstats\n type: {{ .Values.ethstats.service.type }}\n ports:\n - port: 80\n targetPort: http",
"# geth-account.secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"ethereum.fullname\" . }}-geth-account\n labels:\n app: {{ template \"ethereum.name\" . }}\n chart: {{ template \"ethereum.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n {{- if .Values.geth.account.privateKey }}\n accountPrivateKey: {{ .Values.geth.account.privateKey | b64enc | quote }}\n {{- end }}\n {{- if .Values.geth.account.secret }}\n accountSecret: {{ .Values.geth.account.secret | b64enc | quote }}\n {{- end }}",
"# geth-miner.deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"ethereum.fullname\" . }}-geth-miner\n labels:\n app: {{ template \"ethereum.name\" . }}\n chart: {{ template \"ethereum.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: geth-miner\nspec:\n replicas: {{ .Values.geth.miner.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"ethereum.name\" . }}\n release: {{ .Release.Name }}\n component: geth-miner\n template:\n metadata:\n labels:\n app: {{ template \"ethereum.name\" . }}\n release: {{ .Release.Name }}\n component: geth-miner\n spec:\n containers:\n - name: geth-miner\n image: {{ .Values.geth.image.repository }}:{{ .Values.geth.image.tag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n command: [\"/bin/sh\"]\n args:\n - \"-c\"\n - \"geth --bootnodes=`cat /root/.ethereum/bootnodes` --mine --etherbase=0 --networkid=${NETWORK_ID} --ethstats=${HOSTNAME}:${ETHSTATS_SECRET}@${ETHSTATS_SVC} --verbosity=5\"\n env:\n - name: ETHSTATS_SVC\n value: {{ template \"ethereum.fullname\" . }}-ethstats.{{ .Release.Namespace }}\n - name: ETHSTATS_SECRET\n valueFrom:\n secretKeyRef:\n name: {{ template \"ethereum.fullname\" . }}-ethstats\n key: WS_SECRET\n - name: NETWORK_ID\n valueFrom:\n configMapKeyRef:\n name: {{ template \"ethereum.fullname\" . }}-geth-config\n key: networkid\n ports:\n - name: discovery-udp\n containerPort: 30303\n protocol: UDP\n - name: discovery-tcp\n containerPort: 30303\n volumeMounts:\n - name: data\n mountPath: /root/.ethereum\n initContainers:\n - name: init-genesis\n image: {{ .Values.geth.image.repository }}:{{ .Values.geth.image.tag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n args:\n - \"init\"\n - \"/var/geth/genesis.json\"\n volumeMounts:\n - name: data\n mountPath: /root/.ethereum\n - name: config\n mountPath: /var/geth\n - name: import-geth-account\n image: {{ .Values.geth.image.repository }}:{{ .Values.geth.image.tag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n command: [\"/bin/sh\"]\n args:\n - \"-c\"\n - \"geth account import --password /root/.ethereum/account/accountSecret /root/.ethereum/account/accountPrivateKey\"\n volumeMounts:\n - name: data\n mountPath: /root/.ethereum\n - name: account\n readOnly: true\n mountPath: /root/.ethereum/account\n - name: get-bootnodes\n image: {{ .Values.geth.image.repository }}:{{ .Values.geth.image.tag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n command: [\"/bin/sh\"]\n args:\n - \"-c\"\n - |-\n{{ .Files.Get \"scripts/get-bootnode.sh\" | indent 10 }}\n env:\n - name: BOOTNODE_SVC\n value: {{ template \"ethereum.fullname\" . }}-bootnode.{{ .Release.Namespace }}\n volumeMounts:\n - name: data\n mountPath: /geth\n volumes:\n - name: data\n emptyDir: {}\n - name: config\n configMap:\n name: {{ template \"ethereum.fullname\" . }}-geth-config\n - name: account\n secret:\n secretName: {{ template \"ethereum.fullname\" . }}-geth-account\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# geth-tx.deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"ethereum.fullname\" . }}-geth-tx\n labels:\n app: {{ template \"ethereum.name\" . }}\n chart: {{ template \"ethereum.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: geth-tx\nspec:\n replicas: {{ .Values.geth.tx.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"ethereum.name\" . }}\n release: {{ .Release.Name }}\n component: geth-tx\n template:\n metadata:\n labels:\n app: {{ template \"ethereum.name\" . }}\n release: {{ .Release.Name }}\n component: geth-tx\n spec:\n containers:\n - name: geth-tx\n image: {{ .Values.geth.image.repository }}:{{ .Values.geth.image.tag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n command: [\"/bin/sh\"]\n args:\n - \"-c\"\n - \"geth --bootnodes=`cat /root/.ethereum/bootnodes` --rpc --rpcaddr 0.0.0.0 --rpcapi={{ .Values.geth.tx.args.rpcapi }} --rpccorsdomain='*' --ws --networkid=${NETWORK_ID} --ethstats=${HOSTNAME}:${ETHSTATS_SECRET}@${ETHSTATS_SVC} --verbosity=5\"\n env:\n - name: ETHSTATS_SVC\n value: {{ template \"ethereum.fullname\" . }}-ethstats.{{ .Release.Namespace }}\n - name: ETHSTATS_SECRET\n valueFrom:\n secretKeyRef:\n name: {{ template \"ethereum.fullname\" . }}-ethstats\n key: WS_SECRET\n - name: NETWORK_ID\n valueFrom:\n configMapKeyRef:\n name: {{ template \"ethereum.fullname\" . }}-geth-config\n key: networkid\n ports:\n - name: rpc\n containerPort: 8545\n - name: ws\n containerPort: 8546\n - name: discovery-udp\n containerPort: 30303\n protocol: UDP\n - name: discovery-tcp\n containerPort: 30303\n volumeMounts:\n - name: data\n mountPath: /root/.ethereum\n initContainers:\n - name: init-genesis\n image: {{ .Values.geth.image.repository }}:{{ .Values.geth.image.tag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n args:\n - \"init\"\n - \"/var/geth/genesis.json\"\n volumeMounts:\n - name: data\n mountPath: /root/.ethereum\n - name: config\n mountPath: /var/geth\n - name: import-geth-account\n image: {{ .Values.geth.image.repository }}:{{ .Values.geth.image.tag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n command: [\"/bin/sh\"]\n args:\n - \"-c\"\n - \"geth account import --password /root/.ethereum/account/accountSecret /root/.ethereum/account/accountPrivateKey\"\n volumeMounts:\n - name: data\n mountPath: /root/.ethereum\n - name: account\n readOnly: true\n mountPath: /root/.ethereum/account\n - name: get-bootnodes\n image: {{ .Values.geth.image.repository }}:{{ .Values.geth.image.tag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n command: [\"/bin/sh\"]\n args:\n - \"-c\"\n - |-\n{{ .Files.Get \"scripts/get-bootnode.sh\" | indent 10 }}\n env:\n - name: BOOTNODE_SVC\n value: {{ template \"ethereum.fullname\" . }}-bootnode.{{ .Release.Namespace }}\n volumeMounts:\n - name: data\n mountPath: /geth\n volumes:\n - name: data\n emptyDir: {}\n - name: config\n configMap:\n name: {{ template \"ethereum.fullname\" . }}-geth-config\n - name: account\n secret:\n secretName: {{ template \"ethereum.fullname\" . }}-geth-account\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# geth-tx.service.yaml\nkind: Service\napiVersion: v1\nmetadata:\n name: {{ template \"ethereum.fullname\" . }}-geth-tx\n labels:\n app: {{ template \"ethereum.name\" . }}\n chart: {{ template \"ethereum.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: geth-tx\nspec:\n selector:\n app: {{ template \"ethereum.name\" . }}\n release: {{ .Release.Name }}\n component: geth-tx\n type: {{ .Values.geth.tx.service.type }}\n sessionAffinity: ClientIP\n ports:\n - name: rpc\n port: 8545\n - name: ws\n port: 8546",
"# geth.configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"ethereum.fullname\" . }}-geth-config\n labels:\n app: {{ template \"ethereum.name\" . }}\n chart: {{ template \"ethereum.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n networkid: \"{{ .Values.geth.genesis.networkId }}\"\n genesis.json: |-\n {\n \"config\": {\n \"chainId\": {{ .Values.geth.genesis.networkId }},\n \"homesteadBlock\": 0,\n \"eip150Block\": 0,\n \"eip155Block\": 0,\n \"eip158Block\": 0\n },\n \"difficulty\": {{ .Values.geth.genesis.difficulty | quote }},\n \"gasLimit\": {{ .Values.geth.genesis.gasLimit | quote }},\n \"alloc\": {\n {{- if .Values.geth.account.address }}\n {{ .Values.geth.account.address | quote }}: {\n \"balance\": \"1000000000000000000000000\"\n }\n {{- end }}\n }\n }"
] | # Default values for ethereum.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
imagePullPolicy: IfNotPresent
# Node labels for pod assignment
# ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
bootnode:
image:
repository: ethereum/client-go
tag: alltools-v1.7.3
ethstats:
image:
repository: ethereumex/eth-stats-dashboard
tag: v0.0.1
webSocketSecret: my-secret-for-connecting-to-ethstats
service:
type: LoadBalancer
geth:
image:
repository: ethereum/client-go
tag: v1.7.3
tx:
# transaction nodes
replicaCount: 2
service:
type: ClusterIP
args:
rpcapi: 'eth,net,web3'
miner:
# miner nodes
replicaCount: 3
genesis:
# geth genesis block
difficulty: '0x0400'
gasLimit: '0x8000000'
networkId: 98052
account:
# You will need to configure an Ethereum account before this
# network will run. The Ethereum account will be used to seed
# Ether, and mined Ether will be deposited into this account (see the illustrative example below).
# ref: https://github.com/ethereum/go-ethereum/wiki/Managing-your-accounts
address:
privateKey:
secret:
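# Illustrative example only (placeholder values, not real credentials):
# account:
#   address: "0x0000000000000000000000000000000000000000"
#   privateKey: "<hex-encoded private key exported from your keystore>"
#   secret: "<passphrase protecting the private key>"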
|
filebeat | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"filebeat.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"filebeat.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"filebeat.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"filebeat.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"filebeat.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"filebeat.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"filebeat.name\" . }}\n helm.sh/chart: {{ template \"filebeat.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nrules:\n- apiGroups: [\"\"]\n resources:\n - namespaces\n - pods\n verbs: [\"get\", \"list\", \"watch\"]\n{{- end -}}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"filebeat.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"filebeat.name\" . }}\n helm.sh/chart: {{ template \"filebeat.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"filebeat.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"filebeat.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# daemonset.yaml\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: {{ template \"filebeat.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"filebeat.name\" . }}\n helm.sh/chart: {{ template \"filebeat.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ template \"filebeat.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n minReadySeconds: 10\n updateStrategy:\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ template \"filebeat.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n annotations:\n checksum/secret: {{ toYaml (default .Values.config .Values.overrideConfig) | sha256sum }}\n{{- if .Values.annotations }}\n{{ toYaml .Values.annotations | indent 8 }}\n{{- end }}\n spec:\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range $sec := .Values.image.pullSecrets }}\n - name: {{ $sec | quote }}\n {{- end }}\n {{- end }}\n{{- if .Values.priorityClassName }}\n priorityClassName: \"{{ .Values.priorityClassName }}\"\n{{- end }}\n initContainers:\n{{- if .Values.indexTemplateLoad }}\n - name: \"load-es-template\"\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command:\n - /bin/bash\n - -c\n - filebeat setup --template\n -E output.logstash.enabled=false\n -E output.file.enabled=false\n -E output.elasticsearch.hosts=[{{- range $index, $host := .Values.indexTemplateLoad }}{{ if $index }}, {{ end }}{{ $host | quote }}{{- end }}]\n volumeMounts:\n - name: filebeat-config\n mountPath: /usr/share/filebeat/filebeat.yml\n readOnly: true\n subPath: filebeat.yml\n{{- end }}\n{{- if .Values.extraInitContainers }}\n{{ toYaml .Values.extraInitContainers | indent 6 }}\n{{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n{{- if .Values.command }}\n command:\n{{ toYaml .Values.command | indent 8 }}\n{{- end }}\n args:\n{{- if .Values.args }}\n{{ toYaml .Values.args | indent 8 }}\n{{- else }}\n - \"-e\"\n{{- if .Values.plugins }}\n - \"--plugin\"\n - {{ .Values.plugins | join \",\" | quote }}\n{{- end }}\n{{- end }}\n env:\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n{{- if .Values.extraVars }}\n{{ toYaml .Values.extraVars | indent 8 }}\n{{- end }}\n {{- if index .Values.config \"http.enabled\" }}\n ports:\n - containerPort: {{ index .Values.config \"http.port\" }}\n {{- end }}\n securityContext:\n runAsUser: 0\n{{- if .Values.privileged }}\n privileged: true\n{{- end }}\n{{- if .Values.resources }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n{{- end }}\n volumeMounts:\n - name: filebeat-config\n mountPath: /usr/share/filebeat/filebeat.yml\n readOnly: true\n subPath: filebeat.yml\n - name: data\n mountPath: /usr/share/filebeat/data\n - name: varlog\n mountPath: /var/log\n readOnly: true\n - name: varlibdockercontainers\n mountPath: /var/lib/docker/containers\n readOnly: true\n{{- if .Values.extraVolumeMounts }}\n{{ toYaml .Values.extraVolumeMounts | indent 8 }}\n{{- end }}\n{{- if .Values.monitoring.enabled }}\n - name: {{ template \"filebeat.fullname\" . 
}}-prometheus-exporter\n image: \"{{ .Values.monitoring.image.repository }}:{{ .Values.monitoring.image.tag }}\"\n imagePullPolicy: {{ .Values.monitoring.image.pullPolicy }}\n args:\n{{- if .Values.monitoring.args }}\n{{ toYaml .Values.monitoring.args | indent 8 }}\n{{- end }}\n{{- if .Values.monitoring.resources }}\n resources:\n{{ toYaml .Values.monitoring.resources | indent 10 }}\n{{- end }}\n ports:\n - containerPort: {{ .Values.monitoring.exporterPort}}\n{{- end }}\n volumes:\n - name: varlog\n hostPath:\n path: /var/log\n - name: varlibdockercontainers\n hostPath:\n path: /var/lib/docker/containers\n - name: filebeat-config\n secret:\n secretName: {{ template \"filebeat.fullname\" . }}\n - name: data\n hostPath:\n path: {{ .Values.data.hostPath }}\n type: DirectoryOrCreate\n{{- if .Values.extraVolumes }}\n{{ toYaml .Values.extraVolumes | indent 6 }}\n{{- end }}\n terminationGracePeriodSeconds: 60\n serviceAccountName: {{ template \"filebeat.serviceAccountName\" . }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# podsecuritypolicy.yaml\n{{- if .Values.rbac.create -}}\n{{- if .Values.podSecurityPolicy.enabled }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"filebeat.fullname\" . }}\n annotations:\n{{- if .Values.podSecurityPolicy.annotations }}\n{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }}\n{{- end }}\nspec:\n privileged: false\n allowPrivilegeEscalation: false\n allowedHostPaths:\n - pathPrefix: /var/log\n readOnly: true\n - pathPrefix: /var/lib/docker/containers\n readOnly: true\n - pathPrefix: {{ .Values.data.hostPath }}\n requiredDropCapabilities:\n - ALL\n volumes:\n - configMap\n - secret\n - hostPath\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n rule: MustRunAs\n ranges:\n - min: 0\n max: 0\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: MustRunAs\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: MustRunAs\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n hostPorts:\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n{{- end -}}\n{{- end -}}\n",
"# role.yaml\n{{- if .Values.rbac.create }}\n{{- if .Values.podSecurityPolicy.enabled }}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n name: {{ template \"filebeat.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"filebeat.name\" . }}\n helm.sh/chart: {{ template \"filebeat.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n namespace: {{ .Release.Namespace }}\nrules:\n- apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames:\n - {{ template \"filebeat.fullname\" . }}\n{{- end }}\n{{- end }}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create }}\n{{- if .Values.podSecurityPolicy.enabled }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: {{ template \"filebeat.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"filebeat.name\" . }}\n helm.sh/chart: {{ template \"filebeat.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n namespace: {{ .Release.Namespace }}\nroleRef:\n kind: Role\n name: {{ template \"filebeat.fullname\" . }}\n apiGroup: rbac.authorization.k8s.io\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"filebeat.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}\n{{- end }}\n",
"# secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"filebeat.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"filebeat.name\" . }}\n helm.sh/chart: {{ template \"filebeat.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ntype: Opaque\ndata:\n filebeat.yml: {{ toYaml (default .Values.config .Values.overrideConfig) | indent 4 | b64enc }}\n {{- if .Values.extraSecrets }}\n {{- range $key, $value := .Values.extraSecrets }}\n {{ $key }}: {{ $value | b64enc }}\n {{- end -}}\n\n {{ end }}\n",
"# service.yaml\n{{- if .Values.monitoring.enabled }}\nkind: Service\napiVersion: v1\nmetadata:\n{{- if not .Values.monitoring.serviceMonitor.enabled }}\n annotations:\n{{- if .Values.monitoring.telemetryPath }}\n prometheus.io/path: {{ .Values.monitoring.telemetryPath }}\n{{- else }}\n prometheus.io/path: /metrics\n{{- end }}\n prometheus.io/port: \"{{ .Values.monitoring.exporterPort }}\"\n prometheus.io/scrape: \"true\"\n{{- end }}\n name: {{ template \"filebeat.fullname\" . }}-metrics\n namespace: {{ .Release.Namespace }}\n labels:\n app.kubernetes.io/name: {{ template \"filebeat.name\" . }}\n helm.sh/chart: {{ template \"filebeat.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n selector:\n app.kubernetes.io/name: {{ template \"filebeat.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n ports:\n - name: metrics\n port: {{ .Values.monitoring.exporterPort }}\n targetPort: {{ .Values.monitoring.targetPort }}\n protocol: TCP\n{{ end }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"filebeat.serviceAccountName\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"filebeat.name\" . }}\n helm.sh/chart: {{ template \"filebeat.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end -}}\n",
"# servicemonitor.yaml\n{{- if and ( .Capabilities.APIVersions.Has \"monitoring.coreos.com/v1\" ) ( .Values.monitoring.serviceMonitor.enabled ) ( .Values.monitoring.enabled ) }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n{{- if .Values.monitoring.serviceMonitor.labels }}\n labels:\n{{ toYaml .Values.monitoring.serviceMonitor.labels | indent 4}}\n{{- end }}\n name: {{ template \"filebeat.fullname\" . }}-prometheus-exporter\n{{- if .Values.monitoring.serviceMonitor.namespace }}\n namespace: {{ .Values.monitoring.serviceMonitor.namespace }}\n{{- end }}\nspec:\n endpoints:\n - targetPort: {{ .Values.monitoring.exporterPort }}\n{{- if .Values.monitoring.serviceMonitor.interval }}\n interval: {{ .Values.monitoring.serviceMonitor.interval }}\n{{- end }}\n{{- if .Values.monitoring.serviceMonitor.telemetryPath }}\n path: {{ .Values.monitoring.serviceMonitor.telemetryPath }}\n{{- end }}\n jobLabel: {{ template \"filebeat.fullname\" . }}-prometheus-exporter\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ template \"filebeat.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n{{- end }}"
] | image:
repository: docker.elastic.co/beats/filebeat-oss
tag: 7.4.0
pullPolicy: IfNotPresent
config:
filebeat.config:
modules:
path: ${path.config}/modules.d/*.yml
# Reload module configs as they change:
reload.enabled: false
processors:
- add_cloud_metadata:
filebeat.inputs:
- type: log
enabled: true
paths:
- /var/log/*.log
- /var/log/messages
- /var/log/syslog
- type: docker
containers.ids:
- "*"
processors:
- add_kubernetes_metadata:
- drop_event:
when:
equals:
kubernetes.container.name: "filebeat"
output.file:
path: "/usr/share/filebeat/data"
filename: filebeat
rotate_every_kb: 10000
number_of_files: 5
# When a key contains a period, use this format for setting values on the command line:
# --set config."http\.enabled"=true
http.enabled: true
http.port: 5066
# If overrideConfig is not empty, the filebeat chart's default config won't be used at all.
overrideConfig: {}
# Path on the host to mount to /usr/share/filebeat/data in the container.
data:
hostPath: /var/lib/filebeat
# Upload index template to Elasticsearch if Logstash output is enabled
# https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-template.html
# List of Elasticsearch hosts
indexTemplateLoad: []
# - elasticsearch:9200
# List of beat plugins
plugins: []
# - kinesis.so
# Pass a custom command. This is the equivalent of Entrypoint in Docker.
command: []
# Pass custom args. This is the equivalent of Cmd in Docker.
args: []
# A list of additional environment variables
extraVars: []
# - name: TEST1
# value: TEST2
# - name: TEST3
# valueFrom:
# configMapKeyRef:
# name: configmap
# key: config.key
# Add additional volumes and mounts, for example to read other log files on the host
extraVolumes: []
# - hostPath:
# path: /var/log
# name: varlog
extraVolumeMounts: []
# - name: varlog
# mountPath: /host/var/log
# readOnly: true
extraSecrets: {}
# secret: "TEST1"
extraInitContainers: []
# - name: echo
# image: busybox
# imagePullPolicy: Always
# args:
# - echo
# - hello
resources: {}
# We usually recommend not specifying default resources and leaving this as a conscious
# choice for the user. This also increases the chances that charts run in environments with limited
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 200Mi
# requests:
# cpu: 100m
# memory: 100Mi
priorityClassName: ""
nodeSelector: {}
annotations: {}
tolerations: []
# - operator: Exists
affinity: {}
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
## Specify if a Pod Security Policy for filebeat must be created
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
##
podSecurityPolicy:
enabled: False
annotations: {}
## Specify pod annotations
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
##
# seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
# seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
# apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
privileged: false
## Add the Elastic beat-exporter for Prometheus
## https://github.com/trustpilot/beat-exporter
## Don't forget to enable HTTP via config.http.enabled (this exposes Filebeat stats); see the example at the end of this values file.
monitoring:
enabled: true
serviceMonitor:
# When set to true and the Prometheus Operator is installed, a ServiceMonitor is used to configure scraping
enabled: true
# Set the namespace the ServiceMonitor should be deployed in
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set the path to the beat-exporter telemetry endpoint
# telemetryPath: /metrics
# Set labels for the ServiceMonitor; use this to define your scrape label for the Prometheus Operator
# labels:
image:
repository: trustpilot/beat-exporter
tag: 0.1.1
pullPolicy: IfNotPresent
resources: {}
# We usually recommend not specifying default resources and leaving this as a conscious
# choice for the user. This also increases the chances that charts run in environments with limited
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 200Mi
# requests:
# cpu: 100m
# memory: 100Mi
# Pass custom args. This is the equivalent of Cmd in Docker.
args: []
## Default is ":9479". If changed, you need to pass the argument "-web.listen-address <...>" to the exporter
exporterPort: 9479
## Filebeat service port, which exposes Prometheus metrics
targetPort: 9479
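# Illustrative example only: setting the exporter-related values from the command line,
# using the escaped-key --set syntax documented above (the release and chart references
# below are placeholders, not part of the upstream chart):
#   helm install --name my-filebeat stable/filebeat \
#     --set monitoring.enabled=true \
#     --set config."http\.enabled"=true \
#     --set config."http\.port"=5066
# If exporterPort is changed, pass the matching listener flag to the exporter, e.g.
#   args: ["-web.listen-address=:9479"]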
|
prometheus-nats-exporter | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-nats-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"prometheus-nats-exporter.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"prometheus-nats-exporter.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"prometheus-nats-exporter.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-nats-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-nats-exporter.chart\" . }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"prometheus-nats-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-nats-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n annotations:\n{{- if .Values.annotations }}\n{{ toYaml .Values.annotations | indent 8 }}\n{{- end}}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n args:\n - \"-port\"\n - \"{{ .Values.service.targetPort }}\"\n {{- if .Values.config.metrics.varz }}\n - \"-varz\"\n {{- end }}\n {{- if .Values.config.metrics.channelz }}\n - \"-channelz\"\n {{- end }}\n {{- if .Values.config.metrics.connz }}\n - \"-connz\"\n {{- end }}\n {{- if .Values.config.metrics.routez }}\n - \"-routez\"\n {{- end }}\n {{- if .Values.config.metrics.serverz }}\n - \"-serverz\"\n {{- end }}\n {{- if .Values.config.metrics.subz }}\n - \"-subz\"\n {{- end }}\n {{- if .Values.config.metrics.gatewayz }}\n - \"-gatewayz\"\n {{- end }}\n - \"http://{{ .Values.config.nats.service }}.{{ .Values.config.nats.namespace }}.svc:{{ .Values.config.nats.port }}\"\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: http\n containerPort: {{ .Values.service.targetPort }}\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /metrics\n port: http\n readinessProbe:\n httpGet:\n path: /metrics\n port: http\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n{{- with .Values.extraVolumes }}\n{{ tpl . $ | indent 6 }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"prometheus-nats-exporter.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-nats-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-nats-exporter.chart\" . }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: {{ .Values.service.targetPort }}\n protocol: TCP\n name: http\n selector:\n app.kubernetes.io/name: {{ include \"prometheus-nats-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n",
"# servicemonitor.yaml\n{{- if and .Values.serviceMonitor.enabled (.Capabilities.APIVersions.Has \"monitoring.coreos.com/v1\") }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ include \"prometheus-nats-exporter.fullname\" . }}\n {{- with .Values.serviceMonitor.namespace }}\n namespace: {{ . }}\n {{- end }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-nats-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-nats-exporter.chart\" . }}\n {{- with .Values.serviceMonitor.additionalLabels }}\n{{ toYaml . | indent 4 }}\n {{- end }}\nspec:\n endpoints:\n - port: http\n honorLabels: true\n {{- with .Values.serviceMonitor.interval }}\n interval: {{ . }}\n {{- end }}\n {{- with .Values.serviceMonitor.scrapeTimeout }}\n scrapeTimeout: {{ . }}\n {{- end }}\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"prometheus-nats-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n{{- end }}\n\n"
] | # Default values for prometheus-nats-exporter.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: synadia/prometheus-nats-exporter
tag: 0.6.2
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 80
targetPort: 7777
serviceMonitor:
enabled: false
additionalLabels: {}
namespace:
interval:
scrapeTimeout:
resources: {}
# We usually recommend not specifying default resources and leaving this as a conscious
# choice for the user. This also increases the chances that charts run in environments with limited
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
config:
nats:
service: nats-nats-monitoring
namespace: default
port: 8222
metrics:
varz: true
channelz: true
connz: true
routez: true
serverz: true
subz: true
gatewayz: true
nodeSelector: {}
tolerations: []
affinity: {}
annotations: {}
extraContainers: |
extraVolumes: |
|
coredns | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"coredns.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"coredns.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGenerate the list of ports automatically from the server definitions\n*/}}\n{{- define \"coredns.servicePorts\" -}}\n {{/* Set ports to be an empty dict */}}\n {{- $ports := dict -}}\n {{/* Iterate through each of the server blocks */}}\n {{- range .Values.servers -}}\n {{/* Capture port to avoid scoping awkwardness */}}\n {{- $port := toString .port -}}\n\n {{/* If none of the server blocks has mentioned this port yet take note of it */}}\n {{- if not (hasKey $ports $port) -}}\n {{- $ports := set $ports $port (dict \"istcp\" false \"isudp\" false) -}}\n {{- end -}}\n {{/* Retrieve the inner dict that holds the protocols for a given port */}}\n {{- $innerdict := index $ports $port -}}\n\n {{/*\n Look at each of the zones and check which protocol they serve\n At the moment the following are supported by CoreDNS:\n UDP: dns://\n TCP: tls://, grpc://\n */}}\n {{- range .zones -}}\n {{- if has (default \"\" .scheme) (list \"dns://\") -}}\n {{/* Optionally enable tcp for this service as well */}}\n {{- if eq (default false .use_tcp) true }}\n {{- $innerdict := set $innerdict \"istcp\" true -}}\n {{- end }}\n {{- $innerdict := set $innerdict \"isudp\" true -}}\n {{- end -}}\n\n {{- if has (default \"\" .scheme) (list \"tls://\" \"grpc://\") -}}\n {{- $innerdict := set $innerdict \"istcp\" true -}}\n {{- end -}}\n {{- end -}}\n\n {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}}\n {{- if and (not (index $innerdict \"istcp\")) (not (index $innerdict \"isudp\")) -}}\n {{- $innerdict := set $innerdict \"isudp\" true -}}\n {{- $innerdict := set $innerdict \"istcp\" true -}}\n {{- end -}}\n\n {{/* Write the dict back into the outer dict */}}\n {{- $ports := set $ports $port $innerdict -}}\n {{- end -}}\n\n {{/* Write out the ports according to the info collected above */}}\n {{- range $port, $innerdict := $ports -}}\n {{- if index $innerdict \"isudp\" -}}\n {{- printf \"- {port: %v, protocol: UDP, name: udp-%s}\\n\" $port $port -}}\n {{- end -}}\n {{- if index $innerdict \"istcp\" -}}\n {{- printf \"- {port: %v, protocol: TCP, name: tcp-%s}\\n\" $port $port -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n\n{{/*\nGenerate the list of ports automatically from the server definitions\n*/}}\n{{- define \"coredns.containerPorts\" -}}\n {{/* Set ports to be an empty dict */}}\n {{- $ports := dict -}}\n {{/* Iterate through each of the server blocks */}}\n {{- range .Values.servers -}}\n {{/* Capture port to avoid scoping awkwardness */}}\n {{- $port := toString .port -}}\n\n {{/* If none of the server blocks has mentioned this port yet take note of it */}}\n {{- if not (hasKey $ports $port) -}}\n {{- $ports := set $ports $port (dict \"istcp\" false \"isudp\" false) -}}\n {{- end -}}\n {{/* Retrieve the inner dict that holds the protocols for a given port */}}\n {{- $innerdict := 
index $ports $port -}}\n\n {{/*\n Look at each of the zones and check which protocol they serve\n At the moment the following are supported by CoreDNS:\n UDP: dns://\n TCP: tls://, grpc://\n */}}\n {{- range .zones -}}\n {{- if has (default \"\" .scheme) (list \"dns://\") -}}\n {{/* Optionally enable tcp for this service as well */}}\n {{- if eq (default false .use_tcp) true }}\n {{- $innerdict := set $innerdict \"istcp\" true -}}\n {{- end }}\n {{- $innerdict := set $innerdict \"isudp\" true -}}\n {{- end -}}\n\n {{- if has (default \"\" .scheme) (list \"tls://\" \"grpc://\") -}}\n {{- $innerdict := set $innerdict \"istcp\" true -}}\n {{- end -}}\n {{- end -}}\n\n {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}}\n {{- if and (not (index $innerdict \"istcp\")) (not (index $innerdict \"isudp\")) -}}\n {{- $innerdict := set $innerdict \"isudp\" true -}}\n {{- $innerdict := set $innerdict \"istcp\" true -}}\n {{- end -}}\n\n {{/* Write the dict back into the outer dict */}}\n {{- $ports := set $ports $port $innerdict -}}\n {{- end -}}\n\n {{/* Write out the ports according to the info collected above */}}\n {{- range $port, $innerdict := $ports -}}\n {{- if index $innerdict \"isudp\" -}}\n {{- printf \"- {containerPort: %v, protocol: UDP, name: udp-%s}\\n\" $port $port -}}\n {{- end -}}\n {{- if index $innerdict \"istcp\" -}}\n {{- printf \"- {containerPort: %v, protocol: TCP, name: tcp-%s}\\n\" $port $port -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"coredns.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"coredns.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole-autoscaler.yaml\n{{- if and .Values.autoscaler.enabled .Values.rbac.create }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"coredns.fullname\" . }}-autoscaler\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name }}-autoscaler\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}-autoscaler\n{{- if .Values.customLabels }}\n{{ toYaml .Values.customLabels | indent 4 }}\n{{- end }}\nrules:\n - apiGroups: [\"\"]\n resources: [\"nodes\"]\n verbs: [\"list\",\"watch\"]\n - apiGroups: [\"\"]\n resources: [\"replicationcontrollers/scale\"]\n verbs: [\"get\", \"update\"]\n - apiGroups: [\"extensions\", \"apps\"]\n resources: [\"deployments/scale\", \"replicasets/scale\"]\n verbs: [\"get\", \"update\"]\n# Remove the configmaps rule once below issue is fixed:\n# kubernetes-incubator/cluster-proportional-autoscaler#16\n - apiGroups: [\"\"]\n resources: [\"configmaps\"]\n verbs: [\"get\", \"create\"]\n{{- end }}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"coredns.fullname\" . }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n - services\n - pods\n - namespaces\n verbs:\n - list\n - watch\n{{- if .Values.rbac.pspEnable }}\n- apiGroups:\n - policy\n - extensions\n resources:\n - podsecuritypolicies\n verbs:\n - use\n resourceNames:\n - {{ template \"coredns.fullname\" . }}\n{{- end }}\n{{- end }}\n",
"# clusterrolebinding-autoscaler.yaml\n{{- if and .Values.autoscaler.enabled .Values.rbac.create }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"coredns.fullname\" . }}-autoscaler\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name }}-autoscaler\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}-autoscaler\n{{- if .Values.customLabels }}\n{{ toYaml .Values.customLabels | indent 4 }}\n{{- end }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"coredns.fullname\" . }}-autoscaler\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"coredns.fullname\" . }}-autoscaler\n namespace: {{ .Release.Namespace }}\n{{- end }}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"coredns.fullname\" . }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"coredns.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"coredns.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}\n",
"# configmap-autoscaler.yaml\n{{- if .Values.autoscaler.enabled }}\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: {{ template \"coredns.fullname\" . }}-autoscaler\n namespace: {{ .Release.Namespace }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name }}-autoscaler\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}-autoscaler\n {{- if .Values.customLabels }}\n {{- toYaml .Values.customLabels | nindent 4 }}\n {{- end }}\n {{- if .Values.autoscaler.configmap.annotations }}\n annotations:\n {{- toYaml .Values.autoscaler.configmap.annotations | nindent 4 }}\n {{- end }}\ndata:\n # When cluster is using large nodes(with more cores), \"coresPerReplica\" should dominate.\n # If using small nodes, \"nodesPerReplica\" should dominate.\n linear: |-\n {\n \"coresPerReplica\": {{ .Values.autoscaler.coresPerReplica | float64 }},\n \"nodesPerReplica\": {{ .Values.autoscaler.nodesPerReplica | float64 }},\n \"preventSinglePointFailure\": {{ .Values.autoscaler.preventSinglePointFailure }},\n \"min\": {{ .Values.autoscaler.min | int }},\n \"max\": {{ .Values.autoscaler.max | int }},\n \"includeUnschedulableNodes\": {{ .Values.autoscaler.includeUnschedulableNodes }}\n }\n{{- end }}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"coredns.fullname\" . }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\ndata:\n Corefile: |-\n {{ range .Values.servers }}\n {{- range $idx, $zone := .zones }}{{ if $idx }} {{ else }}{{ end }}{{ default \"\" $zone.scheme }}{{ default \".\" $zone.zone }}{{ else }}.{{ end -}}\n {{- if .port }}:{{ .port }} {{ end -}}\n {\n {{- range .plugins }}\n {{ .name }}{{ if .parameters }} {{ .parameters }}{{ end }}{{ if .configBlock }} {\n{{ .configBlock | indent 12 }}\n }{{ end }}\n {{- end }}\n }\n {{ end }}\n {{- range .Values.zoneFiles }}\n {{ .filename }}: {{ toYaml .contents | indent 4 }}\n {{- end }}\n",
"# deployment-autoscaler.yaml\n{{- if and (.Values.autoscaler.enabled) (not .Values.hpa.enabled) }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"coredns.fullname\" . }}-autoscaler\n namespace: {{ .Release.Namespace }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name }}-autoscaler\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}-autoscaler\n{{- if .Values.customLabels }}\n{{ toYaml .Values.customLabels | indent 4 }}\n{{- end }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name }}-autoscaler\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}-autoscaler\n template:\n metadata:\n labels:\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name }}-autoscaler\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}-autoscaler\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n {{- if .Values.customLabels }}\n {{ toYaml .Values.customLabels | nindent 8 }}\n {{- end }}\n annotations:\n checksum/configmap: {{ include (print $.Template.BasePath \"/configmap-autoscaler.yaml\") . | sha256sum }}\n {{- if .Values.isClusterService }}\n scheduler.alpha.kubernetes.io/critical-pod: ''\n scheduler.alpha.kubernetes.io/tolerations: '[{\"key\":\"CriticalAddonsOnly\", \"operator\":\"Exists\"}]'\n {{- end }}\n spec:\n serviceAccountName: {{ template \"coredns.fullname\" . }}-autoscaler\n {{- $priorityClassName := default .Values.priorityClassName .Values.autoscaler.priorityClassName }}\n {{- if $priorityClassName }}\n priorityClassName: {{ $priorityClassName | quote }}\n {{- end }}\n {{- if .Values.autoscaler.affinity }}\n affinity:\n{{ toYaml .Values.autoscaler.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.autoscaler.tolerations }}\n tolerations:\n{{ toYaml .Values.autoscaler.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.autoscaler.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.autoscaler.nodeSelector | indent 8 }}\n {{- end }}\n containers:\n - name: autoscaler\n image: \"{{ .Values.autoscaler.image.repository }}:{{ .Values.autoscaler.image.tag }}\"\n imagePullPolicy: {{ .Values.autoscaler.image.pullPolicy }}\n resources:\n{{ toYaml .Values.autoscaler.resources | indent 10 }}\n command:\n - /cluster-proportional-autoscaler\n - --namespace={{ .Release.Namespace }}\n - --configmap={{ template \"coredns.fullname\" . }}-autoscaler\n - --target=Deployment/{{ template \"coredns.fullname\" . }}\n - --logtostderr=true\n - --v=2\n{{- end }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"coredns.fullname\" . }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n{{- if .Values.customLabels }}\n{{ toYaml .Values.customLabels | indent 4 }}\n{{- end }}\nspec:\n {{- if not .Values.autoscaler.enabled }}\n replicas: {{ .Values.replicaCount }}\n {{- end }}\n strategy:\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: {{ .Values.rollingUpdate.maxUnavailable }}\n maxSurge: {{ .Values.rollingUpdate.maxSurge }}\n selector:\n matchLabels:\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n template:\n metadata:\n labels:\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n {{- if .Values.customLabels }}\n {{ toYaml .Values.customLabels }}\n {{- end }}\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n {{- if .Values.isClusterService }}\n scheduler.alpha.kubernetes.io/critical-pod: ''\n scheduler.alpha.kubernetes.io/tolerations: '[{\"key\":\"CriticalAddonsOnly\", \"operator\":\"Exists\"}]'\n {{- end }}\n{{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n{{- end }}\n spec:\n {{- if .Values.terminationGracePeriodSeconds }}\n terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}\n {{- end }}\n serviceAccountName: {{ template \"coredns.serviceAccountName\" . }}\n {{- if .Values.priorityClassName }}\n priorityClassName: {{ .Values.priorityClassName | quote }}\n {{- end }}\n {{- if .Values.isClusterService }}\n dnsPolicy: Default\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n containers:\n - name: \"coredns\"\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n args: [ \"-conf\", \"/etc/coredns/Corefile\" ]\n volumeMounts:\n - name: config-volume\n mountPath: /etc/coredns\n{{- range .Values.extraSecrets }}\n - name: {{ .name }}\n mountPath: {{ .mountPath }}\n readOnly: true\n{{- end }}\n{{- if .Values.extraVolumeMounts }}\n{{- toYaml .Values.extraVolumeMounts | nindent 8}}\n{{- end }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n ports:\n{{ include \"coredns.containerPorts\" . 
| indent 8 }}\n livenessProbe:\n httpGet:\n path: /health\n port: 8080\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /ready\n port: 8181\n scheme: HTTP\n initialDelaySeconds: 10\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n {{- if .Values.preStopSleep }}\n lifecycle:\n preStop:\n exec:\n command: [\"/usr/bin/sleep\", \"{{ .Values.preStopSleep }}\"]\n {{- end }}\n volumes:\n - name: config-volume\n configMap:\n name: {{ template \"coredns.fullname\" . }}\n items:\n - key: Corefile\n path: Corefile\n {{ range .Values.zoneFiles }}\n - key: {{ .filename }}\n path: {{ .filename }}\n {{ end }}\n{{- range .Values.extraSecrets }}\n - name: {{ .name }}\n secret:\n secretName: {{ .name }}\n defaultMode: 400\n{{- end }}\n{{- if .Values.extraVolumes }}\n{{ toYaml .Values.extraVolumes | indent 8 }}\n{{- end }}\n",
"# hpa.yaml\n{{- if and (.Values.hpa.enabled) (not .Values.autoscaler.enabled) }}\n---\napiVersion: autoscaling/v2beta1\nkind: HorizontalPodAutoscaler\nmetadata:\n name: {{ template \"coredns.fullname\" . }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n{{- if .Values.customLabels }}\n{{ toYaml .Values.customLabels | indent 4 }}\n{{- end }}\nspec:\n scaleTargetRef:\n apiVersion: apps/v1\n kind: Deployment\n name: {{ template \"coredns.fullname\" . }}\n minReplicas: {{ .Values.hpa.minReplicas }}\n maxReplicas: {{ .Values.hpa.maxReplicas }}\n metrics:\n{{ toYaml .Values.hpa.metrics | indent 4 }}\n{{- end }}",
"# poddisruptionbudget.yaml\n{{- if .Values.podDisruptionBudget -}}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"coredns.fullname\" . }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n{{- if .Values.customLabels }}\n{{ toYaml .Values.customLabels | indent 4 }}\n{{- end }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n{{ toYaml .Values.podDisruptionBudget | indent 2 }}\n{{- end }}\n",
"# podsecuritypolicy.yaml\n{{- if .Values.rbac.pspEnable }}\n{{ if .Capabilities.APIVersions.Has \"policy/v1beta1\" }}\napiVersion: policy/v1beta1\n{{ else }}\napiVersion: extensions/v1beta1\n{{ end -}}\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"coredns.fullname\" . }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- else }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n {{- end }}\nspec:\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n # Add back CAP_NET_BIND_SERVICE so that coredns can run on port 53\n allowedCapabilities:\n - CAP_NET_BIND_SERVICE\n # Allow core volume types.\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n # Require the container to run without root privileges.\n rule: 'RunAsAny'\n seLinux:\n # This policy assumes the nodes are using AppArmor rather than SELinux.\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n{{- end }}\n",
"# service-metrics.yaml\n{{- if .Values.prometheus.service.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"coredns.fullname\" . }}-metrics\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n app.kubernetes.io/component: metrics\n{{- if .Values.customLabels }}\n{{ toYaml .Values.customLabels | indent 4 }}\n{{- end }}\n annotations:\n{{ toYaml .Values.prometheus.service.annotations | indent 4 }}\nspec:\n selector:\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n ports:\n - name: metrics\n port: 9153\n targetPort: 9153\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"coredns.fullname\" . }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n{{- if .Values.customLabels }}\n{{ toYaml .Values.customLabels | indent 4 }}\n{{- end }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\nspec:\n selector:\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n {{- if .Values.service.clusterIP }}\n clusterIP: {{ .Values.service.clusterIP }}\n {{- end }}\n {{- if .Values.service.externalIPs }}\n externalIPs:\n {{ toYaml .Values.service.externalIPs | indent 4 }}\n {{- end }}\n {{- if .Values.service.externalTrafficPolicy }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}\n {{- end }}\n {{- if .Values.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n {{- end }}\n ports:\n{{ include \"coredns.servicePorts\" . | indent 2 -}}\n type: {{ default \"ClusterIP\" .Values.serviceType }}\n",
"# serviceaccount-autoscaler.yaml\n{{- if and .Values.autoscaler.enabled .Values.rbac.create }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"coredns.fullname\" . }}-autoscaler\n namespace: {{ .Release.Namespace }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name }}-autoscaler\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}-autoscaler\n{{- if .Values.customLabels }}\n{{ toYaml .Values.customLabels | indent 4 }}\n{{- end }}\n{{- end }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"coredns.serviceAccountName\" . }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n{{- end }}\n",
"# servicemonitor.yaml\n{{- if .Values.prometheus.monitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ template \"coredns.fullname\" . }}\n {{- if .Values.prometheus.monitor.namespace }}\n namespace: {{ .Values.prometheus.monitor.namespace }}\n {{- end }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"CoreDNS\"\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n {{- if .Values.prometheus.monitor.additionalLabels }}\n{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }}\n {{- end }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n {{- if .Values.isClusterService }}\n k8s-app: {{ .Chart.Name | quote }}\n {{- end }}\n app.kubernetes.io/name: {{ template \"coredns.name\" . }}\n app.kubernetes.io/component: metrics\n endpoints:\n - port: metrics\n{{- end }}\n"
] | # Default values for coredns.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
  repository: coredns/coredns
  tag: "1.7.1"
  pullPolicy: IfNotPresent
replicaCount: 1
resources:
  limits:
    cpu: 100m
    memory: 128Mi
  requests:
    cpu: 100m
    memory: 128Mi
## Create HorizontalPodAutoscaler object.
##
# autoscaling:
# minReplicas: 1
# maxReplicas: 10
# metrics:
# - type: Resource
# resource:
# name: cpu
# targetAverageUtilization: 60
# - type: Resource
# resource:
# name: memory
# targetAverageUtilization: 60
rollingUpdate:
  maxUnavailable: 1
  maxSurge: 25%
# Under heavy load it can take longer than usual for a Pod's endpoint to be removed from the cluster.
# Setting `preStopSleep` delays termination of the pod by that many seconds, giving kube-proxy
# enough time to catch up.
# preStopSleep: 5
terminationGracePeriodSeconds: 30
podAnnotations: {}
# cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
serviceType: "ClusterIP"
prometheus:
  service:
    enabled: false
    annotations:
      prometheus.io/scrape: "true"
      prometheus.io/port: "9153"
  monitor:
    enabled: false
    additionalLabels: {}
    namespace: ""
service:
  # clusterIP: ""
  # loadBalancerIP: ""
  # externalIPs: []
  # externalTrafficPolicy: ""
  annotations: {}
serviceAccount:
  create: false
  # The name of the ServiceAccount to use
  # If not set and create is true, a name is generated using the fullname template
  name:
rbac:
  # If true, create & use RBAC resources
  create: true
  # If true, create and use PodSecurityPolicy
  pspEnable: false
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  # name:
# isClusterService specifies whether the chart should be deployed as a cluster-service or a normal k8s app.
isClusterService: true
# Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName not set.
priorityClassName: ""
# Default zone is what Kubernetes recommends:
# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options
servers:
- zones:
  - zone: .
  port: 53
  plugins:
  - name: errors
  # Serves a /health endpoint on :8080, required for livenessProbe
  - name: health
    configBlock: |-
      lameduck 5s
  # Serves a /ready endpoint on :8181, required for readinessProbe
  - name: ready
  # Required to query kubernetes API for data
  - name: kubernetes
    parameters: cluster.local in-addr.arpa ip6.arpa
    configBlock: |-
      pods insecure
      fallthrough in-addr.arpa ip6.arpa
      ttl 30
  # Serves a /metrics endpoint on :9153, required for serviceMonitor
  - name: prometheus
    parameters: 0.0.0.0:9153
  - name: forward
    parameters: . /etc/resolv.conf
  - name: cache
    parameters: 30
  - name: loop
  - name: reload
  - name: loadbalance
# Complete example with all the options:
# - zones: # the `zones` block can be left out entirely, defaults to "."
# - zone: hello.world. # optional, defaults to "."
# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS)
# - zone: foo.bar.
# scheme: dns://
# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol
# # Note that this will not work if you are also exposing tls or grpc on the same server
# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS)
# plugins: # the plugins to use for this server block
# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it!
# parameters: foo bar # list of parameters after the plugin
# configBlock: |- # if the plugin supports extra block style config, supply it here
# hello world
# foo bar
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: foo.bar.com/role
# operator: In
# values:
# - master
affinity: {}
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
# tolerations:
# - key: foo.bar.com/role
# operator: Equal
# value: master
# effect: NoSchedule
tolerations: []
# https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
podDisruptionBudget: {}
# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/
zoneFiles: []
# - filename: example.db
# domain: example.com
# contents: |
# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600
# example.com. IN NS b.iana-servers.net.
# example.com. IN NS a.iana-servers.net.
# example.com. IN A 192.168.99.102
# *.example.com. IN A 192.168.99.102
# optional array of extra volumes to create
extraVolumes: []
# - name: some-volume-name
# emptyDir: {}
# optional array of mount points for extraVolumes
extraVolumeMounts: []
# - name: some-volume-name
# mountPath: /etc/wherever
# optional array of secrets to mount inside coredns container
# possible use case: needing a secure connection to an etcd backend
extraSecrets: []
# - name: etcd-client-certs
# mountPath: /etc/coredns/tls/etcd
# - name: some-fancy-secret
# mountPath: /etc/wherever
# Custom labels to apply to the Deployment, Pod, Service, and ServiceMonitor (and the autoscaler resources when enabled).
customLabels: {}
## Alternative configuration for HPA deployment if wanted
#
hpa:
  enabled: false
  minReplicas: 1
  maxReplicas: 2
  metrics: {}
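## The hpa.yaml template above renders `hpa.metrics` verbatim into an autoscaling/v2beta1
## HorizontalPodAutoscaler, so entries follow that API's schema. A minimal sketch, mirroring
## the commented `autoscaling` example earlier in this file (values are illustrative only):
# metrics:
# - type: Resource
#   resource:
#     name: cpu
#     targetAverageUtilization: 60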
## Configure a cluster-proportional-autoscaler for coredns
# See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler
autoscaler:
  # Enable the cluster-proportional-autoscaler
  enabled: false
  # Number of cores in the cluster per coredns replica
  coresPerReplica: 256
  # Number of nodes in the cluster per coredns replica
  nodesPerReplica: 16
  # Min size of replicaCount
  min: 0
  # Max size of replicaCount (default of 0 is no max)
  max: 0
  # Whether to include unschedulable nodes in the nodes/cores calculations - this requires version 1.8.0+ of the autoscaler
  includeUnschedulableNodes: false
  # If true does not allow single points of failure to form
  preventSinglePointFailure: true
  image:
    repository: k8s.gcr.io/cluster-proportional-autoscaler-amd64
    tag: "1.8.0"
    pullPolicy: IfNotPresent
  # Optional priority class to be used for the autoscaler pods. priorityClassName used if not set.
  priorityClassName: ""
  # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
  affinity: {}
  # Node labels for pod assignment
  # Ref: https://kubernetes.io/docs/user-guide/node-selection/
  nodeSelector: {}
  # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
  tolerations: []
  # resources for autoscaler pod
  resources:
    requests:
      cpu: "20m"
      memory: "10Mi"
    limits:
      cpu: "20m"
      memory: "10Mi"
  # Options for autoscaler configmap
  configmap:
    ## Annotations for the coredns-autoscaler configmap
    # e.g. strategy.spinnaker.io/versioned: "false" to ensure the configmap isn't renamed
    annotations: {}
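## For reference, the cluster-proportional-autoscaler consumes the values above as "linear"
## controller params, roughly like the sketch below (based on the upstream autoscaler
## documentation rather than on this chart's configmap template; field names assumed):
# linear: '{"coresPerReplica":256,"nodesPerReplica":16,"min":0,"max":0,"preventSinglePointFailure":true,"includeUnschedulableNodes":false}'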
|
rabbitmq-ha | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"rabbitmq-ha.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"rabbitmq-ha.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"rabbitmq-ha.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"rabbitmq-ha.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"rabbitmq-ha.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGenerate chart secret name\n*/}}\n{{- define \"rabbitmq-ha.secretName\" -}}\n{{ default (include \"rabbitmq-ha.fullname\" .) .Values.existingSecret }}\n{{- end -}}\n\n{{/*\nGenerate chart ssl secret name\n*/}}\n{{- define \"rabbitmq-ha.certSecretName\" -}}\n{{ default (print (include \"rabbitmq-ha.fullname\" .) \"-cert\") .Values.rabbitmqCert.existingSecret }}\n{{- end -}}\n\n{{/*\nDefines a JSON file containing definitions of all broker objects (queues, exchanges, bindings, \nusers, virtual hosts, permissions and parameters) to load by the management plugin.\n*/}}\n{{- define \"rabbitmq-ha.definitions\" -}}\n{\n \"global_parameters\": [\n{{ .Values.definitions.globalParameters | indent 4 }}\n ],\n \"users\": [\n {\n \"name\": {{ .Values.managementUsername | quote }},\n \"password\": {{ .Values.managementPassword | quote }},\n \"tags\": \"management\"\n },\n {\n \"name\": {{ .Values.rabbitmqUsername | quote }},\n \"password\": {{ .Values.rabbitmqPassword | quote }},\n \"tags\": \"administrator\"\n }{{- if .Values.definitions.users -}},\n{{ .Values.definitions.users | indent 4 }}\n{{- end }}\n ],\n \"vhosts\": [\n {\n \"name\": {{ .Values.rabbitmqVhost | quote }}\n }{{- if .Values.definitions.vhosts -}},\n{{ .Values.definitions.vhosts | indent 4 }}\n{{- end }}\n ],\n \"permissions\": [\n {\n \"user\": {{ .Values.rabbitmqUsername | quote }},\n \"vhost\": {{ .Values.rabbitmqVhost | quote }},\n \"configure\": \".*\",\n \"read\": \".*\",\n \"write\": \".*\"\n }{{- if .Values.definitions.permissions -}},\n{{ .Values.definitions.permissions | indent 4 }}\n{{- end }}\n ],\n \"topic_permissions\": [\n{{ .Values.definitions.topicPermissions | indent 4 }}\n ],\n \"parameters\": [\n{{ .Values.definitions.parameters| indent 4 }}\n ],\n \"policies\": [\n{{ .Values.definitions.policies | indent 4 }}\n ],\n \"queues\": [\n{{ .Values.definitions.queues | indent 4 }}\n ],\n \"exchanges\": [\n{{ .Values.definitions.exchanges | indent 4 }}\n ],\n \"bindings\": [\n{{ .Values.definitions.bindings| indent 4 }}\n ]\n}\n{{- end -}}\n",
"# alerts.yaml\n{{ if and .Values.prometheus.operator.enabled .Values.prometheus.operator.alerts.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n name: {{ .Release.Name }}-rabbitmq-alerts\n namespace: {{ .Values.prometheus.operator.serviceMonitor.namespace }}\n labels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n chart: {{ template \"rabbitmq-ha.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.prometheus.operator.serviceMonitor.selector }}\n{{ toYaml .Values.prometheus.operator.serviceMonitor.selector | indent 4 }}\n{{- end }}\n{{- if .Values.prometheus.operator.alerts.selector }}\n{{ toYaml .Values.prometheus.operator.alerts.selector | indent 4 }}\n{{- end }}\nspec:\n groups:\n - name: {{ template \"rabbitmq-ha.fullname\" . }}.rules\n rules:\n - alert: RabbitMqClusterNodeDown\n expr: rabbitmq_up{service=\"{{ template \"rabbitmq-ha.fullname\" . }}\"} == 0\n for: 5m\n labels:\n installed_by: {{ .Release.Name }}\n severity: critical\n{{- if .Values.prometheus.operator.alerts.labels }}\n{{ toYaml .Values.prometheus.operator.alerts.labels | indent 10 }}\n{{- end }}\n annotations:\n description: RabbitMQ {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod}}`}} is down\n summary: RabbitMQ Node Is Down\n - alert: RabbitMqClusterNotAllNodesRunning\n expr: sum(rabbitmq_up{service=\"{{ template \"rabbitmq-ha.fullname\" . }}\"}) by (service) < {{ .Values.replicaCount }}\n for: 5m\n labels:\n installed_by: {{ .Release.Name }}\n severity: critical\n team: devops\n annotations:\n description: Some RabbitMQ Cluster Nodes Are Down in Service {{`{{ $labels.namespace }}`}}/{{`{{ $labels.service}}`}}\n summary: Some RabbitMQ Cluster Nodes Are Down in Service {{`{{ $labels.namespace }}`}}/{{`{{ $labels.service}}`}}\n - alert: RabbitMqDiskSpaceAlarm\n expr: rabbitmq_node_disk_free_alarm{service=\"{{ template \"rabbitmq-ha.fullname\" . }}\"} == 1\n for: 1m\n labels:\n installed_by: {{ .Release.Name }}\n severity: critical\n{{- if .Values.prometheus.operator.alerts.labels }}\n{{ toYaml .Values.prometheus.operator.alerts.labels | indent 10 }}\n{{- end }}\n annotations:\n description: RabbitMQ {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod}}`}} Disk Space Alarm is going off. Which means the node hit highwater mark and has cut off network connectivity, see RabbitMQ WebUI\n summary: RabbitMQ is Out of Disk Space\n - alert: RabbitMqMemoryAlarm\n expr: rabbitmq_node_mem_alarm{service=\"{{ template \"rabbitmq-ha.fullname\" . }}\"} == 1\n for: 1m\n labels:\n installed_by: {{ .Release.Name }}\n severity: critical\n{{- if .Values.prometheus.operator.alerts.labels }}\n{{ toYaml .Values.prometheus.operator.alerts.labels | indent 10 }}\n{{- end }}\n annotations:\n description: RabbitMQ {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod}}`}} High Memory Alarm is going off. Which means the node hit highwater mark and has cut off network connectivity, see RabbitMQ WebUI\n summary: RabbitMQ is Out of Memory\n - alert: RabbitMqMemoryUsageHigh\n expr: (rabbitmq_node_mem_used{service=\"{{ template \"rabbitmq-ha.fullname\" . }}\"} / rabbitmq_node_mem_limit{service=\"{{ template \"rabbitmq-ha.fullname\" . 
}}\"}) > .9\n for: 1m\n labels:\n installed_by: {{ .Release.Name }}\n severity: critical\n{{- if .Values.prometheus.operator.alerts.labels }}\n{{ toYaml .Values.prometheus.operator.alerts.labels | indent 10 }}\n{{- end }}\n annotations:\n description: RabbitMQ {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod}}`}} Memory Usage > 90%\n summary: RabbitMQ Node > 90% Memory Usage\n - alert: RabbitMqFileDescriptorsLow\n expr: (rabbitmq_fd_used{service=\"{{ template \"rabbitmq-ha.fullname\" . }}\"} / rabbitmq_fd_total{service=\"{{ template \"rabbitmq-ha.fullname\" . }}\"}) > .9\n for: 5m\n labels:\n installed_by: {{ .Release.Name }}\n severity: critical\n{{- if .Values.prometheus.operator.alerts.labels }}\n{{ toYaml .Values.prometheus.operator.alerts.labels | indent 10 }}\n{{- end }}\n annotations:\n description: RabbitMQ {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod}}`}} File Descriptor Usage > 90%\n summary: RabbitMQ Low File Descriptor Available\n - alert: RabbitMqDiskSpaceLow\n expr: predict_linear(rabbitmq_node_disk_free{service=\"{{ template \"rabbitmq-ha.fullname\" . }}\"}[15m], 1 * 60 * 60) < rabbitmq_node_disk_free_limit{service=\"{{ template \"rabbitmq-ha.fullname\" . }}\"}\n for: 5m\n labels:\n installed_by: {{ .Release.Name }}\n severity: critical\n{{- if .Values.prometheus.operator.alerts.labels }}\n{{ toYaml .Values.prometheus.operator.alerts.labels | indent 10 }}\n{{- end }}\n annotations:\n description: RabbitMQ {{`{{ $labels.namespace }}`}}/{{`{{ $labels.pod}}`}} will hit disk limit in the next hr based on last 15 mins trend.\n summary: RabbitMQ is Low on Disk Space and will Run Out in the next hour\n{{ end }}\n",
"# configmap.yaml\n{{- if not .Values.existingConfigMap }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"rabbitmq-ha.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n chart: {{ template \"rabbitmq-ha.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\ndata:\n enabled_plugins: |\n [\n{{ .Values.extraPlugins | indent 6 }}\n {{- if .Values.rabbitmqLDAPPlugin.enabled }}\n rabbitmq_auth_backend_ldap,\n {{- end }}\n\n {{- if .Values.rabbitmqMQTTPlugin.enabled }}\n rabbitmq_mqtt,\n {{- end }}\n\n {{- if .Values.rabbitmqWebMQTTPlugin.enabled }}\n rabbitmq_web_mqtt,\n {{- end }}\n\n {{- if .Values.rabbitmqSTOMPPlugin.enabled }}\n rabbitmq_stomp,\n {{- end }}\n\n {{- if .Values.rabbitmqWebSTOMPPlugin.enabled }}\n rabbitmq_web_stomp,\n {{- end }}\n\n {{- if .Values.rabbitmqAuth.enabled }}\n rabbitmq_auth_mechanism_ssl,\n {{- end }}\n\n {{- if .Values.rabbitmqAuthHTTP.enabled }}\n rabbitmq_auth_backend_http,\n {{- end }}\n\n {{- if .Values.rabbitmqPrometheusPlugin.enabled }}\n rabbitmq_prometheus,\n {{- end }}\n\n rabbitmq_consistent_hash_exchange,\n rabbitmq_management,\n rabbitmq_peer_discovery_k8s\n \n ].\n\n rabbitmq.conf: |\n ## RabbitMQ configuration\n ## Ref: https://github.com/rabbitmq/rabbitmq-server/blob/master/docs/rabbitmq.conf.example\n\n ## Authentification\n {{- if .Values.rabbitmqAuth.enabled }}\n{{ .Values.rabbitmqAuth.config | indent 4 }}\n {{- end }}\n\n ## Clustering\n cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s\n cluster_formation.k8s.host = kubernetes.default.svc.{{ .Values.clusterDomain }}\n cluster_formation.k8s.address_type = hostname\n cluster_formation.node_cleanup.interval = 10\n # Set to false if automatic cleanup of absent nodes is desired.\n # This can be dangerous, see http://www.rabbitmq.com/cluster-formation.html#node-health-checks-and-cleanup.\n cluster_formation.node_cleanup.only_log_warning = true\n cluster_partition_handling = {{ .Values.rabbitmqClusterPartitionHandling }}\n\n {{- if eq .Values.rabbitmqUsername \"guest\" }}\n ## The default \"guest\" user is only permitted to access the server\n ## via a loopback interface (e.g. 
localhost)\n loopback_users.guest = false\n {{- end }}\n\n {{- if semverCompare \">=3.8.2\" .Chart.AppVersion }}\n load_definitions = /etc/definitions/definitions.json\n {{ else }}\n management.load_definitions = /etc/definitions/definitions.json\n {{- end }}\n\n ## Memory-based Flow Control threshold\n vm_memory_high_watermark.{{ .Values.rabbitmqMemoryHighWatermarkType }} = {{ .Values.rabbitmqMemoryHighWatermark }}\n\n ## Auth HTTP Backend Plugin\n {{- if .Values.rabbitmqAuthHTTP.enabled }}\n{{ .Values.rabbitmqAuthHTTP.config | indent 4 }}\n {{- end }}\n\n ## LDAP Plugin\n {{- if .Values.rabbitmqLDAPPlugin.enabled }}\n{{ .Values.rabbitmqLDAPPlugin.config | indent 4 }}\n {{- end }}\n\n ## MQTT Plugin\n {{- if .Values.rabbitmqMQTTPlugin.enabled }}\n{{ .Values.rabbitmqMQTTPlugin.config | indent 4 }}\n {{- end }}\n\n ## Web MQTT Plugin\n {{- if .Values.rabbitmqWebMQTTPlugin.enabled }}\n{{ .Values.rabbitmqWebMQTTPlugin.config | indent 4 }}\n {{- end }}\n\n ## STOMP Plugin\n {{- if .Values.rabbitmqSTOMPPlugin.enabled }}\n{{ .Values.rabbitmqSTOMPPlugin.config | indent 4 }}\n {{- end }}\n\n ## Web STOMP Plugin\n {{- if .Values.rabbitmqWebSTOMPPlugin.enabled }}\n{{ .Values.rabbitmqWebSTOMPPlugin.config | indent 4 }}\n {{- end }}\n\n ## Prometheus Plugin\n {{- if .Values.rabbitmqPrometheusPlugin.enabled }}\n{{ .Values.rabbitmqPrometheusPlugin.config | indent 4 }}\n prometheus.path = {{ .Values.rabbitmqPrometheusPlugin.path }}\n prometheus.tcp.port = {{ .Values.rabbitmqPrometheusPlugin.port }}\n {{- end }}\n\n ## AMQPS support\n {{- if .Values.rabbitmqAmqpsSupport.enabled }}\n{{ .Values.rabbitmqAmqpsSupport.config | indent 4 }}\n {{- end }}\n\n{{ .Values.extraConfig | indent 4 }}\n\n{{- end }}\n\n{{- if .Values.advancedConfig }}\n advanced.config: |\n{{ .Values.advancedConfig | indent 4 }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"rabbitmq-ha.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n chart: {{ template \"rabbitmq-ha.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\n annotations:\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- if .Values.ingress.hostName }}\n - host: {{ .Values.ingress.hostName }}\n http:\n {{- else }}\n - http:\n {{- end }}\n paths:\n - path: {{ .Values.ingress.path }}\n backend:\n serviceName: {{ template \"rabbitmq-ha.fullname\" . }}\n servicePort: {{ .Values.rabbitmqManagerPort }}\n{{- if .Values.ingress.tls }}\n tls:\n - secretName: {{ .Values.ingress.tlsSecret }}\n {{- if .Values.ingress.hostName }}\n hosts:\n - {{ .Values.ingress.hostName }}\n {{- end }}\n{{- end }}\n{{- end }}\n",
"# pdb.yaml\n{{- if .Values.podDisruptionBudget -}}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"rabbitmq-ha.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n chart: {{ template \"rabbitmq-ha.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n release: {{ .Release.Name }}\n{{ toYaml .Values.podDisruptionBudget | indent 2 }}\n{{- end -}}\n",
"# role.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n labels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n chart: {{ template \"rabbitmq-ha.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\n name: {{ template \"rabbitmq-ha.fullname\" . }}\n namespace: {{ .Release.Namespace }}\nrules:\n - apiGroups: [\"\"]\n resources: [\"endpoints\"]\n verbs: [\"get\"]\n{{- end }}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n labels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n chart: {{ template \"rabbitmq-ha.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\n name: {{ template \"rabbitmq-ha.fullname\" . }}\n namespace: {{ .Release.Namespace }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"rabbitmq-ha.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"rabbitmq-ha.fullname\" . }}\n{{- end }}\n",
"# secret.yaml\n{{ if not .Values.existingSecret }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"rabbitmq-ha.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n chart: {{ template \"rabbitmq-ha.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\ntype: Opaque\ndata:\n {{- $password := .Values.rabbitmqPassword | default (randAlphaNum 24 | nospace) -}}\n {{- $_ := set .Values \"rabbitmqPassword\" $password }}\n {{- $managementPassword := .Values.managementPassword | default (randAlphaNum 24 | nospace) -}}\n {{- $_ := set .Values \"managementPassword\" $managementPassword }}\n rabbitmq-username: {{ .Values.rabbitmqUsername | b64enc | quote }}\n rabbitmq-password: {{ .Values.rabbitmqPassword | b64enc | quote }}\n rabbitmq-management-username: {{ .Values.managementUsername | b64enc | quote }}\n rabbitmq-management-password: {{ .Values.managementPassword | b64enc | quote }}\n rabbitmq-erlang-cookie: {{ .Values.rabbitmqErlangCookie | default (randAlphaNum 32) | b64enc | quote }}\n {{ .Values.definitionsSource }}: {{ include \"rabbitmq-ha.definitions\" . | b64enc | quote }}\n{{ end }}\n{{- if and .Values.rabbitmqCert.enabled (not .Values.rabbitmqCert.existingSecret) }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"rabbitmq-ha.fullname\" . }}-cert\n labels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n chart: {{ template \"rabbitmq-ha.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\ntype: Opaque\ndata:\n cacert.pem: {{ .Values.rabbitmqCert.cacertfile | quote }}\n cert.pem: {{ .Values.rabbitmqCert.certfile | quote }}\n key.pem: {{ .Values.rabbitmqCert.keyfile | quote }}\n{{- end }}\n",
"# service-discovery.yaml\napiVersion: v1\nkind: Service\nmetadata:\n{{- if .Values.service.discovery.annotations }}\n annotations:\n{{ tpl (toYaml .Values.discovery.annotations) . | indent 4 }}\n{{- else if and (.Values.service.annotations) (not .Values.service.discovery.separateAnnotations) }}\n annotations:\n{{ tpl (toYaml .Values.service.annotations) . | indent 4 }}\n{{- end }}\n name: {{ printf \"%s-discovery\" (include \"rabbitmq-ha.fullname\" .) | trunc 63 | trimSuffix \"-\" }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n chart: {{ template \"rabbitmq-ha.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n clusterIP: None\n ports:\n - name: http\n protocol: TCP\n port: {{ .Values.rabbitmqManagerPort }}\n targetPort: http\n - name: amqp\n protocol: TCP\n port: {{ .Values.rabbitmqNodePort }}\n targetPort: amqp\n - name: epmd\n protocol: TCP\n port: {{ .Values.rabbitmqEpmdPort }}\n targetPort: epmd\n {{- if .Values.rabbitmqSTOMPPlugin.enabled }}\n - name: stomp-tcp\n protocol: TCP\n port: 61613\n targetPort: stomp-tcp\n - name: stomp-ssl\n protocol: TCP\n port: 61614\n targetPort: stomp-ssl\n {{- end }}\n {{- if .Values.rabbitmqWebSTOMPPlugin.enabled }}\n - name: stomp-ws\n protocol: TCP\n port: 15674\n targetPort: stomp-ws\n {{- end }}\n {{- if .Values.rabbitmqMQTTPlugin.enabled }}\n - name: mqtt-tcp\n protocol: TCP\n port: 1883\n targetPort: mqtt-tcp\n - name: mqtt-ssl\n protocol: TCP\n port: 8883\n targetPort: mqtt-ssl\n {{- end }}\n {{- if .Values.rabbitmqWebMQTTPlugin.enabled }}\n - name: mqtt-ws\n protocol: TCP\n port: 15675\n targetPort: mqtt-ws\n {{- end }}\n {{- if .Values.rabbitmqAmqpsSupport.enabled }}\n - name: amqps\n protocol: TCP\n port: {{ .Values.rabbitmqAmqpsSupport.amqpsNodePort }}\n targetPort: amqps\n {{- end }}\n publishNotReadyAddresses: true\n selector:\n app: {{ template \"rabbitmq-ha.name\" . }}\n release: {{ .Release.Name }}\n type: ClusterIP\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n{{- if .Values.service.annotations }}\n annotations:\n{{ tpl (toYaml .Values.service.annotations) . | indent 4 }}\n{{- end }}\n name: {{ template \"rabbitmq-ha.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n chart: {{ template \"rabbitmq-ha.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\nspec:\n{{- if ne .Values.service.type \"NodePort\" }}\n{{- if and (eq .Values.service.type \"LoadBalancer\") (ne .Values.service.clusterIP \"None\") }}\n clusterIP: \"{{ .Values.service.clusterIP }}\"\n{{- end }}\n{{- end }}\n{{- if .Values.service.externalIPs }}\n externalIPs:\n{{ toYaml .Values.service.externalIPs | indent 4 }}\n{{- end }}\n{{- if .Values.service.loadBalancerIP }}\n loadBalancerIP: \"{{ .Values.service.loadBalancerIP }}\"\n{{- end }}\n{{- if .Values.service.externalTrafficPolicy }}\n externalTrafficPolicy: \"{{ .Values.service.externalTrafficPolicy }}\"\n{{- end }}\n{{- if .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}\n{{- end }}\n ports:\n {{- if .Values.rabbitmqAmqpsSupport.enabled }}\n - name: amqps\n protocol: TCP\n port: {{ .Values.rabbitmqAmqpsSupport.amqpsNodePort }}\n targetPort: amqps\n {{- end }}\n - name: http\n protocol: TCP\n port: {{ .Values.rabbitmqManagerPort }}\n {{- if eq .Values.service.type \"NodePort\" \"LoadBalancer\" }}\n nodePort: {{ .Values.service.managerNodePort }}\n {{- end }}\n targetPort: http\n - name: amqp\n protocol: TCP\n port: {{ .Values.rabbitmqNodePort }}\n {{- if eq .Values.service.type \"NodePort\" \"LoadBalancer\" }}\n nodePort: {{ .Values.service.amqpNodePort }}\n {{- end }}\n targetPort: amqp\n - name: epmd\n protocol: TCP\n port: {{ .Values.rabbitmqEpmdPort }}\n {{- if eq .Values.service.type \"NodePort\" \"LoadBalancer\" }}\n nodePort: {{ .Values.service.epmdNodePort }}\n {{- end }}\n targetPort: epmd\n {{- if .Values.rabbitmqSTOMPPlugin.enabled }}\n - name: stomp-tcp\n protocol: TCP\n port: 61613\n targetPort: stomp-tcp\n - name: stomp-ssl\n protocol: TCP\n port: 61614\n targetPort: stomp-ssl\n {{- end }}\n {{- if .Values.rabbitmqWebSTOMPPlugin.enabled }}\n - name: stomp-ws\n protocol: TCP\n port: 15674\n targetPort: stomp-ws\n {{- end }}\n {{- if .Values.rabbitmqMQTTPlugin.enabled }}\n - name: mqtt-tcp\n protocol: TCP\n port: 1883\n targetPort: mqtt-tcp\n - name: mqtt-ssl\n protocol: TCP\n port: 8883\n targetPort: mqtt-ssl\n {{- end }}\n {{- if .Values.rabbitmqWebMQTTPlugin.enabled }}\n - name: mqtt-ws\n protocol: TCP\n port: 15675\n targetPort: mqtt-ws\n {{- end }}\n {{ if .Values.prometheus.exporter.enabled }}\n - name: exporter\n protocol: TCP\n port: {{ .Values.prometheus.exporter.port }}\n targetPort: exporter\n {{ end }}\n {{- if .Values.rabbitmqPrometheusPlugin.enabled }}\n - name: metrics\n protocol: TCP\n {{- if eq .Values.service.type \"NodePort\" \"LoadBalancer\" }}\n nodePort: {{ .Values.rabbitmqPrometheusPlugin.nodePort }}\n {{- end }}\n port: {{ .Values.rabbitmqPrometheusPlugin.port }}\n targetPort: metrics\n {{- end }}\n selector:\n app: {{ template \"rabbitmq-ha.name\" . }}\n release: {{ .Release.Name }}\n type: {{ .Values.service.type }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n chart: {{ template \"rabbitmq-ha.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\n name: {{ template \"rabbitmq-ha.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\nautomountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}\n{{- end }}\n",
"# servicemonitor.yaml\n{{ if and (or .Values.prometheus.exporter.enabled .Values.rabbitmqPrometheusPlugin.enabled) .Values.prometheus.operator.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ template \"rabbitmq-ha.fullname\" . }}\n namespace: {{ .Values.prometheus.operator.serviceMonitor.namespace }}\n labels:\n{{- if .Values.prometheus.operator.serviceMonitor.selector }}\n{{ toYaml .Values.prometheus.operator.serviceMonitor.selector | indent 4 }}\n{{- end }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n release: {{ .Release.Name }}\n endpoints:\n - interval: {{ .Values.prometheus.operator.serviceMonitor.interval }}\n {{- if .Values.prometheus.operator.serviceMonitor.scrapeTimeout }}\n scrapeTimeout: {{ .Values.prometheus.operator.serviceMonitor.scrapeTimeout }}\n {{- end }}\n {{- if .Values.rabbitmqPrometheusPlugin.enabled }}\n port: metrics\n path: {{ .Values.rabbitmqPrometheusPlugin.path }}\n {{- else }}\n port: exporter\n {{- end }}\n namespaceSelector:\n any: true\n{{- end }}\n",
"# statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ template \"rabbitmq-ha.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n chart: {{ template \"rabbitmq-ha.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\n{{- if .Values.statefulSetAnnotations }}\n annotations:\n{{- range $key, $value := .Values.statefulSetAnnotations }}\n {{ $key }}: {{ $value | quote }}\n{{- end }}\n{{- end }}\nspec:\n podManagementPolicy: {{ .Values.podManagementPolicy }}\n serviceName: {{ template \"rabbitmq-ha.fullname\" . }}-discovery\n replicas: {{ .Values.replicaCount }}\n updateStrategy:\n type: {{ .Values.updateStrategy }}\n selector:\n matchLabels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n release: {{ .Release.Name }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 8 }}\n{{- end }}\n annotations:\n {{- if not .Values.existingConfigMap }}\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n {{- end }}\n {{- if not .Values.existingSecret }}\n checksum/secret: {{ include (print $.Template.BasePath \"/secret.yaml\") . | sha256sum }}\n {{- end }}\n {{- if and .Values.prometheus.exporter.enabled (not .Values.prometheus.operator.enabled) }}\n prometheus.io/scrape: \"true\"\n prometheus.io/port: {{ .Values.prometheus.exporter.port | quote }}\n {{- end }}\n {{- if and .Values.rabbitmqPrometheusPlugin.enabled (not .Values.prometheus.operator.enabled) }}\n prometheus.io/scrape: \"true\"\n prometheus.io/port: {{ .Values.rabbitmqPrometheusPlugin.port | quote }}\n prometheus.io/path: {{ .Values.rabbitmqPrometheusPlugin.path | quote }}\n {{- end }}\n{{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n{{- end }}\n spec:\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n {{- end }}\n {{- end }}\n terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}\n securityContext:\n{{ toYaml .Values.securityContext | indent 10 }}\n serviceAccountName: {{ template \"rabbitmq-ha.serviceAccountName\" . }}\n initContainers:\n{{- if .Values.initContainer.enabled }}\n - name: bootstrap\n image: {{ .Values.busyboxImage.repository}}:{{ .Values.busyboxImage.tag}}\n imagePullPolicy: {{ .Values.busyboxImage.pullPolicy }}\n securityContext:\n{{- toYaml .Values.initContainer.securityContext | nindent 12 }}\n command: ['sh']\n args:\n - \"-c\"\n - |\n set -ex\n cp /configmap/* /etc/rabbitmq\n echo \"${RABBITMQ_ERLANG_COOKIE}\" > /var/lib/rabbitmq/.erlang.cookie\n {{- if .Values.forceBoot }}\n if [ -d \"${RABBITMQ_MNESIA_DIR}\" ]; then\n touch \"${RABBITMQ_MNESIA_DIR}/force_load\"\n fi\n {{- end }}\n {{- if and (.Values.securityContext.runAsUser) (.Values.securityContext.fsGroup) (.Values.initContainer.chownFiles) }}\n chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /var/lib/rabbitmq/\n chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /etc/rabbitmq\n {{- end }}\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.name\n - name: RABBITMQ_MNESIA_DIR\n value: /var/lib/rabbitmq/mnesia/rabbit@$(POD_NAME).{{ template \"rabbitmq-ha.fullname\" . 
}}-discovery.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}\n - name: RABBITMQ_ERLANG_COOKIE\n valueFrom:\n secretKeyRef:\n name: {{ template \"rabbitmq-ha.secretName\" . }}\n key: rabbitmq-erlang-cookie\n resources:\n{{ toYaml .Values.initContainer.resources | indent 12 }}\n volumeMounts:\n - name: configmap\n mountPath: /configmap\n - name: config\n mountPath: /etc/rabbitmq\n - name: {{ .Values.persistentVolume.name }}\n mountPath: /var/lib/rabbitmq\n{{- end}}\n{{- if .Values.extraInitContainers }}\n{{ tpl (toYaml .Values.extraInitContainers) . | indent 8 }}\n{{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: {{ .Values.image.repository }}:{{ .Values.image.tag }}\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: epmd\n protocol: TCP\n containerPort: 4369\n - name: amqp\n protocol: TCP\n containerPort: 5672\n - name: http\n protocol: TCP\n containerPort: 15672\n {{- if .Values.rabbitmqSTOMPPlugin.enabled }}\n - name: stomp-tcp\n protocol: TCP\n containerPort: 61613\n - name: stomp-ssl\n protocol: TCP\n containerPort: 61614\n {{- end }}\n {{- if .Values.rabbitmqWebSTOMPPlugin.enabled }}\n - name: stomp-ws\n protocol: TCP\n containerPort: 15674\n {{- end }}\n {{- if .Values.rabbitmqMQTTPlugin.enabled }}\n - name: mqtt-tcp\n protocol: TCP\n containerPort: 1883\n - name: mqtt-ssl\n protocol: TCP\n containerPort: 8883\n {{- end }}\n {{- if .Values.rabbitmqWebMQTTPlugin.enabled }}\n - name: mqtt-ws\n protocol: TCP\n containerPort: 15675\n {{- end }}\n {{- if .Values.rabbitmqAmqpsSupport.enabled }}\n - name: amqps\n protocol: TCP\n containerPort: 5671\n {{- end }}\n {{- if .Values.rabbitmqPrometheusPlugin.enabled }}\n - name: metrics\n protocol: TCP\n containerPort: {{ .Values.rabbitmqPrometheusPlugin.port }}\n {{- end }}\n livenessProbe:\n {{- toYaml .Values.livenessProbe | trim | nindent 12 }}\n readinessProbe:\n {{- toYaml .Values.readinessProbe | trim | nindent 12 }}\n {{- if .Values.lifecycle }}\n lifecycle:\n {{- toYaml .Values.lifecycle | trim | nindent 12 }}\n {{- end }}\n env:\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.name\n - name: RABBITMQ_USE_LONGNAME\n value: \"true\"\n - name: RABBITMQ_NODENAME\n value: rabbit@$(MY_POD_NAME).{{ template \"rabbitmq-ha.fullname\" . }}-discovery.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}\n - name: K8S_HOSTNAME_SUFFIX\n value: .{{ template \"rabbitmq-ha.fullname\" . }}-discovery.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}\n - name: K8S_SERVICE_NAME\n value: {{ template \"rabbitmq-ha.fullname\" . }}-discovery\n - name: RABBITMQ_ERLANG_COOKIE\n valueFrom:\n secretKeyRef:\n name: {{ template \"rabbitmq-ha.secretName\" . }}\n key: rabbitmq-erlang-cookie\n - name: RABBIT_MANAGEMENT_USER\n valueFrom:\n secretKeyRef:\n name: {{ template \"rabbitmq-ha.secretName\" . }}\n key: rabbitmq-management-username\n - name: RABBIT_MANAGEMENT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"rabbitmq-ha.secretName\" . 
}}\n key: rabbitmq-management-password\n {{- if .Values.rabbitmqHipeCompile }}\n - name: RABBITMQ_HIPE_COMPILE\n value: {{ .Values.rabbitmqHipeCompile | quote }}\n {{- end }}\n {{- range $key, $value := .Values.env }}\n - name: {{ $key }}\n value: {{ $value | quote }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumeMounts:\n - name: {{ .Values.persistentVolume.name }}\n mountPath: /var/lib/rabbitmq\n - name: config\n mountPath: /etc/rabbitmq\n - name: definitions\n mountPath: /etc/definitions\n readOnly: true\n {{- if .Values.rabbitmqCert.enabled }}\n - name: cert\n mountPath: /etc/cert\n {{- end }}\n {{- if .Values.extraVolumeMounts }}\n{{ toYaml .Values.extraVolumeMounts | indent 12 }}\n {{- end }}\n {{ if .Values.prometheus.exporter.enabled }}\n - name: {{ .Chart.Name }}-exporter\n image: {{ .Values.prometheus.exporter.image.repository }}:{{ .Values.prometheus.exporter.image.tag }}\n imagePullPolicy: {{ .Values.prometheus.exporter.image.pullPolicy }}\n ports:\n - name: exporter\n protocol: TCP\n containerPort: {{ .Values.prometheus.exporter.port }}\n env:\n - name: PUBLISH_PORT\n value: \"{{ .Values.prometheus.exporter.port }}\"\n {{ if .Values.prometheus.exporter.capabilities }}\n - name: RABBIT_CAPABILITIES\n value: \"{{ .Values.prometheus.exporter.capabilities }}\"\n {{- end }}\n - name: RABBIT_USER\n valueFrom:\n secretKeyRef:\n name: {{ template \"rabbitmq-ha.secretName\" . }}\n key: rabbitmq-username\n - name: RABBIT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"rabbitmq-ha.secretName\" . }}\n key: rabbitmq-password\n {{- range $key, $value := .Values.prometheus.exporter.env }}\n - name: {{ $key }}\n value: {{ $value | quote }}\n {{- end }}\n resources:\n{{ toYaml .Values.prometheus.exporter.resources | indent 12 }}\n {{ end }}\n {{- if .Values.extraContainers }}\n{{ tpl (toYaml .Values.extraContainers) . | indent 8 }}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.schedulerName }}\n schedulerName: \"{{ .Values.schedulerName }}\"\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n {{- with .Values.affinity }}\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- else }}\n {{- if eq .Values.podAntiAffinity \"hard\" }}\n affinity:\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - topologyKey: \"{{ .Values.podAntiAffinityTopologyKey }}\"\n labelSelector:\n matchLabels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n release: {{ .Release.Name }}\n {{- else if eq .Values.podAntiAffinity \"soft\" }}\n affinity:\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 1\n podAffinityTerm:\n topologyKey: \"{{ .Values.podAntiAffinityTopologyKey }}\"\n labelSelector:\n matchLabels:\n app: {{ template \"rabbitmq-ha.name\" . }}\n release: {{ .Release.Name }}\n {{- end }}\n {{- end }}\n\n {{- if .Values.priorityClassName }}\n priorityClassName: {{ .Values.priorityClassName }}\n {{- end }}\n volumes:\n - name: config\n emptyDir: {}\n - name: configmap\n configMap:\n name: {{ template \"rabbitmq-ha.fullname\" . }}\n - name: definitions\n secret:\n secretName: {{ template \"rabbitmq-ha.secretName\" . 
}}\n items:\n - key: {{ .Values.definitionsSource }}\n path: definitions.json\n {{- if .Values.rabbitmqCert.enabled }}\n - name: cert\n secret:\n defaultMode: 420\n secretName: {{ template \"rabbitmq-ha.certSecretName\" . }}\n {{- end }}\n {{- if .Values.extraVolumes }}\n{{ toYaml .Values.extraVolumes | indent 8 }}\n {{- end }}\n{{- if .Values.persistentVolume.enabled }}\n volumeClaimTemplates:\n - metadata:\n name: {{ .Values.persistentVolume.name }}\n annotations:\n{{ tpl (toYaml .Values.persistentVolume.annotations) . | indent 10 }}\n labels:\n{{ tpl (toYaml .Values.persistentVolume.labels) . | indent 10 }}\n spec:\n accessModes:\n {{- range .Values.persistentVolume.accessModes }}\n - {{ . | quote }}\n {{- end }}\n resources:\n requests:\n storage: {{ .Values.persistentVolume.size | quote }}\n {{- if .Values.persistentVolume.storageClass }}\n {{- if (eq \"-\" .Values.persistentVolume.storageClass) }}\n storageClassName: \"\"\n {{- else }}\n storageClassName: \"{{ .Values.persistentVolume.storageClass }}\"\n {{- end }}\n {{- end }}\n {{- with .Values.persistentVolume.selector }}\n selector:\n {{- toYaml . | nindent 10 }}\n {{- end }}\n{{- else }}\n - name: {{ .Values.persistentVolume.name }}\n emptyDir: {}\n{{- end }}\n"
] | ## RabbitMQ application credentials
## Ref: http://rabbitmq.com/access-control.html
##
rabbitmqUsername: guest
# rabbitmqPassword:
## RabbitMQ Management user used for health checks
managementUsername: management
# managementPassword:
## Place any additional key/value configuration to add to rabbitmq.conf
## Ref: https://www.rabbitmq.com/configure.html#config-items
extraConfig: |
# queue_master_locator = min-masters
## Place advanced.config file in /etc/rabbitmq/advanced.config
## Ref: https://www.rabbitmq.com/configure.html#advanced-config-file
advancedConfig: |
## Definitions specification within the secret; it will always be mounted
## at /etc/definitions/definitions.json
definitionsSource: definitions.json
## Place any additional plugins to enable in /etc/rabbitmq/enabled_plugins
## Ref: https://www.rabbitmq.com/plugins.html
extraPlugins: |
  rabbitmq_shovel,
  rabbitmq_shovel_management,
  rabbitmq_federation,
  rabbitmq_federation_management,
definitions:
  globalParameters: |-
#    {
#      "name": "cluster_name",
#      "value": "rabbitmq-ha"
#    }
  users: |-
#    {
#      "name": "myUsername",
#      "password": "myPassword",
#      "tags": "administrator"
#    }
  vhosts: |-
#    {
#      "name": "/rabbit"
#    }
  parameters: |-
#    {
#      "value": {
#        "src-uri": "amqp://localhost",
#        "src-queue": "source",
#        "dest-uri": "amqp://localhost",
#        "dest-queue": "destination",
#        "add-forward-headers": false,
#        "ack-mode": "on-confirm",
#        "delete-after": "never"
#      },
#      "vhost": "/",
#      "component": "shovel",
#      "name": "test"
#    }
  permissions: |-
#    {
#      "user": "myUsername",
#      "vhost": "/rabbit",
#      "configure": ".*",
#      "write": ".*",
#      "read": ".*"
#    }
  topicPermissions: |-
#    {
#      "user": "myUsername",
#      "vhost": "/rabbit",
#      "exchange": "myexchange",
#      "write": ".*",
#      "read": ".*"
#    }
  queues: |-
#    {
#      "name":"myName",
#      "vhost":"/rabbit",
#      "durable":true,
#      "auto_delete":false,
#      "arguments":{}
#    }
  exchanges: |-
#    {
#      "name":"myName",
#      "vhost":"/rabbit",
#      "type":"direct",
#      "durable":true,
#      "auto_delete":false,
#      "internal":false,
#      "arguments":{}
#    }
  bindings: |-
#    {
#      "source":"myName",
#      "vhost":"/rabbit",
#      "destination":"myName",
#      "destination_type":"queue",
#      "routing_key":"myKey",
#      "arguments":{}
#    }
  ## Sets the policies in definitions.json. This can be used to control the high
  ## availability of queues by mirroring them to multiple nodes.
  ## Ref: https://www.rabbitmq.com/ha.html
  policies: |-
#    {
#      "name": "ha-all",
#      "pattern": ".*",
#      "vhost": "/",
#      "definition": {
#        "ha-mode": "all",
#        "ha-sync-mode": "automatic",
#        "ha-sync-batch-size": 1
#      }
#    }
## Ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot
forceBoot: false
## RabbitMQ default VirtualHost
## Ref: https://www.rabbitmq.com/vhosts.html
##
rabbitmqVhost: "/"
## Erlang cookie to determine whether different nodes are allowed to communicate with each other
## Ref: https://www.rabbitmq.com/clustering.html
##
# rabbitmqErlangCookie:
## RabbitMQ Memory high watermark
## Ref: http://www.rabbitmq.com/memory.html
##
rabbitmqMemoryHighWatermark: 256MB
rabbitmqMemoryHighWatermarkType: absolute
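## A relative threshold also works with the configmap template above, which renders
## vm_memory_high_watermark.<type> = <value>; a sketch using a fraction of available RAM:
# rabbitmqMemoryHighWatermarkType: relative
# rabbitmqMemoryHighWatermark: 0.4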
## EPMD port for peer discovery service used by RabbitMQ nodes and CLI tools
## Ref: https://www.rabbitmq.com/clustering.html
##
rabbitmqEpmdPort: 4369
## Node port
rabbitmqNodePort: 5672
## Manager port
rabbitmqManagerPort: 15672
## Set to true to precompile parts of RabbitMQ with HiPE, a just-in-time
## compiler for Erlang. This will increase server throughput at the cost of
## increased startup time. You might see 20-50% better performance at the cost
## of a few minutes delay at startup.
rabbitmqHipeCompile: false
## SSL certificates
## Ref: http://www.rabbitmq.com/ssl.html
rabbitmqCert:
  enabled: false
  # Specifies an existing secret to be used for SSL Certs
  existingSecret: ""
  ## Create a new secret using these values
  cacertfile: |
  certfile: |
  keyfile: |
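  ## Note: secret.yaml above copies these values into the Secret's data fields verbatim
  ## (no b64enc is applied), so when creating a new secret this way the cacertfile, certfile,
  ## and keyfile values are expected to be base64-encoded PEM strings.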
## Extra volumes for statefulset
extraVolumes: []
## Extra volume mounts for statefulset
extraVolumeMounts: []
## Authentication mechanism
## Ref: http://www.rabbitmq.com/authentication.html
rabbitmqAuth:
  enabled: false
  config: |
    # auth_mechanisms.1 = PLAIN
    # auth_mechanisms.2 = AMQPLAIN
    # auth_mechanisms.3 = EXTERNAL
## Automatic Partition Handling Strategy (split brain handling)
## Ref: https://www.rabbitmq.com/partitions.html#automatic-handling
## Note: pause-if-all-down is not supported without using a custom configmap since it requires extra
## configuration.
rabbitmqClusterPartitionHandling: autoheal
## Authentication backend
## Ref: https://github.com/rabbitmq/rabbitmq-auth-backend-http
rabbitmqAuthHTTP:
enabled: false
config: |
# auth_backends.1 = http
# auth_http.user_path = http://some-server/auth/user
# auth_http.vhost_path = http://some-server/auth/vhost
# auth_http.resource_path = http://some-server/auth/resource
# auth_http.topic_path = http://some-server/auth/topic
## LDAP Plugin
## Ref: http://www.rabbitmq.com/ldap.html
rabbitmqLDAPPlugin:
enabled: false
## LDAP configuration:
config: |
# auth_backends.1 = ldap
# auth_ldap.servers.1 = my-ldap-server
# auth_ldap.user_dn_pattern = cn=${username},ou=People,dc=example,dc=com
# auth_ldap.use_ssl = false
# auth_ldap.port = 389
# auth_ldap.log = false
## MQTT Plugin
## Ref: http://www.rabbitmq.com/mqtt.html
rabbitmqMQTTPlugin:
enabled: false
## MQTT configuration:
config: |
# mqtt.default_user = guest
# mqtt.default_pass = guest
# mqtt.allow_anonymous = true
## Web MQTT Plugin
## Ref: http://www.rabbitmq.com/web-mqtt.html
rabbitmqWebMQTTPlugin:
enabled: false
## Web MQTT configuration:
config: |
# web_mqtt.ssl.port = 12345
# web_mqtt.ssl.backlog = 1024
# web_mqtt.ssl.certfile = /etc/cert/cacert.pem
# web_mqtt.ssl.keyfile = /etc/cert/cert.pem
# web_mqtt.ssl.cacertfile = /etc/cert/key.pem
# web_mqtt.ssl.password = changeme
## STOMP Plugin
## Ref: http://www.rabbitmq.com/stomp.html
rabbitmqSTOMPPlugin:
enabled: false
## STOMP configuration:
config: |
# stomp.default_user = guest
# stomp.default_pass = guest
## Web STOMP Plugin
## Ref: http://www.rabbitmq.com/web-stomp.html
rabbitmqWebSTOMPPlugin:
enabled: false
## Web STOMP configuration:
config: |
# web_stomp.ws_frame = binary
# web_stomp.cowboy_opts.max_keepalive = 10
## Prometheus Plugin
## Ref: https://www.rabbitmq.com/prometheus.html
rabbitmqPrometheusPlugin:
enabled: false
## NodePort
nodePort: null
## metrics port, overrides configuration:
## prometheus.tcp.port
port: 15692
## metrics path, overrides configuration:
## prometheus.path
path: /metrics
## Prometheus configuration:
## https://github.com/rabbitmq/rabbitmq-prometheus
config: |
## prometheus.path and prometheus.tcp.port can be set above
## AMQPS support
## Ref: http://www.rabbitmq.com/ssl.html
rabbitmqAmqpsSupport:
enabled: false
# NodePort
amqpsNodePort: 5671
# SSL configuration
config: |
# listeners.ssl.default = 5671
# ssl_options.cacertfile = /etc/cert/cacert.pem
# ssl_options.certfile = /etc/cert/cert.pem
# ssl_options.keyfile = /etc/cert/key.pem
# ssl_options.verify = verify_peer
# ssl_options.fail_if_no_peer_cert = false
## Number of replicas
replicaCount: 3
image:
repository: rabbitmq
tag: 3.8.7-alpine
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
busyboxImage:
repository: busybox
tag: 1.30.1
pullPolicy: IfNotPresent
## Duration in seconds the pod needs to terminate gracefully
terminationGracePeriodSeconds: 10
service:
annotations: {}
clusterIP: None
## List of IP addresses at which the service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
externalTrafficPolicy: ""
loadBalancerSourceRanges: []
type: ClusterIP
## Customize nodePort number when the service type is NodePort
## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
##
epmdNodePort: null
amqpNodePort: null
managerNodePort: null
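## Illustrative NodePort example (placeholder port numbers, not defaults). Also set
## `type: NodePort` above; note that a headless service (`clusterIP: None`) cannot be
## of type NodePort.
# epmdNodePort: 32369
# amqpNodePort: 30672
# managerNodePort: 31672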
## Custom annotations for the discovery service
## Falls back to `service.annotations` when `separateAnnotations: false`
##
discovery:
separateAnnotations: false
annotations: {}
podManagementPolicy: OrderedReady
## StatefulSet rolling update strategy
## Ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#rolling-update
##
updateStrategy: OnDelete
## StatefulSet Pod Priority
## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
# priorityClassName: ""
## We usually recommend not to specify default resources and to leave this as
## a conscious choice for the user. This also increases chances charts run on
## environments with little resources, such as Minikube. If you do want to
## specify resources, uncomment the following lines, adjust them as necessary,
## and remove the curly braces after 'resources:'.
## If you decide to set the memory limit, make sure to also change the
## rabbitmqMemoryHighWatermark following the formula:
## rabbitmqMemoryHighWatermark = 0.4 * resources.limits.memory
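## For example (illustrative values, not defaults): with resources.limits.memory: 1Gi,
## set rabbitmqMemoryHighWatermark: 410MB (0.4 * 1024MB) and keep
## rabbitmqMemoryHighWatermarkType: absolute.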
##
resources: {}
# limits:
# cpu: 100m
# memory: 1Gi
# requests:
# cpu: 100m
# memory: 1Gi
initContainer:
enabled: true
securityContext:
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
chownFiles: true
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Additional init containers
extraInitContainers: []
## Additional containers
extraContainers: []
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Data Persistency
persistentVolume:
enabled: false
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## selector can be used to match an existing PersistentVolume
selector: {}
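## For example, to bind to a pre-provisioned PersistentVolume by label (placeholder label):
# selector:
#   matchLabels:
#     volume: rabbitmq-data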
name: data
accessModes:
- ReadWriteOnce
size: 8Gi
annotations: {}
labels: {}
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
##
nodeSelector: {}
## Node tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
##
tolerations: []
## Extra Annotations to be added to pod
podAnnotations: {}
## Extra Annotations to be added to the StatefulSet
statefulSetAnnotations: {}
## Pod affinity
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
podAntiAffinity: soft
podAntiAffinityTopologyKey: "kubernetes.io/hostname"
## Affinity settings
## Defining 'affinity' will disable any podAntiAffinity settings.
## If you still need anti-affinity, you must include the configuration here (see the commented example below).
##
affinity: {}
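## Illustrative example of keeping anti-affinity while defining `affinity`
## (placeholder selector; adjust the labels to match this chart's pod labels):
# affinity:
#   podAntiAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#       - topologyKey: kubernetes.io/hostname
#         labelSelector:
#           matchLabels:
#             app: rabbitmq-ha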
## Create default configMap
##
existingConfigMap: false
## Add additional labels to all resources
##
extraLabels: {}
## Role Based Access
## Ref: https://kubernetes.io/docs/admin/authorization/rbac/
##
rbac:
create: true
## Service Account
## Ref: https://kubernetes.io/docs/admin/service-accounts-admin/
##
serviceAccount:
create: true
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the fullname template
# name:
## Automount API credentials for a service account.
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server
automountServiceAccountToken: true
ingress:
## Set to true to enable ingress record generation
enabled: false
path: /
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
## hostName: foo.bar.com
## Set this to true in order to enable TLS on the ingress record
tls: false
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: myTlsSecret
## Ingress annotations done as key:value pairs
annotations: {}
# kubernetes.io/ingress.class: nginx
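## The liveness/readiness probes below query the management API node healthcheck
## (http://127.0.0.1:15672/api/healthchecks/node) with HTTP Basic auth built from the
## RABBIT_MANAGEMENT_USER and RABBIT_MANAGEMENT_PASSWORD environment variables.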
livenessProbe:
initialDelaySeconds: 120
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
exec:
command:
- /bin/sh
- -c
- 'timeout 5 wget -O - -q --header "Authorization: Basic `echo -n \"$RABBIT_MANAGEMENT_USER:$RABBIT_MANAGEMENT_PASSWORD\" | base64`" http://127.0.0.1:15672/api/healthchecks/node | grep -qF "{\"status\":\"ok\"}"'
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 6
exec:
command:
- /bin/sh
- -c
- 'timeout 3 wget -O - -q --header "Authorization: Basic `echo -n \"$RABBIT_MANAGEMENT_USER:$RABBIT_MANAGEMENT_PASSWORD\" | base64`" http://127.0.0.1:15672/api/healthchecks/node | grep -qF "{\"status\":\"ok\"}"'
# Specifies an existing secret to be used for RMQ password, management user password and Erlang Cookie
existingSecret: ""
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
fsGroup: 101
runAsGroup: 101
runAsNonRoot: true
runAsUser: 100
## Sets environment variables for the rabbitmq container
env: {}
prometheus:
## Configures Prometheus Exporter to expose and scrape stats.
exporter:
enabled: false
env: {}
image:
repository: kbudde/rabbitmq-exporter
tag: v0.29.0
pullPolicy: IfNotPresent
## Port Prometheus scrapes for metrics
port: 9090
## Comma-separated list of extended scraping capabilities supported by the target RabbitMQ server
capabilities: "bert,no_sort"
## Allow overriding of container resources
resources: {}
# limits:
# cpu: 200m
# memory: 1Gi
# requests:
# cpu: 100m
# memory: 100Mi
## Prometheus Operator integration. Setting to true will create Operator-specific resources like ServiceMonitors and Alerts
operator:
## Are you using Prometheus Operator? [Blog Post](https://coreos.com/blog/the-prometheus-operator.html)
enabled: true
## Configures Alerts, which will be setup via Prometheus Operator / ConfigMaps.
alerts:
## Prometheus exporter must be enabled as well
enabled: true
## Selector must be configured to match the Prometheus install, defaulting to what's done by the Prometheus Operator
## See [CoreOS Prometheus Chart](https://github.com/coreos/prometheus-operator/tree/master/helm)
selector:
role: alert-rules
labels: {}
serviceMonitor:
## Interval at which Prometheus scrapes RabbitMQ Exporter
interval: 10s
# Namespace Prometheus is installed in
namespace: monitoring
## Defaults to what's used if you follow the CoreOS [Prometheus Install Instructions](https://github.com/coreos/prometheus-operator/tree/master/helm#tldr)
## [Prometheus Selector Label](https://github.com/coreos/prometheus-operator/blob/master/helm/prometheus/templates/prometheus.yaml#L65)
## [Kube Prometheus Selector Label](https://github.com/coreos/prometheus-operator/blob/master/helm/kube-prometheus/values.yaml#L298)
selector:
prometheus: kube-prometheus
## Kubernetes Cluster Domain
clusterDomain: cluster.local
## Pod Disruption Budget
podDisruptionBudget: {}
# maxUnavailable: 1
# minAvailable: 1
lifecycle: {}
|
mariadb | [
"# _helpers.tpl\n{{/*\nReturn the appropriate apiVersion for statefulset.\n*/}}\n{{- define \"mariadb.statefulset.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"apps/v1beta1\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"mariadb.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"mariadb.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- printf .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{- define \"master.fullname\" -}}\n{{- if .Values.replication.enabled -}}\n{{- printf \"%s-%s\" (include \"mariadb.fullname\" .) \"master\" | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- include \"mariadb.fullname\" . -}}\n{{- end -}}\n{{- end -}}\n\n{{- define \"slave.fullname\" -}}\n{{- printf \"%s-%s\" (include \"mariadb.fullname\" .) \"slave\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{- define \"mariadb.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the proper MariaDB image name\n*/}}\n{{- define \"mariadb.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper metrics image name\n*/}}\n{{- define \"mariadb.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{ template \"mariadb.initdbScriptsCM\" . 
}}\n{{/*\nGet the initialization scripts ConfigMap name.\n*/}}\n{{- define \"mariadb.initdbScriptsCM\" -}}\n{{- if .Values.initdbScriptsConfigMap -}}\n{{- printf \"%s\" .Values.initdbScriptsConfigMap -}}\n{{- else -}}\n{{- printf \"%s-init-scripts\" (include \"master.fullname\" .) -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"mariadb.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"mariadb.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"mariadb.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.volumePermissions.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.volumePermissions.image.pullSecrets }}\n - name: {{ . 
}}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper test image name\n*/}}\n{{- define \"mariadb.tests.testFramework.image\" -}}\n{{- $registryName := .Values.tests.testFramework.image.registry -}}\n{{- $repositoryName := .Values.tests.testFramework.image.repository -}}\n{{- $tag := .Values.tests.testFramework.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the init container volume-permissions image)\n*/}}\n{{- define \"mariadb.volumePermissions.image\" -}}\n{{- $registryName := .Values.volumePermissions.image.registry -}}\n{{- $repositoryName := .Values.volumePermissions.image.repository -}}\n{{- $tag := .Values.volumePermissions.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class for the master\n*/}}\n{{- define \"mariadb.master.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.master.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.master.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.master.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.master.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.master.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.master.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class for the slave\n*/}}\n{{- define \"mariadb.slave.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- 
printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.slave.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.slave.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.slave.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.slave.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.slave.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.slave.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the name of the Secret used to store the passwords\n*/}}\n{{- define \"mariadb.secretName\" -}}\n{{- if .Values.existingSecret -}}\n{{ .Values.existingSecret }}\n{{- else -}}\n{{ template \"mariadb.fullname\" . -}}\n{{- end -}}\n{{- end -}}\n",
"# initialization-configmap.yaml\n{{- if and (or (.Files.Glob \"files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}\") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"master.fullname\" . }}-init-scripts\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n chart: \"{{ template \"mariadb.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n component: \"master\"\n{{- if and (.Files.Glob \"files/docker-entrypoint-initdb.d/*.sql.gz\") (not .Values.initdbScriptsConfigMap) }}\nbinaryData:\n{{- $root := . }}\n{{- range $path, $bytes := .Files.Glob \"files/docker-entrypoint-initdb.d/*.sql.gz\" }}\n {{ base $path }}: {{ $root.Files.Get $path | b64enc | quote }}\n{{- end }}\n{{- end }}\ndata:\n{{- if and (.Files.Glob \"files/docker-entrypoint-initdb.d/*.{sh,sql}\") (not .Values.initdbScriptsConfigMap) }}\n{{ (.Files.Glob \"files/docker-entrypoint-initdb.d/*.{sh,sql}\").AsConfig | indent 2 }}\n{{- end }}\n{{- with .Values.initdbScripts }}\n{{ toYaml . | indent 2 }}\n{{- end }}\n{{ end }}\n",
"# master-configmap.yaml\n{{- if .Values.master.config }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"master.fullname\" . }}\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n component: \"master\"\n chart: \"{{ template \"mariadb.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ndata:\n my.cnf: |-\n{{ .Values.master.config | indent 4 }}\n{{- end -}}\n",
"# master-pdb.yaml\n{{- if .Values.master.podDisruptionBudget.enabled }}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"mariadb.fullname\" . }}\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n component: \"master\"\n chart: {{ template \"mariadb.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n{{- if .Values.master.podDisruptionBudget.minAvailable }}\n minAvailable: {{ .Values.master.podDisruptionBudget.minAvailable }}\n{{- end }}\n{{- if .Values.master.podDisruptionBudget.maxUnavailable }}\n maxUnavailable: {{ .Values.master.podDisruptionBudget.maxUnavailable }}\n{{- end }}\n selector:\n matchLabels:\n app: \"{{ template \"mariadb.name\" . }}\"\n component: \"master\"\n release: {{ .Release.Name | quote }}\n{{- end }}\n",
"# master-statefulset.yaml\napiVersion: {{ template \"mariadb.statefulset.apiVersion\" . }}\nkind: StatefulSet\nmetadata:\n name: {{ template \"master.fullname\" . }}\n labels:\n app: {{ template \"mariadb.name\" . }}\n chart: {{ template \"mariadb.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: master\nspec:\n selector:\n matchLabels:\n app: {{ template \"mariadb.name\" . }}\n release: {{ .Release.Name }}\n component: master\n serviceName: {{ template \"master.fullname\" . }}\n replicas: 1\n updateStrategy:\n type: {{ .Values.master.updateStrategy.type }}\n {{- if (eq \"Recreate\" .Values.master.updateStrategy.type) }}\n rollingUpdate: null\n {{- end }}\n template:\n metadata:\n {{- with .Values.master.annotations }}\n annotations:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n labels:\n app: {{ template \"mariadb.name\" . }}\n chart: {{ template \"mariadb.chart\" . }}\n release: {{ .Release.Name }}\n component: master\n spec:\n {{- if .Values.schedulerName }}\n schedulerName: {{ .Values.schedulerName | quote }}\n {{- end }}\n serviceAccountName: {{ template \"mariadb.serviceAccountName\" . }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.securityContext.fsGroup }}\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n {{- if eq .Values.master.antiAffinity \"hard\" }}\n affinity:\n {{- with .Values.master.affinity }}\n{{ toYaml . | indent 8 }}\n {{- end }}\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - topologyKey: \"kubernetes.io/hostname\"\n labelSelector:\n matchLabels:\n app: {{ template \"mariadb.name\" . }}\n release: {{ .Release.Name }}\n {{- else if eq .Values.master.antiAffinity \"soft\" }}\n affinity:\n {{- with .Values.master.affinity }}\n{{ toYaml . | indent 8 }}\n {{- end }}\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 1\n podAffinityTerm:\n topologyKey: kubernetes.io/hostname\n labelSelector:\n matchLabels:\n app: {{ template \"mariadb.name\" . }}\n release: {{ .Release.Name }}\n {{- else}}\n {{- with .Values.master.affinity }}\n affinity: {{ toYaml . | nindent 8 }}\n {{- end }}\n {{- end }}\n {{- if .Values.master.nodeSelector }}\n nodeSelector: {{ toYaml .Values.master.nodeSelector | nindent 8 }}\n {{- end -}}\n {{- with .Values.master.tolerations }}\n tolerations: {{ toYaml . | nindent 8 }}\n {{- end }}\n{{- include \"mariadb.imagePullSecrets\" . | indent 6 }}\n initContainers:\n {{- if .Values.master.extraInitContainers }}\n{{ tpl .Values.master.extraInitContainers . | indent 8 }}\n {{- end }}\n {{- if and .Values.volumePermissions.enabled .Values.master.persistence.enabled }}\n - name: volume-permissions\n image: {{ template \"mariadb.volumePermissions.image\" . }}\n imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}\n command: [\"chown\", \"-R\", \"{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}\", \"{{ .Values.master.persistence.mountPath }}\"]\n securityContext:\n runAsUser: 0\n resources: {{ toYaml .Values.volumePermissions.resources | nindent 12 }}\n volumeMounts:\n - name: data\n mountPath: {{ .Values.master.persistence.mountPath }}\n {{- end }}\n containers:\n - name: \"mariadb\"\n image: {{ template \"mariadb.image\" . 
}}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n {{- if .Values.image.debug}}\n - name: BITNAMI_DEBUG\n value: \"true\"\n {{- end }}\n {{- if .Values.master.extraFlags }}\n - name: MARIADB_EXTRA_FLAGS\n value: \"{{ .Values.master.extraFlags }}\"\n {{- end }}\n {{- if .Values.rootUser.injectSecretsAsVolume }}\n - name: MARIADB_ROOT_PASSWORD_FILE\n value: \"/opt/bitnami/mariadb/secrets/mariadb-root-password\"\n {{- else }}\n - name: MARIADB_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mariadb.secretName\" . }}\n key: mariadb-root-password\n {{- end }}\n {{- if not (empty .Values.db.user) }}\n - name: MARIADB_USER\n value: \"{{ .Values.db.user }}\"\n {{- if .Values.db.injectSecretsAsVolume }}\n - name: MARIADB_PASSWORD_FILE\n value: \"/opt/bitnami/mariadb/secrets/mariadb-password\"\n {{- else }}\n - name: MARIADB_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mariadb.secretName\" . }}\n key: mariadb-password\n {{- end }}\n {{- end }}\n - name: MARIADB_DATABASE\n value: \"{{ .Values.db.name }}\"\n {{- if .Values.replication.enabled }}\n - name: MARIADB_REPLICATION_MODE\n value: \"master\"\n - name: MARIADB_REPLICATION_USER\n value: \"{{ .Values.replication.user }}\"\n {{- if .Values.replication.injectSecretsAsVolume }}\n - name: MARIADB_REPLICATION_PASSWORD_FILE\n value: \"/opt/bitnami/mariadb/secrets/mariadb-replication-password\"\n {{- else }}\n - name: MARIADB_REPLICATION_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mariadb.secretName\" . }}\n key: mariadb-replication-password\n {{- end }}\n {{- end }}\n {{- if .Values.master.extraEnvVars }}\n {{- tpl (toYaml .Values.master.extraEnvVars) $ | nindent 12 }}\n {{- end }}\n ports:\n - name: mysql\n containerPort: 3306\n {{- if .Values.master.livenessProbe.enabled }}\n livenessProbe:\n exec:\n command:\n - sh\n - -c\n - |\n password_aux=\"${MARIADB_ROOT_PASSWORD:-}\"\n if [ -f \"${MARIADB_ROOT_PASSWORD_FILE:-}\" ]; then\n password_aux=$(cat $MARIADB_ROOT_PASSWORD_FILE)\n fi\n mysqladmin status -uroot -p$password_aux\n initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.master.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.master.readinessProbe.enabled }}\n readinessProbe:\n exec:\n command:\n - sh\n - -c\n - |\n password_aux=\"${MARIADB_ROOT_PASSWORD:-}\"\n if [ -f \"${MARIADB_ROOT_PASSWORD_FILE:-}\" ]; then\n password_aux=$(cat $MARIADB_ROOT_PASSWORD_FILE)\n fi\n mysqladmin status -uroot -p$password_aux\n initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.master.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.master.resources }}\n resources: {{ toYaml .Values.master.resources | nindent 12 }}\n {{- end }}\n volumeMounts:\n - name: data\n mountPath: {{ .Values.master.persistence.mountPath }}\n {{- if .Values.master.persistence.subPath }}\n subPath: {{ .Values.master.persistence.subPath }}\n {{- end }}\n {{- if or (.Files.Glob \"files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}\") .Values.initdbScriptsConfigMap 
.Values.initdbScripts }}\n - name: custom-init-scripts\n mountPath: /docker-entrypoint-initdb.d\n {{- end }}\n {{- if .Values.master.config }}\n - name: config\n mountPath: /opt/bitnami/mariadb/conf/my.cnf\n subPath: my.cnf\n {{- end }}\n {{- if or .Values.rootUser.injectSecretsAsVolume .Values.db.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }}\n - name: mariadb-credentials\n mountPath: /opt/bitnami/mariadb/secrets/\n {{- end }}\n {{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"mariadb.metrics.image\" . }}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n env:\n {{- if .Values.rootUser.injectSecretsAsVolume }}\n - name: MARIADB_ROOT_PASSWORD_FILE\n value: \"/opt/bitnami/mysqld-exporter/secrets/mariadb-root-password\"\n {{- else }}\n - name: MARIADB_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mariadb.secretName\" . }}\n key: mariadb-root-password\n {{- end }}\n command:\n - sh\n - -c\n - |\n password_aux=\"${MARIADB_ROOT_PASSWORD:-}\"\n if [ -f \"${MARIADB_ROOT_PASSWORD_FILE:-}\" ]; then\n password_aux=$(cat $MARIADB_ROOT_PASSWORD_FILE)\n fi\n DATA_SOURCE_NAME=\"root:${password_aux}@(localhost:3306)/\" /bin/mysqld_exporter\n {{- range .Values.metrics.extraArgs.master }}\n {{ . }}\n {{- end }}\n ports:\n - name: metrics\n containerPort: 9104\n {{- if .Values.metrics.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.metrics.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.metrics.resources }}\n resources: {{ toYaml .Values.metrics.resources | nindent 12 }}\n {{- end }}\n {{- if .Values.rootUser.injectSecretsAsVolume }}\n volumeMounts:\n - name: mariadb-credentials\n mountPath: /opt/bitnami/mysqld-exporter/secrets/\n {{- end }}\n {{- end }}\n volumes:\n {{- if .Values.master.config }}\n - name: config\n configMap:\n name: {{ template \"master.fullname\" . }}\n {{- end }}\n {{- if or (.Files.Glob \"files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}\") .Values.initdbScriptsConfigMap .Values.initdbScripts }}\n - name: custom-init-scripts\n configMap:\n name: {{ template \"mariadb.initdbScriptsCM\" . }}\n {{- end }}\n {{- if or .Values.rootUser.injectSecretsAsVolume .Values.db.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }}\n - name: mariadb-credentials\n secret:\n secretName: {{ template \"mariadb.fullname\" . 
}}\n items:\n {{- if .Values.rootUser.injectSecretsAsVolume }}\n - key: mariadb-root-password\n path: mariadb-root-password\n {{- end }}\n {{- if .Values.db.injectSecretsAsVolume }}\n - key: mariadb-password\n path: mariadb-password\n {{- end }}\n {{- if and .Values.replication.enabled .Values.replication.injectSecretsAsVolume }}\n - key: mariadb-replication-password\n path: mariadb-replication-password\n {{- end }}\n {{- end }}\n{{- if and .Values.master.persistence.enabled .Values.master.persistence.existingClaim }}\n - name: data\n persistentVolumeClaim:\n claimName: {{ .Values.master.persistence.existingClaim }}\n{{- else if not .Values.master.persistence.enabled }}\n - name: data\n emptyDir: {}\n{{- else if and .Values.master.persistence.enabled (not .Values.master.persistence.existingClaim) }}\n volumeClaimTemplates:\n - metadata:\n name: data\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n component: \"master\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n spec:\n accessModes:\n {{- range .Values.master.persistence.accessModes }}\n - {{ . | quote }}\n {{- end }}\n resources:\n requests:\n storage: {{ .Values.master.persistence.size | quote }}\n {{ include \"mariadb.master.storageClass\" . }}\n{{- end }}\n",
"# master-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"mariadb.fullname\" . }}\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n component: \"master\"\n chart: \"{{ template \"mariadb.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- if or .Values.metrics.enabled .Values.master.service.annotations }}\n annotations:\n{{- if .Values.metrics.enabled }}\n{{ toYaml .Values.metrics.annotations | indent 4 }}\n{{- end }}\n{{- if .Values.master.service.annotations }}\n{{ toYaml .Values.master.service.annotations | indent 4 }}\n{{- end }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n {{- if eq .Values.service.type \"ClusterIP\" }}\n {{- if .Values.service.clusterIp }}\n clusterIP: {{ .Values.service.clusterIp.master }}\n {{- end }}\n {{- end }}\n ports:\n - name: mysql\n port: {{ .Values.service.port }}\n targetPort: mysql\n{{- if eq .Values.service.type \"NodePort\" }}\n{{- if .Values.service.nodePort }}\n{{- if .Values.service.nodePort.master }}\n nodePort: {{ .Values.service.nodePort.master }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- if .Values.metrics.enabled }}\n - name: metrics\n port: 9104\n targetPort: metrics\n{{- end }}\n selector:\n app: \"{{ template \"mariadb.name\" . }}\"\n component: \"master\"\n release: \"{{ .Release.Name }}\"\n",
"# role.yaml\n{{- if and .Values.serviceAccount.create .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: {{ template \"master.fullname\" . }}\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n chart: \"{{ template \"mariadb.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n verbs:\n - get\n{{- end }}\n",
"# rolebinding.yaml\n{{- if and .Values.serviceAccount.create .Values.rbac.create }}\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"master.fullname\" . }}\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n chart: \"{{ template \"mariadb.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"mariadb.serviceAccountName\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"master.fullname\" . }}\n{{- end }}\n",
"# secrets.yaml\n{{- if (not .Values.existingSecret) -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"mariadb.fullname\" . }}\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n chart: \"{{ template \"mariadb.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ntype: Opaque\ndata:\n {{- if not (empty .Values.rootUser.password) }}\n mariadb-root-password: \"{{ .Values.rootUser.password | b64enc }}\"\n {{- else if (not .Values.rootUser.forcePassword) }}\n mariadb-root-password: \"{{ randAlphaNum 10 | b64enc }}\"\n {{ else }}\n mariadb-root-password: {{ required \"A MariaDB Root Password is required!\" .Values.rootUser.password }}\n {{- end }}\n {{- if not (empty .Values.db.user) }}\n {{- if not (empty .Values.db.password) }}\n mariadb-password: \"{{ .Values.db.password | b64enc }}\"\n {{- else if (not .Values.db.forcePassword) }}\n mariadb-password: \"{{ randAlphaNum 10 | b64enc }}\"\n {{- else }}\n mariadb-password: {{ required \"A MariaDB Database Password is required!\" .Values.db.password }}\n {{- end }}\n {{- end }}\n {{- if .Values.replication.enabled }}\n {{- if not (empty .Values.replication.password) }}\n mariadb-replication-password: \"{{ .Values.replication.password | b64enc }}\"\n {{- else if (not .Values.replication.forcePassword) }}\n mariadb-replication-password: \"{{ randAlphaNum 10 | b64enc }}\"\n {{- else }}\n mariadb-replication-password: {{ required \"A MariaDB Replication Password is required!\" .Values.replication.password }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"mariadb.serviceAccountName\" . }}\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n chart: \"{{ template \"mariadb.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- end }}\n",
"# servicemonitor.yaml\n{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ template \"mariadb.fullname\" . }}\n {{- if .Values.metrics.serviceMonitor.namespace }}\n namespace: {{ .Values.metrics.serviceMonitor.namespace }}\n {{- end }}\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n chart: {{ template \"mariadb.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }} \n {{- range $key, $value := .Values.metrics.serviceMonitor.selector }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"mariadb.name\" . }}\n release: {{ .Release.Name | quote }}\n endpoints:\n - port: metrics\n {{- if .Values.metrics.serviceMonitor.interval }}\n interval: {{ .Values.metrics.serviceMonitor.interval }}\n {{- end }}\n {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}\n scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}\n {{- end }}\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n{{- end }}\n",
"# slave-configmap.yaml\n{{- if and .Values.replication.enabled .Values.slave.config }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"slave.fullname\" . }}\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n component: \"slave\"\n chart: \"{{ template \"mariadb.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ndata:\n my.cnf: |-\n{{ .Values.slave.config | indent 4 }}\n{{- end }}\n",
"# slave-pdb.yaml\n{{- if .Values.replication.enabled }}\n{{- if .Values.slave.podDisruptionBudget.enabled }}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"mariadb.fullname\" . }}\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n component: \"slave\"\n chart: {{ template \"mariadb.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n{{- if .Values.slave.podDisruptionBudget.minAvailable }}\n minAvailable: {{ .Values.slave.podDisruptionBudget.minAvailable }}\n{{- end }}\n{{- if .Values.slave.podDisruptionBudget.maxUnavailable }}\n maxUnavailable: {{ .Values.slave.podDisruptionBudget.maxUnavailable }}\n{{- end }}\n selector:\n matchLabels:\n app: \"{{ template \"mariadb.name\" . }}\"\n component: \"slave\"\n release: {{ .Release.Name | quote }}\n{{- end }}\n{{- end }}\n",
"# slave-statefulset.yaml\n{{- if .Values.replication.enabled }}\napiVersion: {{ template \"mariadb.statefulset.apiVersion\" . }}\nkind: StatefulSet\nmetadata:\n name: {{ template \"slave.fullname\" . }}\n labels:\n app: {{ template \"mariadb.name\" . }}\n chart: {{ template \"mariadb.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: slave\nspec:\n selector:\n matchLabels:\n app: {{ template \"mariadb.name\" . }}\n release: {{ .Release.Name }}\n component: slave\n serviceName: {{ template \"slave.fullname\" . }}\n replicas: {{ .Values.slave.replicas }}\n updateStrategy:\n type: {{ .Values.slave.updateStrategy.type }}\n {{- if (eq \"Recreate\" .Values.slave.updateStrategy.type) }}\n rollingUpdate: null\n {{- end }}\n template:\n metadata:\n {{- if .Values.slave.annotations }}\n annotations:\n {{- range $key, $value := .Values.slave.annotations }}\n {{ $key }}: {{ $value }}\n {{- end }}\n {{- end }}\n labels:\n app: {{ template \"mariadb.name\" . }}\n chart: {{ template \"mariadb.chart\" . }}\n release: {{ .Release.Name }}\n component: slave\n spec:\n {{- if .Values.schedulerName }}\n schedulerName: {{ .Values.schedulerName | quote }}\n {{- end }}\n serviceAccountName: {{ template \"mariadb.serviceAccountName\" . }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.securityContext.fsGroup }}\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n {{- if eq .Values.slave.antiAffinity \"hard\" }}\n affinity:\n {{- with .Values.slave.affinity }}\n{{ toYaml . | indent 8 }}\n {{- end }}\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - topologyKey: \"kubernetes.io/hostname\"\n labelSelector:\n matchLabels:\n app: {{ template \"mariadb.name\" . }}\n release: {{ .Release.Name }}\n {{- else if eq .Values.slave.antiAffinity \"soft\" }}\n affinity:\n {{- with .Values.slave.affinity }}\n{{ toYaml . | indent 8 }}\n {{- end }}\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 1\n podAffinityTerm:\n topologyKey: kubernetes.io/hostname\n labelSelector:\n matchLabels:\n app: {{ template \"mariadb.name\" . }}\n release: {{ .Release.Name }}\n {{- else}}\n {{- with .Values.slave.affinity }}\n affinity: {{ toYaml . | nindent 8 }}\n {{- end }}\n {{- end }}\n {{- if .Values.slave.nodeSelector }}\n nodeSelector: {{ toYaml .Values.slave.nodeSelector | nindent 8 }}\n {{- end -}}\n {{- with .Values.slave.tolerations }}\n tolerations: {{ toYaml . | nindent 8 }}\n {{- end }}\n{{- include \"mariadb.imagePullSecrets\" . | indent 6 }}\n initContainers:\n {{- if .Values.master.extraInitContainers }}\n{{ tpl .Values.master.extraInitContainers . | indent 6}}\n {{- end }}\n {{- if and .Values.volumePermissions.enabled .Values.slave.persistence.enabled }}\n - name: volume-permissions\n image: {{ template \"mariadb.volumePermissions.image\" . }}\n imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}\n command: [\"chown\", \"-R\", \"{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}\", \"/bitnami/mariadb\"]\n securityContext:\n runAsUser: 0\n resources: {{ toYaml .Values.volumePermissions.resources | nindent 12 }}\n volumeMounts:\n - name: data\n mountPath: /bitnami/mariadb\n {{- end }}\n containers:\n - name: \"mariadb\"\n image: {{ template \"mariadb.image\" . 
}}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n {{- if .Values.image.debug}}\n - name: BITNAMI_DEBUG\n value: \"true\"\n {{- end }}\n {{- if .Values.slave.extraFlags }}\n - name: MARIADB_EXTRA_FLAGS\n value: \"{{ .Values.slave.extraFlags }}\"\n {{- end }}\n - name: MARIADB_REPLICATION_MODE\n value: \"slave\"\n - name: MARIADB_MASTER_HOST\n value: {{ template \"mariadb.fullname\" . }}\n - name: MARIADB_MASTER_PORT_NUMBER\n value: \"{{ .Values.service.port }}\"\n - name: MARIADB_MASTER_ROOT_USER\n value: \"root\"\n {{- if .Values.rootUser.injectSecretsAsVolume }}\n - name: MARIADB_MASTER_ROOT_PASSWORD_FILE\n value: \"/opt/bitnami/mariadb/secrets/mariadb-root-password\"\n {{- else }}\n - name: MARIADB_MASTER_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mariadb.secretName\" . }}\n key: mariadb-root-password\n {{- end }}\n - name: MARIADB_REPLICATION_USER\n value: \"{{ .Values.replication.user }}\"\n {{- if .Values.replication.injectSecretsAsVolume }}\n - name: MARIADB_REPLICATION_PASSWORD_FILE\n value: \"/opt/bitnami/mariadb/secrets/mariadb-replication-password\"\n {{- else }}\n - name: MARIADB_REPLICATION_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mariadb.secretName\" . }}\n key: mariadb-replication-password\n {{- end }}\n {{- if .Values.slave.extraEnvVars }}\n {{- tpl (toYaml .Values.slave.extraEnvVars) $ | nindent 12 }}\n {{- end }}\n ports:\n - name: mysql\n containerPort: 3306\n {{- if .Values.slave.livenessProbe.enabled }}\n livenessProbe:\n exec:\n command:\n - sh\n - -c\n - |\n password_aux=\"${MARIADB_MASTER_ROOT_PASSWORD:-}\"\n if [ -f \"${MARIADB_MASTER_ROOT_PASSWORD_FILE:-}\" ]; then\n password_aux=$(cat $MARIADB_MASTER_ROOT_PASSWORD_FILE)\n fi\n mysqladmin status -uroot -p$password_aux\n initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.slave.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.slave.readinessProbe.enabled }}\n readinessProbe:\n exec:\n command:\n - sh\n - -c\n - |\n password_aux=\"${MARIADB_MASTER_ROOT_PASSWORD:-}\"\n if [ -f \"${MARIADB_MASTER_ROOT_PASSWORD_FILE:-}\" ]; then\n password_aux=$(cat $MARIADB_MASTER_ROOT_PASSWORD_FILE)\n fi\n mysqladmin status -uroot -p$password_aux\n initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.slave.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.slave.resources }}\n resources: {{ toYaml .Values.slave.resources | nindent 12 }}\n {{- end }}\n volumeMounts:\n - name: data\n mountPath: /bitnami/mariadb\n {{- if .Values.slave.config }}\n - name: config\n mountPath: /opt/bitnami/mariadb/conf/my.cnf\n subPath: my.cnf\n {{- end }}\n {{- if or .Values.rootUser.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }}\n - name: mariadb-credentials\n mountPath: /opt/bitnami/mariadb/secrets/\n {{- end }}\n {{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"mariadb.metrics.image\" . 
}}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n env:\n {{- if .Values.rootUser.injectSecretsAsVolume }}\n - name: MARIADB_ROOT_PASSWORD_FILE\n value: \"/opt/bitnami/mysqld-exporter/secrets/mariadb-root-password\"\n {{- else }}\n - name: MARIADB_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mariadb.secretName\" . }}\n key: mariadb-root-password\n {{- end }}\n command:\n - sh\n - -c\n - |\n password_aux=\"${MARIADB_ROOT_PASSWORD:-}\"\n if [ -f \"${MARIADB_ROOT_PASSWORD_FILE:-}\" ]; then\n password_aux=$(cat $MARIADB_ROOT_PASSWORD_FILE)\n fi\n DATA_SOURCE_NAME=\"root:${password_aux}@(localhost:3306)/\" /bin/mysqld_exporter\n {{- range .Values.metrics.extraArgs.slave }}\n {{ . }}\n {{- end }}\n ports:\n - name: metrics\n containerPort: 9104\n {{- if .Values.metrics.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.metrics.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.metrics.resources }}\n resources: {{ toYaml .Values.metrics.resources | nindent 12 }}\n {{- end }}\n {{- if .Values.rootUser.injectSecretsAsVolume }}\n volumeMounts:\n - name: mariadb-credentials\n mountPath: /opt/bitnami/mysqld-exporter/secrets/\n {{- end }}\n {{- end }}\n volumes:\n {{- if .Values.slave.config }}\n - name: config\n configMap:\n name: {{ template \"slave.fullname\" . }}\n {{- end }}\n {{- if or .Values.rootUser.injectSecretsAsVolume .Values.replication.injectSecretsAsVolume }}\n - name: mariadb-credentials\n secret:\n secretName: {{ template \"mariadb.fullname\" . }}\n items:\n {{- if .Values.rootUser.injectSecretsAsVolume }}\n - key: mariadb-root-password\n path: mariadb-root-password\n {{- end }}\n {{- if .Values.replication.injectSecretsAsVolume }}\n - key: mariadb-replication-password\n path: mariadb-replication-password\n {{- end }}\n {{- end }}\n{{- if not .Values.slave.persistence.enabled }}\n - name: \"data\"\n emptyDir: {}\n{{- else }}\n volumeClaimTemplates:\n - metadata:\n name: data\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n component: \"slave\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n spec:\n accessModes:\n {{- range .Values.slave.persistence.accessModes }}\n - {{ . | quote }}\n {{- end }}\n resources:\n requests:\n storage: {{ .Values.slave.persistence.size | quote }}\n {{ include \"mariadb.slave.storageClass\" . }}\n{{- end }}\n{{- end }}\n",
"# slave-svc.yaml\n{{- if .Values.replication.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"slave.fullname\" . }}\n labels:\n app: \"{{ template \"mariadb.name\" . }}\"\n chart: \"{{ template \"mariadb.chart\" . }}\"\n component: \"slave\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- if or .Values.metrics.enabled .Values.slave.service.annotations }}\n annotations:\n{{- if .Values.metrics.enabled }}\n{{ toYaml .Values.metrics.annotations | indent 4 }}\n{{- end }}\n{{- if .Values.slave.service.annotations }}\n{{ toYaml .Values.slave.service.annotations | indent 4 }}\n{{- end }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n {{- if eq .Values.service.type \"ClusterIP\" }}\n {{- if .Values.service.clusterIp }}\n clusterIP: {{ .Values.service.clusterIp.slave }}\n {{- end }}\n {{- end }}\n ports:\n - name: mysql\n port: {{ .Values.service.port }}\n targetPort: mysql\n{{- if (eq .Values.service.type \"NodePort\") }}\n{{- if .Values.service.nodePort }}\n{{- if .Values.service.nodePort.slave }}\n nodePort: {{ .Values.service.nodePort.slave }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- if .Values.metrics.enabled }}\n - name: metrics\n port: 9104\n targetPort: metrics\n{{- end }}\n selector:\n app: \"{{ template \"mariadb.name\" . }}\"\n component: \"slave\"\n release: \"{{ .Release.Name }}\"\n{{- end }}\n",
"# test-runner.yaml\n{{- if .Values.tests.enabled }}\napiVersion: v1\nkind: Pod\nmetadata:\n name: \"{{ template \"mariadb.fullname\" . }}-test-{{ randAlphaNum 5 | lower }}\"\n annotations:\n \"helm.sh/hook\": test-success\nspec:\n initContainers:\n - name: \"test-framework\"\n image: {{ template \"mariadb.tests.testFramework.image\" . }}\n command:\n - \"bash\"\n - \"-c\"\n - |\n set -ex\n # copy bats to tools dir\n cp -R /usr/local/libexec/ /tools/bats/\n {{- if .Values.tests.testFramework.resources }}\n resources: {{ toYaml .Values.tests.testFramework.resources | nindent 8 }}\n {{- end }}\n volumeMounts:\n - mountPath: /tools\n name: tools\n containers:\n - name: mariadb-test\n image: {{ template \"mariadb.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n command: [\"/tools/bats/bats\", \"-t\", \"/tests/run.sh\"]\n env:\n - name: MARIADB_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mariadb.secretName\" . }}\n key: mariadb-root-password\n {{- if .Values.tests.resources }}\n resources: {{ toYaml .Values.tests.resources | nindent 8 }}\n {{- end }}\n volumeMounts:\n - mountPath: /tests\n name: tests\n readOnly: true\n - mountPath: /tools\n name: tools\n volumes:\n - name: tests\n configMap:\n name: {{ template \"mariadb.fullname\" . }}-tests\n - name: tools\n emptyDir: {}\n restartPolicy: Never\n{{- end }}\n",
"# tests.yaml\n{{- if .Values.tests.enabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"mariadb.fullname\" . }}-tests\ndata:\n run.sh: |-\n @test \"Testing MariaDB is accessible\" {\n mysql -h {{ template \"mariadb.fullname\" . }} -uroot -p$MARIADB_ROOT_PASSWORD -e 'show databases;'\n }\n{{- end }}\n"
] | ## Global Docker image parameters
## Please note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Bitnami MariaDB image
## ref: https://hub.docker.com/r/bitnami/mariadb/tags/
##
image:
registry: docker.io
repository: bitnami/mariadb
tag: 10.3.22-debian-10-r27
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Set to true if you would like to see extra information in the logs
## It turns on BASH and NAMI debugging in minideb
## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
debug: false
## String to partially override mariadb.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override mariadb.fullname template
##
# fullnameOverride:
## Init containers parameters:
## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
##
volumePermissions:
enabled: false
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
resources: {}
service:
## Kubernetes service type; ClusterIP and NodePort are supported at present
type: ClusterIP
# clusterIp:
# master: xx.xx.xx.xx
# slave: xx.xx.xx.xx
port: 3306
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
# master: 30001
# slave: 30002
## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccount:
## Specifies whether a ServiceAccount should be created
##
create: false
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the mariadb.fullname template
# name:
## Role Based Access
## Ref: https://kubernetes.io/docs/admin/authorization/rbac/
##
rbac:
create: false
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## Use existing secret (ignores root, db and replication passwords)
##
# existingSecret:
## MariaDB admin credentials
##
rootUser:
## MariaDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-the-root-password-on-first-run
##
password: ""
## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
  ## If it is not forced, a random password will be generated.
##
forcePassword: false
## Mount admin password as a file instead of using an environment variable
##
injectSecretsAsVolume: false
## Custom user/db credentials
##
db:
## MariaDB username and password
## ref: https://github.com/bitnami/bitnami-docker-mariadb#creating-a-database-user-on-first-run
##
user: ""
password: ""
## Database to create
## ref: https://github.com/bitnami/bitnami-docker-mariadb#creating-a-database-on-first-run
##
name: my_database
## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
  ## If it is not forced, a random password will be generated.
##
forcePassword: false
## Mount user password as a file instead of using an environment variable
##
injectSecretsAsVolume: false
## Replication configuration
##
replication:
## Enable replication. This enables the creation of replicas of MariaDB. If false, only a
  ## master deployment will be created
##
enabled: true
## MariaDB replication user
## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-up-a-replication-cluster
##
user: replicator
## MariaDB replication user password
## ref: https://github.com/bitnami/bitnami-docker-mariadb#setting-up-a-replication-cluster
##
password: ""
## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
  ## If it is not forced, a random password will be generated.
##
forcePassword: false
## Mount replication user password as a file instead of using an environment variable
##
injectSecretsAsVolume: false
## initdb scripts
## Specify dictionary of scripts to be run at first boot
## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
##
# initdbScripts:
# my_init_script.sh: |
# #!/bin/sh
# echo "Do something."
#
## ConfigMap with scripts to be run at first boot
## Note: This will override initdbScripts
# initdbScriptsConfigMap:
master:
## Mariadb Master additional pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
# annotations:
# key: value
# another-key: another-value
## MariaDB additional command line flags
## Can be used to specify command line flags, for example:
##
## extraFlags: "--max-connect-errors=1000 --max_connections=155"
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
  ## Kept for backwards compatibility. You can now disable it by removing it
  ## if you wish to set it through master.affinity.podAntiAffinity instead.
##
antiAffinity: soft
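  ## Example (illustrative only): the same soft anti-affinity expressed through
  ## master.affinity.podAntiAffinity; the label values shown depend on your release and may differ.
  # affinity:
  #   podAntiAffinity:
  #     preferredDuringSchedulingIgnoredDuringExecution:
  #       - weight: 100
  #         podAffinityTerm:
  #           topologyKey: kubernetes.io/hostname
  #           labelSelector:
  #             matchLabels:
  #               app: mariadb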
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## updateStrategy for MariaDB Master StatefulSet
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy:
type: RollingUpdate
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
    ## If true, use a Persistent Volume Claim; if false, use emptyDir
##
enabled: true
# Enable persistence using an existing PVC
# existingClaim:
# Subdirectory of the volume to mount
# subPath:
mountPath: /bitnami/mariadb
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## Persistent Volume Claim annotations
##
annotations: {}
## Persistent Volume Access Mode
##
accessModes:
- ReadWriteOnce
## Persistent Volume size
##
size: 8Gi
extraInitContainers: |
# - name: do-something
# image: busybox
# command: ['do', 'something']
## An array to add extra environment variables
## For example:
## extraEnvVars:
## - name: TZ
## value: "Europe/Paris"
##
# extraEnvVars:
## Configure MySQL with a custom my.cnf file
## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file
##
config: |-
[mysqld]
skip-name-resolve
explicit_defaults_for_timestamp
basedir=/opt/bitnami/mariadb
plugin_dir=/opt/bitnami/mariadb/plugin
port=3306
socket=/opt/bitnami/mariadb/tmp/mysql.sock
tmpdir=/opt/bitnami/mariadb/tmp
max_allowed_packet=16M
bind-address=0.0.0.0
pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid
log-error=/opt/bitnami/mariadb/logs/mysqld.log
character-set-server=UTF8
collation-server=utf8_general_ci
[client]
port=3306
socket=/opt/bitnami/mariadb/tmp/mysql.sock
default-character-set=UTF8
plugin_dir=/opt/bitnami/mariadb/plugin
[manager]
port=3306
socket=/opt/bitnami/mariadb/tmp/mysql.sock
pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid
## Configure master resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
livenessProbe:
enabled: true
##
## Initializing the database could take some time
initialDelaySeconds: 120
##
## Default Kubernetes values
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
enabled: true
initialDelaySeconds: 30
##
## Default Kubernetes values
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
podDisruptionBudget:
enabled: false
minAvailable: 1
# maxUnavailable: 1
## Allow customization of the service resource
##
service:
## Add custom annotations to the service
##
annotations: {}
# external-dns.alpha.kubernetes.io/hostname: db.example.com
slave:
replicas: 1
## Mariadb Slave additional pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
# annotations:
# key: value
# another-key: another-value
## MariaDB additional command line flags
## Can be used to specify command line flags, for example:
##
## extraFlags: "--max-connect-errors=1000 --max_connections=155"
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
  ## Kept for backwards compatibility. You can now disable it by removing it
  ## if you wish to set it through slave.affinity.podAntiAffinity instead.
##
antiAffinity: soft
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## updateStrategy for MariaDB Slave StatefulSet
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy:
type: RollingUpdate
persistence:
    ## If true, use a Persistent Volume Claim; if false, use emptyDir
##
enabled: true
# storageClass: "-"
annotations:
accessModes:
- ReadWriteOnce
## Persistent Volume size
##
size: 8Gi
extraInitContainers: |
# - name: do-something
# image: busybox
# command: ['do', 'something']
## An array to add extra environment variables
## For example:
## extraEnvVars:
## - name: TZ
## value: "Europe/Paris"
##
# extraEnvVars:
## Configure MySQL slave with a custom my.cnf file
## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file
##
config: |-
[mysqld]
skip-name-resolve
explicit_defaults_for_timestamp
basedir=/opt/bitnami/mariadb
port=3306
socket=/opt/bitnami/mariadb/tmp/mysql.sock
tmpdir=/opt/bitnami/mariadb/tmp
max_allowed_packet=16M
bind-address=0.0.0.0
pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid
log-error=/opt/bitnami/mariadb/logs/mysqld.log
character-set-server=UTF8
collation-server=utf8_general_ci
[client]
port=3306
socket=/opt/bitnami/mariadb/tmp/mysql.sock
default-character-set=UTF8
[manager]
port=3306
socket=/opt/bitnami/mariadb/tmp/mysql.sock
pid-file=/opt/bitnami/mariadb/tmp/mysqld.pid
##
## Configure slave resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
livenessProbe:
enabled: true
##
## Initializing the database could take some time
initialDelaySeconds: 120
##
## Default Kubernetes values
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
enabled: true
initialDelaySeconds: 45
##
## Default Kubernetes values
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
podDisruptionBudget:
enabled: false
minAvailable: 1
# maxUnavailable: 1
## Allow customization of the service resource
##
service:
## Add custom annotations to the service
##
annotations: {}
# external-dns.alpha.kubernetes.io/hostname: rodb.example.com
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/mysqld-exporter
tag: 0.12.1-debian-10-r27
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
resources: {}
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9104"
## Extra args to be passed to mysqld_exporter
## ref: https://github.com/prometheus/mysqld_exporter/
##
extraArgs:
master: []
slave: []
# - --collect.auto_increment.columns
# - --collect.binlog_size
# - --collect.engine_innodb_status
# - --collect.engine_tokudb_status
# - --collect.global_status
# - --collect.global_variables
# - --collect.info_schema.clientstats
# - --collect.info_schema.innodb_metrics
# - --collect.info_schema.innodb_tablespaces
# - --collect.info_schema.innodb_cmp
# - --collect.info_schema.innodb_cmpmem
# - --collect.info_schema.processlist
# - --collect.info_schema.processlist.min_time
# - --collect.info_schema.query_response_time
# - --collect.info_schema.tables
# - --collect.info_schema.tables.databases
# - --collect.info_schema.tablestats
# - --collect.info_schema.userstats
# - --collect.perf_schema.eventsstatements
# - --collect.perf_schema.eventsstatements.digest_text_limit
# - --collect.perf_schema.eventsstatements.limit
# - --collect.perf_schema.eventsstatements.timelimit
# - --collect.perf_schema.eventswaits
# - --collect.perf_schema.file_events
# - --collect.perf_schema.file_instances
# - --collect.perf_schema.indexiowaits
# - --collect.perf_schema.tableiowaits
# - --collect.perf_schema.tablelocks
# - --collect.perf_schema.replication_group_member_stats
# - --collect.slave_status
# - --collect.slave_hosts
# - --collect.heartbeat
# - --collect.heartbeat.database
# - --collect.heartbeat.table
livenessProbe:
enabled: true
##
## Initializing the database could take some time
initialDelaySeconds: 120
##
## Default Kubernetes values
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
enabled: true
initialDelaySeconds: 30
##
## Default Kubernetes values
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
# Enable this if you're using https://github.com/coreos/prometheus-operator
serviceMonitor:
enabled: false
## Specify a namespace if needed
# namespace: monitoring
# fallback to the prometheus default unless specified
# interval: 10s
# scrapeTimeout: 10s
## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr)
## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1)
## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters)
selector:
prometheus: kube-prometheus
## Bats Framework (= Bash Automated Testing System) is needed to test if MariaDB is accessible
## See test-runner.yaml and tests.yaml for details.
## To run the tests after the deployment, enter "helm test <release-name>".
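## For example, assuming a release named "my-release" (illustrative name only):
##   helm test my-release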
tests:
enabled: true
# resources: {}
testFramework:
image:
registry: docker.io
repository: dduportal/bats
tag: 0.4.0
# resources: {}
|
sumologic-fluentd | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"sumologic-fluentd.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" }}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"sumologic-fluentd.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" }}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified fluentd user conf name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"sumologic-fluentd.fluentdUserConfig.fullname\" -}}\n{{- printf \"%s-confd\" (include \"sumologic-fluentd.fullname\" .) | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"sumologic-fluentd.fullname\" . }}\n labels:\n app: {{ template \"sumologic-fluentd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nrules:\n- apiGroups: [\"\"]\n resources: [\"namespaces\", \"pods\"]\n verbs: [\"get\", \"list\", \"watch\"]\n{{- end }}\n",
"# clusterrolebinding.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"sumologic-fluentd.fullname\" . }}\n labels:\n app: {{ template \"sumologic-fluentd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"sumologic-fluentd.fullname\" . }}\n namespace: {{ .Release.Namespace }}\nroleRef:\n kind: ClusterRole\n name: {{ template \"sumologic-fluentd.fullname\" . }}\n apiGroup: rbac.authorization.k8s.io\n{{ end }}\n",
"# configmap-user.yaml\n{{- if .Values.sumologic.fluentdUserConfig }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"sumologic-fluentd.fluentdUserConfig.fullname\" . }}\n labels:\n app: {{ template \"sumologic-fluentd.fluentdUserConfig.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n annotations:\n checksum/user-conf-config: {{ toYaml .Values.sumologic.fluentdUserConfig | sha256sum }}\ndata:\n{{ toYaml .Values.sumologic.fluentdUserConfig | indent 2 }}\n{{- end -}}\n\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"sumologic-fluentd.fullname\" . }}\n labels:\n app: {{ template \"sumologic-fluentd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ndata:\n {{ $root := . }}\n {{ range $path, $bytes := .Files.Glob \"files/*\"}}\n {{ base $path }}: |\n{{ $root.Files.Get $path | indent 4 }}\n {{ end }}\n",
"# daemonset.yaml\n{{- if (or (.Values.sumologic.collectorUrlExistingSecret) (.Values.sumologic.collectorUrl)) -}}\n# Sumologic collector URL is required\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: {{ template \"sumologic-fluentd.fullname\" . }}\n labels:\n app: {{ template \"sumologic-fluentd.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.daemonsetAnnotations }}\n annotations:\n{{ toYaml .Values.daemonsetAnnotations | indent 6 }}\n{{- end }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"sumologic-fluentd.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n name: {{ template \"sumologic-fluentd.fullname\" . }}\n {{- if .Values.podAnnotations }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n labels:\n app: {{ template \"sumologic-fluentd.name\" . }}\n release: {{ .Release.Name }}\n spec:\n {{- if .Values.daemonset.priorityClassName }}\n priorityClassName: {{ .Values.daemonset.priorityClassName }}\n {{- end }}\n containers:\n - name: {{ template \"sumologic-fluentd.fullname\" . }}\n image: \"{{ .Values.image.name }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{.Values.image.pullPolicy}}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumeMounts:\n - name: {{ template \"sumologic-fluentd.fullname\" . }}\n mountPath: /fluentd/etc\n readOnly: true\n - name: host-logs\n mountPath: /mnt/log/\n readOnly: true\n - name: host-logs\n mountPath: /var/log/\n readOnly: true\n - name: docker-logs\n mountPath: /var/lib/docker/\n readOnly: true\n - name: pos-files\n mountPath: /mnt/pos/\n {{- if eq .Values.sumologic.fluentdSource \"systemd\" }}\n - name: rkt-logs\n mountPath: /var/lib/rkt/\n readOnly: true\n {{- end }}\n {{- if .Values.sumologic.fluentdUserConfig }}\n {{- if .Values.sumologic.fluentdUserConfigDir }}\n - name: fluentd-user-conf\n mountPath: {{ quote .Values.sumologic.fluentdUserConfigDir }}\n readOnly: true\n {{- else }}\n - name: fluentd-user-conf\n mountPath: /fluentd/conf.d/user\n readOnly: true\n {{- end }}\n {{- end }}\n env:\n - name: COLLECTOR_URL\n valueFrom:\n secretKeyRef:\n {{- if .Values.sumologic.collectorUrlExistingSecret }}\n name: \"{{ .Values.sumologic.collectorUrlExistingSecret }}\"\n {{- else }}\n name: \"{{ template \"sumologic-fluentd.fullname\" . 
}}\"\n {{- end }}\n key: collector-url\n - name: K8S_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: FLUENTD_SOURCE\n value: {{ quote .Values.sumologic.fluentdSource }}\n {{- if .Values.sumologic.fluentdUserConfigDir }}\n - name: FLUENTD_USER_CONFIG_DIR\n value: {{ quote .Values.sumologic.fluentdUserConfigDir }}\n {{- end }}\n {{- if .Values.sumologic.flushInterval }}\n - name: FLUSH_INTERVAL\n value: {{ quote .Values.sumologic.flushInterval }}\n {{- end }}\n {{- if .Values.sumologic.numThreads }}\n - name: NUM_THREADS\n value: {{ quote .Values.sumologic.numThreads }}\n {{- end }}\n {{- if .Values.sumologic.sourceName }}\n - name: SOURCE_NAME\n value: {{ quote .Values.sumologic.sourceName }}\n {{- end }}\n {{- if .Values.sumologic.sourceHost }}\n - name: SOURCE_HOST\n value: {{ quote .Values.sumologic.sourceHost }}\n {{- end }}\n {{- if .Values.sumologic.sourceCategory }}\n - name: SOURCE_CATEGORY\n value: {{ quote .Values.sumologic.sourceCategory }}\n {{- end }}\n {{- if .Values.sumologic.sourceCategoryPrefix }}\n - name: SOURCE_CATEGORY_PREFIX\n value: {{ quote .Values.sumologic.sourceCategoryPrefix }}\n {{- end }}\n {{- if .Values.sumologic.sourceCategoryReplaceDash }}\n - name: SOURCE_CATEGORY_REPLACE_DASH\n value: {{ quote .Values.sumologic.sourceCategoryReplaceDash }}\n {{- end }}\n {{- if .Values.sumologic.logFormat }}\n - name: LOG_FORMAT\n value: {{ quote .Values.sumologic.logFormat }}\n {{- end }}\n {{- if quote .Values.sumologic.kubernetesMeta }}\n - name: KUBERNETES_META\n value: {{ quote .Values.sumologic.kubernetesMeta }}\n {{- end }}\n {{- if .Values.sumologic.excludeContainerRegex }}\n - name: EXCLUDE_CONTAINER_REGEX\n value: {{ quote .Values.sumologic.excludeContainerRegex }}\n {{- end }}\n {{- if .Values.sumologic.excludeFacilityRegex }}\n - name: EXCLUDE_FACILITY_REGEX\n value: {{ quote .Values.sumologic.excludeFacilityRegex }}\n {{- end }}\n {{- if .Values.sumologic.excludeHostRegex }}\n - name: EXCLUDE_HOST_REGEX\n value: {{ quote .Values.sumologic.excludeHostRegex }}\n {{- end }}\n {{- if .Values.sumologic.excludeNamespaceRegex }}\n - name: EXCLUDE_NAMESPACE_REGEX\n value: {{ quote .Values.sumologic.excludeNamespaceRegex }}\n {{- end }}\n {{- if .Values.sumologic.excludePath }}\n - name: EXCLUDE_PATH\n value: {{ quote .Values.sumologic.excludePath }}\n {{- end }}\n {{- if .Values.sumologic.excludePodRegex }}\n - name: EXCLUDE_POD_REGEX\n value: {{ quote .Values.sumologic.excludePodRegex }}\n {{- end }}\n {{- if .Values.sumologic.excludePriorityRegex }}\n - name: EXCLUDE_PRIORITY_REGEX\n value: {{ quote .Values.sumologic.excludePriorityRegex }}\n {{- end }}\n {{- if .Values.sumologic.excludeUnitRegex }}\n - name: EXCLUDE_UNIT_REGEX\n value: {{ quote .Values.sumologic.excludeUnitRegex }}\n {{- end }}\n {{- if .Values.sumologic.fluentdOpt }}\n - name: FLUENTD_OPT\n value: {{ quote .Values.sumologic.fluentdOpt }}\n {{- end }}\n {{- if quote .Values.sumologic.timeKey }}\n - name: TIME_KEY\n value: {{ quote .Values.sumologic.timeKey }}\n {{- end }}\n {{- if quote .Values.sumologic.addTimeStamp }}\n - name: ADD_TIMESTAMP\n value: {{ quote .Values.sumologic.addTimeStamp }}\n {{- end }}\n {{- if quote .Values.sumologic.addTime }}\n - name: ADD_TIME\n value: {{ quote .Values.sumologic.addTime }}\n {{- end }}\n {{- if quote .Values.sumologic.addStream }}\n - name: ADD_STREAM\n value: {{ quote .Values.sumologic.addStream }}\n {{- end }}\n {{- if quote .Values.sumologic.verifySsl }}\n - name: VERIFY_SSL\n value: {{ quote .Values.sumologic.verifySsl }}\n {{- 
end }}\n {{- if .Values.sumologic.multilineStartRegexp }}\n - name: MULTILINE_START_REGEXP\n value: {{ quote .Values.sumologic.multilineStartRegexp }}\n {{- end }}\n {{- if quote .Values.sumologic.readFromHead }}\n - name: READ_FROM_HEAD\n value: {{ quote .Values.sumologic.readFromHead }}\n {{- end }}\n {{- if .Values.sumologic.concatSeparator }}\n - name: CONCAT_SEPARATOR\n value: {{ quote .Values.sumologic.concatSeparator }}\n {{- end }}\n {{- if .Values.sumologic.auditLogPath }}\n - name: AUDIT_LOG_PATH\n value: {{ quote .Values.sumologic.auditLogPath }}\n {{- end }}\n {{- if .Values.sumologic.containerLogsPath }}\n - name: CONTAINER_LOGS_PATH\n value: {{ quote .Values.sumologic.containerLogsPath }}\n {{- end }}\n {{- if .Values.sumologic.proxyUri }}\n - name: PROXY_URI\n value: {{ quote .Values.sumologic.proxyUri }}\n {{- end }}\n {{- if .Values.sumologic.enableStatWatcher }}\n - name: ENABLE_STAT_WATCHER\n value: {{ quote .Values.sumologic.enableStatWatcher }}\n {{- end }}\n{{- if .Values.extraEnv }}\n{{ toYaml .Values.extraEnv | indent 12 }}\n{{- end }}\n serviceAccountName: {{ if .Values.rbac.create }}{{ template \"sumologic-fluentd.fullname\" . }}{{ else }}\"{{ .Values.rbac.serviceAccountName }}\"{{ end }}\n volumes:\n - name: {{ template \"sumologic-fluentd.fullname\" . }}\n configMap:\n name: {{ template \"sumologic-fluentd.fullname\" . }}\n - name: pos-files\n {{- if .Values.persistence.enabled }}\n hostPath:\n path: {{ .Values.persistence.hostPath }}\n type: \"{{ if .Values.persistence.createPath }}DirectoryOrCreate{{ end }}\"\n {{- else }}\n emptyDir: {}\n {{- end }}\n - name: host-logs\n hostPath:\n path: /var/log/\n {{- if eq .Values.sumologic.fluentdSource \"systemd\" }}\n - name: rkt-logs\n hostPath:\n path: /var/lib/rkt\n {{- end }}\n - name: docker-logs\n hostPath:\n path: /var/lib/docker\n {{- if .Values.sumologic.fluentdUserConfig }}\n - name: fluentd-user-conf\n configMap:\n name: {{ template \"sumologic-fluentd.fluentdUserConfig.fullname\" . }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{ if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n updateStrategy:\n type: \"{{ .Values.updateStrategy }}\"\n{{- end }}\n",
"# secrets.yaml\n{{- if not .Values.sumologic.collectorUrlExistingSecret -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"sumologic-fluentd.fullname\" . }}\n labels:\n app: {{ template \"sumologic-fluentd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n annotations:\n \"helm.sh/hook\": pre-install,pre-upgrade\n \"helm.sh/hook-delete-policy\": \"before-hook-creation\"\ntype: Opaque\ndata:\n collector-url: {{ default \"MISSING\" .Values.sumologic.collectorUrl | b64enc | quote }}\n{{- end }}\n",
"# serviceaccount.yaml\n{{- if .Values.rbac.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"sumologic-fluentd.fullname\" . }}\n labels:\n app: {{ template \"sumologic-fluentd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- end }}\n"
] | # Default values for sumologic-fluentd.
image:
name: sumologic/fluentd-kubernetes-sumologic
tag: v2.4.2
pullPolicy: IfNotPresent
## Annotations to add to the DaemonSet's Pods
podAnnotations: {}
# scheduler.alpha.kubernetes.io/tolerations: '[{"key": "example", "value": "foo"}]'
## Annotations to add to the DaemonSet
daemonsetAnnotations: {}
## Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6)
tolerations: []
# - key: node.alpha.kubernetes.io/role
# effect: NoSchedule
# operator: "Exists"
## Node labels for fluentd pod assignment
nodeSelector: {}
## Expressions for affinity
affinity: {}
# Extra environment variables to set for fluentd
extraEnv: []
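## For example (hypothetical variable, illustrative only; entries are standard
## Kubernetes env var maps passed through to the fluentd container):
# extraEnv:
#   - name: MY_EXTRA_VAR
#     value: "some-value"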
## Allow the DaemonSet to perform a rolling update on helm update
## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
updateStrategy: OnDelete
sumologic:
  ## You'll need to set this to your Sumo collector URL before the agent will run.
## ref: https://github.com/SumoLogic/fluentd-kubernetes-sumologic#sumologic
# collectorUrl:
## Use existing Secret which stores collector url instead of creating a new one
# collectorUrlExistingSecret:
## The source of fluentd logs, either file or systemd
fluentdSource: file
  ## A directory in the container holding user-defined fluentd configuration files ("*.conf")
  ## (Default "/fluentd/conf.d/user")
fluentdUserConfigDir: ""
## Provide user-defined fluentd configuration files
## Each key will become a file in `fluentdUserConfigDir`
##
# fluentdUserConfig:
# source.systemd.conf: |-
# <source>
# ...
# </source>
## How frequently to push logs to SumoLogic (default 5s)
## ref: https://github.com/SumoLogic/fluentd-kubernetes-sumologic#options
# flushInterval: 5
## Increase number of http threads to Sumo. May be required in heavy logging clusters (default 1)
# numThreads: 1
## Set the _sourceName metadata field in SumoLogic. (Default "%{namespace}.%{pod}.%{container}")
sourceName: ""
## Set the _sourceHost metadata field in SumoLogic. (Default Nil)
sourceHost: ""
## Set the _sourceCategory metadata field in SumoLogic. (Default "%{namespace}/%{pod_name}")
sourceCategory: ""
## Set the prefix, for _sourceCategory metadata. (Default kubernetes/)
sourceCategoryPrefix: "kubernetes/"
## Used to replace - with another character. (default /)
sourceCategoryReplaceDash: ""
## Format to post logs into Sumo. json, json_merge, or text (default json)
logFormat: json
## Include or exclude Kubernetes metadata such as namespace and pod_name if
## using json log format. (default true)
kubernetesMeta: true
## A ruby regex for containers. All matching containers will be excluded
## from Sumo Logic. The logs will still be sent to FluentD
## ref: http://rubular.com/
## ref: https://github.com/SumoLogic/fluentd-kubernetes-sumologic#options
excludeContainerRegex: ""
## Files matching this pattern will be ignored by the in_tail plugin, and will
## not be sent to Kubernetes or Sumo Logic. This can be a comma separated list.
## ref: http://docs.fluentd.org/v0.12/articles/in_tail#excludepath
excludePath: ""
## A ruby regex for hosts. All matching hosts will be excluded from Sumo
## Logic. The logs will still be sent to FluentD
## ref: http://rubular.com/
## ref: https://github.com/SumoLogic/fluentd-kubernetes-sumologic#options
excludeHostRegex: ""
## A ruby regex for namespaces. All matching namespaces will be excluded
## from Sumo Logic. The logs will still be sent to FluentD
## ref: http://rubular.com/
## ref: https://github.com/SumoLogic/fluentd-kubernetes-sumologic#options
excludeNamespaceRegex: ""
## A ruby regex for pods. All matching pods will be excluded from Sumo
## Logic. The logs will still be sent to FluentD
## ref: http://rubular.com/
excludePodRegex: ""
  ## A ruby regex for syslog facilities. All matching facilities will be excluded from
## Sumo Logic. The logs will still be sent to FluentD
## ref: http://rubular.com/
## ref: https://github.com/SumoLogic/fluentd-kubernetes-sumologic#options
excludeFacilityRegex: ""
## A ruby regex for syslog priorities, which are integers represented as
## strings. All matching priorities will be excluded from
## Sumo Logic. The logs will still be sent to FluentD
## ref: http://rubular.com/
## ref: https://github.com/SumoLogic/fluentd-kubernetes-sumologic#options
excludePriorityRegex: ""
  ## A ruby regex for systemd units. All matching units will be excluded from
## Sumo Logic. The logs will still be sent to FluentD
## ref: http://rubular.com/
## ref: https://github.com/SumoLogic/fluentd-kubernetes-sumologic#options
excludeUnitRegex: ""
## The field name for json formatted sources that should be used as the time
## ref: https://docs.fluentd.org/v0.12/articles/formatter_json#time_key-(string,-optional,-defaults-to-%E2%80%9Ctime%E2%80%9D)
# timeKey:
## Specify the path in_tail should watch for container logs. (Default /mnt/log/containers/*.log)
# containerLogsPath:
## Add the uri of the proxy environment if present.
# proxyUri:
## Option to control the enabling of stat_watcher. (Default true)
## ref: https://docs.fluentd.org/v1.0/articles/in_tail#enable_stat_watcher
# enableStatWatcher:
## Option to control adding timestamp to logs. (Default true)
addTimeStamp: true
## Option to control adding time to logs. (Default true)
addTime: true
## Option to control adding stream to logs. (Default true)
addStream: true
## Fluentd command line options
## ref: http://docs.fluentd.org/v0.12/articles/command-line-option
fluentdOpt: ""
## Verify SumoLogic HTTPS certificates (Default true)
verifySsl: true
## The regular expression for the "concat" plugin to use when merging multi-line messages
## (Default "/^\w{3} \d{1,2}, \d{4}/", i.e. Julian dates)
multilineStartRegexp: ""
  ## Start reading the logs from the head of the file, not the bottom.
## Only applies to containers log files. See in_tail doc for more information (Default true)
readFromHead: true
## The character to use to delimit lines within the final concatenated message.
## Most multi-line messages contain a newline at the end of each line (Default Nil)
concatSeparator: ""
## Define the path to the Kubernetes Audit Log (Default "/mnt/log/kube-apiserver-audit.log")
## ref: https://kubernetes.io/docs/tasks/debug-application-cluster/audit/
auditLogPath: ""
## Note: Requires Kubernetes 1.8+ to enable this feature via hostPath volume type
## By default, the daemonset will store position files, for logs tailed, in an
## emptyDir. If you already have a directory on the host for storing pos files,
## specify it here. Otherwise, it can create the directory for you by setting the type
## to "DirectoryOrCreate".
persistence:
enabled: false
hostPath: /var/run/fluentd-pos
createPath: false
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 256m
memory: 256Mi
rbac:
## If true, create and use RBAC resources
create: false
## Ignored if rbac.create is true
serviceAccountName: default
daemonset: {}
# Priority Class to use for deployed daemonsets
# priorityClassName: ""
|
cerebro | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"cerebro.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"cerebro.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"cerebro.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"cerebro.fullname\" . }}\n labels:\n app: {{ template \"cerebro.name\" . }}\n chart: {{ template \"cerebro.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n application.conf: |-\n {{- if .Values.config.secret }}\n secret = {{ .Values.config.secret | quote }}\n {{- else }}\n secret = {{ randAlphaNum 64 | quote }}\n {{- end }}\n\n {{- if .Values.config.tlsVerify }}\n play.ws.ssl.loose.acceptAnyCertificate = false\n {{- else }}\n play.ws.ssl.loose.acceptAnyCertificate = true\n {{- end }}\n {{- if .Values.config.tlsCaCert }}\n play.ws.ssl {\n trustManager = {\n stores = [\n { type = \"PEM\", path = {{ .Values.config.tlsCaCert | quote }} }\n ]\n }\n }\n {{- end }}\n\n basePath = {{ .Values.config.basePath | quote }}\n\n pidfile.path = /dev/null\n\n rest.history.size = {{ .Values.config.restHistorySize }}\n\n data.path = \"/var/db/cerebro/cerebro.db\"\n\n es = {\n gzip = true\n }\n\n auth = {\n # either basic or ldap\n type: ${?AUTH_TYPE}\n settings {\n # LDAP\n url = ${?LDAP_URL}\n base-dn = ${?LDAP_BASE_DN}\n method = ${?LDAP_METHOD}\n user-template = ${?LDAP_USER_TEMPLATE}\n bind-dn = ${?LDAP_BIND_DN}\n bind-pw = ${?LDAP_BIND_PWD}\n group-search {\n base-dn = ${?LDAP_GROUP_BASE_DN}\n user-attr = ${?LDAP_USER_ATTR}\n user-attr-template = ${?LDAP_USER_ATTR_TEMPLATE}\n group = ${?LDAP_GROUP}\n }\n\n # Basic auth\n username = ${?BASIC_AUTH_USER}\n password = ${?BASIC_AUTH_PWD}\n }\n }\n\n hosts = [\n {{- range $index, $element := .Values.config.hosts }}\n {{ if $index }},{{ end }}\n {\n host = {{ $element.host | quote }}\n name = {{ $element.name | quote }}\n }\n {{- end }}\n ]\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"cerebro.fullname\" . }}\n labels:\n app: {{ template \"cerebro.name\" . }}\n chart: {{ template \"cerebro.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- range $key, $value := .Values.deployment.labels }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n{{- if .Values.deployment.annotations }}\n annotations:\n{{ toYaml .Values.deployment.annotations | indent 4 }}\n{{- end }}\nspec:\n replicas: {{ .Values.replicaCount }}\n revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}\n selector:\n matchLabels:\n app: {{ template \"cerebro.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"cerebro.name\" . }}\n release: {{ .Release.Name }}\n {{- if .Values.deployment.podLabels }}\n {{- toYaml .Values.deployment.podLabels | nindent 8 }}\n {{- end }}\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n {{- if .Values.deployment.podAnnotations }}\n {{- toYaml .Values.deployment.podAnnotations | nindent 8 }}\n {{- end }}\n spec:\n {{- if .Values.securityContext }}\n securityContext:\n{{ toYaml .Values.securityContext | indent 8 }}\n {{- end }}\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n {{- end }}\n {{- end }}\n {{- if .Values.priorityClassName }}\n priorityClassName: {{ .Values.priorityClassName }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n args: [\"-Dconfig.file=/etc/cerebro/application.conf\"]\n ports:\n - name: http\n containerPort: 9000\n protocol: TCP\n volumeMounts:\n - name: db\n mountPath: /var/db/cerebro\n - name: config\n mountPath: /etc/cerebro\n - name: logs\n mountPath: /opt/cerebro/logs/\n - name: tmp\n mountPath: /tmp\n {{- if .Values.volumeMounts }}\n{{ toYaml .Values.volumeMounts | indent 12 }}\n {{- end }}\n {{- if .Values.env }}\n env:\n {{- range $index, $element := .Values.env }}\n - name: {{ $index | quote }}\n value: {{ $element | quote }}\n {{- end }}\n {{- end }}\n {{- if .Values.envFromSecretRef }}\n envFrom:\n - secretRef:\n name: \"{{ .Values.envFromSecretRef }}\"\n {{- end }}\n {{- if .Values.deployment.livenessProbe.enabled}}\n livenessProbe:\n httpGet:\n path: {{ .Values.config.basePath }}\n port: http\n initialDelaySeconds: 120\n {{- end }}\n {{- if .Values.deployment.readinessProbe.enabled}}\n readinessProbe:\n httpGet:\n path: {{ .Values.config.basePath }}\n port: http\n initialDelaySeconds: 5\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumes:\n - name: db\n emptyDir: {}\n - name: config\n configMap:\n name: {{ template \"cerebro.fullname\" . }}\n - name: logs\n emptyDir: {}\n - name: tmp\n emptyDir: {}\n {{- if .Values.volumes }}\n{{ toYaml .Values.volumes | indent 8 }}\n {{- end }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"cerebro.fullname\" . -}}\n{{- $ingressPath := .Values.ingress.path -}}\napiVersion: {{- if .Capabilities.APIVersions.Has \"networking.k8s.io/v1beta1\" }} networking.k8s.io/v1beta1 {{- else }} extensions/v1beta1 {{- end }}\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app: {{ template \"cerebro.name\" . }}\n chart: {{ template \"cerebro.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.ingress.labels }}\n{{ toYaml .Values.ingress.labels | indent 4 }}\n{{- end }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: http\n {{- end }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"cerebro.fullname\" . }}\n labels:\n app: {{ template \"cerebro.name\" . }}\n chart: {{ template \"cerebro.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.service.labels }}\n{{ toYaml .Values.service.labels | indent 4 }}\n{{- end }}\n{{- with .Values.service.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: http\n protocol: TCP\n name: http\n selector:\n app: {{ template \"cerebro.name\" . }}\n release: {{ .Release.Name }}\n"
] | replicaCount: 1
revisionHistoryLimit: 3
env:
# AUTH_TYPE: "basic"
# BASIC_AUTH_USER: "admin"
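  ## Note: the bundled application.conf also reads BASIC_AUTH_PWD for basic auth
  ## (see the ConfigMap template); the value below is illustrative only:
  # BASIC_AUTH_PWD: "changeme"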
image:
repository: lmenezes/cerebro
# Note: when updating the version, ensure `config` and the ConfigMap are kept
# in sync with the default configuration of the upstream image
tag: 0.9.2
pullPolicy: IfNotPresent
deployment:
# additional labels
labels: {}
annotations: {}
podLabels: {}
podAnnotations: {}
livenessProbe:
enabled: true
readinessProbe:
enabled: true
service:
type: ClusterIP
port: 80
annotations: {}
labels: {}
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
securityContext:
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
priorityClassName: ""
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Reference to a Secret object with environment variables
# envFromSecretRef: 'my-secret-ref'
config:
basePath: '/'
restHistorySize: 50
hosts: []
# - host:
# name:
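  ## For example (hypothetical Elasticsearch endpoint, illustrative only; each
  ## entry needs the host and name keys consumed by the ConfigMap template):
  # hosts:
  #   - host: "http://elasticsearch:9200"
  #     name: "my-cluster"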
# Secret used to sign session cookies. If empty it will be replaced with a
  # random 64-character string
secret: ''
tlsVerify: true
# tlsCaCert: /opt/cerebro/conf/certs/ca.crt
# volumeMounts:
# - mountPath: /opt/cerebro/conf/certs/
# name: ca_cert_volume
# readOnly: true
# volumes:
# - name: ca_cert_volume
# secret:
# defaultMode: 420
# optional: false
# secretName: cerebro_ca_cert_secret
|
mysql | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"mysql.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"mysql.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- printf .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGenerate chart secret name\n*/}}\n{{- define \"mysql.secretName\" -}}\n{{ default (include \"mysql.fullname\" .) .Values.existingSecret }}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"mysql.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n{{ default (include \"mysql.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n{{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# configurationFiles-configmap.yaml\n{{- if .Values.configurationFiles }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"mysql.fullname\" . }}-configuration\n namespace: {{ .Release.Namespace }}\ndata:\n{{- range $key, $val := .Values.configurationFiles }}\n {{ $key }}: |-\n{{ $val | indent 4}}\n{{- end }}\n{{- end -}}",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"mysql.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"mysql.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- with .Values.deploymentAnnotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\n\nspec:\n strategy:\n{{ toYaml .Values.strategy | indent 4 }}\n selector:\n matchLabels:\n app: {{ template \"mysql.fullname\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"mysql.fullname\" . }}\n release: {{ .Release.Name }}\n{{- with .Values.podLabels }}\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.podAnnotations }}\n annotations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n spec:\n {{- if .Values.schedulerName }}\n schedulerName: \"{{ .Values.schedulerName }}\"\n {{- end }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.imagePullSecrets | indent 8 }}\n {{- end }}\n {{- if .Values.priorityClassName }}\n priorityClassName: \"{{ .Values.priorityClassName }}\"\n {{- end }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.securityContext.fsGroup }}\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n serviceAccountName: {{ template \"mysql.serviceAccountName\" . }}\n initContainers:\n - name: \"remove-lost-found\"\n image: \"{{ .Values.busybox.image}}:{{ .Values.busybox.tag }}\"\n imagePullPolicy: {{ .Values.imagePullPolicy | quote }}\n resources:\n{{ toYaml .Values.initContainer.resources | indent 10 }}\n command: [\"rm\", \"-fr\", \"/var/lib/mysql/lost+found\"]\n volumeMounts:\n - name: data\n mountPath: /var/lib/mysql\n {{- if .Values.persistence.subPath }}\n subPath: {{ .Values.persistence.subPath }}\n {{- end }}\n {{- if .Values.extraInitContainers }}\n{{ tpl .Values.extraInitContainers . | indent 6 }}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n containers:\n - name: {{ template \"mysql.fullname\" . }}\n image: \"{{ .Values.image }}:{{ .Values.imageTag }}\"\n imagePullPolicy: {{ .Values.imagePullPolicy | quote }}\n\n {{- with .Values.args }}\n args:\n {{- range . }}\n - {{ . | quote }}\n {{- end }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n env:\n {{- if .Values.mysqlAllowEmptyPassword }}\n - name: MYSQL_ALLOW_EMPTY_PASSWORD\n value: \"true\"\n {{- end }}\n {{- if not (and .Values.allowEmptyRootPassword (not .Values.mysqlRootPassword)) }}\n - name: MYSQL_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mysql.secretName\" . }}\n key: mysql-root-password\n {{- if .Values.mysqlAllowEmptyPassword }}\n optional: true\n {{- end }}\n {{- end }}\n {{- if not (and .Values.allowEmptyRootPassword (not .Values.mysqlPassword)) }}\n - name: MYSQL_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mysql.secretName\" . 
}}\n key: mysql-password\n {{- if or .Values.mysqlAllowEmptyPassword (empty .Values.mysqlUser) }}\n optional: true\n {{- end }}\n {{- end }}\n - name: MYSQL_USER\n value: {{ default \"\" .Values.mysqlUser | quote }}\n - name: MYSQL_DATABASE\n value: {{ default \"\" .Values.mysqlDatabase | quote }}\n {{- if .Values.timezone }}\n - name: TZ\n value: {{ .Values.timezone }}\n {{- end }}\n {{- if .Values.extraEnvVars }}\n{{ tpl .Values.extraEnvVars . | indent 8 }}\n {{- end }}\n ports:\n - name: mysql\n containerPort: 3306\n {{- if .Values.mysqlx.port.enabled }}\n - name: mysqlx\n port: 33060\n {{- end }}\n livenessProbe:\n exec:\n command:\n {{- if .Values.mysqlAllowEmptyPassword }}\n - mysqladmin\n - ping\n {{- else }}\n - sh\n - -c\n - \"mysqladmin ping -u root -p${MYSQL_ROOT_PASSWORD}\"\n {{- end }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n readinessProbe:\n exec:\n command:\n {{- if .Values.mysqlAllowEmptyPassword }}\n - mysqladmin\n - ping\n {{- else }}\n - sh\n - -c\n - \"mysqladmin ping -u root -p${MYSQL_ROOT_PASSWORD}\"\n {{- end }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n volumeMounts:\n - name: data\n mountPath: /var/lib/mysql\n {{- if .Values.persistence.subPath }}\n subPath: {{ .Values.persistence.subPath }}\n {{- end }}\n {{- if .Values.configurationFiles }}\n {{- range $key, $val := .Values.configurationFiles }}\n - name: configurations\n mountPath: {{ $.Values.configurationFilesPath }}{{ $key }}\n subPath: {{ $key }}\n {{- end -}}\n {{- end }}\n {{- if .Values.initializationFiles }}\n - name: migrations\n mountPath: /docker-entrypoint-initdb.d\n {{- end }}\n {{- if .Values.ssl.enabled }}\n - name: certificates\n mountPath: /ssl\n {{- end }}\n {{- if .Values.extraVolumeMounts }}\n{{ tpl .Values.extraVolumeMounts . | indent 8 }}\n {{- end }}\n {{- if .Values.metrics.enabled }}\n - name: metrics\n image: \"{{ .Values.metrics.image }}:{{ .Values.metrics.imageTag }}\"\n imagePullPolicy: {{ .Values.metrics.imagePullPolicy | quote }}\n {{- if .Values.mysqlAllowEmptyPassword }}\n command:\n - 'sh'\n - '-c'\n - 'DATA_SOURCE_NAME=\"root@(localhost:3306)/\" /bin/mysqld_exporter'\n {{- else }}\n env:\n - name: MYSQL_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mysql.secretName\" . 
}}\n key: mysql-root-password\n command:\n - 'sh'\n - '-c'\n - 'DATA_SOURCE_NAME=\"root:$MYSQL_ROOT_PASSWORD@(localhost:3306)/\" /bin/mysqld_exporter'\n {{- end }}\n {{- range $f := .Values.metrics.flags }}\n - {{ $f | quote }}\n {{- end }}\n ports:\n - name: metrics\n containerPort: 9104\n livenessProbe:\n httpGet:\n path: /\n port: metrics\n initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}\n timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}\n readinessProbe:\n httpGet:\n path: /\n port: metrics\n initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}\n timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}\n resources:\n{{ toYaml .Values.metrics.resources | indent 10 }}\n {{- end }}\n volumes:\n {{- if .Values.configurationFiles }}\n - name: configurations\n configMap:\n name: {{ template \"mysql.fullname\" . }}-configuration\n {{- end }}\n {{- if .Values.initializationFiles }}\n - name: migrations\n configMap:\n name: {{ template \"mysql.fullname\" . }}-initialization\n {{- end }}\n {{- if .Values.ssl.enabled }}\n - name: certificates\n secret:\n secretName: {{ .Values.ssl.secret }}\n {{- end }}\n - name: data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.existingClaim | default (include \"mysql.fullname\" .) }}\n {{- else }}\n emptyDir: {}\n {{- end -}}\n {{- if .Values.extraVolumes }}\n{{ tpl .Values.extraVolumes . | indent 6 }}\n {{- end }}\n",
"# initializationFiles-configmap.yaml\n{{- if .Values.initializationFiles }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"mysql.fullname\" . }}-initialization\n namespace: {{ .Release.Namespace }}\ndata:\n{{- range $key, $val := .Values.initializationFiles }}\n {{ $key }}: |-\n{{ $val | indent 4}}\n{{- end }}\n{{- end -}}",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"mysql.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n{{- with .Values.persistence.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\n labels:\n app: {{ template \"mysql.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# secrets.yaml\n{{- if not .Values.existingSecret }}\n{{- if or (not .Values.allowEmptyRootPassword) (or .Values.mysqlRootPassword .Values.mysqlPassword) }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"mysql.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"mysql.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n {{ if .Values.mysqlRootPassword }}\n mysql-root-password: {{ .Values.mysqlRootPassword | b64enc | quote }}\n {{ else }}\n {{ if not .Values.allowEmptyRootPassword }}\n mysql-root-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n {{ end }}\n {{ if .Values.mysqlPassword }}\n mysql-password: {{ .Values.mysqlPassword | b64enc | quote }}\n {{ else }}\n {{ if not .Values.allowEmptyRootPassword }}\n mysql-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n {{ end }}\n{{ end }}\n{{- if .Values.ssl.enabled }}\n{{ if .Values.ssl.certificates }}\n{{- range .Values.ssl.certificates }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ .name }}\n labels:\n app: {{ template \"mysql.fullname\" $ }}\n chart: \"{{ $.Chart.Name }}-{{ $.Chart.Version }}\"\n release: \"{{ $.Release.Name }}\"\n heritage: \"{{ $.Release.Service }}\"\ntype: Opaque\ndata:\n ca.pem: {{ .ca | b64enc }}\n server-cert.pem: {{ .cert | b64enc }}\n server-key.pem: {{ .key | b64enc }}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"mysql.serviceAccountName\" . }}\n labels:\n app: {{ template \"mysql.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- end }}\n",
"# servicemonitor.yaml\n{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ include \"mysql.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"mysql.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n {{- if .Values.metrics.serviceMonitor.additionalLabels }}\n{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }}\n {{- end }}\nspec:\n endpoints:\n - port: metrics\n interval: 30s\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n selector:\n matchLabels:\n app: {{ include \"mysql.fullname\" . }}\n release: {{ .Release.Name }}\n{{- end }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"mysql.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"mysql.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n annotations:\n{{- if .Values.service.annotations }}\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\n{{- if and (.Values.metrics.enabled) (.Values.metrics.annotations) }}\n{{ toYaml .Values.metrics.annotations | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n {{- if (and (eq .Values.service.type \"LoadBalancer\") (not (empty .Values.service.loadBalancerIP))) }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n {{- end }}\n ports:\n - name: mysql\n port: {{ .Values.service.port }}\n targetPort: mysql\n {{- if .Values.service.nodePort }}\n nodePort: {{ .Values.service.nodePort }}\n {{- end }}\n {{- if .Values.mysqlx.port.enabled }}\n - name: mysqlx\n port: 33060\n targetPort: mysqlx\n protocol: TCP\n {{- end }}\n {{- if .Values.metrics.enabled }}\n - name: metrics\n port: 9104\n targetPort: metrics\n {{- end }}\n selector:\n app: {{ template \"mysql.fullname\" . }}\n",
"# test-configmap.yaml\n{{- if .Values.testFramework.enabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"mysql.fullname\" . }}-test\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"mysql.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\ndata:\n run.sh: |-\n {{- if .Values.ssl.enabled | and .Values.mysqlRootPassword }}\n @test \"Testing SSL MySQL Connection\" {\n mysql --host={{ template \"mysql.fullname\" . }} --port={{ .Values.service.port | default \"3306\" }} --ssl-cert=/ssl/server-cert.pem --ssl-key=ssl/server-key.pem -u root -p{{ .Values.mysqlRootPassword }}\n }\n {{- else if .Values.mysqlRootPassword }}\n @test \"Testing MySQL Connection\" {\n mysql --host={{ template \"mysql.fullname\" . }} --port={{ .Values.service.port | default \"3306\" }} -u root -p{{ .Values.mysqlRootPassword }}\n }\n {{- end }}\n{{- end }}\n",
"# test.yaml\n{{- if .Values.testFramework.enabled }}\napiVersion: v1\nkind: Pod\nmetadata:\n name: {{ template \"mysql.fullname\" . }}-test\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"mysql.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\n annotations:\n \"helm.sh/hook\": test-success\nspec:\n {{- if .Values.testFramework.securityContext }}\n securityContext: {{ toYaml .Values.testFramework.securityContext | nindent 4 }}\n {{- end }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n {{- range .Values.imagePullSecrets }}\n - name: {{ . }}\n {{- end}}\n {{- end }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 4 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 4 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 4 }}\n {{- end }}\n containers:\n - name: {{ .Release.Name }}-test\n image: \"{{ .Values.testFramework.image }}:{{ .Values.testFramework.tag }}\"\n imagePullPolicy: \"{{ .Values.testFramework.imagePullPolicy}}\"\n command: [\"/opt/bats/bin/bats\", \"-t\", \"/tests/run.sh\"]\n volumeMounts:\n - mountPath: /tests\n name: tests\n readOnly: true\n {{- if .Values.ssl.enabled }}\n - name: certificates\n mountPath: /ssl\n {{- end }}\n volumes:\n - name: tests\n configMap:\n name: {{ template \"mysql.fullname\" . }}-test\n {{- if .Values.ssl.enabled }}\n - name: certificates\n secret:\n secretName: {{ .Values.ssl.secret }}\n {{- end }}\n restartPolicy: Never\n{{- end }}\n"
] | ## mysql image version
## ref: https://hub.docker.com/r/library/mysql/tags/
##
image: "mysql"
imageTag: "5.7.30"
strategy:
type: Recreate
busybox:
image: "busybox"
tag: "1.32"
testFramework:
enabled: true
image: "bats/bats"
tag: "1.2.1"
imagePullPolicy: IfNotPresent
securityContext: {}
## Specify password for root user
##
## Default: random 10 character string
# mysqlRootPassword: testing
## Create a database user
##
# mysqlUser:
## Default: random 10 character string
# mysqlPassword:
## Allow unauthenticated access, uncomment to enable
##
# mysqlAllowEmptyPassword: true
## Create a database
##
# mysqlDatabase:
## Specify an imagePullPolicy (Required)
## It's recommended to change this to 'Always' if the image tag is 'latest'
## ref: http://kubernetes.io/docs/user-guide/images/#updating-images
##
imagePullPolicy: IfNotPresent
## Additional arguments that are passed to the MySQL container.
## For example use --default-authentication-plugin=mysql_native_password if older clients need to
## connect to a MySQL 8 instance.
args: []
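## Illustrative example only (uncomment and adjust as needed; the flag shown is the
## one mentioned above, not a chart default):
# args:
#   - --default-authentication-plugin=mysql_native_password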
extraVolumes: |
# - name: extras
# emptyDir: {}
extraVolumeMounts: |
# - name: extras
# mountPath: /usr/share/extras
# readOnly: true
extraInitContainers: |
# - name: do-something
# image: busybox
# command: ['do', 'something']
## A string to add extra environment variables
# extraEnvVars: |
# - name: EXTRA_VAR
# value: "extra"
# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# imagePullSecrets:
# - name: myRegistryKeySecretName
## Node selector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
## Affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
livenessProbe:
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
## Persist data to a persistent volume
persistence:
enabled: true
## database data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
annotations: {}
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Security context
securityContext:
enabled: false
runAsUser: 999
fsGroup: 999
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 256Mi
cpu: 100m
# Custom mysql configuration files path
configurationFilesPath: /etc/mysql/conf.d/
# Custom mysql configuration files used to override default mysql settings
configurationFiles: {}
# mysql.cnf: |-
# [mysqld]
# skip-name-resolve
# ssl-ca=/ssl/ca.pem
# ssl-cert=/ssl/server-cert.pem
# ssl-key=/ssl/server-key.pem
# Custom mysql init SQL files used to initialize the database
initializationFiles: {}
# first-db.sql: |-
# CREATE DATABASE IF NOT EXISTS first DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
# second-db.sql: |-
# CREATE DATABASE IF NOT EXISTS second DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
# To enable the MySQL X Protocol port
# .. this will expose port 33060
# .. note that the X Plugin needs to be installed
# ref: https://dev.mysql.com/doc/refman/8.0/en/x-plugin-checking-installation.html
mysqlx:
port:
enabled: false
metrics:
enabled: false
image: prom/mysqld-exporter
imageTag: v0.10.0
imagePullPolicy: IfNotPresent
resources: {}
annotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "9104"
livenessProbe:
initialDelaySeconds: 15
timeoutSeconds: 5
readinessProbe:
initialDelaySeconds: 5
timeoutSeconds: 1
flags: []
serviceMonitor:
enabled: false
additionalLabels: {}
## Configure the service
## ref: http://kubernetes.io/docs/user-guide/services/
service:
annotations: {}
## Specify a service type
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
type: ClusterIP
port: 3306
# nodePort: 32000
# loadBalancerIP:
## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccount:
## Specifies whether a ServiceAccount should be created
##
create: false
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the mysql.fullname template
# name:
ssl:
enabled: false
secret: mysql-ssl-certs
certificates:
# - name: mysql-ssl-certs
# ca: |-
# -----BEGIN CERTIFICATE-----
# ...
# -----END CERTIFICATE-----
# cert: |-
# -----BEGIN CERTIFICATE-----
# ...
# -----END CERTIFICATE-----
# key: |-
# -----BEGIN RSA PRIVATE KEY-----
# ...
# -----END RSA PRIVATE KEY-----
## Populates the 'TZ' system timezone environment variable
## ref: https://dev.mysql.com/doc/refman/5.7/en/time-zone-support.html
##
## Default: nil (mysql will use image's default timezone, normally UTC)
## Example: 'Australia/Sydney'
# timezone:
# Deployment Annotations
deploymentAnnotations: {}
# To be added to the database server pod(s)
podAnnotations: {}
podLabels: {}
## Set pod priorityClassName
# priorityClassName: ""
## Init container resources defaults
initContainer:
resources:
requests:
memory: 10Mi
cpu: 10m
|
prometheus-node-exporter | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-node-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"prometheus-node-exporter.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/* Generate basic labels */}}\n{{- define \"prometheus-node-exporter.labels\" }}\napp: {{ template \"prometheus-node-exporter.name\" . }}\nheritage: {{.Release.Service }}\nrelease: {{.Release.Name }}\nchart: {{ template \"prometheus-node-exporter.chart\" . }}\n{{- if .Values.podLabels}}\n{{ toYaml .Values.podLabels }}\n{{- end }}\n{{- end }}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"prometheus-node-exporter.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"prometheus-node-exporter.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"prometheus-node-exporter.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nAllow the release namespace to be overridden for multi-namespace deployments in combined charts\n*/}}\n{{- define \"prometheus-node-exporter.namespace\" -}}\n {{- if .Values.namespaceOverride -}}\n {{- .Values.namespaceOverride -}}\n {{- else -}}\n {{- .Release.Namespace -}}\n {{- end -}}\n{{- end -}}\n",
"# daemonset.yaml\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: {{ template \"prometheus-node-exporter.fullname\" . }}\n namespace: {{ template \"prometheus-node-exporter.namespace\" . }}\n labels: {{ include \"prometheus-node-exporter.labels\" . | indent 4 }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"prometheus-node-exporter.name\" . }}\n release: {{ .Release.Name }}\n {{- if .Values.updateStrategy }}\n updateStrategy:\n{{ toYaml .Values.updateStrategy | indent 4 }}\n {{- end }}\n template:\n metadata:\n labels: {{ include \"prometheus-node-exporter.labels\" . | indent 8 }}\n {{- if .Values.podAnnotations }}\n annotations:\n {{- toYaml .Values.podAnnotations | nindent 8 }}\n {{- end }}\n spec:\n{{- if and .Values.rbac.create .Values.serviceAccount.create }}\n serviceAccountName: {{ template \"prometheus-node-exporter.serviceAccountName\" . }}\n{{- end }}\n{{- if .Values.securityContext }}\n securityContext:\n{{ toYaml .Values.securityContext | indent 8 }}\n{{- end }}\n{{- if .Values.priorityClassName }}\n priorityClassName: {{ .Values.priorityClassName }}\n{{- end }}\n containers:\n - name: node-exporter\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n args:\n - --path.procfs=/host/proc\n - --path.sysfs=/host/sys\n - --web.listen-address=$(HOST_IP):{{ .Values.service.port }}\n{{- if .Values.extraArgs }}\n{{ toYaml .Values.extraArgs | indent 12 }}\n{{- end }}\n env:\n - name: HOST_IP\n {{- if .Values.service.listenOnAllInterfaces }}\n value: 0.0.0.0\n {{- else }}\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: status.hostIP\n {{- end }}\n ports:\n - name: metrics\n containerPort: {{ .Values.service.targetPort }}\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /\n port: {{ .Values.service.port }}\n readinessProbe:\n httpGet:\n path: /\n port: {{ .Values.service.port }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumeMounts:\n - name: proc\n mountPath: /host/proc\n readOnly: true\n - name: sys\n mountPath: /host/sys\n readOnly: true\n {{- if .Values.extraHostVolumeMounts }}\n {{- range $_, $mount := .Values.extraHostVolumeMounts }}\n - name: {{ $mount.name }}\n mountPath: {{ $mount.mountPath }}\n readOnly: {{ $mount.readOnly }}\n {{- if $mount.mountPropagation }}\n mountPropagation: {{ $mount.mountPropagation }}\n {{- end }}\n {{- end }}\n {{- end }}\n {{- if .Values.sidecarVolumeMount }}\n {{- range $_, $mount := .Values.sidecarVolumeMount }}\n - name: {{ $mount.name }}\n mountPath: {{ $mount.mountPath }}\n readOnly: true\n {{- end }}\n {{- end }}\n {{- if .Values.configmaps }}\n {{- range $_, $mount := .Values.configmaps }}\n - name: {{ $mount.name }}\n mountPath: {{ $mount.mountPath }}\n {{- end }}\n {{- end }}\n{{- if .Values.sidecars }}\n{{ toYaml .Values.sidecars | indent 8 }}\n {{- if .Values.sidecarVolumeMount }}\n volumeMounts:\n {{- range $_, $mount := .Values.sidecarVolumeMount }}\n - name: {{ $mount.name }}\n mountPath: {{ $mount.mountPath }}\n readOnly: {{ $mount.readOnly }}\n {{- end }}\n {{- end }}\n{{- end }}\n hostNetwork: {{ .Values.hostNetwork }}\n hostPID: true\n{{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n{{- end }}\n{{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n{{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . 
| indent 8 }}\n {{- end }}\n volumes:\n - name: proc\n hostPath:\n path: /proc\n - name: sys\n hostPath:\n path: /sys\n {{- if .Values.extraHostVolumeMounts }}\n {{- range $_, $mount := .Values.extraHostVolumeMounts }}\n - name: {{ $mount.name }}\n hostPath:\n path: {{ $mount.hostPath }}\n {{- end }}\n {{- end }}\n {{- if .Values.sidecarVolumeMount }}\n {{- range $_, $mount := .Values.sidecarVolumeMount }}\n - name: {{ $mount.name }}\n emptyDir:\n medium: Memory\n {{- end }}\n {{- end }}\n {{- if .Values.configmaps }}\n {{- range $_, $mount := .Values.configmaps }}\n - name: {{ $mount.name }}\n configMap:\n name: {{ $mount.name }}\n {{- end }}\n {{- end }}\n",
"# endpoints.yaml\n{{- if .Values.endpoints }}\napiVersion: v1\nkind: Endpoints\nmetadata:\n name: {{ template \"prometheus-node-exporter.fullname\" . }}\n namespace: {{ template \"prometheus-node-exporter.namespace\" . }}\n labels:\n{{ include \"prometheus-node-exporter.labels\" . | indent 4 }}\nsubsets:\n - addresses:\n {{- range .Values.endpoints }}\n - ip: {{ . }}\n {{- end }}\n ports:\n - name: metrics\n port: 9100\n protocol: TCP\n{{- end }}\n",
"# monitor.yaml\n{{- if .Values.prometheus.monitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ template \"prometheus-node-exporter.fullname\" . }}\n namespace: {{ template \"prometheus-node-exporter.namespace\" . }}\n labels: {{ include \"prometheus-node-exporter.labels\" . | indent 4 }}\n {{- if .Values.prometheus.monitor.additionalLabels }}\n{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }}\n {{- end }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"prometheus-node-exporter.name\" . }}\n release: {{ .Release.Name }}\n endpoints:\n - port: metrics\n {{- if .Values.prometheus.monitor.scrapeTimeout }}\n scrapeTimeout: {{ .Values.prometheus.monitor.scrapeTimeout }}\n {{- end }}\n{{- if .Values.prometheus.monitor.relabelings }}\n relabelings:\n{{ toYaml .Values.prometheus.monitor.relabelings | indent 6 }}\n{{- end }}\n{{- end }}\n",
"# psp-clusterrole.yaml\n{{- if .Values.rbac.create }}\n{{- if .Values.rbac.pspEnabled }}\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: psp-{{ template \"prometheus-node-exporter.fullname\" . }}\n labels: {{ include \"prometheus-node-exporter.labels\" . | indent 4 }}\nrules:\n- apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames:\n - {{ template \"prometheus-node-exporter.fullname\" . }}\n{{- end }}\n{{- end }}\n",
"# psp-clusterrolebinding.yaml\n{{- if .Values.rbac.create }}\n{{- if .Values.rbac.pspEnabled }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: psp-{{ template \"prometheus-node-exporter.fullname\" . }}\n labels: {{ include \"prometheus-node-exporter.labels\" . | indent 4 }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: psp-{{ template \"prometheus-node-exporter.fullname\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"prometheus-node-exporter.fullname\" . }}\n namespace: {{ template \"prometheus-node-exporter.namespace\" . }}\n{{- end }}\n{{- end }}\n",
"# psp.yaml\n{{- if .Values.rbac.create }}\n{{- if .Values.rbac.pspEnabled }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"prometheus-node-exporter.fullname\" . }}\n namespace: {{ template \"prometheus-node-exporter.namespace\" . }}\n labels: {{ include \"prometheus-node-exporter.labels\" . | indent 4 }}\nspec:\n privileged: false\n # Required to prevent escalations to root.\n # allowPrivilegeEscalation: false\n # This is redundant with non-root + disallow privilege escalation,\n # but we can provide it for defense in depth.\n #requiredDropCapabilities:\n # - ALL\n # Allow core volume types.\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n - 'persistentVolumeClaim'\n - 'hostPath'\n hostNetwork: true\n hostIPC: false\n hostPID: true\n hostPorts:\n - min: 0\n max: 65535\n runAsUser:\n # Permits the container to run with root privileges as well.\n rule: 'RunAsAny'\n seLinux:\n # This policy assumes the nodes are using AppArmor rather than SELinux.\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 0\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 0\n max: 65535\n readOnlyRootFilesystem: false\n{{- end }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"prometheus-node-exporter.fullname\" . }}\n namespace: {{ template \"prometheus-node-exporter.namespace\" . }}\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\n labels: {{ include \"prometheus-node-exporter.labels\" . | indent 4 }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n {{- if ( and (eq .Values.service.type \"NodePort\" ) (not (empty .Values.service.nodePort)) ) }}\n nodePort: {{ .Values.service.nodePort }}\n {{- end }}\n targetPort: {{ .Values.service.targetPort }}\n protocol: TCP\n name: metrics\n selector:\n app: {{ template \"prometheus-node-exporter.name\" . }}\n release: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{- if .Values.rbac.create -}}\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"prometheus-node-exporter.serviceAccountName\" . }}\n namespace: {{ template \"prometheus-node-exporter.namespace\" . }}\n labels:\n app: {{ template \"prometheus-node-exporter.name\" . }}\n chart: {{ template \"prometheus-node-exporter.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nimagePullSecrets:\n{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }}\n{{- end -}}\n{{- end -}}\n"
] | # Default values for prometheus-node-exporter.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: quay.io/prometheus/node-exporter
tag: v1.0.1
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 9100
targetPort: 9100
nodePort:
listenOnAllInterfaces: true
annotations:
prometheus.io/scrape: "true"
prometheus:
monitor:
enabled: false
additionalLabels: {}
namespace: ""
relabelings: []
scrapeTimeout: 10s
## Customize the updateStrategy if set
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 200m
# memory: 50Mi
# requests:
# cpu: 100m
# memory: 30Mi
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
imagePullSecrets: []
securityContext:
fsGroup: 65534
runAsGroup: 65534
runAsNonRoot: true
runAsUser: 65534
rbac:
## If true, create & use RBAC resources
##
create: true
## If true, create & use Pod Security Policy resources
## https://kubernetes.io/docs/concepts/policy/pod-security-policy/
pspEnabled: true
# for deployments that have node_exporter deployed outside of the cluster, list
# their addresses here
endpoints: []
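# Illustrative example only; the addresses below are hypothetical placeholders:
# endpoints:
#   - 10.0.0.10
#   - 10.0.0.11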
# Expose the service to the host network
hostNetwork: true
## Assign a group of affinity scheduling rules
##
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchFields:
# - key: metadata.name
# operator: In
# values:
# - target-host-name
# Annotations to be added to node exporter pods
podAnnotations: {}
# Extra labels to be added to node exporter pods
podLabels: {}
## Assign a nodeSelector if operating a hybrid cluster
##
nodeSelector: {}
# beta.kubernetes.io/arch: amd64
# beta.kubernetes.io/os: linux
tolerations:
- effect: NoSchedule
operator: Exists
## Assign a PriorityClassName to pods if set
# priorityClassName: ""
## Additional container arguments
##
extraArgs: []
# - --collector.diskstats.ignored-devices=^(ram|loop|fd|(h|s|v)d[a-z]|nvme\\d+n\\d+p)\\d+$
# - --collector.textfile.directory=/run/prometheus
## Additional mounts from the host
##
extraHostVolumeMounts: []
# - name: <mountName>
# hostPath: <hostPath>
# mountPath: <mountPath>
# readOnly: true|false
# mountPropagation: None|HostToContainer|Bidirectional
## Additional configmaps to be mounted.
##
configmaps: []
# - name: <configMapName>
# mountPath: <mountPath>
## Override the deployment namespace
##
namespaceOverride: ""
## Additional containers for exporting metrics to a text file
##
sidecars: []
## - name: nvidia-dcgm-exporter
## image: nvidia/dcgm-exporter:1.4.3
## Volume for sidecar containers
##
sidecarVolumeMount: []
## - name: collector-textfiles
## mountPath: /run/prometheus
## readOnly: false
|
contour | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"contour.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{- define \"Release.Heritage\" -}}\n{{- default .Release.Service .Values.heritage }}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"contour.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"contour.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCommon labels\n*/}}\n{{- define \"contour.labels\" -}}\napp.kubernetes.io/name: {{ include \"contour.name\" . }}\nhelm.sh/chart: {{ include \"contour.chart\" . }}\napp.kubernetes.io/instance: {{ .Release.Name }}\n{{- if .Chart.AppVersion }}\napp.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n{{- end }}\napp.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end -}}\n",
"# cleanup-crds.yaml\n# This job is meant primarily for cleaning up on CI systems.\n# Using this on production systems, especially those that have multiple releases of Contour, will be destructive.\n{{- if .Values.customResourceDefinitions.cleanup }}\napiVersion: batch/v1\nkind: Job\nmetadata:\n name: {{ template \"contour.fullname\" . }}-cleanup\n namespace: {{ .Release.Namespace }}\n annotations:\n \"helm.sh/hook\": pre-delete\n \"helm.sh/hook-weight\": \"3\"\n \"helm.sh/hook-delete-policy\": hook-succeeded\n labels:\n app.kubernetes.io/name: {{ include \"contour.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"contour.chart\" . }}\nspec:\n template:\n metadata:\n name: contour-cleanup\n spec:\n serviceAccountName: {{ include \"contour.fullname\" . }}\n containers:\n - name: kubectl\n image: docker.io/bitnami/kubectl:1.14.1\n imagePullPolicy: IfNotPresent\n command:\n - /bin/sh\n - -c\n - >\n kubectl delete httpproxy --all;\n kubectl delete ingressroutes --all;\n kubectl delete tlscertificatedelegation --all;\n kubectl delete crd -l app.kubernetes.io/name=contour\n restartPolicy: OnFailure\n{{- end }}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n name: {{ template \"contour.fullname\" . }}\n labels:\n app.kubernetes.io/name: contour\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"contour.chart\" . }}\nrules:\n- apiGroups:\n - \"\"\n resources:\n\n - configmaps\n - endpoints\n - nodes\n - pods\n - secrets\n verbs:\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - get\n- apiGroups:\n - \"\"\n resources:\n - services\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - extensions\n resources:\n - ingresses\n verbs:\n - get\n - list\n - watch\n- apiGroups: [\"contour.heptio.com\"]\n resources: [\"ingressroutes\", \"tlscertificatedelegations\"]\n verbs:\n - get\n - list\n - watch\n - put\n - post\n - patch\n- apiGroups: [\"projectcontour.io\"]\n resources: [\"httpproxies\", \"tlscertificatedelegations\"]\n verbs:\n - get\n - list\n - watch\n - put\n - post\n - patch\n- apiGroups: [\"apiextensions.k8s.io\"]\n resources: [\"customresourcedefinitions\"]\n verbs:\n - get\n - list\n - watch\n - put\n - post\n - patch\n - delete\n{{- end -}}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"contour.fullname\" . }}\n labels:\n app.kubernetes.io/name: contour\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"contour.chart\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"contour.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"contour.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"contour.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"contour.fullname\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"contour.chart\" . }}\ndata:\n contour.yaml: |\n # should contour expect to be running inside a k8s cluster\n # incluster: true\n #\n # path to kubeconfig (if not running inside a k8s cluster)\n # kubeconfig: /path/to/.kube/config\n #\n # disable ingressroute permitInsecure field\n # disablePermitInsecure: false\n tls:\n # minimum TLS version that Contour will negotiate\n # minimum-protocol-version: \"1.1\"\n # The following config shows the defaults for the leader election.\n # leaderelection:\n # configmap-name: contour\n # configmap-namespace: leader-elect\n",
"# crds.yaml\n{{- if and .Values.customResourceDefinitions.create -}}\n{{- range $path, $bytes := .Files.Glob \"crds/*.yaml\" }}\n{{ $.Files.Get $path }}\n---\n{{- end }}\n{{- end }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app: {{ template \"contour.fullname\" . }}\n app.kubernetes.io/name: contour\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"contour.chart\" . }}\n name: {{ template \"contour.fullname\" . }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"contour.fullname\" . }}\n{{- if not .Values.hpa.create }}\n replicas: 2\n{{- end }}\n template:\n metadata:\n labels:\n app: {{ template \"contour.fullname\" . }}\n annotations:\n prometheus.io/scrape: \"true\"\n prometheus.io/port: \"8002\"\n prometheus.io/path: \"/stats/prometheus\"\n spec:\n containers:\n - image: \"{{ .Values.contour.image.registry }}:{{ .Values.contour.image.tag }}\"\n imagePullPolicy: {{ .Values.contour.image.pullPolicy }}\n resources:\n{{ toYaml .Values.contour.resources | indent 10 }}\n name: contour\n command: [\"contour\"]\n args:\n - serve\n - --incluster\n - --insecure\n - --envoy-service-http-port=8080\n - --envoy-service-https-port=8443\n - --config-path=/config/contour.yaml\n livenessProbe:\n httpGet:\n path: /healthz\n port: 8000\n readinessProbe:\n httpGet:\n path: /healthz\n port: 8000\n volumeMounts:\n - name: contour-config\n mountPath: /config\n readOnly: true\n - image: \"{{ .Values.envoy.image.registry }}:{{ .Values.envoy.image.tag }}\"\n imagePullPolicy: {{ .Values.envoy.image.pullPolicy }}\n resources:\n{{ toYaml .Values.envoy.resources | indent 10 }}\n name: envoy\n ports:\n - containerPort: 8080\n name: http\n - containerPort: 8443\n name: https\n command: [\"envoy\"]\n args:\n - --config-path /config/envoy.json\n - --service-cluster $(CONTOUR_NAMESPACE)\n - --service-node $(ENVOY_POD_NAME)\n - --log-level info\n env:\n - name: CONTOUR_NAMESPACE\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.namespace\n - name: ENVOY_POD_NAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.name\n readinessProbe:\n httpGet:\n path: /healthz\n port: 8002\n initialDelaySeconds: 3\n periodSeconds: 3\n volumeMounts:\n - name: envoy-config\n mountPath: /config\n lifecycle:\n preStop:\n exec:\n command:\n - bash\n - -c\n - --\n - echo\n - -ne\n - \"POST /healthcheck/fail HTTP/1.1\\r\\nHost: localhost\\r\\nConnection: close\\r\\n\\r\\n\"\n - '>/dev/tcp/localhost/9001'\n initContainers:\n - image: \"{{ .Values.init.image.registry }}:{{ .Values.init.image.tag }}\"\n imagePullPolicy: {{ .Values.init.image.pullPolicy }}\n resources:\n{{ toYaml .Values.init.resources | indent 10 }}\n name: envoy-initconfig\n command: [\"contour\"]\n args:\n - bootstrap\n - /config/envoy.json\n volumeMounts:\n - name: envoy-config\n mountPath: /config\n env:\n - name: CONTOUR_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n volumes:\n - name: envoy-config\n emptyDir: {}\n - name: contour-config\n configMap:\n name: {{ template \"contour.fullname\" . }}\n defaultMode: 0643\n items:\n - key: contour.yaml\n path: contour.yaml\n dnsPolicy: ClusterFirst\n serviceAccountName: {{ template \"contour.fullname\" . }}\n terminationGracePeriodSeconds: 30\n # The affinity stanza below tells Kubernetes to try hard not to place 2 of\n # these pods on the same node.\n affinity:\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 100\n podAffinityTerm:\n labelSelector:\n matchLabels:\n app: contour\n topologyKey: kubernetes.io/hostname\n",
"# hpa.yaml\n{{- if .Values.hpa.create -}}\napiVersion: autoscaling/v1\nkind: HorizontalPodAutoscaler\nmetadata:\n labels:\n app: {{ template \"contour.fullname\" . }}\n app.kubernetes.io/name: {{ template \"contour.fullname\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"contour.chart\" . }}\n name: {{ template \"contour.fullname\" . }}\nspec:\n scaleTargetRef:\n apiVersion: apps/v1\n kind: Deployment\n name: {{ template \"contour.fullname\" . }}\n minReplicas: {{ .Values.hpa.minReplicas }}\n maxReplicas: {{ .Values.hpa.maxReplicas }}\n targetCPUUtilizationPercentage: {{ .Values.hpa.targetCPUUtilizationPercentage }}\n{{- end -}}\n",
"# role.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n name: {{ template \"contour.fullname\" . }}-certgen\n labels:\n app.kubernetes.io/name: contour\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"contour.chart\" . }}\nrules:\n- apiGroups:\n - \"\"\n resources:\n - secrets\n verbs:\n - list\n - watch\n - create\n - get\n - put\n - post\n - patch\n{{- end -}}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n name: {{ template \"contour.fullname\" . }}-certgen\n labels:\n app.kubernetes.io/name: contour\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"contour.chart\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"contour.fullname\" . }}-certgen\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"contour.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"contour.fullname\" . }}\n annotations:\n # This annotation puts the AWS ELB into \"TCP\" mode so that it does not\n # do HTTP negotiation for HTTPS connections at the ELB edge.\n # The downside of this is the remote IP address of all connections will\n # appear to be the internal address of the ELB. See docs/proxy-proto.md\n # for information about enabling the PROXY protocol on the ELB to recover\n # the original remote IP address.\n service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp\n\n # Scrape metrics for the contour container\n # The envoy container is scraped by annotations on the pod spec\n prometheus.io/port: \"8000\"\n prometheus.io/scrape: \"true\"\nspec:\n ports:\n - port: 80\n name: http\n {{- if (and (eq .Values.serviceType \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n targetPort: http\n - port: 443\n name: https\n {{- if (and (eq .Values.serviceType \"NodePort\") (not (empty .Values.service.nodePorts.https)))}}\n nodePort: {{ .Values.service.nodePorts.https }}\n {{- end }}\n targetPort: https\n selector:\n app: {{ template \"contour.fullname\" . }}\n type: {{ .Values.serviceType }}",
"# serviceaccount.yaml\n{{- if .Values.serviceAccounts.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app.kubernetes.io/name: {{ template \"contour.fullname\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"contour.chart\" . }}\n name: {{ template \"contour.fullname\" . }}\n{{- end }}\n"
] | contour:
image:
registry: gcr.io/heptio-images/contour
tag: v0.15.0
pullPolicy: IfNotPresent
replicas: 2
resources: {}
# limits:
# cpu: "400m"
# requests:
# cpu: "200m"
customResourceDefinitions:
create: true
cleanup: false
envoy:
image:
registry: docker.io/envoyproxy/envoy
tag: v1.11.1
pullPolicy: IfNotPresent
resources: {}
# limits:
# cpu: "400m"
# requests:
# cpu: "200m"
hpa:
create: false
# minReplicas: 2
# maxReplicas: 15
# targetCPUUtilizationPercentage: 70
init:
image:
registry: gcr.io/heptio-images/contour
tag: v0.15.0
pullPolicy: IfNotPresent
resources: {}
# limits:
# cpu: "400m"
# requests:
# cpu: "200m"
rbac:
create: true
serviceType: LoadBalancer
service:
## Further config for a service of type NodePort
## The default empty string "" assigns a dynamic nodePort
## to the http and https ports (see the commented example below)
nodePorts:
http: ""
https: ""
serviceAccounts:
create: true
|
prisma | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prisma.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"prisma.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"prisma.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified postgresql name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"prisma.postgresql.fullname\" -}}\n{{- $name := default \"postgresql\" .Values.postgresql.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use.\n*/}}\n{{- define \"prisma.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"prisma.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified name for the secret that contains the database password.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"prisma.databaseSecret.fullname\" -}}\n{{- if .Values.postgresql.enabled -}}\n{{- include \"prisma.postgresql.fullname\" . }}\n{{- else -}}\n{{- include \"prisma.fullname\" . }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nSet the proper name for the secretKeyRef key that contains the database password.\n*/}}\n{{- define \"prisma.databaseSecret.key\" -}}\n{{- if .Values.postgresql.enabled -}}\npostgres-password\n{{- else -}}\ndb-password\n{{- end -}}\n{{- end -}}\n\n{{/*\nSet the proper database host. If postgresql is installed as part of this chart, use the default service name,\nelse use user-provided host\n*/}}\n{{- define \"prisma.database.host\" }}\n{{- if .Values.postgresql.enabled -}}\n{{- include \"prisma.postgresql.fullname\" . }}\n{{- else -}}\n{{- .Values.database.host | quote }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nSet the proper database port. If postgresql is installed as part of this chart, use the default postgresql port,\nelse use user-provided port\n*/}}\n{{- define \"prisma.database.port\" }}\n{{- if .Values.postgresql.enabled -}}\n{{- default \"5432\" ( .Values.postgresql.service.port | quote ) }}\n{{- else -}}\n{{- .Values.database.port | quote }}\n{{- end -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"prisma.fullname\" . }}\n labels:\n app: {{ template \"prisma.name\" . }}\n chart: {{ template \"prisma.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n config: |\n port: 4466\n {{- if .Values.auth.enabled }}\n managementApiSecret: $PRISMA_API_SECRET\n {{- end }}\n databases:\n default:\n connector: $PRISMA_DB_CONNECTOR\n host: $PRISMA_DB_HOST\n port: $PRISMA_DB_PORT\n user: $PRISMA_DB_USER\n password: $PRISMA_DB_PASSWORD\n migrations: $PRISMA_DB_MIGRATIONS\n database: $PRISMA_DB_NAME\n ssl: $PRISMA_DB_SSL\n connectionLimit: $PRISMA_DB_CONNECTIONLIMIT\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"prisma.fullname\" . }}\n labels:\n app: {{ template \"prisma.name\" . }}\n chart: {{ template \"prisma.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: {{ template \"prisma.name\" . }}\n release: {{ .Release.Name }}\n strategy:\n type: Recreate\n template:\n metadata:\n labels:\n app: {{ template \"prisma.name\" . }}\n release: {{ .Release.Name }}\n spec:\n serviceAccountName: {{ template \"prisma.serviceAccountName\" . }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: prisma\n containerPort: 4466\n protocol: TCP\n env:\n - name: CONFIG\n valueFrom:\n configMapKeyRef:\n name: {{ template \"prisma.fullname\" . }}\n key: config\n {{- if .Values.auth.secret }}\n - name: PRISMA_API_SECRET\n valueFrom:\n secretKeyRef:\n name: {{ template \"prisma.fullname\" . }}\n key: api-secret\n {{- end}}\n - name: PRISMA_CONFIG_PATH\n value: \"/app/config.yml\"\n - name: PRISMA_DB_CONNECTOR\n value: {{ .Values.database.connector | quote }}\n - name: PRISMA_DB_HOST\n value: {{ template \"prisma.database.host\" . }}\n - name: PRISMA_DB_PORT\n value: {{ template \"prisma.database.port\" . }}\n - name: PRISMA_DB_USER\n valueFrom:\n secretKeyRef:\n name: {{ template \"prisma.fullname\" . }}\n key: db-user\n - name: PRISMA_DB_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"prisma.databaseSecret.fullname\" . }}\n key: {{ template \"prisma.databaseSecret.key\" . }}\n - name: PRISMA_DB_MIGRATIONS\n value: {{ .Values.database.migrations | quote }}\n - name: PRISMA_DB_NAME\n value: {{ .Values.database.name | quote }}\n - name: PRISMA_DB_SSL\n value: {{ .Values.database.ssl | quote }}\n - name: PRISMA_DB_CONNECTIONLIMIT\n value: {{ .Values.database.connectionLimit | quote }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"prisma.fullname\" . -}}\n{{- $servicePort := .Values.service.port -}}\n{{- $ingressPath := .Values.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app: {{ template \"prisma.name\" . }}\n chart: {{ template \"prisma.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: {{ $servicePort }}\n {{- end }}\n{{- end }}\n",
"# secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"prisma.fullname\" . }}\n labels:\n app: {{ template \"prisma.name\" . }}\n chart: {{ template \"prisma.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n {{- if and .Values.auth.enabled .Values.auth.secret }}\n api-secret: {{ .Values.auth.secret | b64enc | quote }}\n {{- else if .Values.auth.enabled }}\n api-secret: {{ randAlphaNum 40 | b64enc | quote }}\n {{- end }}\n db-user: {{ .Values.database.user | b64enc | quote }}\n {{- if not .Values.postgresql.enabled }}\n {{- if .Values.database.password }}\n db-password: {{ .Values.database.password | b64enc | quote }}\n {{- else }}\n db-password: {{ randAlphaNum 40 | b64enc | quote }}\n {{- end }}\n {{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"prisma.fullname\" . }}\n labels:\n app: {{ template \"prisma.name\" . }}\n chart: {{ template \"prisma.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - name: service\n port: {{ .Values.service.port }}\n protocol: TCP\n targetPort: prisma\n selector:\n app: {{ template \"prisma.name\" . }}\n release: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"prisma.name\" . }}\n chart: {{ template \"prisma.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"prisma.serviceAccountName\" . }}\n{{- end }}\n"
] | # ------------------------------------------------------------------------------
# Prisma:
# ------------------------------------------------------------------------------
## Service account configuration
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
create: true
## Define serviceAccount name. Defaults to fully qualified name or "default"
## when create is false
##
name: ""
image:
## Prisma image repository
##
repository: prismagraphql/prisma
## Prisma image version
##
tag: 1.29.1-heroku
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
database:
## The currently supported connectors are [mysql, postgres]
connector: postgres
## If 'postgresql.enabled' is 'false', you will need to provide the
## following values so that Prisma can use them as the database endpoint
host: ""
port: ""
## Database name where the model will be created
##
name: prisma
## Enable SSL
##
ssl: false
## The maximum number of database connections (must be at least 2).
##
connectionLimit: 2
## Database credentials
##
user: prisma
password: ""
## Enable database migrations
##
migrations: true
auth:
## Prisma's Management API authentication
##
enabled: false
## Secret to use. If it isn't specified and 'auth.enabled' is set to 'true',
## a randomly generated one will be used
##
# secret: ""
service:
type: ClusterIP
port: 4466
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts: []
# - prisma.local
tls: []
# - secretName: prisma-tls
# hosts:
# - prisma.local
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
# ------------------------------------------------------------------------------
# PostgreSQL:
# ------------------------------------------------------------------------------
postgresql:
## If true, install the PostgreSQL chart alongside Prisma
## ref: https://github.com/kubernetes/charts/tree/master/stable/postgresql
##
enabled: false
## PostgreSQL image version
## ref: https://hub.docker.com/r/library/postgres/tags/
##
imageTag: "9.6.2"
## Specify a PostgreSQL imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
imagePullPolicy: "IfNotPresent"
## Persist data to a persistent volume
##
persistence:
enabled: false
## PostgreSQL credentials
##
postgresUser: prisma
postgresPassword: ""
## PostgreSQL service TCP port
##
service:
port: 5432
## Configure PostgreSQL resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
|
socat-tunneller | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"tunneller.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"tunneller.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"tunneller.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"tunneller.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"tunneller.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"tunneller.chart\" . }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"tunneller.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"tunneller.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n{{- with .Values.podAnnotations }}\n annotations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command:\n - socat\n args:\n - \"TCP-LISTEN:$(TUNNEL_PORT),fork\"\n - \"TCP:$(TUNNEL_HOST):$(TUNNEL_PORT)\"\n env:\n - name: TUNNEL_HOST\n value: {{ required \"Must specify a target host for the tunnel.\" .Values.tunnel.host | quote }}\n - name: TUNNEL_PORT\n value: {{ required \"Must specify a target port for the tunnel.\" .Values.tunnel.port | quote }}\n ports:\n - name: tunnel-port\n containerPort: {{ (int64 .Values.tunnel.port) }}\n protocol: TCP\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"tunneller.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"tunneller.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"tunneller.chart\" . }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.tunnel.port }}\n targetPort: tunnel-port\n protocol: TCP\n name: tunnel-port\n selector:\n app.kubernetes.io/name: {{ include \"tunneller.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n"
] | replicaCount: 1
image:
repository: alpine/socat
tag: 1.0.3
pullPolicy: IfNotPresent
nameOverride: ""
fullnameOverride: ""
service:
type: ClusterIP
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
tunnel:
host: myhost
port: 9999
|
dask-distributed | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"dask-distributed.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 24 -}}\n{{- end -}}\n\n{{/*\nCreate fully qualified names.\nWe truncate at 24 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"dask-distributed.scheduler-fullname\" -}}\n{{- $name := default .Chart.Name .Values.scheduler.name -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 24 -}}\n{{- end -}}\n\n{{- define \"dask-distributed.webui-fullname\" -}}\n{{- $name := default .Chart.Name .Values.webUI.name -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 24 -}}\n{{- end -}}\n\n{{- define \"dask-distributed.worker-fullname\" -}}\n{{- $name := default .Chart.Name .Values.worker.name -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 24 -}}\n{{- end -}}\n\n{{- define \"dask-distributed.jupyter-fullname\" -}}\n{{- $name := default .Chart.Name .Values.jupyter.name -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 24 -}}\n{{- end -}}\n",
"# dask-jupyter-config.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"dask-distributed.jupyter-fullname\" . }}-config\n labels:\n app: {{ template \"dask-distributed.name\" . }}\n heritage: {{ .Release.Service | quote }}\n release: {{ .Release.Name | quote }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n component: \"{{ .Release.Name }}-{{ .Values.jupyter.component }}\"\ndata:\n jupyter_notebook_config.py: |\n c = get_config()\n c.NotebookApp.password = '{{ .Values.jupyter.password }}'\n",
"# dask-jupyter-deployment.yaml\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"dask-distributed.jupyter-fullname\" . }}\n labels:\n app: {{ template \"dask-distributed.name\" . }}\n heritage: {{ .Release.Service | quote }}\n release: {{ .Release.Name | quote }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n component: \"{{ .Release.Name }}-{{ .Values.jupyter.component }}\"\nspec:\n replicas: {{ .Values.jupyter.replicas }}\n strategy:\n type: RollingUpdate\n template:\n metadata:\n labels:\n app: {{ template \"dask-distributed.name\" . }}\n release: {{ .Release.Name | quote }}\n component: \"{{ .Release.Name }}-{{ .Values.jupyter.component }}\"\n spec:\n containers:\n - name: {{ template \"dask-distributed.jupyter-fullname\" . }}\n image: \"{{ .Values.jupyter.image }}:{{ .Values.jupyter.imageTag }}\"\n ports:\n - containerPort: {{ .Values.jupyter.containerPort }}\n resources:\n{{ toYaml .Values.jupyter.resources | indent 12 }}\n volumeMounts:\n - name: config-volume\n mountPath: /home/jovyan/.jupyter\n volumes:\n - name: config-volume\n configMap:\n name: {{ template \"dask-distributed.jupyter-fullname\" . }}-config\n",
"# dask-jupyter-service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"dask-distributed.jupyter-fullname\" . }}\n labels:\n app: {{ template \"dask-distributed.name\" . }}\n heritage: {{ .Release.Service | quote }}\n release: {{ .Release.Name | quote }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n component: \"{{ .Release.Name }}-{{ .Values.jupyter.component }}\"\nspec:\n ports:\n - port: {{ .Values.jupyter.servicePort }}\n targetPort: {{ .Values.jupyter.containerPort }}\n selector:\n app: {{ template \"dask-distributed.name\" . }}\n release: {{ .Release.Name | quote }}\n component: \"{{ .Release.Name }}-{{ .Values.jupyter.component }}\"\n type: {{ .Values.jupyter.serviceType }}\n",
"# dask-scheduler-deployment.yaml\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"dask-distributed.scheduler-fullname\" . }}\n labels:\n app: {{ template \"dask-distributed.name\" . }}\n heritage: {{ .Release.Service | quote }}\n release: {{ .Release.Name | quote }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n component: \"{{ .Release.Name }}-{{ .Values.scheduler.component }}\"\nspec:\n replicas: {{ .Values.scheduler.replicas }}\n strategy:\n type: RollingUpdate\n template:\n metadata:\n labels:\n app: {{ template \"dask-distributed.name\" . }}\n release: {{ .Release.Name | quote }}\n component: \"{{ .Release.Name }}-{{ .Values.scheduler.component }}\"\n spec:\n containers:\n - name: {{ template \"dask-distributed.scheduler-fullname\" . }}\n image: \"{{ .Values.scheduler.image }}:{{ .Values.scheduler.imageTag }}\"\n command: [\"dask-scheduler\", \"--port\", \"{{ .Values.scheduler.servicePort }}\", \"--bokeh-port\", \"{{ .Values.webUI.containerPort }}\"]\n ports:\n - containerPort: {{ .Values.scheduler.containerPort }}\n - containerPort: {{ .Values.webUI.containerPort }}\n resources:\n{{ toYaml .Values.scheduler.resources | indent 12 }}\n",
"# dask-scheduler-service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"dask-distributed.scheduler-fullname\" . }}\n labels:\n app: {{ template \"dask-distributed.name\" . }}\n heritage: {{ .Release.Service | quote }}\n release: {{ .Release.Name | quote }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n component: \"{{ .Release.Name }}-{{ .Values.scheduler.component }}\"\nspec:\n ports:\n - name: {{ template \"dask-distributed.scheduler-fullname\" . }}\n port: {{ .Values.scheduler.servicePort }}\n targetPort: {{ .Values.scheduler.containerPort }}\n - name: {{ template \"dask-distributed.webui-fullname\" . }}\n port: {{ .Values.webUI.servicePort }}\n targetPort: {{ .Values.webUI.containerPort }}\n selector:\n app: {{ template \"dask-distributed.name\" . }}\n release: {{ .Release.Name | quote }}\n component: \"{{ .Release.Name }}-{{ .Values.scheduler.component }}\"\n type: {{ .Values.scheduler.serviceType }}\n",
"# dask-worker-deployment.yaml\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"dask-distributed.worker-fullname\" . }}\n labels:\n app: {{ template \"dask-distributed.name\" . }}\n heritage: {{ .Release.Service | quote }}\n release: {{ .Release.Name | quote }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n component: \"{{ .Release.Name }}-{{ .Values.worker.component }}\"\nspec:\n replicas: {{ .Values.worker.replicas }}\n strategy:\n type: RollingUpdate\n template:\n metadata:\n labels:\n app: {{ template \"dask-distributed.name\" . }}\n release: {{ .Release.Name | quote }}\n component: \"{{ .Release.Name }}-{{ .Values.worker.component }}\"\n spec:\n containers:\n - name: {{ template \"dask-distributed.worker-fullname\" . }}\n image: \"{{ .Values.worker.image }}:{{ .Values.worker.imageTag }}\"\n command: [\"dask-worker\", \"{{ template \"dask-distributed.scheduler-fullname\" . }}:{{ .Values.scheduler.servicePort }}\"]\n ports:\n - containerPort: {{ .Values.worker.containerPort }}\n resources:\n{{ toYaml .Values.worker.resources | indent 12 }}\n"
] | # Default values for dask.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
# nameOverride: dask
scheduler:
name: scheduler
image: "daskdev/dask"
imageTag: "latest"
replicas: 1
component: "dask-scheduler"
serviceType: "LoadBalancer"
servicePort: 8786
containerPort: 8786
resources: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 500m
# memory: 512Mi
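  # Illustrative only: from a pod inside the cluster (e.g. the bundled Jupyter
  # notebook), a Dask client would typically connect to the scheduler service on
  # servicePort, for example:
  #   from dask.distributed import Client
  #   client = Client('<scheduler-service-name>:8786')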
webUI:
name: webui
servicePort: 80
containerPort: 8787
worker:
name: worker
image: "daskdev/dask"
imageTag: "latest"
replicas: 3
component: "dask-worker"
containerPort: 8081
resources: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 500m
# memory: 512Mi
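  # Illustrative only: worker replicas can also be changed after install without
  # editing this file, e.g. `helm upgrade <release> <chart> --set worker.replicas=5`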
jupyter:
name: jupyter
image: "jupyter/base-notebook"
imageTag: "11be019e4079"
replicas: 1
component: "jupyter-notebook"
serviceType: "LoadBalancer"
servicePort: 80
containerPort: 8888
password: 'sha1:aae8550c0a44:9507d45e087d5ee481a5ce9f4f16f37a0867318c' # 'dask'
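  # Illustrative only: the hash above corresponds to the password 'dask'; a new hash
  # can be generated with the classic notebook helper, e.g.
  #   python -c "from notebook.auth import passwd; print(passwd('my-password'))"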
resources: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 500m
# memory: 512Mi
|
lamp | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"lamp.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"lamp.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nGet the domain name of the chart - used for ingress rules\n*/}}\n{{- define \"lamp.domain\" -}}\n{{- if .Values.wordpress.develop.enabled -}}\n{{- required \"Please specify a develop domain at .Values.wordpress.develop.devDomain\" .Values.wordpress.develop.devDomain | printf \"%s.%s\" ( include \"lamp.fullname\" .) -}}\n{{- else -}}\n{{- if not .Values.ingress.enabled -}}\nno_domain_specified\n{{- else -}}\n{{- required \"Please specify an ingress domain at .Values.ingress.domain\" .Values.ingress.domain -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n",
"# configmap-httpd.yaml\n{{- if .Values.php.fpmEnabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"lamp.fullname\" . }}-httpd\n labels:\n app: {{ template \"lamp.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ndata:\n{{ (.Files.Glob \"files/httpd/httpd.conf\").AsConfig | indent 2 }}\n{{ (.Files.Glob \"files/httpd/httpd-vhosts.conf\").AsConfig | indent 2 }}\n{{ (.Files.Glob \"files/httpd/httpd-vhosts-socket.conf\").AsConfig | indent 2 }}\n{{- end }}\n",
"# configmap-init.yaml\n{{- if or .Values.init.clone.release .Values.wordpress.enabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{template \"lamp.fullname\" .}}-init\n labels:\n app: {{ template \"lamp.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ndata:\n {{- if .Values.init.clone.release }}\n{{ (.Files.Glob \"files/init/init_clone.sh\").AsConfig | indent 2 }}\n{{ (.Files.Glob \"files/init/init_db_clone.sh\").AsConfig | indent 2 }}\n {{- end }}\n {{- if .Values.wordpress.enabled }}\n{{ (.Files.Glob \"files/init/init_wp.sh\").AsConfig | indent 2 }}\n{{ (.Files.Glob \"files/init/init_wp_db.sh\").AsConfig | indent 2 }}\n {{- end }}\n{{- end }}\n",
"# configmap-php.yaml\n{{- if or .Values.mysql.sockets ( or .Values.php.fpmEnabled .Values.php.ini ) }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{template \"lamp.fullname\" .}}-php\n labels:\n app: {{ template \"lamp.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ndata:\n additional.ini: |\n {{- if .Values.php.ini }}\n{{ .Values.php.ini | indent 4 }}\n {{- end }}\n {{- if .Values.mysql.sockets }}\n mysqli.default_socket=/var/run/mysqld/mysqld.sock\n pdo_mysql.default_socket=/var/run/mysqld/mysqld.sock\n {{- end }}\n {{- if .Values.php.fpmEnabled }}\n zz-docker.conf: |\n [global]\n daemonize = no\n [www]\n listen = /var/run/php/php-fpm.sock\n listen.mode = 0666\n {{- if .Values.php.fpm }}\n{{ .Values.php.fpm | indent 4 }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# deployment.yaml\n{{- if semverCompare \">=1.16-0\" .Capabilities.KubeVersion.GitVersion -}}\napiVersion: apps/v1\n{{- else -}}\napiVersion: extensions/v1beta1\n{{- end }}\nkind: Deployment\nmetadata:\n name: {{ template \"lamp.fullname\" . }}\n labels:\n app: {{ template \"lamp.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n replicas: {{ .Values.replicaCount }}\n {{- if semverCompare \">=1.16-0\" .Capabilities.KubeVersion.GitVersion}}\n selector:\n matchLabels:\n app: {{ template \"lamp.name\" . }}\n {{- end }}\n template:\n metadata:\n labels:\n app: {{ template \"lamp.name\" . }}\n release: {{ .Release.Name }}\n spec:\n initContainers:\n - name: \"init-chown-mysql\"\n image: \"busybox\"\n command: [\"chown\", \"-R\", \"999:999\", \"/tmp/mysqld\"]\n volumeMounts:\n - name: sockets\n mountPath: /tmp/mysqld\n subPath: mysqld\n resources:\n {{- if .Values.init.resources }}\n{{ toYaml .Values.init.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n - name: \"init-chown-data\"\n image: \"busybox\"\n command: [\"sh\", \"-c\", \"chown -R 33:33 /data/web\"]\n volumeMounts:\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: /data/web/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n mountPath: /data/web\n subPath: web\n resources:\n {{- if .Values.init.resources }}\n{{ toYaml .Values.init.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{- if .Values.php.copyRoot}}\n - name: \"copy-root\"\n image: \"{{ .Values.php.repository }}:{{ .Values.php.tag }}\"\n imagePullPolicy: \"Always\"\n command: [\"/bin/sh\", \"-c\", \"cp -ua /var/www/html/* /data/web/ && cp -ua /var/www/html/.[^.]* /data/web/ && chown 33:33 -R /data/web/\"]\n volumeMounts:\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: /data/web/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: /data/web\n subPath: web\n resources:\n {{- if .Values.init.resources }}\n{{ toYaml .Values.init.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{- end }}\n {{- end }}\n {{- if and .Values.php.fpmEnabled .Values.php.sockets }}\n - name: \"init-chown-php\"\n image: \"busybox\"\n command: [\"chown\", \"-R\", \"33:33\", \"/tmp/php\"]\n volumeMounts:\n - name: sockets\n mountPath: /tmp/php\n subPath: php\n resources:\n {{- if .Values.init.resources }}\n{{ toYaml .Values.init.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{- end }}\n {{- if .Values.init.clone.release }}\n - name: \"init-clone\"\n image: \"lead4good/xtrabackup\"\n imagePullPolicy: \"Always\"\n command: [\"sh\", \"/init/init_clone.sh\"]\n volumeMounts:\n - name: httpd-data\n mountPath: /data\n - name: clone-data\n mountPath: /clone_data\n - name: init\n mountPath: /init\n env:\n - name: MYSQL_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ .Values.init.clone.release }}\n key: mysql-root-password\n - name: MYSQL_HOST\n value: {{ .Values.init.clone.release 
}}\n resources:\n {{- if .Values.init.resources }}\n{{ toYaml .Values.init.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n - name: \"init-db-clone\"\n image: \"{{ .Values.mysql.repository }}:{{ .Values.mysql.tag }}\"\n imagePullPolicy: {{ .Values.mysql.pullPolicy }}\n command: [\"bash\", \"/init/init_db_clone.sh\"]\n volumeMounts:\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: /var/www/html/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: /var/www/html\n subPath: web\n - name: httpd-data\n mountPath: /var/lib/mysql\n subPath: db\n - name: init\n mountPath: /init\n env:\n - name: OLD_MYSQL_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ .Values.init.clone.release }}\n key: mysql-root-password\n - name: MYSQL_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . }}\n key: mysql-root-password\n resources:\n {{- if .Values.init.resources }}\n{{ toYaml .Values.init.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{- end }}\n {{- if .Values.init.manually.enabled }}\n - name: init-manually\n image: \"{{ .Values.init.manually.repository }}:{{ .Values.init.manually.tag }}\"\n imagePullPolicy: {{ .Values.init.manually.pullPolicy }}\n command: [\"bash\", \"-c\", \"if [ -z $(ls -A /var/www/html) ]; then echo -e \\\"touch /done\\\" > /bin/im-done && chmod +x /bin/im-done && while ! [ -e /done ]; do sleep 5; done; fi\"]\n volumeMounts:\n {{- if or .Values.mysql.sockets .Values.php.ini }}\n - name: configmap-php\n mountPath: /etc/php5/cli/conf.d/additional.ini\n subPath: additional.ini\n {{- end }}\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: /var/www/html/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: /var/www/html\n subPath: web\n {{- if .Values.php.oldHTTPRoot }}\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: {{ .Values.php.oldHTTPRoot }}/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: {{ .Values.php.oldHTTPRoot }}\n subPath: web\n {{- end }}\n - name: httpd-data\n mountPath: /var/www/mysql\n subPath: mysql\n - name: httpd-data\n mountPath: /var/lib/mysql\n subPath: db\n env:\n - name: MYSQL_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . 
}}\n key: mysql-root-password\n resources:\n {{- if .Values.init.resources }}\n{{ toYaml .Values.init.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{- end }}\n {{- if .Values.wordpress.enabled }}\n {{- if not .Values.init.clone.release }}\n - name: init-wp\n image: lead4good/init-wp\n imagePullPolicy: Always\n command: [\"bash\", \"-c\", \"chown -R www-data:www-data /var/www/ && su -s /bin/bash -c \\\"bash /init/init_wp.sh\\\" www-data\"]\n volumeMounts:\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: /var/www/html/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: /var/www/html\n subPath: web\n - name: httpd-data\n mountPath: /var/www/mysql\n subPath: mysql\n - name: init\n mountPath: /init\n env:\n {{- if .Values.wordpress.develop.delete_uploads }}\n - name: DELETE_UPLOADS\n value: \"1\"\n {{- end }}\n {{- if .Values.svn.enabled }}\n - name: SVN_ENABLED\n value: \"1\"\n {{- end }}\n {{- if .Values.mysql.sockets }}\n - name: USE_MYSQL_SOCKETS\n value: \"1\"\n {{- end }}\n {{- if and .Values.ingress.htpasswdString ( not .Values.ingress.enabled ) }}\n - name: HTACCESS_AUTH\n value: {{ .Values.htpasswdString }}\n {{- end }}\n {{- if .Values.svn.allowOverwrite }}\n - name: ALLOW_OVERWRITE\n value: \"true\"\n {{- end }}\n {{- if .Values.ingress.ssl }}\n - name: SSL_ENABLED\n value: \"true\"\n {{- end }}\n {{- if .Values.init.manually.enabled }}\n - name: MANUAL_INIT\n value: \"true\"\n {{- end }}\n {{- if .Values.wordpress.develop.enabled }}\n - name: DEVELOPMENT\n value: \"true\"\n {{- end }}\n - name: GDRIVE_FOLDER\n {{- if .Values.wordpress.gdriveFolder }}\n value: {{ .Values.wordpress.gdriveFolder }}\n {{- else }}\n value: {{ required \"Please specify the domain of the wordpress backup at .Values.wordpress.domain\" .Values.wordpress.domain }}\n {{- end }}\n - name: RTOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . 
}}\n key: gdrive-rtoken\n - name: WEB_DOMAIN\n value: {{ required \"Please specify the domain of the wordpress backup at .Values.wordpress.domain\" .Values.wordpress.domain }}\n resources:\n {{- if .Values.init.resources }}\n{{ toYaml .Values.init.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{- end }}\n - name: init-wp-db\n image: lead4good/init-wp\n imagePullPolicy: Always\n command: [\"bash\", \"-c\", \"bash /init/init_wp_db.sh\"]\n volumeMounts:\n {{- if or .Values.mysql.sockets .Values.php.ini }}\n - name: configmap-php\n mountPath: /etc/php5/cli/conf.d/additional.ini\n subPath: \"additional.ini\"\n {{- end }}\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: /var/www/html/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: /var/www/html\n subPath: web\n {{- if .Values.php.oldHTTPRoot }}\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: {{ .Values.php.oldHTTPRoot }}/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: {{ .Values.php.oldHTTPRoot }}\n subPath: web\n {{- end }}\n - name: httpd-data\n mountPath: /var/www/mysql\n subPath: mysql\n - name: httpd-data\n mountPath: /var/lib/mysql\n subPath: db\n - name: init\n mountPath: /init\n env:\n {{- if .Values.wordpress.develop.enabled }}\n - name: WEB_TEST_DOMAIN\n value: {{ template \"lamp.domain\" . }}\n - name: WEB_DOMAIN\n value: {{ required \"Please specify the domain of the wordpress backup at .Values.wordpress.domain\" .Values.wordpress.domain }}\n {{- end }}\n {{ if .Values.ingress.ssl }}\n - name: SSL_ENABLED\n value: \"true\"\n {{- end }}\n {{- if .Values.init.clone.release }}\n - name: CLONE_INIT\n value: \"true\"\n {{- end }}\n - name: MYSQL_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . 
}}\n key: mysql-root-password\n resources:\n {{- if .Values.init.resources }}\n{{ toYaml .Values.init.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{- end }}\n containers:\n {{- if .Values.php.fpmEnabled }}\n - name: \"httpd\"\n image: \"{{ .Values.httpd.repository }}:{{ .Values.httpd.tag }}\"\n imagePullPolicy: \"Always\"\n ports:\n - containerPort: 80\n volumeMounts:\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: /var/www/html/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: /var/www/html\n subPath: web\n {{- if .Values.php.sockets }}\n - mountPath: /var/run/php\n name: sockets\n subPath: php\n {{- end }}\n - mountPath: /usr/local/apache2/conf/extra/httpd-vhosts.conf\n name: httpd-config\n subPath: httpd-vhosts{{ if .Values.php.sockets }}-socket{{ end }}.conf\n - mountPath: /usr/local/apache2/conf/httpd.conf\n name: httpd-config\n subPath: httpd.conf\n resources:\n {{- if .Values.httpd.resources }}\n{{ toYaml .Values.httpd.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n - name: \"php\"\n image: \"{{ .Values.php.repository }}:{{ .Values.php.tag }}\"\n {{- if not .Values.php.sockets }}\n ports:\n - containerPort: 9000\n {{- end }}\n {{- else }}\n - name: \"httpdphp\"\n image: \"{{ .Values.php.repository }}:{{ .Values.php.tag }}\"\n ports:\n - containerPort: 80\n {{- end }}\n imagePullPolicy: {{ .Values.php.pullPolicy }}\n {{- if .Values.php.envVars }}\n env:\n{{ toYaml .Values.php.envVars | indent 8 }}\n {{- end }}\n volumeMounts:\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: /var/www/html/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: /var/www/html\n subPath: web\n - name: httpd-data\n mountPath: /var/www/mysql\n subPath: mysql\n {{- if .Values.php.oldHTTPRoot }}\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: {{ .Values.php.oldHTTPRoot }}/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: {{ .Values.php.oldHTTPRoot }}\n subPath: web\n {{- end }}\n {{- if and .Values.php.fpmEnabled .Values.php.sockets }}\n - mountPath: /var/run/php\n name: sockets\n subPath: php\n - mountPath: /usr/local/etc/php-fpm.d/zz-docker.conf\n name: configmap-php\n subPath: zz-docker.conf\n {{- end }}\n {{- if .Values.mysql.sockets }}\n - mountPath: /var/run/mysqld\n name: sockets\n subPath: mysqld\n {{- end }}\n {{- if or .Values.php.ini .Values.mysql.sockets }}\n - name: configmap-php\n mountPath: /usr/local/etc/php/conf.d/additional.ini\n subPath: additional.ini\n {{- end }}\n resources:\n {{- if .Values.php.resources }}\n{{ toYaml .Values.php.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{- if .Values.mysql.rootPassword }}\n - name: \"mysql\"\n image: \"{{ .Values.mysql.repository }}:{{ .Values.mysql.tag 
}}\"\n imagePullPolicy: {{ .Values.mysql.pullPolicy }}\n env:\n - name: MYSQL_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . }}\n key: mysql-root-password\n {{- if and .Values.mysql.user .Values.mysql.password }}\n - name: MYSQL_USER\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . }}\n key: mysql-user\n - name: MYSQL_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . }}\n key: mysql-password\n {{- if .Values.mysql.database }}\n - name: MYSQL_DATABASE\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . }}\n key: mysql-database\n {{- end }}\n {{- end }}\n volumeMounts:\n - name: httpd-data\n mountPath: /var/lib/mysql\n subPath: db\n {{- if .Values.mysql.sockets }}\n - mountPath: /var/run/mysqld\n name: sockets\n subPath: mysqld\n {{- end }}\n resources:\n {{- if .Values.mysql.resources }}\n{{ toYaml .Values.mysql.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{- end }}\n {{ if .Values.sftp.enabled }}\n - name: sftp\n image: \"{{ .Values.sftp.repository }}:{{ .Values.sftp.tag }}\"\n command: [/bin/bash, -c, \"bash /entrypoint $SFTP_USER:$SFTP_PASSWORD:33:33\"]\n ports:\n - containerPort: 22\n env:\n - name: SFTP_USER\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . }}\n key: sftp-user\n - name: SFTP_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . }}\n key: sftp-password\n volumeMounts:\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: /home/{{ .Values.sftp.user }}/web/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: /home/{{ .Values.sftp.user }}/web\n subPath: web\n resources:\n {{- if .Values.sftp.resources }}\n{{ toYaml .Values.sftp.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{ end }}\n {{ if .Values.webdav.enabled }}\n - name: webdav\n image: lead4good/webdav\n command: [/bin/bash, -c, \"sed -i s/80/8001/g /etc/nginx/sites-enabled/webdav-site.conf && /set_htpasswd.sh && nginx -g \\\"daemon off;\\\"\"]\n env:\n - name: WEBDAV_USERNAME\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . }}\n key: webdav-user\n - name: WEBDAV_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . 
}}\n key: webdav-password\n ports:\n - containerPort: 8001\n volumeMounts:\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: /var/www/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: /var/www\n subPath: web\n resources:\n {{- if .Values.svn.resources }}\n{{ toYaml .Values.svn.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{ end }}\n {{ if .Values.git.enabled }}\n - name: git\n image: openweb/git-sync\n command: [/bin/bash, -c, \"chown www-data:www-data /git/ && su -s /bin/bash -c '/go/bin/git-sync' www-data\"]\n env:\n - name: GIT_SYNC_REPO\n value: {{ required \"Please specify the GIT repository at .Values.git.repoURL\" .Values.git.repoURL }}\n - name: GIT_SYNC_BRANCH\n value: {{ .Values.git.branch }}\n - name: GIT_SYNC_REV\n value: {{ .Values.git.revision }}\n - name: GIT_SYNC_WAIT\n value: {{ quote .Values.git.wait }}\n - name: GIT_SYNC_DEST\n value: /git\n volumeMounts:\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: /git/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: /git\n subPath: web\n resources:\n {{- if .Values.git.resources }}\n{{ toYaml .Values.git.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{ end }}\n {{ if .Values.svn.enabled }}\n - name: svn\n image: lead4good/svn-sync\n command: [/bin/sh, -c, \"chown xfs:xfs /svn/ && su -s /bin/sh -c '/bin/sh /entrypoint.sh /sync.sh' xfs\"]\n env:\n - name: SYNC_USER\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . }}\n key: svn-user\n - name: SYNC_PASS\n valueFrom:\n secretKeyRef:\n name: {{ template \"lamp.fullname\" . 
}}\n key: svn-password\n - name: SYNC_URL\n value: {{required \"Please specify the SVN repository at .Values.svn.repoURL\" .Values.svn.repoURL }}\n {{ if .Values.svn.allowOverwrite }}\n - name: ALLOW_OVERWRITE\n value: \"true\"\n {{- end }}\n volumeMounts:\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n {{- range $subPath := .Values.php.persistentSubpaths }}\n - name: httpd-data\n mountPath: /svn/{{ $subPath }}\n subPath: {{ $subPath }}\n {{- end }}\n - name: httpd-data-nonpersistent\n {{- else }}\n - name: httpd-data\n {{- end }}\n mountPath: /svn\n subPath: web\n resources:\n {{- if .Values.svn.resources }}\n{{ toYaml .Values.svn.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{ end }}\n {{ if .Values.phpmyadmin.enabled }}\n - image: \"{{ .Values.phpmyadmin.repository }}:{{ .Values.phpmyadmin.tag }}\"\n name: phpmyadmin\n command: [sh, -c, sed -i 's/listen\\ 80/listen {{ .Values.phpmyadmin.port }}/g' /etc/nginx.conf && /run.sh supervisord -n]\n env:\n - name: PMA_HOST\n value: 127.0.0.1\n ports:\n - {containerPort: 8080}\n resources:\n {{- if .Values.phpmyadmin.resources }}\n{{ toYaml .Values.phpmyadmin.resources | indent 12 }}\n {{- else }}\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n {{ end }}\n volumes:\n {{- if and .Values.php.persistentSubpaths (or .Values.persistence.hostPath .Values.persistence.enabled)}}\n - name: httpd-data-nonpersistent\n emptyDir: {}\n {{- end }}\n - name: httpd-data\n {{- if .Values.persistence.hostPath }}\n hostPath:\n path: {{ .Values.persistence.hostPath }}\n {{- else }}{{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"lamp.fullname\" . }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- end }}\n {{- if or .Values.mysql.sockets .Values.php.sockets }}\n - name: sockets\n emptyDir: {}\n {{- end }}\n {{- if or .Values.mysql.sockets ( or ( and .Values.php.fpmEnabled .Values.php.sockets) .Values.php.ini ) }}\n - configMap: {name: {{template \"lamp.fullname\" .}}-php }\n name: configmap-php\n {{- end }}\n {{- if or .Values.init.clone.release .Values.wordpress.enabled }}\n - configMap: {name: {{template \"lamp.fullname\" .}}-init }\n name: init\n {{- end }}\n {{- if .Values.php.fpmEnabled }}\n - configMap: {name: {{template \"lamp.fullname\" .}}-httpd }\n name: httpd-config\n {{- end }}\n {{- if .Values.init.clone.release }}\n - name: clone-data\n {{- if .Values.init.clone.hostPath }}\n hostPath:\n path: {{ .Values.init.clone.hostPath }}\n {{- else }}\n persistentVolumeClaim:\n claimName: {{ .Values.init.clone.release }}\n {{- end }}\n {{- end }}\n",
"# ingress-services.yaml\n{{- if and .Values.ingress.enabled (or .Values.phpmyadmin.enabled .Values.webdav.enabled) }}\n{{- if semverCompare \">=1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\napiVersion: networking.k8s.io/v1beta1\n{{- else -}}\napiVersion: extensions/v1beta1\n{{- end }}\nkind: Ingress\nmetadata:\n name: {{ template \"lamp.fullname\" . }}-service\n annotations:\n {{- if .Values.ingress.ssl }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n{{ toYaml .Values.ingress.annotations | indent 4 }}\nspec:\n {{- if .Values.ingress.ssl }}\n tls:\n - secretName: {{ template \"lamp.fullname\" . }}-tls-service\n hosts:\n {{- if .Values.phpmyadmin.enabled }}\n - {{ .Values.phpmyadmin.subdomain }}.{{ template \"lamp.domain\" . }}\n {{- end }}\n {{- if .Values.webdav.enabled }}\n - {{ .Values.webdav.subdomain }}.{{ template \"lamp.domain\" . }}\n {{- end }}\n {{- end }}\n rules:\n {{- if .Values.phpmyadmin.enabled }}\n - host: {{ .Values.phpmyadmin.subdomain }}.{{ template \"lamp.domain\" . }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ template \"lamp.fullname\" . }}\n servicePort: {{ .Values.phpmyadmin.port }}\n {{- end }}\n {{- if .Values.webdav.enabled }}\n - host: {{ .Values.webdav.subdomain }}.{{ template \"lamp.domain\" . }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ template \"lamp.fullname\" . }}\n servicePort: {{ .Values.webdav.port }}\n {{- end }}\n{{- end }}\n",
"# ingress-www.yaml\n{{- if and .Values.ingress.enabled .Values.ingress.subdomainWWW }}\n{{- if semverCompare \">=1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\napiVersion: networking.k8s.io/v1beta1\n{{- else -}}\napiVersion: extensions/v1beta1\n{{- end }}\nkind: Ingress\nmetadata:\n name: {{ template \"lamp.fullname\" . }}-www\n annotations:\n {{- if .Values.ingress.ssl }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- if .Values.ingress.htpasswdString }}\n ingress.kubernetes.io/auth-type: basic\n ingress.kubernetes.io/auth-secret: {{ template \"lamp.fullname\" . }}\n ingress.kubernetes.io/auth-realm: \"Authenticate\"\n {{- end }}\n{{ toYaml .Values.ingress.annotations | indent 4 }}\nspec:\n {{- if .Values.ingress.ssl }}\n tls:\n - secretName: {{ template \"lamp.fullname\" . }}-tls-www\n hosts:\n - www.{{ template \"lamp.domain\" . }}\n {{- end }}\n rules:\n - host: www.{{ template \"lamp.domain\" . }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ template \"lamp.fullname\" . }}\n servicePort: {{ .Values.service.HTTPPort }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\n{{- if semverCompare \">=1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\napiVersion: networking.k8s.io/v1beta1\n{{- else -}}\napiVersion: extensions/v1beta1\n{{- end }}\nkind: Ingress\nmetadata:\n name: {{ template \"lamp.fullname\" . }}-app\n annotations:\n {{- if .Values.ingress.ssl }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- if .Values.ingress.htpasswdString }}\n ingress.kubernetes.io/auth-type: basic\n ingress.kubernetes.io/auth-secret: {{ template \"lamp.fullname\" . }}\n ingress.kubernetes.io/auth-realm: \"Authenticate\"\n {{- end }}\n {{- if .Values.ingress.subdomainWWW }}\n ingress.kubernetes.io/configuration-snippet: |\n rewrite ^(.*)$ $scheme://www.{{ template \"lamp.domain\" . }}$1;\n {{- end }}\n{{ toYaml .Values.ingress.annotations | indent 4 }}\nspec:\n {{- if .Values.ingress.ssl }}\n tls:\n - secretName: {{ template \"lamp.fullname\" . }}-tls-app\n hosts:\n - {{ template \"lamp.domain\" . }}\n {{- end }}\n rules:\n - host: {{ template \"lamp.domain\" . }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ template \"lamp.fullname\" . }}\n servicePort: {{ .Values.service.HTTPPort }}\n{{- end }}\n",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.hostPath) -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"lamp.fullname\" . }}\n labels:\n app: {{ template \"lamp.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n {{- if .Values.persistence.keep }}\n helm.sh/resource-policy: keep\n {{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"lamp.fullname\" . }}\n labels:\n app: {{ template \"lamp.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n {{- if .Values.keepSecrets }}\n annotations:\n helm.sh/resource-policy: keep\n {{- end }}\ntype: Opaque\ndata:\n {{- if .Values.mysql.rootPassword }}\n mysql-root-password: {{ .Values.mysql.rootPassword | b64enc | quote }}\n {{- if and .Values.mysql.user .Values.mysql.password }}\n mysql-user: {{ .Values.mysql.user | b64enc | quote }}\n mysql-password: {{ .Values.mysql.password | b64enc | quote }}\n {{- if .Values.mysql.database}}\n mysql-database: {{ .Values.mysql.database | b64enc | quote }}\n {{- end }}\n {{- end }}\n {{- end }}\n {{- if .Values.wordpress.gdriveRToken }}\n gdrive-rtoken: {{ .Values.wordpress.gdriveRToken | b64enc | quote }}\n {{- end }}\n {{- if .Values.ingress.htpasswdString }}\n auth: {{ .Values.ingress.htpasswdString | b64enc | quote }}\n {{- end }}\n {{- if .Values.sftp.enabled }}\n sftp-user: {{ required \"Please specify the SFTP user name at .Values.sftp.user\" .Values.sftp.user | b64enc | quote }}\n sftp-password: {{ required \"Please specify the SFTP user password at .Values.sftp.password\" .Values.sftp.password | b64enc | quote }}\n {{- end }}\n {{- if .Values.svn.enabled }}\n svn-user: {{ required \"Please specify the SVN user name at .Values.svn.user\" .Values.svn.user | b64enc | quote }}\n svn-password: {{ required \"Please specify the SVN user password at .Values.svn.password\" .Values.svn.password | b64enc | quote }}\n {{- end }}\n {{- if .Values.webdav.enabled }}\n webdav-user: {{ required \"Please specify the WebDAV user name at .Values.webdav.user\" .Values.webdav.user | b64enc | quote }}\n webdav-password: {{ required \"Please specify the WebDAV user password at .Values.webdav.password\" .Values.webdav.password | b64enc | quote }}\n {{- end }}\n",
"# service-sftp.yaml\n{{- if and .Values.sftp.enabled .Values.ingress.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"lamp.fullname\" . }}-sftp\n labels:\n app: {{ template \"lamp.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n type: {{ .Values.sftp.serviceType }}\n ports:\n - targetPort: 22\n port: {{ .Values.sftp.port }}\n {{- if .Values.sftp.nodePort }}\n nodePort: {{ .Values.sftp.nodePort }}\n {{- end }}\n protocol: TCP\n name: sftp\n selector:\n app: {{ template \"lamp.name\" . }}\n release: {{ .Release.Name }}\n{{- end -}}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"lamp.fullname\" . }}\n labels:\n app: {{ template \"lamp.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n {{- if .Values.ingress.enabled }}\n type: ClusterIP\n {{- else }}\n type: {{ .Values.service.type }}\n {{- end }}\n ports:\n - port: {{ .Values.service.HTTPPort }}\n targetPort: 80\n protocol: TCP\n name: httpd\n {{ if .Values.mysql.rootPassword }}\n - port: 3306\n targetPort: 3306\n protocol: TCP\n name: mysql\n {{ end }}\n {{ if and .Values.sftp.enabled (not .Values.ingress.enabled) }}\n - port: {{ .Values.sftp.port }}\n targetPort: 22\n protocol: TCP\n name: sftp\n {{ end }}\n {{ if .Values.webdav.enabled }}\n - port: {{ .Values.webdav.port }}\n targetPort: 8001\n protocol: TCP\n name: webdav\n {{ end }}\n {{ if .Values.phpmyadmin.enabled }}\n - port: {{ .Values.phpmyadmin.port }}\n targetPort: 8080\n protocol: TCP\n name: phpmyadmin\n {{ end }}\n selector:\n app: {{ template \"lamp.name\" . }}\n release: {{ .Release.Name }}\n"
] | ### CONTAINERS ###
init:
clone:
## init.clone.release Fullname of the release to clone
release: false
## init.clone.hostPath If the release to clone uses hostPath instead of PVC, set
## it here. This will only work if both releases are deployed on the same node
# hostPath: /path/to/volume
manually:
## init.manually.enabled Enables container for manual initialization
enabled: false
## init.manually.repository Container image
repository: "lead4good/init-wp"
## init.manually.tag Container image tag
tag: "latest"
## init.manually.pullPolicy Image pull policy
pullPolicy: Always
## init.resources init containers resource requests/limits
resources: false
php:
## php.repository default php image
repository: "php"
## php.tag default php image tag
tag: "7-fpm-alpine"
## php.pullPolicy Image pull policy
pullPolicy: Always
  ## php.fpmEnabled Enables the Docker FPM image; be sure to disable this if working
  ## with a custom repository based on the apache tag
fpmEnabled: true
## php.sockets If FPM is enabled, enables communication between HTTPD and PHP via
## sockets instead of TCP
sockets: true
## php.oldHTTPRoot Additionally mounts the webroot at `php.oldHTTPRoot` to compensate
## for absolute path file links
# oldHTTPRoot: /var/www/html
  ## php.ini additional PHP config values, see the example below on how to use
# ini: |
# short_open_tag=On
  ## php.fpm additional PHP-FPM config values (php-fpm.conf)
# fpm: |
# pm.max_children = 120
  ## php.copyRoot if true, copies the container's web root `/var/www/html` into
  ## persistent storage. This must be enabled if the container already comes with
  ## files installed to `/var/www/html`
  copyRoot: false
## php.persistentSubpaths instead of enabling persistence for the whole webroot,
## only subpaths of webroot can be enabled for persistence. Have a look at the
## github.com/kubernetes/charts/stable/lamp/examples/nextcloud.yaml to see how it works
persistentSubpaths: false
# persistentSubpaths:
# - subPath1
# - subPath2
## php.resources PHP container resource requests/limits
resources: false
# requests:
# cpu: 1m
# memory: 1Mi
httpd:
## httpd.repository default httpd image
repository: "httpd"
## httpd.tag default httpd image tag
tag: "2.4-alpine"
## httpd.resources HTTPD container resource requests/limits
resources: false
# requests:
# cpu: 1m
# memory: 1Mi
mysql:
## mysql.rootPassword Sets the MySQL root password, enables MySQL service if not empty
# rootPassword: root_password
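  ## Illustrative only: the MySQL service could also be enabled at install time with
  ## something like `helm install stable/lamp --set mysql.rootPassword=<secure-password>`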
## mysql.user MySQL user
# user: user
## mysql.password MySQL user password
# password: user_password
## mysql.database MySQL user database
# database: database
## mysql.repository MySQL image - choose one of the official images
## [mysql](https://hub.docker.com/_/mysql/)
## [mariadb](https://hub.docker.com/_/mariadb/)
## [percona](https://hub.docker.com/_/percona/)
repository: "mysql"
## mysql.tag MySQL image tag
tag: "5.7"
## mysql.imagePullPolicy Image pull policy
imagePullPolicy: Always
## mysql.sockets Enables communication between MySQL and PHP via sockets instead of TCP
sockets: true
## mysql.resources Resource requests/limits
resources: false
# requests:
# cpu: 1m
# memory: 1Mi
sftp:
## sftp.repository default sftp image
repository: "atmoz/sftp"
## sftp.tag default sftp image tag
tag: "alpine"
## sftp.enabled Enables sftp service
enabled: false
## sftp.serviceType Type of sftp service in Ingress mode
serviceType: NodePort
## sftp.port Port to advertise service in LoadBalancer mode
port: 22
## sftp.nodePort Port to advertise service in Ingress mode
## `sftp.serviceType` must be set to `NodePort`
# nodePort: 30111
## sftp.user SFTP User
# user: user
## sftp.password SFTP Password
# password: password
## sftp.resources resource requests/limits
resources: false
# requests:
# cpu: 1m
# memory: 1Mi
webdav:
## webdav.enabled Enables webdav service
enabled: false
## webdav.port Port to advertise service in LoadBalancer mode
port: 8001
## webdav.subdomain Subdomain to advertise service on if ingress is enabled
subdomain: webdav
## webdav.user WebDAV User
# user:
## webdav.password WebDAV Password
# password:
## webdav.resources resource requests/limits
resources: false
# requests:
# cpu: 1m
# memory: 1Mi
git:
## git.enabled Enables Git service
enabled: false
## git.repoURL Git Repository URL
# repoURL:
## git.branch Repository branch to sync
branch: master
## git.revision Revision to sync
revision: FETCH_HEAD
## git.wait Time between Git syncs
wait: 30
## git.resources resource requests/limits
resources: false
# requests:
# cpu: 1m
# memory: 1Mi
svn:
## svn.enabled Enables svn service
enabled: false
## svn.user SVN User
# user: user
## svn.password SVN Password
# password: password
## svn.repoURL SVN Repository URL
# repoURL:
  ## svn.allowOverwrite if disabled and files already exist in the web folder, no
  ## working clone will be created and files will not be synced
allowOverwrite: true
## svn.resources resource requests/limits
resources: false
# requests:
# cpu: 1m
# memory: 1Mi
phpmyadmin:
## phpmyadmin.repository default phpmyadmin image
repository: "phpmyadmin"
## phpmyadmin.tag default phpmyadmin image tag
tag: "phpmyadmin"
## phpmyadmin.enabled Enables phpmyadmin service
enabled: false
## phpmyadmin.port Port to advertise service in LoadBalancer mode
port: 8080
## phpmyadmin.subdomain Subdomain to advertise service on if ingress is enabled
subdomain: phpmyadmin
## phpmyadmin.resources resource requests/limits
resources: false
# requests:
# cpu: 1m
# memory: 1Mi
resources:
requests:
## resources.requests.cpu CPU resource requests
cpu: 1m
## resources.requests.memory Memory resource requests
memory: 1Mi
# limits:
## resources.limits.cpu CPU resource limits
# cpu: 1000m
## resources.limits.memory Memory resource limits
# memory: 1024Mi
### STORAGE ###
persistence:
## persistence.enabled Enables persistent volume - PV provisioner support necessary
enabled: true
## persistence.keep Keep persistent volume after helm delete
keep: false
## persistence.accessMode PVC Access Mode
accessMode: ReadWriteOnce
## persistence.size PVC Size
size: 5Gi
## persistence.storageClass PVC Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## persistence.hostPath if specified, used as persistent storage instead of PVC
# hostPath: /path/to/volume
### NETWORKING ###
service:
## service.type Changes to ClusterIP automatically if ingress enabled
type: LoadBalancer
## service.HTTPPort Port to advertise the main web service in LoadBalancer mode
HTTPPort: 80
ingress:
## ingress.enabled Enables ingress support - working ingress controller necessary
enabled: false
  ## ingress.domain domain to advertise the services - A records need to point to
  ## the ingress controller's IP
# domain: example.com
## ingress.subdomainWWW enables www subdomain and 301 redirect from domain
## > NOTE: Requires nginx ingress controller
# subdomainWWW: false
## ingress.ssl Enables [lego](https://github.com/jetstack/kube-lego) letsencrypt
## ssl support - working lego container necessary
## > NOTE: Requires nginx ingress controller
# ssl: false
## ingress.htpasswdString if specified main web service requires authentication.
## Format: _user:$apr1$F..._
## > NOTE: Requires nginx ingress controller
# htpasswdString: "user:$apr1$FfVI2PRr$f9MW8rsr5RmmxbdV0Iipk1"
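  ## Illustrative only: a string in this format can be generated with the apache2-utils
  ## htpasswd tool, e.g. `htpasswd -nb user password`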
## ingress.annotations specify custom ingress annotations such as e.g.
# annotations:
# ingress.kubernetes.io/proxy-body-size: "50m"
# kubernetes.io/ingress.class: nginx
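  ## Illustrative only: a minimal ingress-enabled install might look like
  ## `helm install stable/lamp --set ingress.enabled=true,ingress.domain=example.com`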
### WORDPRESS ###
wordpress:
## wordpress.enabled Enables wordpress normal mode
enabled: false
  ## wordpress.gdriveRToken gdrive refresh token used for authentication when
  ## downloading the InfiniteWP backup from gdrive
  # gdriveRToken:
  ## wordpress.gdriveFolder gdrive backup folder - the latest backup inside the folder
  ## whose name includes the string `_full` will be downloaded
  # gdriveFolder: example.com
  ## wordpress.domain domain of the WordPress backup; in dev mode it is search-replaced
  ## with the dev domain
  # domain: "example.com"
develop:
## wordpress.develop.enabled enables develop mode
enabled: false
    ## wordpress.develop.deleteUploads deletes the `wp-content/uploads` folder and links
    ## uploads to the live site via htaccess
    deleteUploads: false
    ## wordpress.develop.devDomain used to search replace `wordpress.domain` to
    ## `fullname of template`.`develop.devDomain`, e.g. `mysite-com-lamp.dev.example.com`
# devDomain: dev.example.com
### OTHER ###
## keepSecrets Keep secrets after helm delete
keepSecrets: false
## replicaCount > 1 will corrupt your database if one is used. Future releases
## might enable elastic scaling via galeradb
replicaCount: 1
|
distribution | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"distribution.name\" -}}\n{{- default .Chart.Name .Values.distribution.name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nThe distributor name\n*/}}\n{{- define \"distributor.name\" -}}\n{{- default .Chart.Name .Values.distributor.name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified distribution name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"distribution.fullname\" -}}\n{{- if .Values.distribution.fullnameOverride -}}\n{{- .Values.distribution.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.distribution.name -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n\n{{/*\nCreate a default fully qualified distributor name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"distributor.fullname\" -}}\n{{- if .Values.distributor.fullnameOverride -}}\n{{- .Values.distributor.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.distributor.name -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nSet the final MongoDB connection URL\n*/}}\n{{- define \"mongodb.url\" -}}\n{{- if .Values.global.mongoUrl -}}\n{{- .Values.global.mongoUrl -}}\n{{- else -}}\n{{- $mongoDatabase := .Values.mongodb.mongodbDatabase -}}\n{{- $mongoUser := .Values.mongodb.mongodbUsername -}}\n{{- $mongoPassword := required \"A valid .Values.mongodb.mongodbPassword entry required!\" .Values.mongodb.mongodbPassword -}}\n{{- printf \"%s://%s:%s@%s-%s/%s\" \"mongodb\" $mongoUser $mongoPassword .Release.Name \"mongodb:27017\" $mongoDatabase | b64enc | quote -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nSet the final MongoDB audit URL\n*/}}\n{{- define \"mongodb.audit.url\" -}}\n{{- if .Values.global.mongoAuditUrl -}}\n{{- .Values.global.mongoAuditUrl -}}\n{{- else -}}\n{{- $mongoUser := .Values.mongodb.mongodbUsername -}}\n{{- $mongoPassword := required \"A valid .Values.mongodb.mongodbPassword entry required!\" .Values.mongodb.mongodbPassword -}}\n{{- printf \"%s://%s:%s@%s-%s/%s\" \"mongodb\" $mongoUser $mongoPassword .Release.Name \"mongodb:27017\" \"audit?maxpoolsize=500\" | b64enc | quote -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nSet the final Redis connection URL\n*/}}\n{{- define \"redis.url\" -}}\n{{- if .Values.global.redisUrl -}}\n{{- .Values.global.redisUrl -}}\n{{- else -}}\n{{- $redisPassword := required \"A valid .Values.redis.redisPassword entry required!\" .Values.redis.redisPassword -}}\n{{- $redisPort := .Values.redis.master.port -}}\n{{- printf \"%s://:%s@%s-%s:%g\" \"redis\" $redisPassword .Release.Name \"redis\" $redisPort | b64enc | quote -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define 
\"distribution.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n{{ default (include \"distribution.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n{{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"distribution.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# distribution-pvc.yaml\n{{- if and .Values.distribution.persistence.enabled (not .Values.distribution.persistence.existingClaim) }}\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: {{ template \"distribution.fullname\" . }}\n labels:\n app: {{ template \"distribution.name\" . }}\n chart: {{ template \"distribution.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n accessModes:\n - {{ .Values.distribution.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.distribution.persistence.size }}\n{{- if .Values.distribution.persistence.storageClass }}\n{{- if (eq \"-\" .Values.distribution.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.distribution.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# distribution-role.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n labels:\n app: {{ template \"distribution.name\" . }}\n chart: {{ template \"distribution.chart\" . }}\n component: {{ .Values.distribution.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"distribution.fullname\" . }}\nrules:\n{{ toYaml .Values.rbac.role.rules }}\n{{- end }}\n",
"# distribution-rolebinding.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n labels:\n app: {{ template \"distribution.name\" . }}\n chart: {{ template \"distribution.chart\" . }}\n component: {{ .Values.distribution.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"distribution.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"distribution.serviceAccountName\" . }}\nroleRef:\n kind: Role\n apiGroup: rbac.authorization.k8s.io\n name: {{ template \"distribution.fullname\" . }}\n{{- end }}\n",
"# distribution-serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"distribution.name\" . }}\n chart: {{ template \"distribution.chart\" . }}\n component: {{ .Values.distribution.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"distribution.serviceAccountName\" . }}\n{{- end }}\n",
"# distribution-statefulset.yaml\napiVersion: apps/v1beta2\nkind: StatefulSet\nmetadata:\n name: {{ template \"distribution.fullname\" . }}\n labels:\n app: {{ template \"distribution.name\" . }}\n chart: {{ template \"distribution.chart\" . }}\n component: {{ .Values.distribution.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n serviceName: {{ template \"distribution.name\" . }}\n replicas: {{ .Values.distribution.replicaCount }}\n updateStrategy:\n type: RollingUpdate\n selector:\n matchLabels:\n app: {{ template \"distribution.name\" . }}\n release: {{ .Release.Name }}\n role: {{ template \"distribution.name\" . }}\n component: {{ .Values.distribution.name }}\n template:\n metadata:\n labels:\n app: {{ template \"distribution.name\" . }}\n component: {{ .Values.distribution.name }}\n role: {{ template \"distribution.name\" . }}\n release: {{ .Release.Name }}\n spec:\n serviceAccountName: {{ template \"distribution.serviceAccountName\" . }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.imagePullSecrets }}\n {{- end }}\n initContainers:\n - name: \"init-data\"\n image: \"{{ .Values.initContainerImage }}\"\n command:\n - '/bin/sh'\n - '-c'\n - >\n until nc -z -w 2 {{ .Release.Name }}-mongodb 27017 && echo {{ .Release.Name }}-mongodb ok; do sleep 2; done;\n until nc -z -w 2 {{ .Release.Name }}-redis {{ .Values.redis.master.port }} && echo {{ .Release.Name }}-redis ok; do sleep 2; done;\n containers:\n - name: {{ .Values.distribution.name }}\n image: '{{ .Values.distribution.image.repository }}:{{ default .Chart.AppVersion .Values.distribution.image.version }}'\n imagePullPolicy: {{ .Values.distribution.image.imagePullPolicy }}\n ports:\n - containerPort: {{ .Values.distribution.internalPort }}\n protocol: TCP\n env:\n - name: DEFAULT_JAVA_OPTS\n value: '-Ddistribution.home={{ .Values.distribution.persistence.mountPath }}\n -Djfrog.master.key={{ .Values.distribution.masterKey }}\n -Dcom.sun.management.jmxremote.authenticate=false\n -Dcom.sun.management.jmxremote.ssl=false -Duser.timezone=UTC\n {{- if .Values.distribution.javaOpts.xms }}\n -Xms{{ .Values.distribution.javaOpts.xms }}\n {{- end}}\n {{- if .Values.distribution.javaOpts.xmx }}\n -Xmx{{ .Values.distribution.javaOpts.xmx }}\n {{- end}}\n -Dspring.profiles.active=production'\n - name: mongo_connectionString\n valueFrom:\n secretKeyRef:\n name: {{ template \"distribution.fullname\" . }}-mongo-connection\n key: mongo_connectionString\n - name: audit_mongo_connectionString\n valueFrom:\n secretKeyRef:\n name: {{ template \"distribution.fullname\" . }}-mongo-connection\n key: audit_mongo_connectionString\n - name: redis_connectionString\n valueFrom:\n secretKeyRef:\n name: {{ template \"distribution.fullname\" . 
}}-redis-connection\n key: redis_connectionString\n - name: BT_ARTIFACTORY_URL\n value: {{ .Values.distribution.env.artifactoryUrl | quote }}\n - name: BT_SERVER_URL\n value: {{ .Values.distribution.env.btServerUrl | quote }}\n {{- if .Values.distribution.env.artifactoryEdge1Url }}\n - name: artifactory_edge_1_url\n value: {{ .Values.distribution.env.artifactoryEdge1Url }}\n {{- end }}\n {{- if .Values.distribution.env.artifactoryEdge2Url }}\n - name: artifactory_edge_2_url\n value: {{ .Values.distribution.env.artifactoryEdge2Url }}\n {{- end }}\n {{- if .Values.distribution.env.artifactoryEdge3Url }}\n - name: artifactory_edge_3_url\n value: {{ .Values.distribution.env.artifactoryEdge3Url }}\n {{- end }}\n {{- if .Values.distribution.env.artifactoryCi1Url }}\n - name: artifactory_ci_1_url\n value: {{ .Values.distribution.env.artifactoryCi1Url }}\n {{- end }}\n volumeMounts:\n - name: distribution-data\n mountPath: {{ .Values.distribution.persistence.mountPath | quote }}\n resources:\n{{ toYaml .Values.distribution.resources | indent 10 }}\n readinessProbe:\n httpGet:\n path: /api/v1/system/ping\n port: 8080\n initialDelaySeconds: 60\n periodSeconds: 10\n failureThreshold: 10\n livenessProbe:\n httpGet:\n path: /api/v1/system/ping\n port: 8080\n initialDelaySeconds: 180\n periodSeconds: 10\n {{- if .Values.distribution.persistence.enabled }}\n volumeClaimTemplates:\n - metadata:\n name: distribution-data\n spec:\n {{- if .Values.distribution.persistence.existingClaim }}\n selector:\n matchLabels:\n app: {{ template \"distribution.name\" . }}\n {{- else }}\n {{- if .Values.distribution.persistence.storageClass }}\n {{- if (eq \"-\" .Values.distribution.persistence.storageClass) }}\n storageClassName: \"\"\n {{- else }}\n storageClassName: \"{{ .Values.distribution.persistence.storageClass }}\"\n {{- end }}\n {{- end }}\n accessModes: [ \"{{ .Values.distribution.persistence.accessMode }}\" ]\n resources:\n requests:\n storage: {{ .Values.distribution.persistence.size }}\n {{- end }}\n {{- else }}\n volumes:\n - name: distribution-data\n emptyDir: {}\n {{- end }}",
"# distribution-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"distribution.fullname\" . }}\n labels:\n app: {{ template \"distribution.name\" . }}\n chart: {{ template \"distribution.chart\" . }}\n component: {{ .Values.distribution.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n{{- if .Values.distribution.service.annotations }}\n annotations:\n{{ toYaml .Values.distribution.service.annotations | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.distribution.service.type }}\n ports:\n - port: {{ .Values.distribution.externalPort }}\n protocol: TCP\n targetPort: {{ .Values.distribution.internalPort }}\n selector:\n app: {{ template \"distribution.name\" . }}\n component: {{ .Values.distribution.name }}\n release: {{ .Release.Name }}\n",
"# distributor-pvc.yaml\n{{- if and .Values.distributor.persistence.enabled (not .Values.distributor.persistence.existingClaim) }}\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: {{ template \"distributor.fullname\" . }}\n labels:\n app: {{ template \"distribution.name\" . }}\n chart: {{ template \"distribution.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n accessModes:\n - {{ .Values.distributor.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.distributor.persistence.size }}\n{{- if .Values.distributor.persistence.storageClass }}\n{{- if (eq \"-\" .Values.distributor.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.distributor.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# distributor-statefulset.yaml\napiVersion: apps/v1beta2\nkind: StatefulSet\nmetadata:\n name: {{ template \"distributor.fullname\" . }}\n labels:\n app: {{ template \"distribution.name\" . }}\n chart: {{ template \"distribution.chart\" . }}\n component: {{ .Values.distributor.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n serviceName: {{ template \"distributor.name\" . }}\n replicas: {{ .Values.distributor.replicaCount }}\n updateStrategy:\n type: RollingUpdate\n selector:\n matchLabels:\n app: {{ template \"distribution.name\" . }}\n release: {{ .Release.Name }}\n role: {{ template \"distributor.name\" . }}\n component: {{ .Values.distributor.name }}\n template:\n metadata:\n labels:\n app: {{ template \"distribution.name\" . }}\n component: {{ .Values.distributor.name }}\n role: {{ template \"distributor.name\" . }}\n release: {{ .Release.Name }}\n spec:\n serviceAccountName: {{ template \"distribution.serviceAccountName\" . }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.imagePullSecrets }}\n {{- end }}\n initContainers:\n - name: \"prepare-data\"\n image: \"{{ .Values.initContainerImage }}\"\n imagePullPolicy: {{ .Values.distributor.image.pullPolicy }}\n command:\n - '/bin/sh'\n - '-c'\n - >\n until nc -z -w 2 {{ .Release.Name }}-redis {{ .Values.redis.master.port }} && echo {{ .Release.Name }}-redis ok; do sleep 2; done;\n {{- if .Values.distributor.token }}\n mkdir -pv {{ .Values.distributor.persistence.mountPath }}/etc/security;\n cp -fv /tmp/security/token {{ .Values.distributor.persistence.mountPath }}/etc/security/token;\n chmod 400 {{ .Values.distributor.persistence.mountPath }}/etc/security/token;\n {{- end }}\n chown -R 1020:1020 {{ .Values.distributor.persistence.mountPath }}\n volumeMounts:\n - name: distributor-data\n mountPath: {{ .Values.distributor.persistence.mountPath | quote }}\n {{- if .Values.distributor.token }}\n - name: distributor-token\n mountPath: \"/tmp/security/token\"\n subPath: token\n {{- end }}\n containers:\n - name: {{ .Values.distributor.name }}\n image: '{{ .Values.distributor.image.repository }}:{{ default .Chart.AppVersion .Values.distributor.image.version }}'\n imagePullPolicy: {{ .Values.distributor.image.imagePullPolicy }}\n env:\n - name: DEFAULT_JAVA_OPTS\n value: '-Ddistribution.home={{ .Values.distributor.persistence.mountPath }} -Dfile.encoding=UTF8 -Dcom.sun.management.jmxremote.authenticate=false\n -Dcom.sun.management.jmxremote.ssl=false -Duser.timezone=UTC\n {{- if .Values.distributor.javaOpts.xms }}\n -Xms{{ .Values.distributor.javaOpts.xms }}\n {{- end}}\n {{- if .Values.distributor.javaOpts.xmx }}\n -Xmx{{ .Values.distributor.javaOpts.xmx }}\n {{- end}}\n -Dspring.profiles.active=production'\n - name: redis_connectionString\n valueFrom:\n secretKeyRef:\n name: {{ template \"distribution.fullname\" . }}-redis-connection\n key: redis_connectionString\n - name: BT_SERVER_URL\n value: 'http://{{ include \"distribution.fullname\" . }}:{{ .Values.distribution.externalPort }}'\n volumeMounts:\n - name: distributor-data\n mountPath: {{ .Values.distributor.persistence.mountPath | quote }}\n resources:\n{{ toYaml .Values.distributor.resources | indent 10 }}\n volumes:\n {{- if .Values.distributor.token }}\n - name: distributor-token\n configMap:\n name: {{ template \"distributor.fullname\" . 
}}-token\n {{- end }}\n {{- if .Values.distributor.persistence.enabled }}\n volumeClaimTemplates:\n - metadata:\n name: distributor-data\n spec:\n {{- if .Values.distributor.persistence.existingClaim }}\n selector:\n matchLabels:\n app: {{ template \"distributor.name\" . }}\n {{- else }}\n {{- if .Values.distributor.persistence.storageClass }}\n {{- if (eq \"-\" .Values.distributor.persistence.storageClass) }}\n storageClassName: \"\"\n {{- else }}\n storageClassName: \"{{ .Values.distributor.persistence.storageClass }}\"\n {{- end }}\n {{- end }}\n accessModes: [ \"{{ .Values.distributor.persistence.accessMode }}\" ]\n resources:\n requests:\n storage: {{ .Values.distributor.persistence.size }}\n {{- end }}\n {{- else }}\n - name: distributor-data\n emptyDir: {}\n {{- end }}\n",
"# distributor-token.yaml\n{{- if .Values.distributor.token }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"distributor.fullname\" . }}-token\n labels:\n app: {{ template \"distribution.name\" . }}\n chart: {{ template \"distribution.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ndata:\n token: |\n {{ .Values.distributor.token }}\n{{- end }}",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $serviceName := include \"distribution.fullname\" . -}}\n{{- $servicePort := .Values.distribution.externalPort -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"distribution.fullname\" . }}\n labels:\n app: {{ template \"distribution.name\" . }}\n chart: {{ template \"distribution.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n annotations:\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n{{- if .Values.ingress.hosts }}\n rules:\n {{- range $host := .Values.ingress.hosts }}\n - host: {{ $host }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n{{- end -}}\n {{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n",
"# mongo-connection-secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"distribution.fullname\" . }}-mongo-connection\n labels:\n app: {{ template \"distribution.name\" . }}\n chart: {{ template \"distribution.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n mongo_connectionString: {{ template \"mongodb.url\" . }}\n audit_mongo_connectionString: {{ template \"mongodb.audit.url\" . }}",
"# redis-connection-secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"distribution.fullname\" . }}-redis-connection\n labels:\n app: {{ template \"distribution.name\" . }}\n chart: {{ template \"distribution.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n redis_connectionString: {{ template \"redis.url\" . }}"
] | # Default values for distribution.
# This is a YAML-formatted file.
# Beware when changing values here. You should know what you are doing!
# Access the values with {{ .Values.key.subkey }}
# Common
initContainerImage: "alpine:3.6"
# For pulling images from private registries
imagePullSecrets:
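# The templates reference this value as a single secret name (not a list), so a plain
# string works here. Illustrative example only (hypothetical secret name):
# imagePullSecrets: my-registry-creds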
## Role Based Access Control
## Ref: https://kubernetes.io/docs/admin/authorization/rbac/
rbac:
create: true
role:
## Rules to create. It follows the role specification
rules:
- apiGroups:
- ''
resources:
- services
- endpoints
- pods
verbs:
- get
- watch
- list
## Service Account
## Ref: https://kubernetes.io/docs/admin/service-accounts-admin/
##
serviceAccount:
create: true
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the fullname template
name:
ingress:
enabled: false
# Used to create an Ingress record.
hosts:
- distribution.domain.example
annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
tls:
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - distribution.domain.example
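# A minimal sketch of creating such a secret manually (hypothetical file paths; the
# secret name matches the commented example above):
# kubectl create secret tls chart-example-tls --cert=tls.crt --key=tls.key --namespace <release-namespace>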
# Sub charts
## Configuration values for the mongodb dependency
## ref: https://github.com/kubernetes/charts/blob/master/stable/mongodb/README.md
##
mongodb:
enabled: true
image:
tag: 3.6.3
pullPolicy: IfNotPresent
persistence:
enabled: true
size: 10Gi
resources: {}
# requests:
# memory: "2Gi"
# cpu: "100m"
# limits:
# memory: "2Gi"
# cpu: "250m"
## Make sure the --wiredTigerCacheSizeGB is no more than half the memory limit!
## This is critical to protect against OOMKill by Kubernetes!
mongodbExtraFlags:
- "--wiredTigerCacheSizeGB=1"
mongodbRootPassword:
mongodbUsername: distribution
mongodbPassword:
mongodbDatabase: bintray
livenessProbe:
initialDelaySeconds: 40
readinessProbe:
initialDelaySeconds: 30
## Configuration values for the redis dependency
## ref: https://github.com/kubernetes/charts/blob/master/stable/redis/README.md
##
redis:
enabled: true
redisPassword:
persistence:
enabled: true
size: 10Gi
master:
port: 6379
resources: {}
# requests:
# memory: "1Gi"
# cpu: "100m"
# limits:
# memory: "2Gi"
# cpu: "250m"
# To use externally managed services, pass their connection URLs here
global:
mongoUrl:
mongoAuditUrl:
redisUrl:
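  # Illustrative sketch only (placeholder hosts, credentials and database names) of
  # pointing the chart at externally managed services; typically the bundled
  # mongodb/redis sub-charts would then be disabled via mongodb.enabled=false and
  # redis.enabled=false:
  # mongoUrl: mongodb://distribution:password@mongodb.example.com:27017/bintray
  # mongoAuditUrl: mongodb://distribution:password@mongodb.example.com:27017/audit
  # redisUrl: redis://:password@redis.example.com:6379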
distribution:
replicaCount: 1
name: distribution
image:
repository: "docker.bintray.io/jfrog/distribution-distribution"
  ## Note that by default the chart appVersion is used as the image tag
# version: 1.1.0
imagePullPolicy: IfNotPresent
internalPort: 8080
externalPort: 80
masterKey: BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
env:
artifactoryUrl:
btServerUrl:
artifactoryCi1Url:
artifactoryEdge1Url:
artifactoryEdge2Url:
artifactoryEdge3Url:
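    # Illustrative placeholders only (hypothetical endpoints); these values are exposed
    # to the distribution container as BT_ARTIFACTORY_URL, BT_SERVER_URL and the
    # optional artifactory_edge_N_url variables:
    # artifactoryUrl: https://artifactory.example.com/artifactory
    # btServerUrl: http://distribution.example.com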
service:
type: LoadBalancer
resources: {}
# requests:
# memory: "2Gi"
# cpu: "500m"
# limits:
# memory: "4Gi"
# cpu: "2"
## Control Java options (JAVA_OPTIONS)
## IMPORTANT: keep javaOpts.xmx no higher than resources.limits.memory
javaOpts:
xms:
xmx:
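  # Illustrative sizing sketch (hypothetical numbers) honouring the rule above,
  # e.g. with resources.limits.memory: "4Gi":
  # javaOpts:
  #   xms: "2g"
  #   xmx: "3g"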
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
accessMode: ReadWriteOnce
mountPath: "/var/opt/jfrog/distribution"
size: 50Gi
## distribution data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
distributor:
replicaCount: 1
name: distributor
image:
repository: "docker.bintray.io/jfrog/distribution-distributor"
  ## Note that by default the chart appVersion is used as the image tag
# version: 1.1.0
imagePullPolicy: IfNotPresent
token:
resources: {}
# requests:
# memory: "2Gi"
# cpu: "500m"
# limits:
# memory: "4Gi"
# cpu: "2"
## Control Java options (JAVA_OPTIONS)
## IMPORTANT: keep javaOpts.xmx no higher than resources.limits.memory
javaOpts:
xms:
xmx:
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
accessMode: ReadWriteOnce
mountPath: "/var/opt/jfrog/distributor"
size: 50Gi
  ## distributor data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
|
magic-ip-address | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"magic-ip-address.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"magic-ip-address.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"magic-ip-address.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"magic-ip-address.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"magic-ip-address.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"magic-ip-address.fullname\" . }}\n labels:\n app: {{ template \"magic-ip-address.name\" . }}\n chart: {{ template \"magic-ip-address.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nrules:\n- apiGroups: [\"\"]\n resources:\n - namespaces\n - pods\n verbs: [\"get\", \"list\"]\n{{- end -}}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"magic-ip-address.fullname\" . }}\n labels:\n app: {{ template \"magic-ip-address.name\" . }}\n chart: {{ template \"magic-ip-address.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"magic-ip-address.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"magic-ip-address.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# daemonset.yaml\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: {{ template \"magic-ip-address.fullname\" . }}\n labels:\n app: {{ template \"magic-ip-address.name\" . }}\n chart: {{ template \"magic-ip-address.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"magic-ip-address.name\" . }}\n release: {{ .Release.Name }}\n minReadySeconds: 10\n updateStrategy:\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n template:\n metadata:\n labels:\n app: {{ template \"magic-ip-address.name\" . }}\n release: {{ .Release.Name }}\n annotations:\n checksum/secret: {{ toYaml .Values.config | sha256sum }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n envFrom:\n - secretRef:\n name: {{ template \"magic-ip-address.fullname\" . }}\n env:\n - name: NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n securityContext:\n runAsUser: 0\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n terminationGracePeriodSeconds: 60\n serviceAccountName: {{ template \"magic-ip-address.serviceAccountName\" . }}\n tolerations:\n - key: node-role.kubernetes.io/master\n operator: Exists\n effect: NoSchedule\n{{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n{{- end }}\n",
"# secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"magic-ip-address.fullname\" . }}\n labels:\n app: {{ template \"magic-ip-address.name\" . }}\n chart: {{ template \"magic-ip-address.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n PORT: {{ printf \"%d\" .Values.config.port | b64enc }}\n SELECTOR: {{ .Values.config.selector | b64enc }}\n PROTOCOL: {{ .Values.config.protocol | b64enc }}\n MAGIC_IP: {{ .Values.config.ipAddress | b64enc }}\n HOST_INTERFACE: {{ .Values.config.hostInterface | b64enc }}\n verbose: {{ printf \"%v\" .Values.config.verbose | b64enc }}\n{{- range $key, $value := .Values.extraVars }}\n {{ $key }}: {{ $value | quote }}\n{{- end }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"magic-ip-address.serviceAccountName\" . }}\n labels:\n app: {{ template \"magic-ip-address.name\" . }}\n chart: {{ template \"magic-ip-address.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- end -}}\n"
] | image:
repository: mumoshu/kube-magic-ip-assigner
tag: 0.9.0-1.9.8
pullPolicy: IfNotPresent
config:
ipAddress: 169.254.210.210
selector: app=myapp
hostInterface: cni0
port: 9200
protocol: tcp
verbose:
pollInterval: 5
# A map of additional environment variables
extraVars: {}
# test1: "test2"
resources: {}
# We usually recommend not specifying default resources, leaving this as a conscious
# choice for the user. This also increases the chances that charts run on environments
# with limited resources, such as Minikube. If you do want to specify resources,
# uncomment the following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 200Mi
# requests:
# cpu: 100m
# memory: 100Mi
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
|
acs-engine-autoscaler | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"acs-engine-autoscaler.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"acs-engine-autoscaler.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{- /*\nCredit: @technosophos\nhttps://github.com/technosophos/common-chart/\nlabels.standard prints the standard Helm labels.\nThe standard labels are frequently used in metadata.\n*/ -}}\n{{- define \"acs-engine-autoscaler.labels.standard\" -}}\napp: {{ template \"acs-engine-autoscaler.name\" . }}\nheritage: {{ .Release.Service | quote }}\nrelease: {{ .Release.Name | quote }}\nchart: {{ template \"acs-engine-autoscaler.chartref\" . }}\n{{- end -}}\n\n{{- /*\nCredit: @technosophos\nhttps://github.com/technosophos/common-chart/\nchartref prints a chart name and version.\nIt does minimal escaping for use in Kubernetes labels.\nExample output:\n zookeeper-1.2.3\n wordpress-3.2.1_20170219\n*/ -}}\n{{- define \"acs-engine-autoscaler.chartref\" -}}\n {{- replace \"+\" \"_\" .Chart.Version | printf \"%s-%s\" .Chart.Name -}}\n{{- end -}}",
"# deployment.yaml\n{{- if and .Values.acsenginecluster.resourcegroup .Values.acsenginecluster.azurespappid .Values.acsenginecluster.azurespsecret .Values.acsenginecluster.azuresptenantid .Values.acsenginecluster.kubeconfigprivatekey .Values.acsenginecluster.clientprivatekey -}}\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"acs-engine-autoscaler.fullname\" . }}\n labels:\n{{ include \"acs-engine-autoscaler.labels.standard\" . | indent 4 }}\n annotations:\n description: {{ .Chart.Description }}\n{{- with .Values.deploymentAnnotations }}\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n replicas: {{ .Values.replicaCount }}\n template:\n metadata:\n labels:\n{{ include \"acs-engine-autoscaler.labels.standard\" . | indent 8 }}\n openai/do-not-drain: \"true\"\n{{- with .Values.podAnnotations }}\n annotations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n spec:\n containers:\n - name: {{ template \"acs-engine-autoscaler.fullname\" . }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n - name: AZURE_SP_APP_ID\n valueFrom:\n secretKeyRef:\n name: {{ template \"acs-engine-autoscaler.fullname\" . }}\n key: azure-sp-app-id\n - name: AZURE_SP_SECRET\n valueFrom:\n secretKeyRef:\n name: {{ template \"acs-engine-autoscaler.fullname\" . }}\n key: azure-sp-secret\n - name: AZURE_SP_TENANT_ID\n valueFrom:\n secretKeyRef:\n name: {{ template \"acs-engine-autoscaler.fullname\" . }}\n key: azure-sp-tenant-id\n - name: KUBECONFIG_PRIVATE_KEY\n valueFrom:\n secretKeyRef:\n name: {{ template \"acs-engine-autoscaler.fullname\" . }}\n key: kubeconfig-private-key\n - name: CLIENT_PRIVATE_KEY\n valueFrom:\n secretKeyRef:\n name: {{ template \"acs-engine-autoscaler.fullname\" . }}\n key: client-private-key\n - name: CA_PRIVATE_KEY\n valueFrom:\n secretKeyRef:\n name: {{ template \"acs-engine-autoscaler.fullname\" . }}\n key: ca-private-key\n command:\n - python\n - main.py\n - --resource-group\n - {{ .Values.acsenginecluster.resourcegroup }}\n {{- if .Values.acsenginecluster.acsdeployment }}\n - --acs-deployment\n - {{ .Values.acsenginecluster.acsdeployment }}\n {{- end }}\n {{- if .Values.acsenginecluster.sleeptime }}\n - --sleep\n - {{ .Values.acsenginecluster.sleeptime | quote }}\n {{- end }}\n {{- if .Values.acsenginecluster.ignorepools }}\n - --ignore-pools \n - {{ .Values.acsenginecluster.ignorepools }}\n {{- end }}\n {{- if .Values.acsenginecluster.spareagents }}\n - --spare-agents \n - {{ .Values.acsenginecluster.spareagents | quote }}\n {{- end }}\n {{- if .Values.acsenginecluster.idlethreshold }}\n - --idle-threshold \n - {{ .Values.acsenginecluster.idlethreshold | quote }}\n {{- end }}\n {{- if .Values.acsenginecluster.overprovision }}\n - --over-provision \n - {{ .Values.acsenginecluster.overprovision | quote }}\n {{- end }}\n - -vvv\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n{{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n restartPolicy: Always\n dnsPolicy: Default\n{{ end }}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"acs-engine-autoscaler.fullname\" . }}\n labels:\n{{ include \"acs-engine-autoscaler.labels.standard\" . | indent 4 }}\ntype: Opaque\ndata:\n azure-sp-app-id: {{ default \"MISSING\" .Values.acsenginecluster.azurespappid | b64enc | quote }}\n azure-sp-secret: {{ default \"MISSING\" .Values.acsenginecluster.azurespsecret | b64enc | quote }}\n azure-sp-tenant-id: {{ default \"MISSING\" .Values.acsenginecluster.azuresptenantid | b64enc | quote }}\n kubeconfig-private-key: {{ default \"MISSING\" .Values.acsenginecluster.kubeconfigprivatekey | b64enc | quote }}\n client-private-key: {{ default \"MISSING\" .Values.acsenginecluster.clientprivatekey | b64enc | quote }}\n ca-private-key: {{ default \"MISSING\" .Values.acsenginecluster.caprivatekey | b64enc | quote }}"
] | # Default values for acs-engine-autoscaler.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
## Image for kubernetes-acs-engine-autoscaler
## Will update the image and tag later
image:
repository: wbuchwalter/kubernetes-acs-engine-autoscaler
tag: 2.1.1
pullPolicy: IfNotPresent
resources: {}
# We usually recommend not specifying default resources, leaving this as a conscious
# choice for the user. This also increases the chances that charts run on environments
# with limited resources, such as Minikube. If you do want to specify resources,
# uncomment the following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
## Pod Annotations
podAnnotations: {}
## Deployment Annotations
deploymentAnnotations: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
acsenginecluster:
resourcegroup:
azurespappid:
azurespsecret:
azuresptenantid:
kubeconfigprivatekey:
clientprivatekey:
caprivatekey:
## Optional parameter for deployment name if not using default
# acsdeployment:
## Optional parameter for sleep time between scaling loops (default: 60)
# sleeptime:
## Optional parameter listing pools to exclude from scaling
# ignorepools:
## Optional parameter specifying the number of nodes to keep spare in a pool after scaling
# spareagents:
## Optional parameter specifying the maximum duration (in seconds) an agent can stay idle before being deleted
# idlethreshold:
## Optional parameter specifying the number of extra agents to create when scaling out
# overprovision:
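# A minimal illustrative values sketch (all placeholder/hypothetical values; the real
# service-principal secret and private keys must be supplied by the operator). Note
# that the deployment template only renders once the non-optional fields above are set:
# acsenginecluster:
#   resourcegroup: my-acs-resource-group
#   azurespappid: 00000000-0000-0000-0000-000000000000
#   azurespsecret: <service-principal-secret>
#   azuresptenantid: 00000000-0000-0000-0000-000000000000
#   kubeconfigprivatekey: <kubeconfig-private-key>
#   clientprivatekey: <client-private-key>
#   caprivatekey: <ca-private-key>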
|
elasticsearch | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"elasticsearch.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"elasticsearch.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified client name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"elasticsearch.client.fullname\" -}}\n{{ template \"elasticsearch.fullname\" . }}-{{ .Values.client.name }}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified data name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"elasticsearch.data.fullname\" -}}\n{{ template \"elasticsearch.fullname\" . }}-{{ .Values.data.name }}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified master name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"elasticsearch.master.fullname\" -}}\n{{ template \"elasticsearch.fullname\" . }}-{{ .Values.master.name }}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use for the client component\n*/}}\n{{- define \"elasticsearch.serviceAccountName.client\" -}}\n{{- if .Values.serviceAccounts.client.create -}}\n {{ default (include \"elasticsearch.client.fullname\" .) .Values.serviceAccounts.client.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccounts.client.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use for the data component\n*/}}\n{{- define \"elasticsearch.serviceAccountName.data\" -}}\n{{- if .Values.serviceAccounts.data.create -}}\n {{ default (include \"elasticsearch.data.fullname\" .) .Values.serviceAccounts.data.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccounts.data.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use for the master component\n*/}}\n{{- define \"elasticsearch.serviceAccountName.master\" -}}\n{{- if .Values.serviceAccounts.master.create -}}\n {{ default (include \"elasticsearch.master.fullname\" .) .Values.serviceAccounts.master.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccounts.master.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nplugin installer template\n*/}}\n{{- define \"plugin-installer\" -}}\n- name: es-plugin-install\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n securityContext:\n capabilities:\n add:\n - IPC_LOCK\n - SYS_RESOURCE\n command:\n - \"sh\"\n - \"-c\"\n - |\n {{- range .Values.cluster.plugins }}\n PLUGIN_NAME=\"{{ . 
}}\"\n echo \"Installing $PLUGIN_NAME...\"\n if /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep \"$PLUGIN_NAME\" > /dev/null; then\n echo \"Plugin $PLUGIN_NAME already exists, skipping.\"\n else\n /usr/share/elasticsearch/bin/elasticsearch-plugin install -b $PLUGIN_NAME\n fi\n {{- end }}\n volumeMounts:\n - mountPath: /usr/share/elasticsearch/plugins/\n name: plugindir\n - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n name: config\n subPath: elasticsearch.yml\n{{- end -}}\n",
"# client-auth.yaml\n{{- if and ( .Values.client.ingress.user ) ( .Values.client.ingress.password ) }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: '{{ include \"elasticsearch.client.fullname\" . }}-auth'\ntype: Opaque\ndata:\n auth: {{ printf \"%s:{PLAIN}%s\\n\" .Values.client.ingress.user .Values.client.ingress.password | b64enc | quote }}\n{{- end }}\n\n",
"# client-deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n component: \"{{ .Values.client.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"elasticsearch.client.fullname\" . }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"elasticsearch.name\" . }}\n component: \"{{ .Values.client.name }}\"\n release: {{ .Release.Name }}\n replicas: {{ .Values.client.replicas }}\n template:\n metadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n component: \"{{ .Values.client.name }}\"\n release: {{ .Release.Name }}\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n {{- if .Values.client.podAnnotations }}\n{{ toYaml .Values.client.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n serviceAccountName: {{ template \"elasticsearch.serviceAccountName.client\" . }}\n{{- if .Values.client.priorityClassName }}\n priorityClassName: \"{{ .Values.client.priorityClassName }}\"\n{{- end }}\n securityContext:\n fsGroup: 1000\n {{- if or .Values.client.antiAffinity .Values.client.nodeAffinity }}\n affinity:\n {{- end }}\n {{- if eq .Values.client.antiAffinity \"hard\" }}\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - topologyKey: \"kubernetes.io/hostname\"\n labelSelector:\n matchLabels:\n app: \"{{ template \"elasticsearch.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n component: \"{{ .Values.client.name }}\"\n {{- else if eq .Values.client.antiAffinity \"soft\" }}\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 1\n podAffinityTerm:\n topologyKey: kubernetes.io/hostname\n labelSelector:\n matchLabels:\n app: \"{{ template \"elasticsearch.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n component: \"{{ .Values.client.name }}\"\n {{- end }}\n {{- with .Values.client.nodeAffinity }}\n nodeAffinity:\n{{ toYaml . | indent 10 }}\n {{- end }}\n{{- if .Values.client.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.client.nodeSelector | indent 8 }}\n{{- end }}\n{{- if .Values.client.tolerations }}\n tolerations:\n{{ toYaml .Values.client.tolerations | indent 8 }}\n{{- end }}\n{{- if .Values.client.terminationGracePeriodSeconds }}\n terminationGracePeriodSeconds: {{ .Values.client.terminationGracePeriodSeconds }}\n{{- end }}\n{{- if or .Values.extraInitContainers .Values.sysctlInitContainer.enabled .Values.cluster.plugins }}\n initContainers:\n{{- if .Values.sysctlInitContainer.enabled }}\n # see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html\n # and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall\n - name: \"sysctl\"\n image: \"{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}\"\n imagePullPolicy: {{ .Values.initImage.pullPolicy | quote }}\n resources:\n{{ toYaml .Values.client.initResources | indent 12 }}\n command: [\"sysctl\", \"-w\", \"vm.max_map_count=262144\"]\n securityContext:\n privileged: true\n{{- end }}\n{{- if .Values.extraInitContainers }}\n{{ tpl .Values.extraInitContainers . | indent 6 }}\n{{- end }}\n{{- if .Values.cluster.plugins }}\n{{ include \"plugin-installer\" . 
| indent 6 }}\n{{- end }}\n{{- end }}\n containers:\n - name: elasticsearch\n env:\n - name: NODE_DATA\n value: \"false\"\n{{- if hasPrefix \"5.\" .Values.appVersion }}\n - name: NODE_INGEST\n value: \"false\"\n{{- end }}\n - name: NODE_MASTER\n value: \"false\"\n - name: DISCOVERY_SERVICE\n value: {{ template \"elasticsearch.fullname\" . }}-discovery\n - name: PROCESSORS\n valueFrom:\n resourceFieldRef:\n resource: limits.cpu\n - name: ES_JAVA_OPTS\n value: \"-Djava.net.preferIPv4Stack=true -Xms{{ .Values.client.heapSize }} -Xmx{{ .Values.client.heapSize }} {{ .Values.cluster.additionalJavaOpts }} {{ .Values.client.additionalJavaOpts }}\"\n {{- range $key, $value := .Values.cluster.env }}\n - name: {{ $key }}\n value: {{ $value | quote }}\n {{- end }}\n resources:\n{{ toYaml .Values.client.resources | indent 12 }}\n readinessProbe:\n httpGet:\n path: /_cluster/health\n port: 9200\n initialDelaySeconds: 5\n livenessProbe:\n httpGet:\n path: /_cluster/health?local=true\n port: 9200\n initialDelaySeconds: 90\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n ports:\n - containerPort: 9200\n name: http\n - containerPort: 9300\n name: transport\n volumeMounts:\n - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n name: config\n subPath: elasticsearch.yml\n{{- if .Values.cluster.plugins }}\n - mountPath: /usr/share/elasticsearch/plugins/\n name: plugindir\n{{- end }}\n{{- if hasPrefix \"2.\" .Values.appVersion }}\n - mountPath: /usr/share/elasticsearch/config/logging.yml\n name: config\n subPath: logging.yml\n{{- end }}\n{{- if hasPrefix \"5.\" .Values.appVersion }}\n - mountPath: /usr/share/elasticsearch/config/log4j2.properties\n name: config\n subPath: log4j2.properties\n{{- end }}\n{{- if .Values.cluster.keystoreSecret }}\n - name: keystore\n mountPath: \"/usr/share/elasticsearch/config/elasticsearch.keystore\"\n subPath: elasticsearch.keystore\n readOnly: true\n{{- end }}\n{{- if .Values.client.hooks.preStop }}\n - name: config\n mountPath: /client-pre-stop-hook.sh\n subPath: client-pre-stop-hook.sh\n{{- end }}\n{{- if .Values.client.hooks.postStart }}\n - name: config\n mountPath: /client-post-start-hook.sh\n subPath: client-post-start-hook.sh\n{{- end }}\n{{- if or .Values.client.hooks.preStop .Values.client.hooks.postStart }}\n lifecycle:\n {{- if .Values.client.hooks.preStop }}\n preStop:\n exec:\n command: [\"/bin/bash\",\"/client-pre-stop-hook.sh\"]\n {{- end }}\n {{- if .Values.client.hooks.postStart }}\n postStart:\n exec:\n command: [\"/bin/bash\",\"/client-post-start-hook.sh\"]\n {{- end }}\n{{- end }}\n{{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range $pullSecret := .Values.image.pullSecrets }}\n - name: {{ $pullSecret }}\n {{- end }}\n{{- end }}\n volumes:\n - name: config\n configMap:\n name: {{ template \"elasticsearch.fullname\" . }}\n{{- if .Values.cluster.plugins }}\n - name: plugindir\n emptyDir: {}\n{{- end }}\n{{- if .Values.cluster.keystoreSecret }}\n - name: keystore\n secret:\n secretName: {{ .Values.cluster.keystoreSecret }}\n{{- end }}\n",
"# client-ingress.yaml\n{{- if .Values.client.ingress.enabled -}}\n{{- $fullName := include \"elasticsearch.client.fullname\" . -}}\n{{- $ingressPath := .Values.client.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n component: \"{{ .Values.client.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n annotations:\n{{- with .Values.client.ingress.annotations }}\n{{ toYaml . | indent 4 }}\n{{- end }}\n{{- if and ( .Values.client.ingress.user ) ( .Values.client.ingress.password ) }}\n nginx.ingress.kubernetes.io/auth-type: basic\n nginx.ingress.kubernetes.io/auth-secret: '{{ include \"elasticsearch.client.fullname\" . }}-auth'\n nginx.ingress.kubernetes.io/auth-realm: \"Authentication-Required\"\n{{- end }}\nspec:\n{{- if .Values.client.ingress.tls }}\n tls:\n {{- range .Values.client.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . | quote }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.client.ingress.hosts }}\n - host: {{ . | quote }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: http\n {{- end }}\n{{- end }}\n",
"# client-pdb.yaml\n{{- if .Values.client.podDisruptionBudget.enabled }}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n component: \"{{ .Values.client.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"elasticsearch.client.fullname\" . }}\nspec:\n{{- if .Values.client.podDisruptionBudget.minAvailable }}\n minAvailable: {{ .Values.client.podDisruptionBudget.minAvailable }}\n{{- end }}\n{{- if .Values.client.podDisruptionBudget.maxUnavailable }}\n maxUnavailable: {{ .Values.client.podDisruptionBudget.maxUnavailable }}\n{{- end }}\n selector:\n matchLabels:\n app: {{ template \"elasticsearch.name\" . }}\n component: \"{{ .Values.client.name }}\"\n release: {{ .Release.Name }}\n{{- end }}\n",
"# client-serviceaccount.yaml\n{{- if .Values.serviceAccounts.client.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n component: \"{{ .Values.client.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"elasticsearch.client.fullname\" . }}\n{{- end }}\n",
"# client-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n component: \"{{ .Values.client.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"elasticsearch.client.fullname\" . }}\n{{- if .Values.client.serviceAnnotations }}\n annotations:\n{{ toYaml .Values.client.serviceAnnotations | indent 4 }}\n{{- end }}\n\nspec:\n ports:\n - name: http\n port: 9200\n{{- if and .Values.client.httpNodePort (eq .Values.client.serviceType \"NodePort\") }}\n nodePort: {{ .Values.client.httpNodePort }}\n{{- end }}\n targetPort: http\n{{- if .Values.client.exposeTransportPort }}\n - name: transport\n port: 9300\n targetPort: transport\n{{- end }}\n selector:\n app: {{ template \"elasticsearch.name\" . }}\n component: \"{{ .Values.client.name }}\"\n release: {{ .Release.Name }}\n type: {{ .Values.client.serviceType }}\n{{- if .Values.client.loadBalancerIP }}\n loadBalancerIP: \"{{ .Values.client.loadBalancerIP }}\"\n{{- end }}\n {{if .Values.client.loadBalancerSourceRanges}}\n loadBalancerSourceRanges:\n {{range $rangeList := .Values.client.loadBalancerSourceRanges}}\n - {{ $rangeList }}\n {{end}}\n {{end}}",
"# configmap.yaml\n{{ $minorAppVersion := regexFind \"[0-9]*.[0-9]*\" .Values.appVersion | float64 -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"elasticsearch.fullname\" . }}\n labels:\n app: {{ template \"elasticsearch.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ndata:\n elasticsearch.yml: |-\n cluster.name: {{ .Values.cluster.name }}\n\n node.data: ${NODE_DATA:true}\n node.master: ${NODE_MASTER:true}\n{{- if hasPrefix \"5.\" .Values.appVersion }}\n node.ingest: ${NODE_INGEST:true}\n{{- else if hasPrefix \"6.\" .Values.appVersion }}\n node.ingest: ${NODE_INGEST:true}\n{{- end }}\n node.name: ${HOSTNAME}\n\n{{- if .Values.forceIpv6 }}\n network.host: \"::\"\n{{- else }}\n network.host: 0.0.0.0\n{{- end }}\n\n{{- if hasPrefix \"2.\" .Values.appVersion }}\n # see https://github.com/kubernetes/kubernetes/issues/3595\n bootstrap.mlockall: ${BOOTSTRAP_MLOCKALL:false}\n\n discovery:\n zen:\n ping.unicast.hosts: ${DISCOVERY_SERVICE:}\n minimum_master_nodes: ${MINIMUM_MASTER_NODES:2}\n{{- else if hasPrefix \"5.\" .Values.appVersion }}\n # see https://github.com/kubernetes/kubernetes/issues/3595\n bootstrap.memory_lock: ${BOOTSTRAP_MEMORY_LOCK:false}\n\n discovery:\n zen:\n ping.unicast.hosts: ${DISCOVERY_SERVICE:}\n minimum_master_nodes: ${MINIMUM_MASTER_NODES:2}\n\n{{- if .Values.cluster.xpackEnable }}\n # see https://www.elastic.co/guide/en/x-pack/current/xpack-settings.html\n {{- if or ( gt $minorAppVersion 5.4 ) ( eq $minorAppVersion 5.4 ) }}\n xpack.ml.enabled: ${XPACK_ML_ENABLED:false}\n {{- end }}\n xpack.monitoring.enabled: ${XPACK_MONITORING_ENABLED:false}\n xpack.security.enabled: ${XPACK_SECURITY_ENABLED:false}\n xpack.watcher.enabled: ${XPACK_WATCHER_ENABLED:false}\n{{- else }}\n {{- if or ( gt $minorAppVersion 5.4 ) ( eq $minorAppVersion 5.4 ) }}\n xpack.ml.enabled: false\n {{- end }}\n xpack.monitoring.enabled: false\n xpack.security.enabled: false\n xpack.watcher.enabled: false\n{{- end }}\n{{- else if hasPrefix \"6.\" .Values.appVersion }}\n # see https://github.com/kubernetes/kubernetes/issues/3595\n bootstrap.memory_lock: ${BOOTSTRAP_MEMORY_LOCK:false}\n\n discovery:\n zen:\n ping.unicast.hosts: ${DISCOVERY_SERVICE:}\n minimum_master_nodes: ${MINIMUM_MASTER_NODES:2}\n\n{{- if and ( .Values.cluster.xpackEnable ) ( gt $minorAppVersion 6.3 ) }}\n # see https://www.elastic.co/guide/en/x-pack/current/xpack-settings.html\n # After 6.3 xpack systems changed and are enabled by default and different configs manage them this enables monitoring\n xpack.monitoring.collection.enabled: ${XPACK_MONITORING_ENABLED:false}\n{{- else if .Values.cluster.xpackEnable }}\n # see https://www.elastic.co/guide/en/x-pack/current/xpack-settings.html\n xpack.ml.enabled: ${XPACK_ML_ENABLED:false}\n xpack.monitoring.enabled: ${XPACK_MONITORING_ENABLED:false}\n xpack.security.enabled: ${XPACK_SECURITY_ENABLED:false}\n xpack.watcher.enabled: ${XPACK_WATCHER_ENABLED:false}\n{{- end }}\n{{- end }}\n\n # see https://github.com/elastic/elasticsearch-definitive-guide/pull/679\n processors: ${PROCESSORS:}\n\n # avoid split-brain w/ a minimum consensus of two masters plus a data node\n gateway.expected_master_nodes: ${EXPECTED_MASTER_NODES:2}\n gateway.expected_data_nodes: ${EXPECTED_DATA_NODES:1}\n gateway.recover_after_time: ${RECOVER_AFTER_TIME:5m}\n gateway.recover_after_master_nodes: ${RECOVER_AFTER_MASTER_NODES:2}\n gateway.recover_after_data_nodes: ${RECOVER_AFTER_DATA_NODES:1}\n{{- with 
.Values.cluster.config }}\n{{ toYaml . | indent 4 }}\n{{- end }}\n{{- if hasPrefix \"2.\" .Values.appVersion }}\n logging.yml: |-\n{{ toYaml .Values.cluster.loggingYml | indent 4 }}\n{{- else }}\n log4j2.properties: |-\n{{ tpl .Values.cluster.log4j2Properties . | indent 4 }}\n{{- end }}\n{{- if .Values.data.hooks.drain.enabled }}\n data-pre-stop-hook.sh: |-\n #!/bin/bash\n exec &> >(tee -a \"/var/log/elasticsearch-hooks.log\")\n NODE_NAME=${HOSTNAME}\n echo \"Prepare to migrate data of the node ${NODE_NAME}\"\n echo \"Move all data from node ${NODE_NAME}\"\n curl -s -XPUT -H 'Content-Type: application/json' '{{ template \"elasticsearch.client.fullname\" . }}:9200/_cluster/settings' -d \"{\n \\\"transient\\\" :{\n \\\"cluster.routing.allocation.exclude._name\\\" : \\\"${NODE_NAME}\\\"\n }\n }\"\n echo \"\"\n\n while true ; do\n echo -e \"Wait for node ${NODE_NAME} to become empty\"\n SHARDS_ALLOCATION=$(curl -s -XGET 'http://{{ template \"elasticsearch.client.fullname\" . }}:9200/_cat/shards')\n if ! echo \"${SHARDS_ALLOCATION}\" | grep -E \"${NODE_NAME}\"; then\n break\n fi\n sleep 1\n done\n echo \"Node ${NODE_NAME} is ready to shutdown\"\n data-post-start-hook.sh: |-\n #!/bin/bash\n exec &> >(tee -a \"/var/log/elasticsearch-hooks.log\")\n NODE_NAME=${HOSTNAME}\n CLUSTER_SETTINGS=$(curl -s -XGET \"http://{{ template \"elasticsearch.client.fullname\" . }}:9200/_cluster/settings\")\n if echo \"${CLUSTER_SETTINGS}\" | grep -E \"${NODE_NAME}\"; then\n echo \"Activate node ${NODE_NAME}\"\n curl -s -XPUT -H 'Content-Type: application/json' \"http://{{ template \"elasticsearch.client.fullname\" . }}:9200/_cluster/settings\" -d \"{\n \\\"transient\\\" :{\n \\\"cluster.routing.allocation.exclude._name\\\" : null\n }\n }\"\n fi\n echo \"Node ${NODE_NAME} is ready to be used\"\n{{- else }}\n {{- if .Values.data.hooks.preStop }}\n data-pre-stop-hook.sh: |-\n{{ tpl .Values.data.hooks.preStop . | indent 4 }}\n {{- end }}\n {{- if .Values.data.hooks.postStart }}\n data-post-start-hook.sh: |-\n{{ tpl .Values.data.hooks.postStart . | indent 4 }}\n {{- end }}\n{{- end }}\n\n{{- if .Values.client.hooks.preStop }}\n client-pre-stop-hook.sh: |-\n{{ tpl .Values.client.hooks.preStop . | indent 4 }}\n{{- end }}\n{{- if .Values.client.hooks.postStart }}\n client-post-start-hook.sh: |-\n{{ tpl .Values.client.hooks.postStart . | indent 4 }}\n{{- end }}\n\n{{- if .Values.master.hooks.preStop }}\n master-pre-stop-hook.sh: |-\n{{ tpl .Values.master.hooks.preStop . | indent 4 }}\n{{- end }}\n{{- if .Values.master.hooks.postStart }}\n master-post-start-hook.sh: |-\n{{ tpl .Values.master.hooks.postStart . | indent 4 }}\n{{- end }}\n",
"# data-pdb.yaml\n{{- if .Values.data.podDisruptionBudget.enabled }}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n component: \"{{ .Values.data.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"elasticsearch.data.fullname\" . }}\nspec:\n{{- if .Values.data.podDisruptionBudget.minAvailable }}\n minAvailable: {{ .Values.data.podDisruptionBudget.minAvailable }}\n{{- end }}\n{{- if .Values.data.podDisruptionBudget.maxUnavailable }}\n maxUnavailable: {{ .Values.data.podDisruptionBudget.maxUnavailable }}\n{{- end }}\n selector:\n matchLabels:\n app: {{ template \"elasticsearch.name\" . }}\n component: \"{{ .Values.data.name }}\"\n release: {{ .Release.Name }}\n{{- end }}\n",
"# data-serviceaccount.yaml\n{{- if .Values.serviceAccounts.data.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n component: \"{{ .Values.data.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"elasticsearch.data.fullname\" . }}\n{{- end }}\n",
"# data-statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n component: \"{{ .Values.data.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"elasticsearch.data.fullname\" . }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"elasticsearch.name\" . }}\n component: \"{{ .Values.data.name }}\"\n release: {{ .Release.Name }}\n role: data\n serviceName: {{ template \"elasticsearch.data.fullname\" . }}\n replicas: {{ .Values.data.replicas }}\n template:\n metadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n component: \"{{ .Values.data.name }}\"\n release: {{ .Release.Name }}\n role: data\n{{- if or .Values.data.podAnnotations (eq .Values.data.updateStrategy.type \"RollingUpdate\") }}\n annotations:\n {{- if .Values.data.podAnnotations }}\n{{ toYaml .Values.data.podAnnotations | indent 8 }}\n {{- end }}\n {{- if eq .Values.data.updateStrategy.type \"RollingUpdate\" }}\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n {{- end }}\n{{- end }}\n spec:\n {{- if .Values.schedulerName }}\n schedulerName: \"{{ .Values.schedulerName }}\"\n {{- end }}\n serviceAccountName: {{ template \"elasticsearch.serviceAccountName.data\" . }}\n{{- if .Values.data.priorityClassName }}\n priorityClassName: \"{{ .Values.data.priorityClassName }}\"\n{{- end }}\n securityContext:\n fsGroup: 1000\n {{- if or .Values.data.antiAffinity .Values.data.nodeAffinity }}\n affinity:\n {{- end }}\n {{- if eq .Values.data.antiAffinity \"hard\" }}\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - topologyKey: \"kubernetes.io/hostname\"\n labelSelector:\n matchLabels:\n app: \"{{ template \"elasticsearch.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n component: \"{{ .Values.data.name }}\"\n {{- else if eq .Values.data.antiAffinity \"soft\" }}\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 1\n podAffinityTerm:\n topologyKey: kubernetes.io/hostname\n labelSelector:\n matchLabels:\n app: \"{{ template \"elasticsearch.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n component: \"{{ .Values.data.name }}\"\n {{- end }}\n {{- with .Values.data.nodeAffinity }}\n nodeAffinity:\n{{ toYaml . 
| indent 10 }}\n {{- end }}\n{{- if .Values.data.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.data.nodeSelector | indent 8 }}\n{{- end }}\n{{- if .Values.data.tolerations }}\n tolerations:\n{{ toYaml .Values.data.tolerations | indent 8 }}\n{{- end }}\n{{- if or .Values.extraInitContainers .Values.sysctlInitContainer.enabled .Values.chownInitContainer.enabled .Values.cluster.plugins }}\n initContainers:\n{{- end }}\n{{- if .Values.sysctlInitContainer.enabled }}\n # see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html\n # and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall\n - name: \"sysctl\"\n image: \"{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}\"\n imagePullPolicy: {{ .Values.initImage.pullPolicy | quote }}\n resources:\n{{ toYaml .Values.data.initResources | indent 12 }}\n command: [\"sysctl\", \"-w\", \"vm.max_map_count=262144\"]\n securityContext:\n privileged: true\n{{- end }}\n{{- if .Values.chownInitContainer.enabled }}\n - name: \"chown\"\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n resources:\n{{ toYaml .Values.data.initResources | indent 12 }}\n command:\n - /bin/bash\n - -c\n - >\n set -e;\n set -x;\n chown elasticsearch:elasticsearch /usr/share/elasticsearch/data;\n for datadir in $(find /usr/share/elasticsearch/data -mindepth 1 -maxdepth 1 -not -name \".snapshot\"); do\n chown -R elasticsearch:elasticsearch $datadir;\n done;\n chown elasticsearch:elasticsearch /usr/share/elasticsearch/logs;\n for logfile in $(find /usr/share/elasticsearch/logs -mindepth 1 -maxdepth 1 -not -name \".snapshot\"); do\n chown -R elasticsearch:elasticsearch $logfile;\n done\n securityContext:\n runAsUser: 0\n volumeMounts:\n - mountPath: /usr/share/elasticsearch/data\n name: data\n{{- end }}\n{{- if .Values.extraInitContainers }}\n{{ tpl .Values.extraInitContainers . | indent 6 }}\n{{- end }}\n{{- if .Values.cluster.plugins }}\n{{ include \"plugin-installer\" . | indent 6 }}\n{{- end }}\n containers:\n - name: elasticsearch\n env:\n - name: DISCOVERY_SERVICE\n value: {{ template \"elasticsearch.fullname\" . 
}}-discovery\n - name: NODE_MASTER\n value: \"false\"\n - name: PROCESSORS\n valueFrom:\n resourceFieldRef:\n resource: limits.cpu\n - name: ES_JAVA_OPTS\n value: \"-Djava.net.preferIPv4Stack=true -Xms{{ .Values.data.heapSize }} -Xmx{{ .Values.data.heapSize }} {{ .Values.cluster.additionalJavaOpts }} {{ .Values.data.additionalJavaOpts }}\"\n {{- range $key, $value := .Values.cluster.env }}\n - name: {{ $key }}\n value: {{ $value | quote }}\n {{- end }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n ports:\n - containerPort: 9300\n name: transport\n{{ if .Values.data.exposeHttp }}\n - containerPort: 9200\n name: http\n{{ end }}\n resources:\n{{ toYaml .Values.data.resources | indent 12 }}\n readinessProbe:\n{{ toYaml .Values.data.readinessProbe | indent 10 }}\n volumeMounts:\n - mountPath: /usr/share/elasticsearch/data\n name: data\n - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n name: config\n subPath: elasticsearch.yml\n{{- if .Values.cluster.plugins }}\n - mountPath: /usr/share/elasticsearch/plugins/\n name: plugindir\n{{- end }}\n{{- if hasPrefix \"2.\" .Values.appVersion }}\n - mountPath: /usr/share/elasticsearch/config/logging.yml\n name: config\n subPath: logging.yml\n{{- end }}\n{{- if hasPrefix \"5.\" .Values.appVersion }}\n - mountPath: /usr/share/elasticsearch/config/log4j2.properties\n name: config\n subPath: log4j2.properties\n{{- end }}\n{{- if .Values.cluster.keystoreSecret }}\n - name: keystore\n mountPath: \"/usr/share/elasticsearch/config/elasticsearch.keystore\"\n subPath: elasticsearch.keystore\n readOnly: true\n{{- end }}\n{{- if or .Values.data.hooks.preStop .Values.data.hooks.drain.enabled }}\n - name: config\n mountPath: /data-pre-stop-hook.sh\n subPath: data-pre-stop-hook.sh\n{{- end }}\n{{- if or .Values.data.hooks.postStart .Values.data.hooks.drain.enabled }}\n - name: config\n mountPath: /data-post-start-hook.sh\n subPath: data-post-start-hook.sh\n{{- end }}\n{{- if or .Values.data.hooks.preStop .Values.data.hooks.postStart .Values.data.hooks.drain.enabled }}\n lifecycle:\n {{- if or .Values.data.hooks.preStop .Values.data.hooks.drain.enabled }}\n preStop:\n exec:\n command: [\"/bin/bash\",\"/data-pre-stop-hook.sh\"]\n {{- end }}\n {{- if or .Values.data.hooks.postStart .Values.data.hooks.drain.enabled }}\n postStart:\n exec:\n command: [\"/bin/bash\",\"/data-post-start-hook.sh\"]\n {{- end }}\n{{- end }}\n terminationGracePeriodSeconds: {{ .Values.data.terminationGracePeriodSeconds }}\n{{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range $pullSecret := .Values.image.pullSecrets }}\n - name: {{ $pullSecret }}\n {{- end }}\n{{- end }}\n volumes:\n - name: config\n configMap:\n name: {{ template \"elasticsearch.fullname\" . 
}}\n{{- if .Values.cluster.plugins }}\n - name: plugindir\n emptyDir: {}\n{{- end }}\n{{- if .Values.cluster.keystoreSecret }}\n - name: keystore\n secret:\n secretName: {{ .Values.cluster.keystoreSecret }}\n{{- end }}\n {{- if not .Values.data.persistence.enabled }}\n - name: data\n emptyDir: {}\n {{- end }}\n podManagementPolicy: {{ .Values.data.podManagementPolicy }}\n updateStrategy:\n type: {{ .Values.data.updateStrategy.type }}\n {{- if .Values.data.persistence.enabled }}\n volumeClaimTemplates:\n - metadata:\n name: {{ .Values.data.persistence.name }}\n spec:\n accessModes:\n - {{ .Values.data.persistence.accessMode | quote }}\n {{- if .Values.data.persistence.storageClass }}\n {{- if (eq \"-\" .Values.data.persistence.storageClass) }}\n storageClassName: \"\"\n {{- else }}\n storageClassName: \"{{ .Values.data.persistence.storageClass }}\"\n {{- end }}\n {{- end }}\n resources:\n requests:\n storage: \"{{ .Values.data.persistence.size }}\"\n {{- end }}\n",
"# job.yaml\n{{- if .Values.cluster.bootstrapShellCommand }}\napiVersion: batch/v1\nkind: Job\nmetadata:\n name: {{ template \"elasticsearch.fullname\" . }}-bootstrap\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n annotations:\n \"helm.sh/hook\": post-install,post-upgrade\n \"helm.sh/hook-weight\": \"10\"\n \"helm.sh/hook-delete-policy\": hook-succeeded\nspec:\n template:\n metadata:\n name: {{ template \"elasticsearch.fullname\" . }}-bootstrap\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n spec:\n containers:\n - name: bootstrap-elasticsearch\n image: byrnedo/alpine-curl\n command:\n - \"sh\"\n - \"-c\"\n - {{ .Values.cluster.bootstrapShellCommand | quote }}\n restartPolicy: Never\n backoffLimit: 20\n{{- end }}\n",
"# master-pdb.yaml\n{{- if .Values.master.podDisruptionBudget.enabled }}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n component: \"{{ .Values.master.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"elasticsearch.master.fullname\" . }}\nspec:\n{{- if .Values.master.podDisruptionBudget.minAvailable }}\n minAvailable: {{ .Values.master.podDisruptionBudget.minAvailable }}\n{{- end }}\n{{- if .Values.master.podDisruptionBudget.maxUnavailable }}\n maxUnavailable: {{ .Values.master.podDisruptionBudget.maxUnavailable }}\n{{- end }}\n selector:\n matchLabels:\n app: {{ template \"elasticsearch.name\" . }}\n component: \"{{ .Values.master.name }}\"\n release: {{ .Release.Name }}\n{{- end }}\n",
"# master-serviceaccount.yaml\n{{- if .Values.serviceAccounts.master.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n component: \"{{ .Values.master.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"elasticsearch.master.fullname\" . }}\n{{- end }}\n",
"# master-statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n component: \"{{ .Values.master.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"elasticsearch.master.fullname\" . }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"elasticsearch.name\" . }}\n component: \"{{ .Values.master.name }}\"\n release: {{ .Release.Name }}\n role: master\n serviceName: {{ template \"elasticsearch.master.fullname\" . }}\n replicas: {{ .Values.master.replicas }}\n template:\n metadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n component: \"{{ .Values.master.name }}\"\n release: {{ .Release.Name }}\n role: master\n{{- if or .Values.master.podAnnotations (eq .Values.master.updateStrategy.type \"RollingUpdate\") }}\n annotations:\n {{- if .Values.master.podAnnotations }}\n{{ toYaml .Values.master.podAnnotations | indent 8 }}\n {{- end }}\n {{- if eq .Values.master.updateStrategy.type \"RollingUpdate\" }}\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n {{- end }}\n{{- end }}\n spec:\n {{- if .Values.schedulerName }}\n schedulerName: \"{{ .Values.schedulerName }}\"\n {{- end }}\n serviceAccountName: {{ template \"elasticsearch.serviceAccountName.master\" . }}\n{{- if .Values.master.priorityClassName }}\n priorityClassName: \"{{ .Values.master.priorityClassName }}\"\n{{- end }}\n securityContext:\n fsGroup: 1000\n {{- if or .Values.master.antiAffinity .Values.master.nodeAffinity }}\n affinity:\n {{- end }}\n {{- if eq .Values.master.antiAffinity \"hard\" }}\n podAntiAffinity:\n requiredDuringSchedulingIgnoredDuringExecution:\n - topologyKey: \"kubernetes.io/hostname\"\n labelSelector:\n matchLabels:\n app: \"{{ template \"elasticsearch.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n component: \"{{ .Values.master.name }}\"\n {{- else if eq .Values.master.antiAffinity \"soft\" }}\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 1\n podAffinityTerm:\n topologyKey: kubernetes.io/hostname\n labelSelector:\n matchLabels:\n app: \"{{ template \"elasticsearch.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n component: \"{{ .Values.master.name }}\"\n {{- end }}\n {{- with .Values.master.nodeAffinity }}\n nodeAffinity:\n{{ toYaml . 
| indent 10 }}\n {{- end }}\n{{- if .Values.master.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.master.nodeSelector | indent 8 }}\n{{- end }}\n{{- if .Values.master.tolerations }}\n tolerations:\n{{ toYaml .Values.master.tolerations | indent 8 }}\n{{- end }}\n{{- if .Values.master.terminationGracePeriodSeconds }}\n terminationGracePeriodSeconds: {{ .Values.master.terminationGracePeriodSeconds }}\n{{- end }}\n{{- if or .Values.extraInitContainers .Values.sysctlInitContainer.enabled .Values.chownInitContainer.enabled .Values.cluster.plugins }}\n initContainers:\n{{- end }}\n{{- if .Values.sysctlInitContainer.enabled }}\n # see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html\n # and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall\n - name: \"sysctl\"\n image: \"{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}\"\n imagePullPolicy: {{ .Values.initImage.pullPolicy | quote }}\n resources:\n{{ toYaml .Values.master.initResources | indent 12 }}\n command: [\"sysctl\", \"-w\", \"vm.max_map_count=262144\"]\n securityContext:\n privileged: true\n{{- end }}\n{{- if .Values.chownInitContainer.enabled }}\n - name: \"chown\"\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n resources:\n{{ toYaml .Values.master.initResources | indent 12 }}\n command:\n - /bin/bash\n - -c\n - >\n set -e;\n set -x;\n chown elasticsearch:elasticsearch /usr/share/elasticsearch/data;\n for datadir in $(find /usr/share/elasticsearch/data -mindepth 1 -maxdepth 1 -not -name \".snapshot\"); do\n chown -R elasticsearch:elasticsearch $datadir;\n done;\n chown elasticsearch:elasticsearch /usr/share/elasticsearch/logs;\n for logfile in $(find /usr/share/elasticsearch/logs -mindepth 1 -maxdepth 1 -not -name \".snapshot\"); do\n chown -R elasticsearch:elasticsearch $logfile;\n done\n securityContext:\n runAsUser: 0\n volumeMounts:\n - mountPath: /usr/share/elasticsearch/data\n name: data\n{{- end }}\n{{- if .Values.extraInitContainers }}\n{{ tpl .Values.extraInitContainers . | indent 6 }}\n{{- end }}\n{{- if .Values.cluster.plugins }}\n{{ include \"plugin-installer\" . | indent 6 }}\n{{- end }}\n containers:\n - name: elasticsearch\n env:\n - name: NODE_DATA\n value: \"false\"\n{{- if hasPrefix \"5.\" .Values.appVersion }}\n - name: NODE_INGEST\n value: \"false\"\n{{- end }}\n - name: DISCOVERY_SERVICE\n value: {{ template \"elasticsearch.fullname\" . 
}}-discovery\n - name: PROCESSORS\n valueFrom:\n resourceFieldRef:\n resource: limits.cpu\n - name: ES_JAVA_OPTS\n value: \"-Djava.net.preferIPv4Stack=true -Xms{{ .Values.master.heapSize }} -Xmx{{ .Values.master.heapSize }} {{ .Values.cluster.additionalJavaOpts }} {{ .Values.master.additionalJavaOpts }}\"\n {{- range $key, $value := .Values.cluster.env }}\n - name: {{ $key }}\n value: {{ $value | quote }}\n {{- end }}\n resources:\n{{ toYaml .Values.master.resources | indent 12 }}\n readinessProbe:\n{{ toYaml .Values.master.readinessProbe | indent 10 }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n ports:\n - containerPort: 9300\n name: transport\n{{ if .Values.master.exposeHttp }}\n - containerPort: 9200\n name: http\n{{ end }}\n volumeMounts:\n - mountPath: /usr/share/elasticsearch/data\n name: data\n - mountPath: /usr/share/elasticsearch/config/elasticsearch.yml\n name: config\n subPath: elasticsearch.yml\n{{- if .Values.cluster.plugins }}\n - mountPath: /usr/share/elasticsearch/plugins/\n name: plugindir\n{{- end }}\n{{- if hasPrefix \"2.\" .Values.appVersion }}\n - mountPath: /usr/share/elasticsearch/config/logging.yml\n name: config\n subPath: logging.yml\n{{- end }}\n{{- if hasPrefix \"5.\" .Values.appVersion }}\n - mountPath: /usr/share/elasticsearch/config/log4j2.properties\n name: config\n subPath: log4j2.properties\n{{- end }}\n{{- if .Values.cluster.keystoreSecret }}\n - name: keystore\n mountPath: \"/usr/share/elasticsearch/config/elasticsearch.keystore\"\n subPath: elasticsearch.keystore\n readOnly: true\n{{- end }}\n{{- if .Values.master.hooks.preStop }}\n - name: config\n mountPath: /master-pre-stop-hook.sh\n subPath: master-pre-stop-hook.sh\n{{- end }}\n{{- if .Values.master.hooks.postStart }}\n - name: config\n mountPath: /master-post-start-hook.sh\n subPath: master-post-start-hook.sh\n{{- end }}\n{{- if or .Values.master.hooks.preStop .Values.master.hooks.postStart }}\n lifecycle:\n {{- if .Values.master.hooks.preStop }}\n preStop:\n exec:\n command: [\"/bin/bash\",\"/master-pre-stop-hook.sh\"]\n {{- end }}\n {{- if .Values.master.hooks.postStart }}\n postStart:\n exec:\n command: [\"/bin/bash\",\"/master-post-start-hook.sh\"]\n {{- end }}\n{{- end }}\n{{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range $pullSecret := .Values.image.pullSecrets }}\n - name: {{ $pullSecret }}\n {{- end }}\n{{- end }}\n volumes:\n - name: config\n configMap:\n name: {{ template \"elasticsearch.fullname\" . 
}}\n{{- if .Values.cluster.plugins }}\n - name: plugindir\n emptyDir: {}\n{{- end }}\n{{- if .Values.cluster.keystoreSecret }}\n - name: keystore\n secret:\n secretName: {{ .Values.cluster.keystoreSecret }}\n{{- end }}\n {{- if not .Values.master.persistence.enabled }}\n - name: data\n emptyDir: {}\n {{- end }}\n podManagementPolicy: {{ .Values.master.podManagementPolicy }}\n updateStrategy:\n type: {{ .Values.master.updateStrategy.type }}\n {{- if .Values.master.persistence.enabled }}\n volumeClaimTemplates:\n - metadata:\n name: {{ .Values.master.persistence.name }}\n spec:\n accessModes:\n - {{ .Values.master.persistence.accessMode | quote }}\n {{- if .Values.master.persistence.storageClass }}\n {{- if (eq \"-\" .Values.master.persistence.storageClass) }}\n storageClassName: \"\"\n {{- else }}\n storageClassName: \"{{ .Values.master.persistence.storageClass }}\"\n {{- end }}\n {{- end }}\n resources:\n requests:\n storage: \"{{ .Values.master.persistence.size }}\"\n {{ end }}\n",
"# master-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n component: \"{{ .Values.master.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"elasticsearch.fullname\" . }}-discovery\nspec:\n clusterIP: None\n ports:\n - port: 9300\n targetPort: transport\n selector:\n app: {{ template \"elasticsearch.name\" . }}\n component: \"{{ .Values.master.name }}\"\n release: {{ .Release.Name }}\n",
"# podsecuritypolicy.yaml\n{{- if .Values.podSecurityPolicy.enabled }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"elasticsearch.fullname\" . }}\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n annotations:\n{{- if .Values.podSecurityPolicy.annotations }}\n{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }}\n{{- end }}\nspec:\n privileged: true\n allowPrivilegeEscalation: true\n volumes:\n - 'configMap'\n - 'secret'\n - 'emptyDir'\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostPID: false\n hostIPC: false\n runAsUser:\n rule: 'RunAsAny'\n runAsGroup:\n rule: 'RunAsAny'\n seLinux:\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'RunAsAny'\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n - min: 1000\n max: 1000\n readOnlyRootFilesystem: false\n hostPorts:\n - min: 1\n max: 65535\n{{- end }}\n",
"# role.yaml\n{{- if .Values.podSecurityPolicy.enabled }}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n name: {{ template \"elasticsearch.fullname\" . }}\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nrules:\n- apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames:\n - {{ template \"elasticsearch.fullname\" . }}\n{{- end }}\n",
"# rolebinding.yaml\n{{- if .Values.podSecurityPolicy.enabled }}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n name: {{ template \"elasticsearch.fullname\" . }}\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nroleRef:\n kind: Role\n name: {{ template \"elasticsearch.fullname\" . }}\n apiGroup: rbac.authorization.k8s.io\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"elasticsearch.serviceAccountName.client\" . }}\n namespace: {{ .Release.Namespace }}\n- kind: ServiceAccount\n name: {{ template \"elasticsearch.serviceAccountName.data\" . }}\n namespace: {{ .Release.Namespace }}\n- kind: ServiceAccount\n name: {{ template \"elasticsearch.serviceAccountName.master\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}\n\n",
"# test-configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"elasticsearch.fullname\" . }}-test\n labels:\n app: {{ template \"elasticsearch.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\ndata:\n run.sh: |-\n @test \"Test Access and Health\" {\n curl -D - http://{{ template \"elasticsearch.client.fullname\" . }}:9200\n curl -D - http://{{ template \"elasticsearch.client.fullname\" . }}:9200/_cluster/health?wait_for_status=green\n }\n",
"# test.yaml\napiVersion: v1\nkind: Pod\nmetadata:\n name: {{ template \"elasticsearch.fullname\" . }}-test\n labels:\n app: {{ template \"elasticsearch.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\n annotations:\n \"helm.sh/hook\": test-success\nspec:\n{{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range $pullSecret := .Values.image.pullSecrets }}\n - name: {{ $pullSecret }}\n {{- end }}\n{{- end }}\n initContainers:\n - name: test-framework\n image: \"{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}\"\n command:\n - \"bash\"\n - \"-c\"\n - |\n set -ex\n # copy bats to tools dir\n cp -R /usr/local/libexec/ /tools/bats/\n volumeMounts:\n - mountPath: /tools\n name: tools\n containers:\n - name: {{ .Release.Name }}-test\n image: \"{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}\"\n command: [\"/tools/bats/bats\", \"-t\", \"/tests/run.sh\"]\n volumeMounts:\n - mountPath: /tests\n name: tests\n readOnly: true\n - mountPath: /tools\n name: tools\n volumes:\n - name: tests\n configMap:\n name: {{ template \"elasticsearch.fullname\" . }}-test\n - name: tools\n emptyDir: {}\n restartPolicy: Never\n"
] | # Default values for elasticsearch.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
appVersion: "6.8.6"
## Define serviceAccount names for components. Defaults to component's fully qualified name.
##
serviceAccounts:
client:
create: true
name:
master:
create: true
name:
data:
create: true
name:
## Specify if a Pod Security Policy must be created for the Elasticsearch pods
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
##
podSecurityPolicy:
enabled: false
annotations: {}
## Specify pod annotations
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
##
# seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
# seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
# apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
securityContext:
enabled: false
runAsUser: 1000
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName: "default-scheduler"
image:
repository: "docker.elastic.co/elasticsearch/elasticsearch-oss"
tag: "6.8.6"
pullPolicy: "IfNotPresent"
# If specified, use these secrets to access the image
# pullSecrets:
# - registry-secret
testFramework:
image: "dduportal/bats"
tag: "0.4.0"
initImage:
repository: "busybox"
tag: "latest"
pullPolicy: "Always"
cluster:
name: "elasticsearch"
# If you want X-Pack installed, switch to an image that includes it, enable this option and toggle the features you want
# enabled in the environment variables outlined in the README
xpackEnable: false
# Some settings must be placed in a keystore, so they need to be mounted in from a secret.
# Use this setting to specify the name of the secret
# keystoreSecret: eskeystore
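# The keystore secret can be created from an existing keystore file before installing the chart,
# e.g. (secret name is illustrative; the key inside the secret must be `elasticsearch.keystore`):
#   kubectl create secret generic eskeystore --from-file=elasticsearch.keystore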
config: {}
# Custom parameters, as string, to be added to ES_JAVA_OPTS environment variable
additionalJavaOpts: ""
# Command to run at the end of deployment
bootstrapShellCommand: ""
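# The command runs in a curl-capable container as a post-install/post-upgrade Job (see job.yaml above),
# e.g. (service name and index are illustrative):
# bootstrapShellCommand: "curl -s -XPUT 'http://elasticsearch-client:9200/my-index?pretty'"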
env:
# IMPORTANT: https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#minimum_master_nodes
# To prevent data loss, it is vital to configure the discovery.zen.minimum_master_nodes setting so that each master-eligible
# node knows the minimum number of master-eligible nodes that must be visible in order to form a cluster.
MINIMUM_MASTER_NODES: "2"
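# As a worked example: with the default of 3 master-eligible replicas (see master.replicas below),
# the quorum is floor(3 / 2) + 1 = 2, which is why MINIMUM_MASTER_NODES defaults to "2".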
# List of plugins to install via dedicated init container
plugins: []
# - ingest-attachment
# - mapper-size
loggingYml:
# you can override this by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
rootLogger: ${es.logger.level}, console
logger:
# log action execution errors for easier debugging
action: DEBUG
# reduce the logging for aws, too much is logged under the default INFO
com.amazonaws: WARN
appender:
console:
type: console
layout:
type: consolePattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
log4j2Properties: |
status = error
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
rootLogger.level = info
rootLogger.appenderRef.console.ref = console
logger.searchguard.name = com.floragunn
logger.searchguard.level = info
client:
name: client
replicas: 2
serviceType: ClusterIP
## If coupled with serviceType = "NodePort", this will assign a specific nodePort to the client HTTP port
# httpNodePort: 30920
loadBalancerIP: {}
loadBalancerSourceRanges: {}
## (dict) If specified, apply these annotations to the client service
# serviceAnnotations:
# example: client-svc-foo
heapSize: "512m"
# additionalJavaOpts: "-XX:MaxRAM=512m"
antiAffinity: "soft"
nodeAffinity: {}
nodeSelector: {}
tolerations: []
# terminationGracePeriodSeconds: 60
initResources: {}
# limits:
# cpu: "25m"
# # memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
resources:
limits:
cpu: "1"
# memory: "1024Mi"
requests:
cpu: "25m"
memory: "512Mi"
priorityClassName: ""
## (dict) If specified, apply these annotations to each client Pod
# podAnnotations:
# example: client-foo
podDisruptionBudget:
enabled: false
minAvailable: 1
# maxUnavailable: 1
hooks: {}
## (string) Script to execute before the client pod stops.
# preStop: |-
## (string) Script to execute after the client pod starts.
# postStart: |-
ingress:
enabled: false
# user: NAME
# password: PASSWORD
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
master:
name: master
exposeHttp: false
replicas: 3
heapSize: "512m"
# additionalJavaOpts: "-XX:MaxRAM=512m"
persistence:
enabled: true
accessMode: ReadWriteOnce
name: data
size: "4Gi"
# storageClass: "ssd"
readinessProbe:
httpGet:
path: /_cluster/health?local=true
port: 9200
initialDelaySeconds: 5
antiAffinity: "soft"
nodeAffinity: {}
nodeSelector: {}
tolerations: []
# terminationGracePeriodSeconds: 60
initResources: {}
# limits:
# cpu: "25m"
# # memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
resources:
limits:
cpu: "1"
# memory: "1024Mi"
requests:
cpu: "25m"
memory: "512Mi"
priorityClassName: ""
## (dict) If specified, apply these annotations to each master Pod
# podAnnotations:
# example: master-foo
podManagementPolicy: OrderedReady
podDisruptionBudget:
enabled: false
minAvailable: 2 # Same as `cluster.env.MINIMUM_MASTER_NODES`
# maxUnavailable: 1
updateStrategy:
type: OnDelete
hooks: {}
## (string) Script to execute before the master pod stops.
# preStop: |-
## (string) Script to execute after the master pod starts.
# postStart: |-
data:
name: data
exposeHttp: false
replicas: 2
heapSize: "1536m"
# additionalJavaOpts: "-XX:MaxRAM=1536m"
persistence:
enabled: true
accessMode: ReadWriteOnce
name: data
size: "30Gi"
# storageClass: "ssd"
readinessProbe:
httpGet:
path: /_cluster/health?local=true
port: 9200
initialDelaySeconds: 5
terminationGracePeriodSeconds: 3600
antiAffinity: "soft"
nodeAffinity: {}
nodeSelector: {}
tolerations: []
initResources: {}
# limits:
# cpu: "25m"
# # memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
resources:
limits:
cpu: "1"
# memory: "2048Mi"
requests:
cpu: "25m"
memory: "1536Mi"
priorityClassName: ""
## (dict) If specified, apply these annotations to each data Pod
# podAnnotations:
# example: data-foo
podDisruptionBudget:
enabled: false
# minAvailable: 1
maxUnavailable: 1
podManagementPolicy: OrderedReady
updateStrategy:
type: OnDelete
hooks:
## Drain the node before stopping it and re-integrate it into the cluster after start.
## When enabled, it supersedes `data.hooks.preStop` and `data.hooks.postStart` defined below.
drain:
enabled: true
## (string) Script to execute before the data pod stops. Ignored if `data.hooks.drain.enabled` is true (default)
# preStop: |-
# #!/bin/bash
# exec &> >(tee -a "/var/log/elasticsearch-hooks.log")
# NODE_NAME=${HOSTNAME}
# curl -s -XPUT -H 'Content-Type: application/json' '{{ template "elasticsearch.client.fullname" . }}:9200/_cluster/settings' -d "{
# \"transient\" :{
# \"cluster.routing.allocation.exclude._name\" : \"${NODE_NAME}\"
# }
# }"
# echo "Node ${NODE_NAME} is exluded from the allocation"
## (string) Script to execute after the data pod starts. Ignored if `data.hooks.drain.enabled` is true (default)
# postStart: |-
# #!/bin/bash
# exec &> >(tee -a "/var/log/elasticsearch-hooks.log")
# NODE_NAME=${HOSTNAME}
# CLUSTER_SETTINGS=$(curl -s -XGET "http://{{ template "elasticsearch.client.fullname" . }}:9200/_cluster/settings")
# if echo "${CLUSTER_SETTINGS}" | grep -E "${NODE_NAME}"; then
# echo "Activate node ${NODE_NAME}"
# curl -s -XPUT -H 'Content-Type: application/json' "http://{{ template "elasticsearch.client.fullname" . }}:9200/_cluster/settings" -d "{
# \"transient\" :{
# \"cluster.routing.allocation.exclude._name\" : null
# }
# }"
# fi
# echo "Node ${NODE_NAME} is ready to be used"
## Sysctl init container to setup vm.max_map_count
# see https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
# and https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration-memory.html#mlockall
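# When enabled, the init container simply runs `sysctl -w vm.max_map_count=262144` as a privileged
# container (see the statefulset templates above), so nodes do not need to be prepared manually.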
sysctlInitContainer:
enabled: true
## Chown init container to change ownership of data and logs directories to elasticsearch user
chownInitContainer:
enabled: true
## Additional init containers
extraInitContainers: |
forceIpv6: false
|
magento | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"magento.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"magento.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a random alphanumeric password string.\nWe append a random number to the string to avoid password validation errors\n*/}}\n{{- define \"magento.randomPassword\" -}}\n{{- randAlphaNum 9 -}}{{- randNumeric 1 -}}\n{{- end -}}\n\n{{/*\nGet the user defined password or use a random string\n*/}}\n{{- define \"magento.password\" -}}\n{{- $password := index .Values (printf \"%sPassword\" .Chart.Name) -}}\n{{- default (include \"magento.randomPassword\" .) $password -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"magento.mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"magento.elasticsearch.fullname\" -}}\n{{- printf \"%s-%s-client\" .Release.Name \"elasticsearch\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nGet the user defined LoadBalancerIP for this release.\nNote, returns 127.0.0.1 if using ClusterIP.\n*/}}\n{{- define \"magento.serviceIP\" -}}\n{{- if eq .Values.service.type \"ClusterIP\" -}}\n127.0.0.1\n{{- else -}}\n{{- .Values.service.loadBalancerIP | default \"\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGets the host to be used for this application.\nIf not using ClusterIP, or if a host or LoadBalancerIP is not defined, the value will be empty.\n*/}}\n{{- define \"magento.host\" -}}\n{{- $host := index .Values (printf \"%sHost\" .Chart.Name) | default \"\" -}}\n{{- default (include \"magento.serviceIP\" .) 
$host -}}\n{{- end -}}\n\n{{/*\nReturn the proper Magento image name\n*/}}\n{{- define \"magento.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the metrics image)\n*/}}\n{{- define \"magento.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"magento.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"magento.deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\n{{- if and (include \"magento.host\" .) (or .Values.mariadb.enabled .Values.externalDatabase.host) -}}\napiVersion: {{ template \"magento.deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"magento.fullname\" . }}\n labels:\n app: {{ template \"magento.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"magento.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"magento.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n{{- if or .Values.podAnnotations .Values.metrics.enabled }}\n annotations:\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n {{- if .Values.metrics.podAnnotations }}\n{{ toYaml .Values.metrics.podAnnotations | indent 8 }}\n {{- end }}\n{{- end }}\n spec:\n{{- include \"magento.imagePullSecrets\" . | indent 6 }}\n hostAliases:\n - ip: \"127.0.0.1\"\n hostnames:\n - \"status.localhost\"\n containers:\n - name: {{ template \"magento.fullname\" . }}\n image: {{ template \"magento.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n {{- if .Values.image.debug}}\n - name: BASH_DEBUG\n value: \"1\"\n - name: NAMI_DEBUG\n value: \"1\"\n {{- end }}\n - name: MARIADB_HOST\n {{- if .Values.mariadb.enabled }}\n value: {{ template \"magento.mariadb.fullname\" . }}\n {{- else }}\n value: {{ .Values.externalDatabase.host | quote }}\n {{- end }}\n - name: MARIADB_PORT_NUMBER\n {{- if .Values.mariadb.enabled }}\n value: \"3306\"\n {{- else }}\n value: {{ .Values.externalDatabase.port | quote }}\n {{- end }}\n - name: ELASTICSEARCH_HOST\n {{- if .Values.elasticsearch.enabled }}\n value: {{ template \"magento.elasticsearch.fullname\" . }}\n {{- else if .Values.externalElasticsearch.host }}\n value: {{ .Values.externalElasticsearch.host | quote }}\n {{- else }}\n value: \"\"\n {{- end }}\n - name: ELASTICSEARCH_PORT_NUMBER\n {{- if .Values.elasticsearch.enabled }}\n value: \"9200\"\n {{- else if .Values.externalElasticsearch.port }}\n value: {{ .Values.externalElasticsearch.port | quote }}\n {{- else }}\n value: \"\"\n {{- end }}\n - name: MAGENTO_DATABASE_NAME\n {{- if .Values.mariadb.enabled }}\n value: {{ .Values.mariadb.db.name | quote }}\n {{- else }}\n value: {{ .Values.externalDatabase.database | quote }}\n {{- end }}\n - name: MAGENTO_DATABASE_USER\n {{- if .Values.mariadb.enabled }}\n value: {{ .Values.mariadb.db.user | quote }}\n {{- else }}\n value: {{ .Values.externalDatabase.user | quote }}\n {{- end }}\n - name: MAGENTO_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n {{- if .Values.mariadb.enabled }}\n name: {{ template \"magento.mariadb.fullname\" . }}\n key: mariadb-password\n {{- else }}\n name: {{ template \"magento.fullname\" . }}-externaldb\n key: db-password\n {{- end }}\n {{- $port:=.Values.service.port | toString }}\n - name: MAGENTO_HOST\n value: \"{{ include \"magento.host\" . }}{{- if ne $port \"80\" }}:{{ .Values.service.port }}{{ end }}\"\n - name: MAGENTO_USERNAME\n value: {{ .Values.magentoUsername | quote }}\n - name: MAGENTO_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"magento.fullname\" . 
}}\n key: magento-password\n - name: MAGENTO_EMAIL\n value: {{ .Values.magentoEmail | quote }}\n - name: MAGENTO_ADMINURI\n value: {{ .Values.magentoAdminUri | quote }}\n - name: MAGENTO_FIRSTNAME\n value: {{ .Values.magentoFirstName | quote }}\n - name: MAGENTO_LASTNAME\n value: {{ .Values.magentoLastName | quote }}\n - name: MAGENTO_MODE\n value: {{ .Values.magentoMode | quote }}\n ports:\n - name: http\n containerPort: 80\n - name: https\n containerPort: 443\n {{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /index.php\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"magento.host\" . | quote }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /index.php\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"magento.host\" . | quote }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n - name: magento-data\n mountPath: /bitnami/magento\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"magento.metrics.image\" . }}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n command: [ '/bin/apache_exporter', '-scrape_uri', 'http://status.localhost:80/server-status/?auto']\n ports:\n - name: metrics\n containerPort: 9117\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 15\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 5\n timeoutSeconds: 1\n resources:\n {{ toYaml .Values.metrics.resources | indent 10 }}\n{{- end }}\n volumes:\n - name: magento-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"magento.fullname\" . }}-magento\n {{- else }}\n emptyDir: {}\n {{- end }}\n{{- end -}}\n",
"# externaldb-secrets.yaml\n{{- if not .Values.mariadb.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"magento.fullname\" . }}-externaldb\n labels:\n app: {{ template \"magento.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n db-password: {{ default \"\" .Values.externalDatabase.password | b64enc | quote }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"magento.fullname\" . }}\n labels:\n app: \"{{ template \"magento.fullname\" . }}\"\n chart: {{ .Chart.Name | quote }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n annotations:\n {{- if .Values.ingress.certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: {{ template \"magento.fullname\" $ }}\n servicePort: http\n {{- end }}\n tls:\n {{- range .Values.ingress.hosts }}\n {{- if .tls }}\n - hosts:\n {{- if .tlsHosts }}\n {{- range $host := .tlsHosts }}\n - {{ $host }}\n {{- end }}\n {{- else }}\n - {{ .name }}\n {{- end }}\n secretName: {{ .tlsSecret }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# magento-pvc.yaml\n{{- if .Values.persistence.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"magento.fullname\" . }}-magento\n labels:\n app: {{ template \"magento.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.magento.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.magento.size | quote }}\n{{- if .Values.persistence.magento.storageClass }}\n{{- if (eq \"-\" .Values.persistence.magento.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.magento.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"magento.fullname\" . }}\n labels:\n app: {{ template \"magento.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n magento-password: \"{{ b64enc (include \"magento.password\" .) }}\"\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"magento.fullname\" . }}\n labels:\n app: {{ template \"magento.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.service.type }}\n {{- if (or (eq .Values.service.type \"LoadBalancer\") (eq .Values.service.type \"NodePort\")) }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}\n {{- end }}\n {{- if eq .Values.service.type \"LoadBalancer\" }}\n loadBalancerIP: {{ default \"\" .Values.service.loadBalancerIP | quote }}\n {{- end }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n targetPort: http\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n selector:\n app: {{ template \"magento.fullname\" . }}\n"
] | ## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
## Bitnami Magento image version
## ref: https://hub.docker.com/r/bitnami/magento/tags/
##
image:
registry: docker.io
repository: bitnami/magento
tag: 2.3.1-debian-9-r56
## Set to true if you would like to see extra information in the logs
## It turns on BASH and NAMI debugging in minideb
## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
##
debug: false
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Magento host to create application URLs
## ref: https://github.com/bitnami/bitnami-docker-magento#configuration
##
# magentoHost:
## loadBalancerIP for the Magento Service (optional, cloud specific)
## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
##
# magentoLoadBalancerIP:
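## e.g. (hostname is illustrative; assumes the chart is installed from a repo named "stable", Helm 2 syntax):
## helm install --name my-magento --set magentoHost=magento.example.com stable/magento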
## User of the application
## ref: https://github.com/bitnami/bitnami-docker-magento#configuration
##
magentoUsername: user
## Application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/bitnami-docker-magento#configuration
##
# magentoPassword:
## Admin email
## ref: https://github.com/bitnami/bitnami-docker-magento#configuration
##
magentoEmail: [email protected]
## Prefix for Magento Admin
## ref: https://github.com/bitnami/bitnami-docker-magento#configuration
##
magentoAdminUri: admin
## First Name
## ref: https://github.com/bitnami/bitnami-docker-magento#configuration
##
magentoFirstName: FirstName
## Last Name
## ref: https://github.com/bitnami/bitnami-docker-magento#configuration
##
magentoLastName: LastName
## Mode
## ref: https://github.com/bitnami/bitnami-docker-magento#configuration
##
magentoMode: developer
## Set to `yes` to allow the container to be started with blank passwords
## ref: https://github.com/bitnami/bitnami-docker-magento#environment-variables
allowEmptyPassword: "yes"
##
## External database configuration
##
externalDatabase:
## Database host
host:
## Database port
port: 3306
## Database user
user: bn_magento
## Database password
password:
## Database name
database: bitnami_magento
##
## External elasticsearch configuration
##
externalElasticsearch:
## Elasticsearch host
host:
## Elasticsearch port
port:
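## e.g. to point Magento at an existing cluster instead of the bundled chart (host is illustrative):
## externalElasticsearch:
##   host: elasticsearch.example.com
##   port: 9200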
##
## MariaDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml
##
mariadb:
## Whether to deploy a mariadb server to satisfy the application's database requirements. To use an external database, set this to false and configure the externalDatabase parameters
enabled: true
## Disable MariaDB replication
replication:
enabled: false
## Create a database and a database user
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run
##
db:
name: bitnami_magento
user: bn_magento
## If the password is not specified, mariadb will generate a random password
##
# password:
## MariaDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run
##
# rootUser:
# password:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
master:
persistence:
enabled: true
## mariadb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
##
## Elasticsearch chart configuration
##
## https://github.com/helm/charts/blob/master/stable/elasticsearch/values.yaml
##
elasticsearch:
## Whether to deploy an elasticsearch server to use as Magento's search engine
## To use an external server, set this to false and configure the externalElasticsearch parameters
enabled: false
## Kubernetes configuration
## For minikube, set this to NodePort, elsewhere use LoadBalancer
##
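## e.g. a minimal minikube-friendly override (the nodePort value is illustrative):
## service:
##   type: NodePort
##   nodePorts:
##     http: "30080"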
service:
type: LoadBalancer
# HTTP Port
port: 80
# HTTPS Port
httpsPort: 443
##
## loadBalancerIP:
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
## https: <to set explicitly, choose port between 30000-32767>
nodePorts:
http: ""
https: ""
## Enable client source IP preservation
## ref: http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## Configure liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe:
enabled: true
initialDelaySeconds: 1000
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 6
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 3
successThreshold: 1
failureThreshold: 3
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
magento:
## magento data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 512Mi
cpu: 300m
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Configure the ingress resource that allows you to access the
## Magento installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- name: magento.local
path: /
## Set this to true in order to enable TLS on the ingress record
tls: false
## Optionally specify the TLS hosts for the ingress record
## Useful when the Ingress controller supports www-redirection
## If not specified, the above host name will be used
# tlsHosts:
# - www.magento.local
# - magento.local
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: magento.local-tls
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: magento.local-tls
# key:
# certificate:
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: lusotycoon/apache-exporter
tag: v0.5.0
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter pod Annotation and Labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9117"
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
|
openiban | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"openiban.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"openiban.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"openiban.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"openiban.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"openiban.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"openiban.fullname\" . }}\n labels:\n app: {{ template \"openiban.name\" . }}\n chart: {{ template \"openiban.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"openiban.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"openiban.name\" . }}\n release: {{ .Release.Name }}\n spec:\n {{- if .Values.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.securityContext.fsGroup }}\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: http\n containerPort: 8080\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /validate/DE89370400440532013000\n port: http\n initialDelaySeconds: 5\n readinessProbe:\n httpGet:\n path: /validate/DE89370400440532013000\n port: http\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"openiban.fullname\" . -}}\n{{- $servicePort := .Values.service.port -}}\n{{- $ingressPath := .Values.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app: {{ template \"openiban.name\" . }}\n chart: {{ template \"openiban.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: http\n {{- end }}\n{{- end }}\n",
"# role.yaml\n{{- if .Values.rbac.create }}\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"openiban.fullname\" . }}\n labels:\n app: {{ template \"openiban.name\" . }}\n chart: {{ template \"openiban.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nrules:\n- apiGroups: [\"\"]\n resources: [\"endpoints\"]\n verbs: [\"get\"]\n{{- end }}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create }}\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"openiban.fullname\" . }}\n labels:\n app: {{ template \"openiban.name\" . }}\n chart: {{ template \"openiban.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"openiban.serviceAccountName\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"openiban.fullname\" . }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"openiban.fullname\" . }}\n labels:\n app: {{ template \"openiban.name\" . }}\n chart: {{ template \"openiban.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: 8080\n protocol: TCP\n name: http\n selector:\n app: {{ template \"openiban.name\" . }}\n release: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{ if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"openiban.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"openiban.serviceAccountName\" . }}\n{{- end -}}\n"
] | # Default values for openiban.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: fourcube/openiban
tag: 1.0.1
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 8080
securityContext:
enabled: true
runAsUser: 1000
fsGroup: 1000
## For RBAC support:
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
|
hadoop | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"hadoop.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"hadoop.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"hadoop.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# hadoop-configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\ndata:\n bootstrap.sh: |\n #!/bin/bash\n\n : ${HADOOP_PREFIX:=/usr/local/hadoop}\n\n . $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh\n\n # Directory to find config artifacts\n CONFIG_DIR=\"/tmp/hadoop-config\"\n\n # Copy config files from volume mount\n\n for f in slaves core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml; do\n if [[ -e ${CONFIG_DIR}/$f ]]; then\n cp ${CONFIG_DIR}/$f $HADOOP_PREFIX/etc/hadoop/$f\n else\n echo \"ERROR: Could not find $f in $CONFIG_DIR\"\n exit 1\n fi\n done\n\n # installing libraries if any - (resource urls added comma separated to the ACP system variable)\n cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do echo == $cp; curl -LO $cp ; done; cd -\n\n if [[ \"${HOSTNAME}\" =~ \"hdfs-nn\" ]]; then\n mkdir -p /root/hdfs/namenode\n $HADOOP_PREFIX/bin/hdfs namenode -format -force -nonInteractive\n $HADOOP_PREFIX/sbin/hadoop-daemon.sh start namenode\n fi\n\n if [[ \"${HOSTNAME}\" =~ \"hdfs-dn\" ]]; then\n mkdir -p /root/hdfs/datanode\n\n # wait up to 30 seconds for namenode\n (while [[ $count -lt 15 && -z `curl -sf http://{{ include \"hadoop.fullname\" . }}-hdfs-nn:50070` ]]; do ((count=count+1)) ; echo \"Waiting for {{ include \"hadoop.fullname\" . }}-hdfs-nn\" ; sleep 2; done && [[ $count -lt 15 ]])\n [[ $? -ne 0 ]] && echo \"Timeout waiting for hdfs-nn, exiting.\" && exit 1\n\n $HADOOP_PREFIX/sbin/hadoop-daemon.sh start datanode\n fi\n\n if [[ \"${HOSTNAME}\" =~ \"yarn-rm\" ]]; then\n cp ${CONFIG_DIR}/start-yarn-rm.sh $HADOOP_PREFIX/sbin/\n cd $HADOOP_PREFIX/sbin\n chmod +x start-yarn-rm.sh\n ./start-yarn-rm.sh\n fi\n\n if [[ \"${HOSTNAME}\" =~ \"yarn-nm\" ]]; then\n sed -i '/<\\/configuration>/d' $HADOOP_PREFIX/etc/hadoop/yarn-site.xml\n cat >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml <<- EOM\n <property>\n <name>yarn.nodemanager.resource.memory-mb</name>\n <value>${MY_MEM_LIMIT:-2048}</value>\n </property>\n\n <property>\n <name>yarn.nodemanager.resource.cpu-vcores</name>\n <value>${MY_CPU_LIMIT:-2}</value>\n </property>\n EOM\n echo '</configuration>' >> $HADOOP_PREFIX/etc/hadoop/yarn-site.xml\n cp ${CONFIG_DIR}/start-yarn-nm.sh $HADOOP_PREFIX/sbin/\n cd $HADOOP_PREFIX/sbin\n chmod +x start-yarn-nm.sh\n\n # wait up to 30 seconds for resourcemanager\n (while [[ $count -lt 15 && -z `curl -sf http://{{ include \"hadoop.fullname\" . }}-yarn-rm:8088/ws/v1/cluster/info` ]]; do ((count=count+1)) ; echo \"Waiting for {{ include \"hadoop.fullname\" . }}-yarn-rm\" ; sleep 2; done && [[ $count -lt 15 ]])\n [[ $? -ne 0 ]] && echo \"Timeout waiting for yarn-rm, exiting.\" && exit 1\n\n ./start-yarn-nm.sh\n fi\n\n if [[ $1 == \"-d\" ]]; then\n until find ${HADOOP_PREFIX}/logs -mmin -1 | egrep -q '.*'; echo \"`date`: Waiting for logs...\" ; do sleep 2 ; done\n tail -F ${HADOOP_PREFIX}/logs/* &\n while true; do sleep 1000; done\n fi\n\n if [[ $1 == \"-bash\" ]]; then\n /bin/bash\n fi\n\n core-site.xml: |\n <?xml version=\"1.0\"?>\n <?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n <configuration>\n <property>\n <name>fs.defaultFS</name>\n <value>hdfs://{{ include \"hadoop.fullname\" . 
}}-hdfs-nn:9000/</value>\n <description>NameNode URI</description>\n </property>\n </configuration>\n\n hdfs-site.xml: |\n <?xml version=\"1.0\"?>\n <?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n <configuration>\n \n{{- if .Values.hdfs.webhdfs.enabled -}}\n <property>\n <name>dfs.webhdfs.enabled</name>\n <value>true</value>\n </property> \n{{- end -}}\n\n <property>\n <name>dfs.datanode.use.datanode.hostname</name>\n <value>false</value>\n </property>\n\n <property>\n <name>dfs.client.use.datanode.hostname</name>\n <value>false</value>\n </property>\n\n <property>\n <name>dfs.replication</name>\n <value>3</value>\n </property>\n\n <property>\n <name>dfs.datanode.data.dir</name>\n <value>file:///root/hdfs/datanode</value>\n <description>DataNode directory</description>\n </property>\n\n <property>\n <name>dfs.namenode.name.dir</name>\n <value>file:///root/hdfs/namenode</value>\n <description>NameNode directory for namespace and transaction logs storage.</description>\n </property>\n\n <property>\n <name>dfs.namenode.datanode.registration.ip-hostname-check</name>\n <value>false</value>\n </property>\n\n <!-- Bind to all interfaces -->\n <property>\n <name>dfs.namenode.rpc-bind-host</name>\n <value>0.0.0.0</value>\n </property>\n <property>\n <name>dfs.namenode.servicerpc-bind-host</name>\n <value>0.0.0.0</value>\n </property>\n <!-- /Bind to all interfaces -->\n\n </configuration>\n\n mapred-site.xml: |\n <?xml version=\"1.0\"?>\n <?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n\n <configuration>\n <property>\n <name>mapreduce.framework.name</name>\n <value>yarn</value>\n </property>\n <property>\n <name>mapreduce.jobhistory.address</name>\n <value>{{ include \"hadoop.fullname\" . }}-yarn-rm-0.{{ include \"hadoop.fullname\" . }}-yarn-rm.{{ .Release.Namespace }}.svc.cluster.local:10020</value>\n </property>\n <property>\n <name>mapreduce.jobhistory.webapp.address</name>\n <value>{{ include \"hadoop.fullname\" . }}-yarn-rm-0.{{ include \"hadoop.fullname\" . }}-yarn-rm.{{ .Release.Namespace }}.svc.cluster.local:19888</value>\n </property>\n </configuration>\n\n slaves: |\n localhost\n\n start-yarn-nm.sh: |\n #!/usr/bin/env bash\n\n # Licensed to the Apache Software Foundation (ASF) under one or more\n # contributor license agreements. See the NOTICE file distributed with\n # this work for additional information regarding copyright ownership.\n # The ASF licenses this file to You under the Apache License, Version 2.0\n # (the \"License\"); you may not use this file except in compliance with\n # the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n\n # Start all yarn daemons. Run this on master node.\n\n echo \"starting yarn daemons\"\n\n bin=`dirname \"${BASH_SOURCE-$0}\"`\n bin=`cd \"$bin\"; pwd`\n\n DEFAULT_LIBEXEC_DIR=\"$bin\"/../libexec\n HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}\n . 
$HADOOP_LIBEXEC_DIR/yarn-config.sh\n\n # start resourceManager\n # \"$bin\"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager\n # start nodeManager\n \"$bin\"/yarn-daemon.sh --config $YARN_CONF_DIR start nodemanager\n # start proxyserver\n #\"$bin\"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver\n\n start-yarn-rm.sh: |\n #!/usr/bin/env bash\n\n # Licensed to the Apache Software Foundation (ASF) under one or more\n # contributor license agreements. See the NOTICE file distributed with\n # this work for additional information regarding copyright ownership.\n # The ASF licenses this file to You under the Apache License, Version 2.0\n # (the \"License\"); you may not use this file except in compliance with\n # the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n\n # Start all yarn daemons. Run this on master node.\n\n echo \"starting yarn daemons\"\n\n bin=`dirname \"${BASH_SOURCE-$0}\"`\n bin=`cd \"$bin\"; pwd`\n\n DEFAULT_LIBEXEC_DIR=\"$bin\"/../libexec\n HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}\n . $HADOOP_LIBEXEC_DIR/yarn-config.sh\n\n # start resourceManager\n \"$bin\"/yarn-daemon.sh --config $YARN_CONF_DIR start resourcemanager\n # start nodeManager\n # \"$bin\"/yarn-daemons.sh --config $YARN_CONF_DIR start nodemanager\n # start proxyserver\n \"$bin\"/yarn-daemon.sh --config $YARN_CONF_DIR start proxyserver\n\n yarn-site.xml: |\n <?xml version=\"1.0\"?>\n <?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n\n <configuration>\n <property>\n <name>yarn.resourcemanager.hostname</name>\n <value>{{ include \"hadoop.fullname\" . 
}}-yarn-rm</value>\n </property>\n\n <!-- Bind to all interfaces -->\n <property>\n <name>yarn.resourcemanager.bind-host</name>\n <value>0.0.0.0</value>\n </property>\n <property>\n <name>yarn.nodemanager.bind-host</name>\n <value>0.0.0.0</value>\n </property>\n <property>\n <name>yarn.timeline-service.bind-host</name>\n <value>0.0.0.0</value>\n </property>\n <!-- /Bind to all interfaces -->\n\n <property>\n <name>yarn.nodemanager.vmem-check-enabled</name>\n <value>false</value>\n </property>\n\n <property>\n <name>yarn.nodemanager.aux-services</name>\n <value>mapreduce_shuffle</value>\n </property>\n\n <property>\n <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>\n <value>org.apache.hadoop.mapred.ShuffleHandler</value>\n </property>\n\n <property>\n <description>List of directories to store localized files in.</description>\n <name>yarn.nodemanager.local-dirs</name>\n <value>/var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir</value>\n </property>\n\n <property>\n <description>Where to store container logs.</description>\n <name>yarn.nodemanager.log-dirs</name>\n <value>/var/log/hadoop-yarn/containers</value>\n </property>\n\n <property>\n <description>Where to aggregate logs to.</description>\n <name>yarn.nodemanager.remote-app-log-dir</name>\n <value>/var/log/hadoop-yarn/apps</value>\n </property>\n\n <property>\n <name>yarn.application.classpath</name>\n <value>\n /usr/local/hadoop/etc/hadoop,\n /usr/local/hadoop/share/hadoop/common/*,\n /usr/local/hadoop/share/hadoop/common/lib/*,\n /usr/local/hadoop/share/hadoop/hdfs/*,\n /usr/local/hadoop/share/hadoop/hdfs/lib/*,\n /usr/local/hadoop/share/hadoop/mapreduce/*,\n /usr/local/hadoop/share/hadoop/mapreduce/lib/*,\n /usr/local/hadoop/share/hadoop/yarn/*,\n /usr/local/hadoop/share/hadoop/yarn/lib/*\n </value>\n </property>\n </configuration>\n",
"# hdfs-dn-pdb.yaml\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-hdfs-dn\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: hdfs-dn\nspec:\n selector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: hdfs-dn\n minAvailable: {{ .Values.hdfs.dataNode.pdbMinAvailable }}\n",
"# hdfs-dn-pvc.yaml\n{{- if .Values.persistence.dataNode.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-hdfs-dn\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: hdfs-dn\nspec:\n accessModes:\n - {{ .Values.persistence.dataNode.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.dataNode.size | quote }}\n{{- if .Values.persistence.dataNode.storageClass }}\n{{- if (eq \"-\" .Values.persistence.dataNode.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.dataNode.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# hdfs-dn-statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-hdfs-dn\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/hadoop-configmap.yaml\") . | sha256sum }}\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: hdfs-dn\nspec:\n serviceName: {{ include \"hadoop.fullname\" . }}-hdfs-dn\n replicas: {{ .Values.hdfs.dataNode.replicas }}\n selector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: hdfs-dn\n template:\n metadata:\n labels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: hdfs-dn\n spec:\n affinity:\n podAntiAffinity:\n {{- if eq .Values.antiAffinity \"hard\" }}\n requiredDuringSchedulingIgnoredDuringExecution:\n - topologyKey: \"kubernetes.io/hostname\"\n labelSelector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name | quote }}\n component: hdfs-dn\n {{- else if eq .Values.antiAffinity \"soft\" }}\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 5\n podAffinityTerm:\n topologyKey: \"kubernetes.io/hostname\"\n labelSelector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name | quote }}\n component: hdfs-dn\n {{- end }}\n terminationGracePeriodSeconds: 0\n containers:\n - name: hdfs-dn\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n command:\n - \"/bin/bash\"\n - \"/tmp/hadoop-config/bootstrap.sh\"\n - \"-d\"\n resources:\n{{ toYaml .Values.hdfs.dataNode.resources | indent 10 }}\n readinessProbe:\n httpGet:\n path: /\n port: 50075\n initialDelaySeconds: 5\n timeoutSeconds: 2\n livenessProbe:\n httpGet:\n path: /\n port: 50075\n initialDelaySeconds: 10\n timeoutSeconds: 2\n volumeMounts:\n - name: hadoop-config\n mountPath: /tmp/hadoop-config\n - name: dfs\n mountPath: /root/hdfs/datanode\n volumes:\n - name: hadoop-config\n configMap:\n name: {{ include \"hadoop.fullname\" . }}\n - name: dfs\n {{- if .Values.persistence.dataNode.enabled }}\n persistentVolumeClaim:\n claimName: {{ include \"hadoop.fullname\" . }}-hdfs-dn\n {{- else }}\n emptyDir: {}\n {{- end }}\n",
"# hdfs-dn-svc.yaml\n# A headless service to create DNS records\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-hdfs-dn\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: hdfs-dn\nspec:\n ports:\n - name: dfs\n port: 9000\n protocol: TCP\n - name: webhdfs\n port: 50075\n clusterIP: None\n selector:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: hdfs-dn\n",
"# hdfs-nn-pdb.yaml\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-hdfs-nn\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: hdfs-nn\nspec:\n selector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: hdfs-nn\n minAvailable: {{ .Values.hdfs.nameNode.pdbMinAvailable }}\n",
"# hdfs-nn-pvc.yaml\n{{- if .Values.persistence.nameNode.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-hdfs-nn\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: hdfs-nn\nspec:\n accessModes:\n - {{ .Values.persistence.nameNode.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.nameNode.size | quote }}\n{{- if .Values.persistence.nameNode.storageClass }}\n{{- if (eq \"-\" .Values.persistence.nameNode.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.nameNode.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# hdfs-nn-statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-hdfs-nn\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/hadoop-configmap.yaml\") . | sha256sum }}\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: hdfs-nn\nspec:\n serviceName: {{ include \"hadoop.fullname\" . }}-hdfs-nn\n replicas: 1\n selector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: hdfs-nn\n template:\n metadata:\n labels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: hdfs-nn\n spec:\n affinity:\n podAntiAffinity:\n {{- if eq .Values.antiAffinity \"hard\" }}\n requiredDuringSchedulingIgnoredDuringExecution:\n - topologyKey: \"kubernetes.io/hostname\"\n labelSelector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name | quote }}\n component: hdfs-nn\n {{- else if eq .Values.antiAffinity \"soft\" }}\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 5\n podAffinityTerm:\n topologyKey: \"kubernetes.io/hostname\"\n labelSelector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name | quote }}\n component: hdfs-nn\n {{- end }}\n terminationGracePeriodSeconds: 0\n containers:\n - name: hdfs-nn\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n command:\n - \"/bin/bash\"\n - \"/tmp/hadoop-config/bootstrap.sh\"\n - \"-d\"\n resources:\n{{ toYaml .Values.hdfs.nameNode.resources | indent 10 }}\n readinessProbe:\n httpGet:\n path: /\n port: 50070\n initialDelaySeconds: 5\n timeoutSeconds: 2\n livenessProbe:\n httpGet:\n path: /\n port: 50070\n initialDelaySeconds: 10\n timeoutSeconds: 2\n volumeMounts:\n - name: hadoop-config\n mountPath: /tmp/hadoop-config\n - name: dfs\n mountPath: /root/hdfs/namenode\n volumes:\n - name: hadoop-config\n configMap:\n name: {{ include \"hadoop.fullname\" . }}\n - name: dfs\n {{- if .Values.persistence.nameNode.enabled }}\n persistentVolumeClaim:\n claimName: {{ include \"hadoop.fullname\" . }}-hdfs-nn\n {{- else }}\n emptyDir: {}\n {{- end }}\n",
"# hdfs-nn-svc.yaml\n# A headless service to create DNS records\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-hdfs-nn\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: hdfs-nn\nspec:\n ports:\n - name: dfs\n port: 9000\n protocol: TCP\n - name: webhdfs\n port: 50070\n clusterIP: None\n selector:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: hdfs-nn",
"# yarn-nm-pdb.yaml\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-yarn-nm\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: yarn-nm\nspec:\n selector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: yarn-nm\n minAvailable: {{ .Values.yarn.nodeManager.pdbMinAvailable }}\n",
"# yarn-nm-statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-yarn-nm\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/hadoop-configmap.yaml\") . | sha256sum }}\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: yarn-nm\nspec:\n serviceName: {{ include \"hadoop.fullname\" . }}-yarn-nm\n replicas: {{ .Values.yarn.nodeManager.replicas }}\n selector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: yarn-nm\n{{- if .Values.yarn.nodeManager.parallelCreate }}\n podManagementPolicy: Parallel\n{{- end }}\n template:\n metadata:\n labels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: yarn-nm\n spec:\n affinity:\n podAntiAffinity:\n {{- if eq .Values.antiAffinity \"hard\" }}\n requiredDuringSchedulingIgnoredDuringExecution:\n - topologyKey: \"kubernetes.io/hostname\"\n labelSelector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name | quote }}\n component: yarn-nm\n {{- else if eq .Values.antiAffinity \"soft\" }}\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 5\n podAffinityTerm:\n topologyKey: \"kubernetes.io/hostname\"\n labelSelector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name | quote }}\n component: yarn-nm\n {{- end }}\n terminationGracePeriodSeconds: 0\n containers:\n - name: yarn-nm\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n ports:\n - containerPort: 8088\n name: web\n command:\n - \"/bin/bash\"\n - \"/tmp/hadoop-config/bootstrap.sh\"\n - \"-d\"\n resources:\n{{ toYaml .Values.yarn.nodeManager.resources | indent 10 }}\n readinessProbe:\n httpGet:\n path: /node\n port: 8042\n initialDelaySeconds: 10\n timeoutSeconds: 2\n livenessProbe:\n httpGet:\n path: /node\n port: 8042\n initialDelaySeconds: 10\n timeoutSeconds: 2\n env:\n - name: MY_CPU_LIMIT\n valueFrom:\n resourceFieldRef:\n containerName: yarn-nm\n resource: limits.cpu\n divisor: 1\n - name: MY_MEM_LIMIT\n valueFrom:\n resourceFieldRef:\n containerName: yarn-nm\n resource: limits.memory\n divisor: 1M\n volumeMounts:\n - name: hadoop-config\n mountPath: /tmp/hadoop-config\n volumes:\n - name: hadoop-config\n configMap:\n name: {{ include \"hadoop.fullname\" . }}\n",
"# yarn-nm-svc.yaml\n# A headless service to create DNS records\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-yarn-nm\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: yarn-nm\nspec:\n ports:\n - port: 8088\n name: web\n - port: 8082\n name: web2\n - port: 8042\n name: api\n clusterIP: None\n selector:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: yarn-nm\n",
"# yarn-rm-pdb.yaml\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-yarn-rm\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: yarn-rm\nspec:\n selector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: yarn-rm\n minAvailable: {{ .Values.yarn.resourceManager.pdbMinAvailable }}\n",
"# yarn-rm-statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-yarn-rm\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/hadoop-configmap.yaml\") . | sha256sum }}\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: yarn-rm\nspec:\n serviceName: {{ include \"hadoop.fullname\" . }}-yarn-rm\n replicas: 1\n selector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: yarn-rm\n template:\n metadata:\n labels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: yarn-rm\n spec:\n affinity:\n podAntiAffinity:\n {{- if eq .Values.antiAffinity \"hard\" }}\n requiredDuringSchedulingIgnoredDuringExecution:\n - topologyKey: \"kubernetes.io/hostname\"\n labelSelector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name | quote }}\n component: yarn-rm\n {{- else if eq .Values.antiAffinity \"soft\" }}\n preferredDuringSchedulingIgnoredDuringExecution:\n - weight: 5\n podAffinityTerm:\n topologyKey: \"kubernetes.io/hostname\"\n labelSelector:\n matchLabels:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name | quote }}\n component: yarn-rm\n {{- end }}\n terminationGracePeriodSeconds: 0\n containers:\n - name: yarn-rm\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n ports:\n - containerPort: 8088\n name: web\n command:\n - \"/bin/bash\"\n - \"/tmp/hadoop-config/bootstrap.sh\"\n - \"-d\"\n resources:\n{{ toYaml .Values.yarn.resourceManager.resources | indent 10 }}\n readinessProbe:\n httpGet:\n path: /ws/v1/cluster/info\n port: 8088\n initialDelaySeconds: 5\n timeoutSeconds: 2\n livenessProbe:\n httpGet:\n path: /ws/v1/cluster/info\n port: 8088\n initialDelaySeconds: 10\n timeoutSeconds: 2\n volumeMounts:\n - name: hadoop-config\n mountPath: /tmp/hadoop-config\n volumes:\n - name: hadoop-config\n configMap:\n name: {{ include \"hadoop.fullname\" . }}\n",
"# yarn-rm-svc.yaml\n# A headless service to create DNS records\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-yarn-rm\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: yarn-rm\nspec:\n ports:\n - port: 8088\n name: web\n clusterIP: None\n selector:\n app: {{ include \"hadoop.name\" . }}\n release: {{ .Release.Name }}\n component: yarn-rm\n",
"# yarn-ui-svc.yaml\n# Service to access the yarn web ui\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"hadoop.fullname\" . }}-yarn-ui\n labels:\n app: {{ include \"hadoop.name\" . }}\n chart: {{ include \"hadoop.chart\" . }}\n release: {{ .Release.Name }}\n component: yarn-ui\nspec:\n ports:\n - port: 8088\n name: web\n selector:\n app: {{ include \"hadoop.name\" . }}\n component: yarn-rm\n"
] | # The base hadoop image to use for all components.
# See this repo for image build details: https://github.com/Comcast/kube-yarn/tree/master/image
image:
repository: danisla/hadoop
tag: 2.9.0
pullPolicy: IfNotPresent
# The version of the hadoop libraries being used in the image.
hadoopVersion: 2.9.0
# Select antiAffinity as either "hard" or "soft"; the default is "soft"
antiAffinity: "soft"
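# With "hard", the StatefulSets use requiredDuringSchedulingIgnoredDuringExecution on
# kubernetes.io/hostname, so two pods of the same component are never co-scheduled on
# one node; "soft" only adds a preferredDuringSchedulingIgnoredDuringExecution weight,
# which still allows co-location on small clusters.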
hdfs:
nameNode:
pdbMinAvailable: 1
resources:
requests:
memory: "256Mi"
cpu: "10m"
limits:
memory: "2048Mi"
cpu: "1000m"
dataNode:
replicas: 1
pdbMinAvailable: 1
resources:
requests:
memory: "256Mi"
cpu: "10m"
limits:
memory: "2048Mi"
cpu: "1000m"
webhdfs:
enabled: false
yarn:
resourceManager:
pdbMinAvailable: 1
resources:
requests:
memory: "256Mi"
cpu: "10m"
limits:
memory: "2048Mi"
cpu: "2000m"
nodeManager:
pdbMinAvailable: 1
# The number of YARN NodeManager instances.
replicas: 2
# Create statefulsets in parallel (K8S 1.7+)
parallelCreate: false
# CPU and memory resources allocated to each node manager pod.
# This should be tuned to fit your workload.
resources:
requests:
memory: "2048Mi"
cpu: "1000m"
limits:
memory: "2048Mi"
cpu: "1000m"
persistence:
nameNode:
enabled: false
storageClass: "-"
accessMode: ReadWriteOnce
size: 50Gi
dataNode:
enabled: false
storageClass: "-"
accessMode: ReadWriteOnce
size: 200Gi
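# Illustrative example only (release name and chart path are placeholders):
#   helm install hadoop ./hadoop \
#     --set persistence.nameNode.enabled=true \
#     --set persistence.dataNode.enabled=true \
#     --set persistence.dataNode.size=500Gi
# When persistence is disabled, the HDFS StatefulSets fall back to emptyDir volumes,
# so HDFS data is lost whenever a pod is rescheduled.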
|
dokuwiki | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"dokuwiki.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"dokuwiki.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"dokuwiki.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the proper DokuWiki image name\n*/}}\n{{- define \"dokuwiki.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the metrics image)\n*/}}\n{{- define \"dokuwiki.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"dokuwiki.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . 
}}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"dokuwiki.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.dokuwiki.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.dokuwiki.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.dokuwiki.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.dokuwiki.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.dokuwiki.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.dokuwiki.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"dokuwiki.deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: {{ template \"dokuwiki.deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"dokuwiki.fullname\" . }}\n labels:\n app: {{ template \"dokuwiki.name\" . }}\n chart: {{ template \"dokuwiki.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"dokuwiki.name\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"dokuwiki.name\" . }}\n chart: {{ template \"dokuwiki.chart\" . }}\n release: {{ .Release.Name | quote }}\n{{- if or .Values.podAnnotations .Values.metrics.enabled }}\n annotations:\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n {{- if .Values.metrics.podAnnotations }}\n{{ toYaml .Values.metrics.podAnnotations | indent 8 }}\n {{- end }}\n{{- end }}\n spec:\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n{{- include \"dokuwiki.imagePullSecrets\" . | indent 6 }}\n hostAliases:\n - ip: \"127.0.0.1\"\n hostnames:\n - \"status.localhost\"\n containers:\n - name: {{ template \"dokuwiki.fullname\" . }}\n image: {{ template \"dokuwiki.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n - name: DOKUWIKI_USERNAME\n value: {{ .Values.dokuwikiUsername | quote }}\n - name: DOKUWIKI_FULL_NAME\n value: {{ .Values.dokuwikiFullName | quote }}\n - name: DOKUWIKI_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"dokuwiki.fullname\" . }}\n key: dokuwiki-password\n - name: DOKUWIKI_EMAIL\n value: {{ .Values.dokuwikiEmail | quote }}\n - name: DOKUWIKI_WIKI_NAME\n value: {{ .Values.dokuwikiWikiName | quote }}\n ports:\n - name: http\n containerPort: 80\n - name: https\n containerPort: 443\n {{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /doku.php\n port: http\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /doku.php\n port: http\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n - name: dokuwiki-data\n mountPath: /bitnami/dokuwiki\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"dokuwiki.metrics.image\" . 
}}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n command: [ '/bin/apache_exporter', '-scrape_uri', 'http://status.localhost:80/server-status/?auto']\n ports:\n - name: metrics\n containerPort: 9117\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 15\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 5\n timeoutSeconds: 1\n resources:\n {{ toYaml .Values.metrics.resources | indent 10 }}\n{{- end }}\n volumes:\n - name: dokuwiki-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"dokuwiki.fullname\" . }}-dokuwiki\n {{- else }}\n emptyDir: {}\n {{- end }}\n",
"# dokuwiki-pvc.yaml\n{{- if .Values.persistence.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"dokuwiki.fullname\" . }}-dokuwiki\n labels:\n app: {{ template \"dokuwiki.name\" . }}\n chart: {{ template \"dokuwiki.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n accessModes:\n - {{ .Values.persistence.dokuwiki.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.dokuwiki.size | quote }}\n {{ include \"dokuwiki.storageClass\" . }}\n{{- end -}}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\n{{- range .Values.ingress.hosts }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"dokuwiki.fullname\" $ }}\n labels:\n app: {{ template \"dokuwiki.name\" $ }}\n chart: {{ template \"dokuwiki.chart\" $ }}\n release: {{ $.Release.Name | quote }}\n heritage: {{ $.Release.Service | quote }}\n annotations:\n {{- if .tls }}\n ingress.kubernetes.io/secure-backends: \"true\"\n {{- end }}\n {{- if .certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- range $key, $value := .annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: {{ template \"dokuwiki.fullname\" $ }}\n servicePort: 80\n{{- if .tls }}\n tls:\n - hosts:\n - {{ .name }}\n secretName: {{ .tlsSecret }}\n{{- end }}\n---\n{{- end }}\n{{- end }}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"dokuwiki.fullname\" . }}\n labels:\n app: {{ template \"dokuwiki.name\" . }}\n chart: {{ template \"dokuwiki.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ntype: Opaque\ndata:\n {{ if .Values.dokuwikiPassword }}\n dokuwiki-password: {{ .Values.dokuwikiPassword | b64enc | quote }}\n {{ else }}\n dokuwiki-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"dokuwiki.fullname\" . }}\n labels:\n app: {{ template \"dokuwiki.name\" . }}\n chart: {{ template \"dokuwiki.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n type: {{ .Values.service.type }}\n {{- if .Values.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n {{- end }}\n {{- if (or (eq .Values.service.type \"LoadBalancer\") (eq .Values.service.type \"NodePort\")) }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}\n {{- end }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n targetPort: http\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n - name: https\n port: {{ .Values.service.httpsPort }}\n targetPort: https\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.https)))}}\n nodePort: {{ .Values.service.nodePorts.https }}\n {{- end }}\n selector:\n app: {{ template \"dokuwiki.name\" . }}\n",
"# tls-secrets.yaml\n{{- if .Values.ingress.enabled }}\n{{- range .Values.ingress.secrets }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ .name }}\n labels:\n app: {{ template \"dokuwiki.name\" $ }}\n chart: {{ template \"dokuwiki.chart\" $ }}\n release: {{ $.Release.Name | quote }}\n heritage: {{ $.Release.Service | quote }}\ntype: kubernetes.io/tls\ndata:\n tls.crt: {{ .certificate | b64enc }}\n tls.key: {{ .key | b64enc }}\n---\n{{- end }}\n{{- end }}\n"
] | ## Global Docker image parameters
## Note that this will override the image parameters of any dependencies configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami DokuWiki image version
## ref: https://hub.docker.com/r/bitnami/dokuwiki/tags/
##
image:
registry: docker.io
repository: bitnami/dokuwiki
tag: 0.20180422.201901061035-debian-10-r28
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override dokuwiki.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override dokuwiki.fullname template
##
# fullnameOverride:
## User of the application
## ref: https://github.com/bitnami/bitnami-docker-dokuwiki#environment-variables
##
dokuwikiUsername: user
## Application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/bitnami-docker-dokuwiki#environment-variables
# dokuwikiPassword:
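## Illustrative only (release name and chart reference are placeholders): the password
## can also be fixed at install time instead of being generated, e.g.
##   helm install my-release bitnami/dokuwiki --set dokuwikiPassword=MySecretPassword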
## Admin email
## ref: https://github.com/bitnami/bitnami-docker-dokuwiki#environment-variables
##
dokuwikiEmail: [email protected]
## User's Full Name
## ref: https://github.com/bitnami/bitnami-docker-dokuwiki#environment-variables
##
dokuwikiFullName: User Name
## Name of the Wiki
## ref: https://github.com/bitnami/bitnami-docker-dokuwiki#environment-variables
##
dokuwikiWikiName: My Wiki
## Kubernetes svc configuration
##
service:
## Kubernetes svc type
## For minikube, set this to NodePort, elsewhere use LoadBalancer
##
type: LoadBalancer
## Use loadBalancerIP to request a specific static IP,
## otherwise leave blank
##
# loadBalancerIP:
# HTTP Port
port: 80
# HTTPS Port
httpsPort: 443
## Use nodePorts to request specific ports when using NodePort
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
## https: <to set explicitly, choose port between 30000-32767>
##
nodePorts:
http: ""
https: ""
## Enable client source IP preservation
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## Configure the ingress resource that allows you to access the
## Dokuwiki installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- name: dokuwiki.local
## Set this to true in order to enable TLS on the ingress record
## A side effect of this will be that the backend dokuwiki service will be connected at port 443
tls: false
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: dokuwiki.local-tls
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
annotations:
# kubernetes.io/tls-acme: true
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## The certificate should start with -----BEGIN CERTIFICATE----- and the key with
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: dokuwiki.local-tls
# key:
# certificate:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
dokuwiki:
## Dokuwiki data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 512Mi
cpu: 300m
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
initialDelaySeconds: 120
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configuration options for nodeSelector, tolerations and affinity for pod
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
tolerations: []
affinity: {}
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.7.0-debian-10-r29
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter pod Annotation and Labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9117"
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
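## Note: when metrics.enabled is true, the deployment adds an apache-exporter sidecar
## that scrapes Apache's mod_status page (http://status.localhost:80/server-status/?auto)
## and serves Prometheus metrics on port 9117; the podAnnotations above let a Prometheus
## server discover that endpoint. Illustrative resource override (example values only):
# resources:
#   requests:
#     memory: 64Mi
#     cpu: 50m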
|
mission-control | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"mission-control.name\" -}}\n{{- default .Chart.Name .Values.missionControl.name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nThe insight-executor name\n*/}}\n{{- define \"insight-executor.name\" -}}\n{{- default .Chart.Name .Values.insightExecutor.name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nThe insight-scheduler name\n*/}}\n{{- define \"insight-scheduler.name\" -}}\n{{- default .Chart.Name .Values.insightScheduler.name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nThe insight-server name\n*/}}\n{{- define \"insight-server.name\" -}}\n{{- default .Chart.Name .Values.insightServer.name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"mission-control.fullname\" -}}\n{{- if .Values.missionControl.fullnameOverride -}}\n{{- .Values.missionControl.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.missionControl.name -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"insight-executor.fullname\" -}}\n{{- if .Values.insightExecutor.fullnameOverride -}}\n{{- .Values.insightExecutor.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.insightExecutor.name -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"insight-scheduler.fullname\" -}}\n{{- if .Values.insightScheduler.fullnameOverride -}}\n{{- .Values.insightScheduler.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.insightScheduler.name -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"insight-server.fullname\" -}}\n{{- if .Values.insightServer.fullnameOverride -}}\n{{- .Values.insightServer.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.insightServer.name -}}\n{{- if contains $name .Release.Name -}}\n{{- 
.Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"mission-control.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n{{ default (include \"mission-control.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n{{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"mission-control.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# create-user.yaml\n{{- if .Values.mongodb.enabled }}\napiVersion: batch/v1\nkind: Job\nmetadata:\n name: {{ template \"mission-control.fullname\" . }}-create-user\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n component: mongodb\n annotations:\n \"helm.sh/hook\": post-install\n \"helm.sh/hook-delete-policy\": hook-succeeded\nspec:\n template:\n metadata:\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n spec:\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n restartPolicy: OnFailure\n containers:\n - name: post-install-job\n image: \"{{ .Values.postInstallHook.image.repository }}:{{ .Values.postInstallHook.image.tag }}\"\n env:\n - name: MONGODB_ADMIN_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mission-control.fullname\" . }}-mongodb-cred\n key: adminPassword\n - name: MONGODB_MC_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mission-control.fullname\" . }}-mongodb-cred\n key: mcPassword\n - name: MONGODB_INSIGHT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mission-control.fullname\" . }}-mongodb-cred\n key: insightPassword\n command:\n - 'sh'\n - '-c'\n - 'sh /scripts/setup.sh'\n volumeMounts:\n - name: mongodb-setup\n mountPath: \"/scripts\"\n volumes:\n - name: mongodb-setup\n configMap:\n name: {{ template \"mission-control.fullname\" . }}-setup-script\n{{- end }}",
"# elasticsearch-deployment.yaml\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"elasticsearch.fullname\" . }}\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n replicas: {{ .Values.replicaCount }}\n template:\n metadata:\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n release: {{ .Release.Name }}\n spec:\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.imagePullSecrets }}\n {{- end }}\n initContainers:\n - name: init-data\n image: \"{{ .Values.initContainerImage }}\"\n securityContext:\n privileged: true\n command:\n - '/bin/sh'\n - '-c'\n - >\n chmod -R 777 {{ .Values.persistence.mountPath }};\n sysctl -w vm.max_map_count={{ .Values.env.maxMapCount }}\n volumeMounts:\n - name: elasticsearch-data\n mountPath: {{ .Values.persistence.mountPath | quote }}\n containers:\n - name: {{ template \"elasticsearch.fullname\" . }}\n image: {{ .Values.image.repository }}:{{ .Values.image.version }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n env:\n - name: 'cluster.name'\n value: {{ .Values.env.clusterName }}\n - name: 'network.host'\n value: {{ .Values.env.networkHost }}\n - name: 'transport.host'\n value: {{ .Values.env.transportHost }}\n - name: 'xpack.security.enabled'\n value: {{ .Values.env.xpackSecurityEnabled | quote }}\n - name: ES_JAVA_OPTS\n value: \"-Xms{{ .Values.resources.requests.memory | trunc 1 }}g -Xmx{{ .Values.resources.requests.memory | trunc 1 }}g\"\n - name: ELASTIC_SEARCH_URL\n value: {{ .Values.env.esUrl }}\n - name: ELASTIC_SEARCH_USERNAME\n value: {{ .Values.env.esUsername }}\n - name: ELASTIC_SEARCH_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"elasticsearch.fullname\" . }}\n key: esPassword\n lifecycle:\n postStart:\n exec:\n command:\n - '/bin/sh'\n - '-c'\n - >\n sleep 5;\n mkdir -p /var/log/elasticsearch;\n bash /scripts/setup.sh > /var/log/elasticsearch/setup-$(date +%Y%m%d%H%M%S).log 2>&1\n ports:\n - containerPort: {{ .Values.internalHttpPort }}\n protocol: TCP\n - containerPort: {{ .Values.internalTransportPort }}\n protocol: TCP\n volumeMounts:\n - name: setup-script\n mountPath: \"/scripts\"\n - name: elasticsearch-data\n mountPath: {{ .Values.persistence.mountPath | quote }}\n resources:\n requests:\n memory: \"{{ .Values.resources.requests.memory }}\"\n cpu: \"{{ .Values.resources.requests.cpu }}\"\n limits:\n memory: \"{{ .Values.resources.limits.memory }}\"\n cpu: \"{{ .Values.resources.limits.cpu }}\"\n livenessProbe:\n httpGet:\n path: /_cluster/health?local=true\n port: 9200\n initialDelaySeconds: 90\n periodSeconds: 10\n readinessProbe:\n httpGet:\n path: /_cluster/health?local=true\n port: 9200\n initialDelaySeconds: 60\n volumes:\n - name: setup-script\n configMap:\n name: {{ template \"elasticsearch.fullname\" . }}-setup-script\n - name: elasticsearch-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{ else }}{{ template \"elasticsearch.fullname\" . }}{{ end }}\n {{- else }}\n emptyDir: {}\n {{- end }}",
"# elasticsearch-pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: {{ template \"elasticsearch.fullname\" . }}\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# elasticsearch-secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"elasticsearch.fullname\" . }}\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ntype: Opaque\ndata:\n {{ if .Values.env.esPassword }}\n esPassword: {{ .Values.env.esPassword | b64enc | quote }}\n {{ else }}\n esPassword: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n",
"# elasticsearch-setup-scripts.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"elasticsearch.fullname\" . }}-setup-script\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ndata:\n setup.sh: |\n #!/bin/bash\n # Startup script for preparing ElasticSearch pod for running with Mission Control\n\n echo \"Waiting for ElasticSearch to be ready\"\n until [[ \"$(curl -s -o /dev/null -w \\\"%{http_code}\\\" ${ELASTIC_SEARCH_URL}/_cluster/health?local=true)\" =~ \"200\" ]]; do\n echo \"Waiting for ElasticSearch availability\"\n sleep 2\n done\n\n mkdir -p /var/log/elasticsearch\n bash /scripts/createIndices.sh > /var/log/elasticsearch/createIndices.sh.log 2>&1\n\n createIndices.sh: |\n #!/bin/bash\n ELASTIC_SEARCH_LABEL='Elasticsearch'\n\n #Print the input with additional formatting to indicate a section/title\n title () {\n echo\n echo \"-----------------------------------------------------\"\n printf \"| %-50s|\\n\" \"$1\"\n echo \"-----------------------------------------------------\"\n }\n\n # This function prints the echo with color.\n #If invoked with a single string, treats it as INFO level\n #Valid inputs for the second parameter are DEBUG and INFO\n log() {\n echo \"\"\n echo -e $1\n echo \"\"\n }\n\n # Utility method to display warnings and important information\n warn() {\n echo \"\"\n echo -e \"\\033[33m $1 \\033[0m\"\n echo \"\"\n }\n\n errorExit() {\n echo; echo -e \"\\033[31mERROR:$1\\033[0m\"; echo\n exit 1\n }\n\n attempt_number=0\n elasticSearchIsNotReady(){\n #echo \"in method isElasticSearchReady\"\n curl \"$ELASTIC_SEARCH_URL\" > /dev/null 2>&1\n if [ $? -ne 0 ]; then\n if [ $attempt_number -gt 10 ]; then\n errorExit \"Unable to proceed. $ELASTIC_SEARCH_LABEL is not reachable. The command [curl $ELASTIC_SEARCH_URL] is failing. Gave up after $attempt_number attempts\"\n fi\n let \"attempt_number=attempt_number+1\"\n return 0\n else\n return 1\n fi\n }\n\n runCommand() {\n curl_response=\n local operation=$1\n local commandToRun=$2\n local request_body=$3\n local params=$4\n local waitTime=$5\n\n if [[ ! -z \"$waitTime\" && \"$waitTime\" != \"\" ]]; then\n sleep $waitTime\n fi\n\n commandToRun=\"\\\"$ELASTIC_SEARCH_URL/$commandToRun\\\"\"\n if [[ ! -z \"$ELASTIC_SEARCH_USERNAME\" && ! -z \"$ELASTIC_SEARCH_PASSWORD\" ]]; then\n commandToRun=\"$commandToRun --user $ELASTIC_SEARCH_USERNAME:$ELASTIC_SEARCH_PASSWORD\"\n fi\n\n if [[ ! -z \"$params\" ]]; then\n commandToRun=\"$commandToRun $params\"\n fi\n\n if [[ ! -z \"$request_body\" ]]; then\n commandToRun=\"$commandToRun -d '\"$request_body\"'\"\n fi\n if [[ \"$operation\" == \"GET\" ]]; then\n commandToRun=\"curl --silent -XGET $commandToRun\"\n curl_response=$(eval \"${commandToRun}\")\n else\n eval \"curl --silent -X $operation ${commandToRun}\" || errorExit \"could not update Elastic Search\"\n fi\n }\n\n setElasticSearchParams() {\n log \"Waiting for $ELASTIC_SEARCH_LABEL to get ready (using the command: [curl $ELASTIC_SEARCH_URL])\"\n while elasticSearchIsNotReady\n do\n sleep 5\n echo -n '.'\n done\n log \"$ELASTIC_SEARCH_LABEL is ready. 
Executing commands\"\n runCommand \"GET\" \"_template/storage_insight_template\" \"\" \"\" 10\n #echo \"$ELASTIC_SEARCH_LABEL curl response: $curl_response\"\n if [[ $curl_response = {} ]]; then\n migrateToElastic61\n runCommand \"GET\" \"_template/active_insight_data\"\n if [[ $curl_response = {} ]]; then\n log \"Creating new template\"\n runCommand \"PUT\" \"_template/storage_insight_template\" '{\"template\":\"active_insight_data_*\",\"aliases\":{\"active_insight_data\":{},\"search_insight_data\":{}},\"mappings\":{\"artifacts_storage\":{\"properties\":{\"used_space\":{\"type\":\"double\"},\"timestamp\":{\"type\":\"date\"},\"artifacts_size\":{\"type\":\"double\"}}}}}' '-H \"Content-Type: application/json\"' > /dev/null 2>&1\n runCommand \"PUT\" \"%3Cactive_insight_data_%7Bnow%2Fd%7D-1%3E\" > /dev/null 2>&1\n\n else\n performUpgrade\n fi\n\n fi\n runCommand \"GET\" \"_template/build_info_template\" \"\" \"\" 10\n if [[ $curl_response = {} ]]; then\n log \"Create Build Info template\"\n createBuildInfoTemplate\n fi\n\n updateBuildInfoTemplate\n log \"$ELASTIC_SEARCH_LABEL setup is now complete\"\n\n log \"Created build info templates\"\n }\n\n performUpgrade(){\n log \"Performing upgrade\"\n runCommand \"DELETE\" \"_template/active_insight_data\" > /dev/null 2>&1\n runCommand \"PUT\" \"_template/storage_insight_template\" '{\"template\":\"active_insight_data_*\",\"aliases\":{\"active_insight_data\":{},\"search_insight_data\":{}},\"mappings\":{\"artifacts_storage\":{\"properties\":{\"used_space\":{\"type\":\"double\"},\"timestamp\":{\"type\":\"date\"},\"artifacts_size\":{\"type\":\"double\"}}}}}' '-H \"Content-Type: application/json\"' > /dev/null 2>&1\n log \"Created new template\"\n runCommand \"GET\" \"_alias/active_insight_data\"\n if [[ $curl_response = *missing* ]]; then\n runCommand \"PUT\" \"%3Cactive_insight_data_%7Bnow%2Fd%7D-1%3E\" > /dev/null 2>&1\n else\n indexname=$(echo $curl_response |cut -d'\"' -f 2)\n log \"Old index $indexname\"\n curl_response=$(runCommand \"PUT\" \"%3Cactive_insight_data_%7Bnow%2Fd%7D-1%3E\")\n if [[ \"$curl_response\" = *\"resource_already_exists_exception\"* ]]; then\n log \"Index with same name exists, creating with different name\"\n runCommand \"PUT\" \"%3Cactive_insight_data_%7Bnow%2Fd%7D-2%3E\" > /dev/null 2>&1\n fi\n log \"Created new index\"\n runCommand \"GET\" \"_alias/active_insight_data\"\n runCommand \"POST\" \"_aliases\" '{\"actions\":[{\"remove\":{\"index\":\"'$indexname'\",\"alias\":\"active_insight_data\"}}]}' '-H \"Content-Type: application/json\"' > /dev/null 2>&1\n log \"Removed the old index from active alias\"\n fi\n }\n\n createBuildInfoTemplate(){\n runCommand \"PUT\" \"_template/build_info_template\" 
'{\"template\":\"active_build_data_*\",\"aliases\":{\"active_build_data\":{},\"search_build_data\":{}},\"mappings\":{\"build_info\":{\"properties\":{\"created_time\":{\"type\":\"date\"},\"timestamp\":{\"type\":\"date\"},\"build_name\":{\"type\":\"keyword\"},\"build_number\":{\"type\":\"integer\"},\"build_URL\":{\"type\":\"keyword\"},\"build_created_by\":{\"type\":\"keyword\"},\"project_name\":{\"type\":\"keyword\"},\"project_id\":{\"type\":\"keyword\"},\"service_id\":{\"type\":\"keyword\"},\"access_service_id\":{\"type\":\"keyword\"},\"build_promotion\":{\"type\":\"keyword\"},\"build_status\":{\"type\":\"keyword\"},\"build_duration_seconds\":{\"type\":\"integer\"},\"total_no_of_commits\":{\"type\":\"short\"},\"total_no_of_modules\":{\"type\":\"short\"},\"total_dependency_count\":{\"type\":\"short\"},\"total_artifact_count\":{\"type\":\"short\"},\"total_artifact_count_downloaded\":{\"type\":\"short\"},\"total_artifact_count_not_downloaded\":{\"type\":\"short\"},\"total_artifact_size\":{\"type\":\"double\"},\"total_dependency_size\":{\"type\":\"double\"},\"module_dependency\":{\"type\":\"nested\",\"properties\":{\"module_name\":{\"type\":\"keyword\"},\"dependency_name\":{\"type\":\"keyword\"},\"dependency_type\":{\"type\":\"keyword\"},\"dependency_size\":{\"type\":\"double\"}}},\"module_artifacts\":{\"type\":\"nested\",\"properties\":{\"module_name\":{\"type\":\"keyword\"},\"artifact_name\":{\"type\":\"keyword\"},\"artifact_size\":{\"type\":\"double\"},\"no_of_downloads\":{\"type\":\"short\"},\"last_download_by\":{\"type\":\"keyword\"}}},\"commits\":{\"type\":\"nested\",\"properties\":{\"repo\":{\"type\":\"keyword\"},\"branch\":{\"type\":\"keyword\"},\"commit_message\":{\"type\":\"text\"},\"revision_no\":{\"type\":\"keyword\"}}},\"total_vulnerability\":{\"properties\":{\"low\":{\"type\":\"short\"},\"medium\":{\"type\":\"short\"},\"high\":{\"type\":\"short\"}}},\"total_open_source_violoation\":{\"properties\":{\"low\":{\"type\":\"short\"},\"medium\":{\"type\":\"short\"},\"high\":{\"type\":\"short\"}}},\"major_xray_issues\":{\"type\":\"long\"},\"minor_xray_issues\":{\"type\":\"long\"},\"unknown_xray_issues\":{\"type\":\"long\"},\"critical_xray_issues\":{\"type\":\"long\"}}}}}' '-H \"Content-Type: application/json\"' > /dev/null 2>&1\n runCommand \"PUT\" \"%3Cactive_build_data_%7Bnow%2Fd%7D-1%3E\" > /dev/null 2>&1\n }\n\n updateBuildInfoTemplate(){\n runCommand \"PUT\" \"active_build*/_mapping/build_info\" 
'{\"properties\":{\"created_time\":{\"type\":\"date\"},\"timestamp\":{\"type\":\"date\"},\"build_name\":{\"type\":\"keyword\"},\"build_number\":{\"type\":\"integer\"},\"build_URL\":{\"type\":\"keyword\"},\"build_created_by\":{\"type\":\"keyword\"},\"project_name\":{\"type\":\"keyword\"},\"project_id\":{\"type\":\"keyword\"},\"service_id\":{\"type\":\"keyword\"},\"access_service_id\":{\"type\":\"keyword\"},\"build_promotion\":{\"type\":\"keyword\"},\"build_status\":{\"type\":\"keyword\"},\"build_duration_seconds\":{\"type\":\"integer\"},\"total_no_of_commits\":{\"type\":\"short\"},\"total_no_of_modules\":{\"type\":\"short\"},\"total_dependency_count\":{\"type\":\"short\"},\"total_artifact_count\":{\"type\":\"short\"},\"total_artifact_count_downloaded\":{\"type\":\"short\"},\"total_artifact_count_not_downloaded\":{\"type\":\"short\"},\"total_artifact_size\":{\"type\":\"double\"},\"total_dependency_size\":{\"type\":\"double\"},\"module_dependency\":{\"type\":\"nested\",\"properties\":{\"module_name\":{\"type\":\"keyword\"},\"dependency_name\":{\"type\":\"keyword\"},\"dependency_type\":{\"type\":\"keyword\"},\"dependency_size\":{\"type\":\"double\"}}},\"module_artifacts\":{\"type\":\"nested\",\"properties\":{\"module_name\":{\"type\":\"keyword\"},\"artifact_name\":{\"type\":\"keyword\"},\"artifact_size\":{\"type\":\"double\"},\"no_of_downloads\":{\"type\":\"short\"},\"last_download_by\":{\"type\":\"keyword\"}}},\"commits\":{\"type\":\"nested\",\"properties\":{\"repo\":{\"type\":\"keyword\"},\"branch\":{\"type\":\"keyword\"},\"commit_message\":{\"type\":\"text\"},\"revision_no\":{\"type\":\"keyword\"}}},\"total_vulnerability\":{\"properties\":{\"low\":{\"type\":\"short\"},\"medium\":{\"type\":\"short\"},\"high\":{\"type\":\"short\"}}},\"total_open_source_violoation\":{\"properties\":{\"low\":{\"type\":\"short\"},\"medium\":{\"type\":\"short\"},\"high\":{\"type\":\"short\"}}},\"major_xray_issues\":{\"type\":\"long\"},\"minor_xray_issues\":{\"type\":\"long\"},\"unknown_xray_issues\":{\"type\":\"long\"},\"critical_xray_issues\":{\"type\":\"long\"}}}' '-H \"Content-Type: application/json\"' > /dev/null 2>&1\n log \"Updated build info indices\"\n }\n\n migrateToElastic61(){\n local activeIndexPrefix=\"active_insight_data\"\n local repoStorageName=\"migrate-repostorage\"\n local storageSummaryName=\"migrate-storage\"\n local index=\"\"\n\n log \"Getting current indices with name : ${activeIndexPrefix}\"\n result=$(curl --silent \"$ELASTIC_SEARCH_URL/_cat/indices/${activeIndexPrefix}*\")\n if [[ \"$result\" = *\"${activeIndexPrefix}\"* ]]; then\n echo $result | while read indices ; do\n index=$(echo $indices | awk -F \" \" '{print $3}')\n log \"Attempting migrate of index : ${index}\"\n indexDate=$(echo \"${index}\" | sed -e \"s#${activeIndexPrefix}##g\")\n modifiedRepoStorageName=${repoStorageName}${indexDate}\n modifiedStorageSummaryName=${storageSummaryName}${indexDate}\n\n # Reindex from each type\n runCommand 'POST' '_reindex' '{\"source\":{\"index\":\"'${index}'\",\"type\":\"repo_storage_info\"},\"dest\":{\"index\":\"'${modifiedRepoStorageName}'\"}}' '-H \"Content-Type: application/json\"' 2 > /dev/null 2>&1\n runCommand 'POST' '_reindex' '{\"source\":{\"index\":\"'${index}'\",\"type\":\"storage_summary_info\"},\"dest\":{\"index\":\"'${modifiedStorageSummaryName}'\"}}' '-H \"Content-Type: application/json\"' 2 > /dev/null 2>&1\n\n # Add type field\n runCommand 'POST' ${modifiedRepoStorageName}'/_update_by_query' '{\"script\": {\"inline\": \"ctx._source.type = 
\\\"repo_storage_info\\\"\",\"lang\": \"painless\"}}' '-H \"Content-Type: application/json\"' 2 > /dev/null 2>&1\n runCommand 'POST' ${modifiedStorageSummaryName}'/_update_by_query' '{\"script\": {\"inline\": \"ctx._source.type = \\\"storage_summary_info\\\"\",\"lang\": \"painless\"}}' '-H \"Content-Type: application/json\"' 2 > /dev/null 2>&1\n\n # Add the new indices to search alias\n runCommand 'POST' '_aliases' '{\"actions\" : [{ \"add\" : { \"index\" : \"'${modifiedRepoStorageName}'\", \"alias\" : \"search_insight_data\" } }]}' '-H \"Content-Type: application/json\"' 2 > /dev/null 2>&1\n runCommand 'POST' '_aliases' '{\"actions\" : [{ \"add\" : { \"index\" : \"'${modifiedStorageSummaryName}'\", \"alias\" : \"search_insight_data\" } }]}' '-H \"Content-Type: application/json\"' 2 > /dev/null 2>&1\n\n # Delete the old index\n log \"Deleting index : ${index}\"\n runCommand 'DELETE' \"${index}\" > /dev/null 2>&1\n done\n fi\n }\n\n main() {\n if [[ -z $ELASTIC_SEARCH_URL ]]; then\n title \"$ELASTIC_SEARCH_LABEL Manual Setup\"\n log \"This script will attempt to seed $ELASTIC_SEARCH_LABEL with the templates and indices needed by JFrog Mission Control\"\n\n warn \"Please enter the same details as you entered during installation. If the details are incorrect, you may need to rerun the installation\"\n\n local DEFAULT_URL=\"http://docker.for.mac.localhost:9200\"\n read -p \"Please enter the $ELASTIC_SEARCH_LABEL URL [$DEFAULT_URL]:\" choice\n : ${choice:=$DEFAULT_URL}\n ELASTIC_SEARCH_URL=$choice\n fi\n echo \"Beginning $ELASTIC_SEARCH_LABEL bootstrap\"\n setElasticSearchParams\n }\n\n main\n",
"# elasticsearch-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"elasticsearch.fullname\" . }}\n labels:\n app: {{ template \"elasticsearch.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - name: http\n port: {{ .Values.internalHttpPort }}\n targetPort: {{ .Values.externalHttpPort }}\n - name: transport\n port: {{ .Values.internalTransportPort }}\n targetPort: {{ .Values.externalTransportPort }}\n selector:\n app: {{ template \"elasticsearch.name\" . }}\n release: {{ .Release.Name }}",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"mission-control.fullname\" . -}}\n{{- $ingressPath := .Values.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: http\n {{- end }}\n{{- end }}\n",
"# insight-executor-deployment.yaml\napiVersion: apps/v1beta2\nkind: Deployment\nmetadata:\n name: {{ template \"insight-executor.fullname\" . }}\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n component: {{ .Values.insightExecutor.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n replicas: {{ .Values.insightExecutor.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"mission-control.name\" . }}\n component: {{ .Values.insightExecutor.name }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"mission-control.name\" . }}\n component: {{ .Values.insightExecutor.name }}\n release: {{ .Release.Name }}\n spec:\n serviceAccountName: {{ template \"mission-control.serviceAccountName\" . }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.imagePullSecrets }}\n {{- end }}\n containers:\n - name: {{ .Values.insightExecutor.name }}\n image: {{ .Values.insightExecutor.image }}:{{ default .Chart.AppVersion .Values.insightExecutor.version }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n env:\n - name: CORE_URL\n value: 'http://{{ template \"insight-server.fullname\" . }}:{{ .Values.insightServer.internalHttpPort }}'\n - name: JFI_HOME\n value: '/var/cloudbox'\n - name: JFI_HOME_EXECUTOR\n value: '/var/cloudbox/executor'\n ports:\n - containerPort: {{ .Values.insightExecutor.internalPort }}\n protocol: TCP\n volumeMounts:\n - name: insight-executor-data\n mountPath: {{ .Values.insightExecutor.persistence.mountPath | quote }}\n livenessProbe:\n httpGet:\n path: /executorservice/api\n port: 8080\n initialDelaySeconds: 180\n periodSeconds: 10\n readinessProbe:\n httpGet:\n path: /executorservice/api\n port: 8080\n initialDelaySeconds: 180\n periodSeconds: 10\n resources:\n{{ toYaml .Values.insightExecutor.resources | indent 10 }}\n {{- with .Values.insightExecutor.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.insightExecutor.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.insightExecutor.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n volumes:\n - name: insight-executor-data\n {{- if .Values.insightExecutor.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ if .Values.insightExecutor.persistence.existingClaim }}{{ .Values.insightExecutor.persistence.existingClaim }}{{ else }}{{ template \"insight-executor.fullname\" . }}{{ end }}\n {{- else }}\n emptyDir: {}\n {{- end }}",
"# insight-executor-pvc.yaml\n{{- if and .Values.insightExecutor.persistence.enabled (not .Values.insightExecutor.persistence.existingClaim) }}\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: {{ template \"insight-executor.fullname\" . }}\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n accessModes:\n - {{ .Values.insightExecutor.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.insightExecutor.persistence.size }}\n{{- if .Values.insightExecutor.persistence.storageClass }}\n{{- if (eq \"-\" .Values.insightExecutor.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.insightExecutor.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# insight-executor-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"insight-executor.fullname\" . }}\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n component: {{ .Values.insightExecutor.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n type: {{ .Values.insightExecutor.service.type }}\n ports:\n - name: http\n port: {{ .Values.insightExecutor.internalPort }}\n targetPort: {{ .Values.insightExecutor.externalPort }}\n protocol: TCP\n selector:\n app: {{ template \"mission-control.name\" . }}\n component: \"{{ .Values.insightExecutor.name }}\"\n release: {{ .Release.Name }}",
"# insight-scheduler-deployment.yaml\napiVersion: apps/v1beta2\nkind: Deployment\nmetadata:\n name: {{ template \"insight-scheduler.fullname\" . }}\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n component: {{ .Values.insightScheduler.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n replicas: {{ .Values.insightScheduler.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"mission-control.name\" . }}\n component: {{ .Values.insightScheduler.name }}\n release: {{ .Release.Name }}\n template:\n metadata:\n name: {{ .Values.insightScheduler.name }}\n labels:\n app: {{ template \"mission-control.name\" . }}\n component: {{ .Values.insightScheduler.name }}\n release: {{ .Release.Name }}\n spec:\n serviceAccountName: {{ template \"mission-control.serviceAccountName\" . }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.imagePullSecrets }}\n {{- end }}\n initContainers:\n - name: init-data\n image: \"{{ .Values.initContainerImage }}\"\n command:\n - 'sh'\n - '-c'\n - >\n until nc -z -w 2 {{ .Release.Name }}-mongodb 27017 && echo mongodb ok;\n do sleep 2;\n done;\n sleep 10\n containers:\n - name: {{ .Values.insightScheduler.name }}\n image: {{ .Values.insightScheduler.image }}:{{ default .Chart.AppVersion .Values.insightScheduler.version }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n env:\n - name: CORE_URL\n value: 'http://{{ template \"insight-server.fullname\" . }}:{{ .Values.insightServer.internalHttpPort }}'\n - name: JFI_HOME\n value: '/var/cloudbox'\n - name: JFI_HOME_SCHEDULER\n value: '/var/cloudbox/scheduler'\n - name: MONGO_URL\n value: '{{ .Release.Name }}-mongodb:27017'\n - name: MONGODB_USERNAME\n value: '{{ .Values.mongodb.db.insightUser }}'\n - name: MONGODB_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mission-control.fullname\" . }}-mongodb-cred\n key: insightPassword\n - name: MONGODB_ADMIN_USERNAME\n value: '{{ .Values.mongodb.db.adminUser }}'\n - name: MONGODB_ADMIN_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mission-control.fullname\" . }}-mongodb-cred\n key: adminPassword\n - name: JFMC_SCHEDULER_MONGO_DB\n value: '{{ .Values.mongodb.db.insightSchedulerDb }}'\n ports:\n - containerPort: {{ .Values.insightScheduler.internalPort }}\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /schedulerservice/api/status\n port: 8080\n initialDelaySeconds: 120\n periodSeconds: 10\n readinessProbe:\n httpGet:\n path: /schedulerservice/api/status\n port: 8080\n initialDelaySeconds: 120\n periodSeconds: 10\n resources:\n{{ toYaml .Values.insightScheduler.resources | indent 10 }}\n {{- with .Values.insightScheduler.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.insightScheduler.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.insightScheduler.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}",
"# insight-scheduler-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"insight-scheduler.fullname\" . }}\n labels:\n role: {{ .Values.insightScheduler.service.name }}\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n component: {{ .Values.insightScheduler.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n type: {{ .Values.insightScheduler.service.type }}\n ports:\n - name: http\n port: {{ .Values.insightScheduler.internalPort }}\n targetPort: {{ .Values.insightScheduler.externalPort }}\n protocol: TCP\n selector:\n app: {{ template \"mission-control.name\" . }}\n component: {{ .Values.insightScheduler.name }}\n release: {{ .Release.Name }}\n",
"# insight-server-deployment.yaml\napiVersion: apps/v1beta2\nkind: Deployment\nmetadata:\n name: {{ template \"insight-server.fullname\" . }}\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n component: {{ .Values.insightServer.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n replicas: {{ .Values.insightServer.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"mission-control.name\" . }}\n component: {{ .Values.insightServer.name }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"mission-control.name\" . }}\n component: {{ .Values.insightServer.name }}\n release: {{ .Release.Name }}\n spec:\n serviceAccountName: {{ template \"mission-control.serviceAccountName\" . }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.imagePullSecrets }}\n {{- end }}\n initContainers:\n - name: init-data\n image: \"{{ .Values.initContainerImage }}\"\n command:\n - 'sh'\n - '-c'\n - >\n until nc -z -w 2 {{ .Release.Name }}-mongodb 27017 && echo mongodb ok && \\\n nc -z -w 2 {{ .Release.Name }}-elasticsearch 9200 && echo elasticsearch ok;\n do sleep 2;\n done;\n sleep 10\n containers:\n - name: {{ .Values.insightServer.name }}\n image: {{ .Values.insightServer.image }}:{{ default .Chart.AppVersion .Values.insightServer.version }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n env:\n - name: CORE_URL\n value: 'http://{{ template \"insight-server.fullname\" . }}:{{ .Values.insightServer.internalHttpPort }}'\n - name: EXECUTOR_URL\n value: 'http://{{ template \"insight-executor.fullname\" . }}:{{ .Values.insightExecutor.internalPort }}/executorservice'\n - name: SCHEDULER_URL\n value: 'http://{{ template \"insight-scheduler.fullname\" . }}:{{ .Values.insightScheduler.internalPort }}/schedulerservice'\n - name: MONGO_URL\n value: '{{ .Release.Name }}-mongodb:27017'\n - name: MONGODB_USERNAME\n value: '{{ .Values.mongodb.db.insightUser }}'\n - name: MONGODB_PASSWORD\n value: '{{ .Values.mongodb.db.insightPassword }}'\n - name: MONGODB_ADMIN_USERNAME\n value: '{{ .Values.mongodb.db.adminUser }}'\n - name: MONGODB_ADMIN_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mission-control.fullname\" . }}-mongodb-cred\n key: adminPassword\n - name: JFMC_URL\n value: 'http://{{ template \"mission-control.fullname\" . 
}}:{{ .Values.missionControl.internalPort }}'\n - name: ELASTIC_SEARCH_URL\n value: 'http://{{ .Release.Name }}-elasticsearch:9200'\n - name: ELASTIC_CLUSTER_NAME\n value: '{{ .Values.elasticsearch.env.clusterName }}'\n - name: ELASTIC_SEARCH_USERNAME\n value: '{{ .Values.elasticsearch.env.esUsername }}'\n - name: ELASTIC_SEARCH_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ .Release.Name }}-elasticsearch\n key: esPassword\n - name: ELASTIC_COMMUNICATION_NODE_URL\n value: '{{ .Release.Name }}-elasticsearch:9300'\n - name: JFI_HOME\n value: '/var/cloudbox'\n - name: JFI_HOME_CORE\n value: '/var/cloudbox/core'\n - name: JFMC_MISSION_CONTROL_CERT\n value: \"/var/cloudbox/core/_MASTER_/data/contexts/security/jfmc.crt\"\n - name: JFMC_INSIGHT_SERVER_CERT\n value: \"/var/cloudbox/core/_MASTER_/data/contexts/security/insight.crt\"\n - name: JFMC_INSIGHT_SERVER_KEY\n value: \"/var/cloudbox/core/_MASTER_/data/contexts/security/insight.key\"\n - name: JFMC_INSIGHT_SERVER_PORT\n value: \"{{ .Values.insightServer.internalHttpPort }}\"\n - name: JFMC_INSIGHT_SERVER_SSL_PORT\n value: \"{{ .Values.insightServer.internalHttpsPort }}\"\n ports:\n - containerPort: {{ .Values.insightServer.internalHttpPort }}\n protocol: TCP\n - containerPort: {{ .Values.insightServer.internalHttpsPort }}\n protocol: TCP\n volumeMounts:\n - name: mission-control-certs\n mountPath: /var/cloudbox/core/_MASTER_/data/contexts/security/insight.key\n subPath: insight.key\n - name: mission-control-certs\n mountPath: /var/cloudbox/core/_MASTER_/data/contexts/security/insight.crt\n subPath: insight.crt\n - name: mission-control-certs\n mountPath: /var/cloudbox/core/_MASTER_/data/contexts/security/jfmc.crt\n subPath: jfmc.crt\n livenessProbe:\n httpGet:\n path: /api/status\n port: 8082\n initialDelaySeconds: 300\n periodSeconds: 10\n readinessProbe:\n httpGet:\n path: /api/status\n port: 8082\n initialDelaySeconds: 300\n periodSeconds: 10\n resources:\n{{ toYaml .Values.insightServer.resources | indent 10 }}\n {{- with .Values.insightServer.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.insightServer.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.insightServer.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n volumes:\n - name: mission-control-certs\n secret:\n {{- if .Values.existingCertsSecret }}\n secretName: {{ .Values.existingCertsSecret }}\n {{- else }}\n secretName: {{ template \"mission-control.fullname\" . }}-certs\n {{- end }}\n",
"# insight-server-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"insight-server.fullname\" . }}\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n component: {{ .Values.insightServer.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n type: {{ .Values.insightServer.service.type }}\n ports:\n - name: http\n port: {{ .Values.insightServer.internalHttpPort }}\n targetPort: {{ .Values.insightServer.externalHttpPort }}\n protocol: TCP\n - name: https\n port: {{ .Values.insightServer.internalHttpsPort }}\n targetPort: {{ .Values.insightServer.externalHttpsPort }}\n protocol: TCP\n selector:\n app: {{ template \"mission-control.name\" . }}\n component: {{ .Values.insightServer.name }}\n release: {{ .Release.Name }}\n",
"# mission-control-certs.yaml\n{{- if (not .Values.existingCertsSecret) }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"mission-control.fullname\" . }}-certs\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ntype: Opaque\ndata:\n insight.key: {{ required \"A valid .Values.insightKey entry required!\" .Values.insightKey | b64enc | quote }}\n\n insight.crt: {{ required \"A valid .Values.insightCrt entry required!\" .Values.insightCrt | b64enc | quote }}\n\n jfmc.crt: {{ required \"A valid .Values.jfmcCrt entry required!\" .Values.jfmcCrt | b64enc | quote }}\n\n jfmc-keystore.jks-b64: {{ required \"A valid .Values.jfmcKeystore entry required!\" .Values.jfmcKeystore | b64enc | quote }}\n\n jfmc-truststore.jks-b64: {{ required \"A valid .Values.jfmcTruststore entry required!\" .Values.jfmcTruststore | b64enc | quote }}\n{{- end }}",
"# mission-control-deployment.yaml\napiVersion: apps/v1beta2\nkind: Deployment\nmetadata:\n name: {{ template \"mission-control.fullname\" . }}\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n component: {{ .Values.missionControl.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n replicas: {{ .Values.missionControl.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"mission-control.name\" . }}\n component: {{ .Values.missionControl.name }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"mission-control.name\" . }}\n component: {{ .Values.missionControl.name }}\n release: {{ .Release.Name }}\n spec:\n serviceAccountName: {{ template \"mission-control.serviceAccountName\" . }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.imagePullSecrets }}\n {{- end }}\n initContainers:\n - name: init-data\n image: \"{{ .Values.initContainerImage }}\"\n command:\n - 'sh'\n - '-c'\n - >\n until nc -z -w 2 {{ .Release.Name }}-mongodb 27017 && echo mongodb ok && \\\n nc -z -w 2 {{ .Release.Name }}-elasticsearch 9200 && echo elasticsearch ok;\n do sleep 2;\n done\n containers:\n - name: {{ .Values.missionControl.name }}\n image: {{ .Values.missionControl.image }}:{{ default .Chart.AppVersion .Values.missionControl.version }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n env:\n - name: SPRING_DATA_MONGODB_HOST\n value: '{{ .Release.Name }}-mongodb'\n - name: SPRING_DATA_MONGODB_PORT\n value: '27017'\n - name: SPRING_DATA_MONGODB_USERNAME\n value: '{{ .Values.mongodb.db.mcUser }}'\n - name: SPRING_DATA_MONGODB_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mission-control.fullname\" . }}-mongodb-cred\n key: mcPassword\n - name: INSIGHT_URL\n value: \"http://{{ template \"insight-server.fullname\" . }}:{{ .Values.insightServer.internalHttpPort }}\"\n - name: INSIGHT_SSL_URL\n value: \"https://{{ template \"insight-server.fullname\" . 
}}:{{ .Values.insightServer.internalHttpsPort }}\"\n - name: POD_RESTART_TIME\n value: \"{{ .Values.podRestartTime }}\"\n - name: SERVER_INTERNAL_SSL_KEY_STORE_PASSWORD\n value: \"18f85c331f5e3cd4\"\n - name: SERVER_INTERNAL_SSL_TRUST_STORE_PASSWORD\n value: \"18f85c331f5e3cd4\"\n - name: ARTIFACTORY_CLIENT_CONNECTIONTIMEOUT\n value: '20'\n - name: XRAY_CLIENT_CONNECTIONTIMEOUT\n value: '20'\n - name: JENKINS_CLIENT_CONNECTIONTIMEOUT\n value: '20'\n - name: GIT_CLIENT_CONNECTIONTIMEOUT\n value: '20'\n - name: INSIGHT_CLIENT_CONNECTIONTIMEOUT\n value: '20'\n - name: MC_URL\n value: \"{{ .Values.missionControl.missionControlUrl }}\"\n - name: JAVA_OPTIONS\n value: \"{{ .Values.missionControl.javaOpts.other }} {{- if .Values.missionControl.javaOpts.xms }}-Xms{{ .Values.missionControl.javaOpts.xms }}{{- end }} {{- if .Values.missionControl.javaOpts.xmx }}-Xmx{{ .Values.missionControl.javaOpts.xmx }} {{- end }}\"\n ports:\n - containerPort: {{ .Values.missionControl.internalPort }}\n protocol: TCP\n volumeMounts:\n - name: mission-control-data\n mountPath: {{ .Values.missionControl.persistence.mountPath | quote }}\n - name: mission-control-certs\n mountPath: /tmp/jfmc-keystore.jks-b64\n subPath: jfmc-keystore.jks-b64\n - name: mission-control-certs\n mountPath: /tmp/jfmc-truststore.jks-b64\n subPath: jfmc-truststore.jks-b64\n lifecycle:\n postStart:\n exec:\n command:\n - '/bin/sh'\n - '-c'\n - >\n until [ -f /tmp/jfmc-keystore.jks-b64 ] && [ -f /tmp/jfmc-truststore.jks-b64 ]; do sleep 1; done;\n mkdir -p /var/opt/jfrog/mission-control/etc/security;\n base64 -d /tmp/jfmc-keystore.jks-b64 > /var/opt/jfrog/mission-control/etc/security/jfmc-keystore.jks;\n base64 -d /tmp/jfmc-truststore.jks-b64 > /var/opt/jfrog/mission-control/etc/security/jfmc-truststore.jks\n resources:\n{{ toYaml .Values.missionControl.resources | indent 10 }}\n livenessProbe:\n httpGet:\n path: /api/v3/ping\n port: 8080\n periodSeconds: 10\n initialDelaySeconds: 240\n readinessProbe:\n httpGet:\n path: /api/v3/ping\n port: 8080\n periodSeconds: 10\n initialDelaySeconds: 240\n {{- with .Values.missionControl.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.missionControl.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.missionControl.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n volumes:\n - name: mission-control-data\n {{- if .Values.missionControl.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ if .Values.missionControl.persistence.existingClaim }}{{ .Values.missionControl.persistence.existingClaim }}{{ else }}{{ template \"mission-control.fullname\" . }}{{ end }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n - name: mission-control-certs\n secret:\n {{- if .Values.existingCertsSecret }}\n secretName: {{ .Values.existingCertsSecret }}\n {{- else }}\n secretName: {{ template \"mission-control.fullname\" . }}-certs\n {{- end }}\n",
"# mission-control-pvc.yaml\n{{- if and .Values.missionControl.persistence.enabled (not .Values.missionControl.persistence.existingClaim) }}\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: {{ template \"mission-control.fullname\" . }}\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n accessModes:\n - {{ .Values.missionControl.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.missionControl.persistence.size }}\n{{- if .Values.missionControl.persistence.storageClass }}\n{{- if (eq \"-\" .Values.missionControl.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.missionControl.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# mission-control-role.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n component: {{ .Values.missionControl.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"mission-control.fullname\" . }}\nrules:\n{{ toYaml .Values.rbac.role.rules }}\n{{- end }}\n",
"# mission-control-rolebinding.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n component: {{ .Values.missionControl.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"mission-control.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"mission-control.serviceAccountName\" . }}\nroleRef:\n kind: Role\n apiGroup: rbac.authorization.k8s.io\n name: {{ template \"mission-control.fullname\" . }}\n{{- end }}\n",
"# mission-control-serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n component: {{ .Values.missionControl.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"mission-control.serviceAccountName\" . }}\n{{- end }}\n",
"# mission-control-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"mission-control.fullname\" . }}\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n component: {{ .Values.missionControl.name }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n type: {{ .Values.missionControl.service.type }}\n ports:\n - name: http\n port: {{ .Values.missionControl.externalPort }}\n targetPort: {{ .Values.missionControl.internalPort }}\n protocol: TCP\n selector:\n app: {{ template \"mission-control.name\" . }}\n component: {{ .Values.missionControl.name }}\n release: {{ .Release.Name }}\n",
"# mongodb-secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"mission-control.fullname\" . }}-mongodb-cred\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ntype: Opaque\ndata:\n adminPassword: {{ required \"A valid .Values.mongodb.db.adminPassword entry required!\" .Values.mongodb.db.adminPassword | b64enc | quote }}\n mcPassword: {{ required \"A valid .Values.mongodb.db.mcPassword entry required!\" .Values.mongodb.db.mcPassword | b64enc | quote }}\n insightPassword: {{ required \"A valid .Values.mongodb.db.insightPassword entry required!\" .Values.mongodb.db.insightPassword | b64enc | quote }}\n",
"# mongodb-setup-scripts.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"mission-control.fullname\" . }}-setup-script\n labels:\n app: {{ template \"mission-control.name\" . }}\n chart: {{ template \"mission-control.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ndata:\n setup.sh: |\n #!/bin/sh\n # Setup script to create MongoDB users\n\n errorExit () {\n echo; echo \"ERROR: $1\"; echo; exit 1\n }\n\n echo \"Waiting for mongodb to come up\"\n until mongo --host {{ .Release.Name }}-mongodb --port 27017 --eval \"db.adminCommand('ping')\" > /dev/null 2>&1; do\n echo \"Waiting for db availability\"\n sleep 1\n done\n echo \"DB ready. Configuring...\"\n mongo --eval \"var adminPassword = '$MONGODB_ADMIN_PASSWORD', mcPassword = '$MONGODB_MC_PASSWORD', insightPassword = '$MONGODB_INSIGHT_PASSWORD';\" --host {{ .Release.Name }}-mongodb --port 27017 /scripts/createMongoDBUsers.js || errorExit \"DB user setup failed\"\n echo \"DB config done\"\n\n createMongoDBUsers.js: |\n // JFrog Mission-Control MongoDB Bootstrap\n\n // Default admin user\n var adminUser = {\n user: \"{{ .Values.mongodb.db.adminUser }}\",\n pwd: adminPassword\n };\n\n // Create the admin user\n adminUser.roles = [\"root\"];\n adminUser.customData = {\n createdBy: \"JFrog Mission-Control installer\"\n };\n db.getSiblingDB(adminUser.user).auth(adminUser.user, adminUser.pwd) || db.getSiblingDB(adminUser.user).createUser(adminUser);\n\n // Default mc user\n var jfmcUser = {\n user: \"{{ .Values.mongodb.db.mcUser }}\",\n pwd: mcPassword,\n roles: [\"dbOwner\"],\n customData: {\n createdBy: \"JFrog Mission-Control installer\"\n }\n };\n\n // Default insight-server user\n var jiUser = {\n user: \"{{ .Values.mongodb.db.insightUser }}\",\n pwd: insightPassword,\n roles: [\"dbOwner\"],\n customData: {\n createdBy: \"JFrog Mission-Control installer\"\n }\n };\n\n // Authenticating as admin to create mc user\n var loginOutput = db.getSiblingDB(adminUser.user).auth(adminUser.user, adminUser.pwd);\n\n // Check if user exists before creation\n function createUserDB(dbName, dbUser) {\n db.getSiblingDB(dbName).getUser(dbUser.user) || db.getSiblingDB(dbName).createUser(dbUser);\n }\n\n createUserDB(\"{{ .Values.mongodb.db.mcUser }}\", jfmcUser);\n createUserDB(\"insight_CUSTOM_\", jiUser);\n createUserDB(\"insight_team\", jiUser);\n createUserDB(\"{{ .Values.mongodb.db.insightSchedulerDb }}\", jiUser)\n\n\n\n"
] | # Default values for elasticsearch.
# This is a YAML-formatted file.
# Beware when changing values here. You should know what you are doing!
# Access the values with {{ .Values.key.subkey }}
# Common
initContainerImage: "alpine:3.6"
imagePullPolicy: IfNotPresent
imagePullSecrets:
replicaCount: 1
image:
repository: "docker.bintray.io/elasticsearch/elasticsearch"
version: 6.1.1
resources:
requests:
memory: "2Gi"
cpu: "200m"
limits:
memory: "3Gi"
cpu: "250m"
env:
clusterName: "es-cluster"
networkHost: "0.0.0.0"
transportHost: "0.0.0.0"
xpackSecurityEnabled: false
esUrl: "http://localhost:9200"
esUsername: "elastic"
esPassword: "changeme"
maxMapCount: 262144
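## Elasticsearch's bootstrap checks require the kernel setting vm.max_map_count
## to be at least this value on every node; presumably the chart's init container
## applies it, equivalent to running:
##   sysctl -w vm.max_map_count=262144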
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
mountPath: "/usr/share/elasticsearch/data"
accessMode: ReadWriteOnce
size: 100Gi
## elasticsearch data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
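## For illustration only (the class name below is hypothetical), dynamic
## provisioning can be pinned to a specific class instead of the cluster default:
# storageClass: "fast-ssd"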
service:
type: ClusterIP
annotations: {}
externalHttpPort: 9200
internalHttpPort: 9200
externalTransportPort: 9300
internalTransportPort: 9300
|
nfs-server-provisioner | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"nfs-provisioner.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"nfs-provisioner.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"nfs-provisioner.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"nfs-provisioner.provisionerName\" -}}\n{{- if .Values.storageClass.provisionerName -}}\n{{- printf .Values.storageClass.provisionerName -}}\n{{- else -}}\ncluster.local/{{ template \"nfs-provisioner.fullname\" . -}}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{ if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"nfs-provisioner.fullname\" . }}\n labels:\n app: {{ template \"nfs-provisioner.name\" . }}\n chart: {{ template \"nfs-provisioner.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nrules:\n - apiGroups: [\"\"]\n resources: [\"persistentvolumes\"]\n verbs: [\"get\", \"list\", \"watch\", \"create\", \"delete\"]\n - apiGroups: [\"\"]\n resources: [\"persistentvolumeclaims\"]\n verbs: [\"get\", \"list\", \"watch\", \"update\"]\n - apiGroups: [\"storage.k8s.io\"]\n resources: [\"storageclasses\"]\n verbs: [\"get\", \"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"events\"]\n verbs: [\"list\", \"watch\", \"create\", \"update\", \"patch\"]\n - apiGroups: [\"\"]\n resources: [\"services\", \"endpoints\"]\n verbs: [\"get\"]\n - apiGroups: [\"extensions\"]\n resources: [\"podsecuritypolicies\"]\n resourceNames: [\"nfs-provisioner\"]\n verbs: [\"use\"]\n - apiGroups: [\"\"]\n resources: [\"endpoints\"]\n verbs: [\"get\", \"list\", \"watch\", \"create\", \"delete\", \"update\", \"patch\"]\n{{- end -}}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app: {{ template \"nfs-provisioner.name\" . }}\n chart: {{ template \"nfs-provisioner.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"nfs-provisioner.fullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"nfs-provisioner.fullname\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"nfs-provisioner.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"nfs-provisioner.fullname\" . }}\n labels:\n app: {{ template \"nfs-provisioner.name\" . }}\n chart: {{ template \"nfs-provisioner.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.nfsPort }}\n targetPort: nfs\n protocol: TCP\n name: nfs\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nfsNodePort))) }}\n nodePort: {{ .Values.service.nfsNodePort }}\n {{- end }}\n - port: {{ .Values.service.nfsPort }}\n targetPort: nfs-udp\n protocol: UDP\n name: nfs-udp\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nfsNodePort))) }}\n nodePort: {{ .Values.service.nfsNodePort }}\n {{- end }}\n - port: {{ .Values.service.nlockmgrPort }}\n targetPort: nlockmgr\n protocol: TCP\n name: nlockmgr\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nlockmgrNodePort))) }}\n nodePort: {{ .Values.service.nlockmgrNodePort }}\n {{- end }}\n - port: {{ .Values.service.nlockmgrPort }}\n targetPort: nlockmgr-udp\n protocol: UDP\n name: nlockmgr-udp\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nlockmgrPort))) }}\n nodePort: {{ .Values.service.nlockmgrNodePort }}\n {{- end }}\n - port: {{ .Values.service.mountdPort }}\n targetPort: mountd\n protocol: TCP\n name: mountd\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.mountdNodePort))) }}\n nodePort: {{ .Values.service.mountdNodePort }}\n {{- end }}\n - port: {{ .Values.service.mountdPort }}\n targetPort: mountd-udp\n protocol: UDP\n name: mountd-udp\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.mountdNodePort))) }}\n nodePort: {{ .Values.service.mountdNodePort }}\n {{- end }}\n - port: {{ .Values.service.rquotadPort }}\n targetPort: rquotad\n protocol: TCP\n name: rquotad\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.rquotadNodePort))) }}\n nodePort: {{ .Values.service.rquotadNodePort }}\n {{- end }}\n - port: {{ .Values.service.rquotadPort }}\n targetPort: rquotad-udp\n protocol: UDP\n name: rquotad-udp\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.rquotadNodePort))) }}\n nodePort: {{ .Values.service.rquotadNodePort }}\n {{- end }}\n - port: {{ .Values.service.rpcbindPort }}\n targetPort: rpcbind\n protocol: TCP\n name: rpcbind\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.rpcbindNodePort))) }}\n nodePort: {{ .Values.service.rpcbindNodePort }}\n {{- end }}\n - port: {{ .Values.service.rpcbindPort }}\n targetPort: rpcbind-udp\n protocol: UDP\n name: rpcbind-udp\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.rpcbindNodePort))) }}\n nodePort: {{ .Values.service.rpcbindNodePort }}\n {{- end }}\n - port: {{ .Values.service.statdPort }}\n targetPort: statd\n protocol: TCP\n name: statd\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.statdPort))) }}\n nodePort: {{ .Values.service.statdNodePort }}\n {{- end }}\n - port: {{ .Values.service.statdPort }}\n targetPort: statd-udp\n protocol: UDP\n name: statd-udp\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.statdPort))) }}\n nodePort: {{ .Values.service.statdNodePort }}\n {{- end }}\n {{- if .Values.service.externalIPs }}\n externalIPs:\n {{- toYaml 
.Values.service.externalIPs | nindent 4 }}\n {{- end }}\n selector:\n app: {{ template \"nfs-provisioner.name\" . }}\n release: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{- if .Values.rbac.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"nfs-provisioner.name\" . }}\n chart: {{ template \"nfs-provisioner.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"nfs-provisioner.fullname\" . }}\n{{- end -}}\n",
"# statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ template \"nfs-provisioner.fullname\" . }}\n labels:\n app: {{ template \"nfs-provisioner.name\" . }}\n chart: {{ template \"nfs-provisioner.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n # TODO: Investigate how/if nfs-provisioner can be scaled out beyond 1 replica\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"nfs-provisioner.name\" . }}\n release: {{ .Release.Name }}\n serviceName: {{ template \"nfs-provisioner.fullname\" . }}\n template:\n metadata:\n labels:\n app: {{ template \"nfs-provisioner.name\" . }}\n chart: {{ template \"nfs-provisioner.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n spec:\n # NOTE: This is 10 seconds longer than the default nfs-provisioner --grace-period value of 90sec\n terminationGracePeriodSeconds: 100\n serviceAccountName: {{ if .Values.rbac.create }}{{ template \"nfs-provisioner.fullname\" . }}{{ else }}{{ .Values.rbac.serviceAccountName | quote }}{{ end }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n {{- toYaml .Values.imagePullSecrets | nindent 8 }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: nfs\n containerPort: 2049\n protocol: TCP\n - name: nfs-udp\n containerPort: 2049\n protocol: UDP\n - name: nlockmgr\n containerPort: 32803\n protocol: TCP\n - name: nlockmgr-udp\n containerPort: 32803\n protocol: UDP\n - name: mountd\n containerPort: 20048\n protocol: TCP\n - name: mountd-udp\n containerPort: 20048\n protocol: UDP\n - name: rquotad\n containerPort: 875\n protocol: TCP\n - name: rquotad-udp\n containerPort: 875\n protocol: UDP\n - name: rpcbind\n containerPort: 111\n protocol: TCP\n - name: rpcbind-udp\n containerPort: 111\n protocol: UDP\n - name: statd\n containerPort: 662\n protocol: TCP\n - name: statd-udp\n containerPort: 662\n protocol: UDP\n securityContext:\n capabilities:\n add:\n - DAC_READ_SEARCH\n - SYS_RESOURCE\n args:\n - \"-provisioner={{ template \"nfs-provisioner.provisionerName\" . }}\"\n {{- range $key, $value := .Values.extraArgs }}\n - \"-{{ $key }}={{ $value }}\"\n {{- end }}\n env:\n - name: POD_IP\n valueFrom:\n fieldRef:\n fieldPath: status.podIP\n - name: SERVICE_NAME\n value: {{ template \"nfs-provisioner.fullname\" . }}\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n volumeMounts:\n - name: data\n mountPath: /export\n {{- with .Values.resources }}\n resources:\n {{- toYaml . | nindent 12 }}\n {{- end }}\n {{- with .Values.podSecurityContext }}\n securityContext:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n {{- toYaml . 
| nindent 8 }}\n {{- end }}\n\n {{- if not .Values.persistence.enabled }}\n volumes:\n - name: data\n emptyDir: {}\n {{- end }}\n\n {{- if .Values.persistence.enabled }}\n volumeClaimTemplates:\n - metadata:\n name: data\n spec:\n accessModes: [ {{ .Values.persistence.accessMode | quote }} ]\n {{- if .Values.persistence.storageClass }}\n {{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n {{- else }}\n storageClassName: {{ .Values.persistence.storageClass | quote }}\n {{- end }}\n {{- end }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{- end }}\n",
"# storageclass.yaml\n{{ if .Values.storageClass.create -}}\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\n name: {{ .Values.storageClass.name }}\n labels:\n app: {{ template \"nfs-provisioner.name\" . }}\n chart: {{ template \"nfs-provisioner.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n {{- if .Values.storageClass.defaultClass }}\n annotations:\n storageclass.kubernetes.io/is-default-class: \"true\"\n {{- end }}\nprovisioner: {{ template \"nfs-provisioner.provisionerName\" . }}\nreclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}\n{{ if .Values.storageClass.allowVolumeExpansion }}\nallowVolumeExpansion: {{ .Values.storageClass.allowVolumeExpansion }}\n{{ end }}\n{{- with .Values.storageClass.parameters }}\nparameters:\n{{- toYaml . | nindent 2 }}\n{{- end }}\n{{- with .Values.storageClass.mountOptions }}\nmountOptions:\n{{- toYaml . | nindent 2 }}\n{{- end }}\n{{ end -}}\n"
] | # Default values for nfs-provisioner.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
# imagePullSecrets:
image:
repository: quay.io/kubernetes_incubator/nfs-provisioner
tag: v2.3.0
pullPolicy: IfNotPresent
# For a list of available arguments
# Please see https://github.com/kubernetes-incubator/external-storage/blob/master/nfs/docs/deployment.md#arguments
extraArgs: {}
# device-based-fsids: false
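# Each key/value under extraArgs is rendered as a "-key=value" flag on the
# provisioner container; an illustrative combination (values are examples only):
# extraArgs:
#   device-based-fsids: "false"
#   grace-period: "90"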
service:
type: ClusterIP
nfsPort: 2049
nlockmgrPort: 32803
mountdPort: 20048
rquotadPort: 875
rpcbindPort: 111
statdPort: 662
# nfsNodePort:
# nlockmgrNodePort:
# mountdNodePort:
# rquotadNodePort:
# rpcbindNodePort:
# statdNodePort:
externalIPs: []
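# Illustrative NodePort setup (port numbers are hypothetical); each *NodePort
# value is only applied when service.type is NodePort:
# service:
#   type: NodePort
#   nfsNodePort: 32049
#   mountdNodePort: 32048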
persistence:
enabled: false
## Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 1Gi
## For creating the StorageClass automatically:
storageClass:
create: true
## Set a provisioner name. If unset, a name will be generated.
# provisionerName:
## Set StorageClass as the default StorageClass
## Ignored if storageClass.create is false
defaultClass: false
## Set a StorageClass name
## Ignored if storageClass.create is false
name: nfs
# Set to false or null to omit allowVolumeExpansion from the StorageClass
allowVolumeExpansion: true
## StorageClass parameters
parameters: {}
mountOptions:
- vers=3
## ReclaimPolicy field of the class, which can be either Delete or Retain
reclaimPolicy: Delete
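## For reference, with the defaults above the rendered StorageClass looks roughly
## like this (the provisioner name derives from the release fullname, shown here
## with a hypothetical release):
# kind: StorageClass
# apiVersion: storage.k8s.io/v1
# metadata:
#   name: nfs
# provisioner: cluster.local/my-release-nfs-server-provisioner
# reclaimPolicy: Delete
# allowVolumeExpansion: true
# mountOptions:
#   - vers=3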
## For RBAC support:
rbac:
create: true
## Ignored if rbac.create is true
##
serviceAccountName: default
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
|
factorio | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"factorio.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"factorio.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"factorio.fullname\" . }}\n labels:\n app: {{ template \"factorio.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"factorio.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"factorio.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n spec:\n containers:\n - name: {{ template \"factorio.fullname\" . }}\n image: \"{{ .Values.image }}:{{ .Values.imageTag }}\"\n imagePullPolicy: Always\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n env:\n - name: FACTORIO_SERVER_NAME\n value: {{ .Values.factorioServer.name | quote }}\n - name: FACTORIO_DESCRIPTION\n value: {{ .Values.factorioServer.description | quote }}\n - name: FACTORIO_PORT\n value: {{ .Values.factorioServer.port | quote }}\n - name: FACTORIO_MAX_PLAYERS\n value: {{ .Values.factorioServer.maxPlayers | quote }}\n - name: FACTORIO_IS_PUBLIC\n value: {{ .Values.factorioServer.isPublic | quote }}\n - name: FACTORIO_REQUIRE_USER_VERIFICATION\n value: {{ .Values.factorioServer.verifyIdentity | quote }}\n - name: FACTORIO_ALLOW_COMMANDS\n value: {{ .Values.factorioServer.allowCommands | quote }}\n - name: FACTORIO_NO_AUTO_PAUSE\n value: {{ .Values.factorioServer.noAutoPause | quote }}\n - name: FACTORIO_AUTOSAVE_INTERVAL\n value: {{ .Values.factorioServer.autosave.interval | quote }}\n - name: FACTORIO_AUTOSAVE_SLOTS\n value: {{ .Values.factorioServer.autosave.slots | quote }}\n\n {{- if .Values.factorioServer.password }}\n - name: FACTORIO_GAME_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"factorio.fullname\" . }}\n key: server-password\n {{- end }}\n\n {{- if .Values.factorioServer.isPublic }}\n - name: FACTORIO_USER_USERNAME\n valueFrom:\n secretKeyRef:\n name: {{ template \"factorio.fullname\" . }}\n key: factorio-username\n - name: FACTORIO_USER_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"factorio.fullname\" . }}\n key: factorio-password\n {{- end }}\n\n {{- if .Values.factorioServer.rcon.enabled }}\n - name: FACTORIO_RCON_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"factorio.fullname\" . }}\n key: rcon-password\n {{- end }}\n\n ports:\n - name: factorio\n containerPort: 34197\n protocol: UDP\n {{- if .Values.factorioServer.rcon.enabled }}\n - name: rcon\n containerPort: {{ .Values.factorioServer.rcon.port }}\n protocol: TCP\n {{- end }}\n volumeMounts:\n - name: saves\n mountPath: /opt/factorio/saves\n - name: mods\n mountPath: /opt/factorio/mods\n volumes:\n - name: saves\n {{- if .Values.persistence.savedGames.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"factorio.fullname\" . }}-savedgames\n {{- else }}\n emptyDir: {}\n {{- end }}\n - name: mods\n {{- if .Values.persistence.mods.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"factorio.fullname\" . }}-mods\n {{- else }}\n emptyDir: {}\n {{- end }}\n",
"# factorio-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"factorio.fullname\" . }}\n labels:\n app: {{ template \"factorio.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.factorioServer.serviceType }}\n ports:\n - name: factorio\n port: {{ .Values.factorioServer.port | int }}\n targetPort: factorio\n protocol: UDP\n selector:\n app: {{ template \"factorio.fullname\" . }}\n",
"# mods-pvc.yaml\n{{- if .Values.persistence.mods.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"factorio.fullname\" . }}-mods\n labels:\n app: {{ template \"factorio.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - ReadWriteOnce\n resources:\n requests:\n storage: {{ .Values.persistence.mods.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# rcon-svc.yaml\n{{- if default \"\" .Values.factorioServer.rcon.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: \"{{ template \"factorio.fullname\" . }}-rcon\"\n labels:\n app: {{ template \"factorio.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.factorioServer.rcon.serviceType }}\n ports:\n - name: rcon\n port: {{ .Values.factorioServer.rcon.port }}\n targetPort: rcon\n protocol: TCP\n selector:\n app: {{ template \"factorio.fullname\" . }}\n{{- end }}\n",
"# saves-pvc.yaml\n{{- if .Values.persistence.savedGames.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"factorio.fullname\" . }}-savedgames\n labels:\n app: {{ template \"factorio.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - ReadWriteOnce\n resources:\n requests:\n storage: {{ .Values.persistence.savedGames.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"factorio.fullname\" . }}\n labels:\n app: {{ template \"factorio.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n server-password: {{ default \"\" .Values.factorioServer.password | b64enc | quote }}\n rcon-password: {{ default \"\" .Values.factorioServer.rcon.password | b64enc | quote }}\n factorio-username: {{ default \"\" .Values.factorio.user.username | b64enc | quote }}\n factorio-password: {{ default \"\" .Values.factorio.user.password | b64enc | quote }}\n"
] | # Factorio image version
# ref: https://quay.io/repository/games_on_k8s/factorio?tab=tags
image: quay.io/games_on_k8s/factorio
imageTag: 0.15.39
replicaCount: 1
# Configure resource requests and limits
# ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources:
requests:
memory: 512Mi
cpu: 500m
# Most of these map to environment variables. See docker-factorio for details:
# https://github.com/games-on-k8s/docker-factorio/blob/master/README.md#environment-variable-reference
factorioServer:
name: Kubernetes Server
description: Factorio running on Kubernetes
port: 34197
# Lock this server down with a password.
# password: change.me
maxPlayers: 255
# Publishes this server in the server browser if true.
# You'll want to set factorio.user below if true, as it becomes required.
isPublic: false
verifyIdentity: false
# Allows or disallows console commands. Must be one of: `true`, `false`, or `admins-only`.
allowCommands: admins-only
# Set to "true" to keep the server running (disable auto-pause) when nobody is connected.
noAutoPause: "false"
# You'll want to change this to NodePort if you are on AWS.
serviceType: LoadBalancer
autosave:
# Auto-save interval in minutes.
interval: 2
slots: 3
rcon:
enabled: false
port: 27015
# Empty value here enables an auto-generated password.
password: ""
serviceType: LoadBalancer
factorio:
# Your factorio.com username/password is needed if factorioServer.isPublic is true.
user:
username: your.username
password: your.password
persistence:
## factorio data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
savedGames:
# Set this to false if you don't care to persist saved games between restarts.
enabled: true
size: 1Gi
mods:
enabled: false
size: 128Mi
|
coscale | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"coscale.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"coscale.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# daemonset.yaml\n{{- if and (and .Values.coscale.appId .Values.coscale.accessToken) .Values.coscale.templateId -}}\napiVersion: extensions/v1beta1\nkind: DaemonSet\nmetadata:\n name: {{ template \"coscale.name\" . }}\n labels:\n app: {{ template \"coscale.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n template:\n metadata:\n name: {{ template \"coscale.name\" . }}\n labels:\n app: {{ template \"coscale.name\" . }}\n spec:\n hostNetwork: true \n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n env:\n - name: APP_ID\n valueFrom:\n secretKeyRef:\n name: {{ template \"coscale.name\" . }}\n key: app_id\n - name: ACCESS_TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"coscale.name\" . }}\n key: access_token\n - name: TEMPLATE_ID\n value: {{ .Values.coscale.templateId | quote }}\n volumeMounts:\n - name: dockersocket\n mountPath: /var/run/docker.sock\n - name: hostroot\n mountPath: /host\n readOnly: true\n volumes:\n - hostPath:\n path: /var/run/docker.sock\n name: dockersocket\n - hostPath:\n path: /\n name: hostroot\n{{ end }}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"coscale.name\" . }}\n labels:\n app: {{ template \"coscale.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n app_id : {{ default \"MISSING\" .Values.coscale.appId | b64enc | quote }}\n access_token : {{ default \"MISSING\" .Values.coscale.accessToken | b64enc | quote }}"
] | # Default values for CoScale Helm package.
image:
repository: "coscale/coscale-agent"
tag: 3.16.0
pullPolicy: "IfNotPresent"
coscale:
# Required: You need a CoScale AppId before running agents.
appId: ""
# Required: You need a CoScale AccessToken before running agents.
accessToken: ""
# Required: You need a CoScale TemplateId before running agents.
templateId: ""
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 256m
memory: 512Mi
|
seq | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"seq.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"seq.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"seq.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"deployment.apiVersion\" -}}\n{{- if semverCompare \">=1.9-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"apps/v1\" -}}\n{{- else -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for ingress.\n*/}}\n{{- define \"ingress.apiVersion\" -}}\n{{- if semverCompare \">=1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"networking.k8s.io/v1beta1\" -}}\n{{- else -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use - only used when podsecuritypolicy is also enabled\n*/}}\n{{- define \"seq.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (printf \"%s\" (include \"seq.fullname\" .)) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiGroup for PodSecurityPolicy.\n*/}}\n{{- define \"podSecurityPolicy.apiGroup\" -}}\n{{- if semverCompare \">=1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"policy\" -}}\n{{- else -}}\n{{- print \"extensions\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for podSecurityPolicy.\n*/}}\n{{- define \"podSecurityPolicy.apiVersion\" -}}\n{{- if semverCompare \">=1.10-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"policy/v1beta1\" -}}\n{{- else -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"seq.fullname\" . }}\n labels:\n app: {{ template \"seq.name\" . }}\n chart: {{ template \"seq.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"seq.name\" . }}\n release: {{ .Release.Name }}\n strategy:\n type: Recreate\n template:\n metadata:\n labels:\n app: {{ template \"seq.name\" . }}\n release: {{ .Release.Name }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n - name: \"ACCEPT_EULA\"\n value: \"{{ .Values.acceptEULA }}\"\n{{- if .Values.baseURI }}\n - name: \"BASE_URI\"\n value: \"{{ .Values.baseURI }}\"\n{{- end }}\n - name: \"SEQ_CACHE_SYSTEMRAMTARGET\"\n value: \"{{ .Values.cache.targetSize }}\"\n ports:\n - name: ingestion\n containerPort: 5341\n protocol: TCP\n - name: ui\n containerPort: 80\n protocol: TCP\n{{- if .Values.podSecurityPolicy.create }}\n securityContext:\n runAsUser: 0\n capabilities:\n add:\n - NET_BIND_SERVICE\n{{- end }}\n{{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /\n port: ui\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n{{- end }}\n{{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /\n port: ui\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n{{- end }}\n{{- if .Values.startupProbe.enabled }}\n startupProbe:\n httpGet:\n path: /\n port: ui\n failureThreshold: {{ .Values.startupProbe.failureThreshold }}\n periodSeconds: {{ .Values.startupProbe.periodSeconds }}\n{{- end }}\n volumeMounts:\n - name: seq-data\n mountPath: {{ .Values.persistence.path }}\n subPath: {{ .Values.persistence.subPath }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n{{- if .Values.gelf.enabled }}\n - name: {{ .Chart.Name }}-gelf\n image: \"{{ .Values.gelf.image.repository }}:{{ .Values.gelf.image.tag }}\"\n imagePullPolicy: {{ .Values.gelf.image.pullPolicy }}\n env:\n - name: \"SEQ_ADDRESS\"\n value: \"http://{{ template \"seq.fullname\" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.ingestion.service.port }}\"\n - name: \"SEQ_API_KEY\"\n value: \"{{ .Values.gelf.apiKey }}\"\n - name: \"GELF_ADDRESS\"\n value: \"{{ .Values.gelf.service.protocol | lower }}://0.0.0.0:12201\"\n - name: \"GELF_ENABLE_DIAGNOSTICS\"\n value: \"True\"\n ports:\n - name: gelf\n containerPort: 12201\n protocol: {{ .Values.gelf.service.protocol }}\n securityContext:\n runAsUser: 0\n capabilities:\n add:\n - NET_BIND_SERVICE\n{{- end }}\n{{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . 
| indent 8 }}\n {{- end }}\n serviceAccountName: \"{{ template \"seq.serviceAccountName\" . }}\"\n volumes:\n - name: seq-data\n{{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.existingClaim | default (include \"seq.fullname\" .) }}\n{{- else }}\n emptyDir: {}\n{{- end -}}\n",
"# ingress.yaml\n{{- if or .Values.ui.ingress.enabled .Values.ingestion.ingress.enabled }}\n{{- $serviceName := include \"seq.fullname\" . -}}\napiVersion: {{ template \"ingress.apiVersion\" . }}\nkind: Ingress\nmetadata:\n name: {{ $serviceName }}\n labels:\n app: {{ template \"seq.name\" . }}\n chart: {{ template \"seq.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.ingress.labels }}\n{{ toYaml .Values.ingress.labels | indent 4 }}\n{{- end }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . | quote }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{ if .Values.ui.ingress.enabled }}\n {{- $uiPath := .Values.ui.ingress.path -}}\n {{- $uiPort := .Values.ui.service.port -}}\n {{- range .Values.ui.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $uiPath }}\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $uiPort }}\n {{- end }}\n {{- end }}\n {{ if .Values.ingestion.ingress.enabled }}\n {{- $ingestionPath := .Values.ingestion.ingress.path -}}\n {{- $ingestionPort := .Values.ingestion.service.port -}}\n {{- range .Values.ingestion.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingestionPath }}\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $ingestionPort }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# psp.yaml\n{{- if .Values.podSecurityPolicy.create -}}\napiVersion: {{ template \"podSecurityPolicy.apiVersion\" . }}\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"seq.fullname\" . }}\n labels:\n app: {{ template \"seq.name\" . }}\n chart: {{ template \"seq.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'\n apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'\n seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'\n apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'\nspec:\n allowedCapabilities:\n - NET_BIND_SERVICE\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n # This is redundant with non-root + disallow privilege escalation,\n # but we can provide it for defense in depth.\n requiredDropCapabilities:\n - ALL\n # Allow core volume types.\n volumes:\n - 'secret'\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n # Require the container to run with root privileges.\n # Change to MustRunAsNonRoot => https://github.com/datalust/seq-tickets/issues/903\n rule: 'RunAsAny'\n seLinux:\n # This policy assumes the nodes are using AppArmor rather than SELinux.\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n{{- end }}\n",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"seq.fullname\" . }}\n labels:\n app: {{ template \"seq.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# role.yaml\n{{- if and .Values.rbac.create .Values.podSecurityPolicy.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n labels:\n app: {{ template \"seq.name\" . }}\n chart: {{ template \"seq.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n name: {{ template \"seq.fullname\" . }}\nrules:\n - apiGroups: ['{{ template \"podSecurityPolicy.apiGroup\" . }}']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames: [{{ template \"seq.fullname\" . }}]\n{{- end -}}\n",
"# rolebinding.yaml\n{{- if and .Values.rbac.create .Values.podSecurityPolicy.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n labels:\n app: {{ template \"seq.name\" . }}\n chart: {{ template \"seq.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n name: {{ template \"seq.fullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"seq.fullname\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"seq.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"seq.fullname\" . }}\n labels:\n app: {{ template \"seq.name\" . }}\n chart: {{ template \"seq.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.ui.service.port }}\n targetPort: ui\n protocol: TCP\n name: ui\n - port: {{ .Values.ingestion.service.port }}\n targetPort: ingestion\n protocol: TCP\n name: ingestion\n{{- if .Values.gelf.enabled }}\n - port: {{ .Values.gelf.service.port }}\n targetPort: gelf\n protocol: {{ .Values.gelf.service.protocol }}\n name: gelf\n{{- end }}\n selector:\n app: {{ template \"seq.name\" . }}\n release: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"seq.name\" . }}\n chart: {{ template \"seq.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n name: {{ template \"seq.serviceAccountName\" . }}\n{{- end }}"
] | # Default values for Seq.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: datalust/seq
tag: 2020
pullPolicy: IfNotPresent
# By passing the value Y in the ACCEPT_EULA environment variable,
# you are expressing that you have read and accepted the terms of the
# Seq End User License Agreement applicable to the Seq Docker image
# that you intend to use.
acceptEULA: "Y"
# Set this URL if you enable ingress and/or AAD authentication.
# Without this URL set to include HTTPS, Seq will try to set a login redirect
# URL with HTTP instead of HTTPS, but AAD's registration requires HTTPS.
# The result is that you'll get an error during login:
# AADSTS50011: The reply url specified in the request does not match the reply urls configured for the application
# baseURI: https://my.public.url/
# The complete Seq API and UI.
# This API can accept events and serve API requests.
ui:
service:
port: 80
ingress:
enabled: false
path: /
hosts: []
# The ingestion-only API.
# This API is a subset of ui that can only ingest events.
ingestion:
service:
port: 5341
ingress:
enabled: false
path: /
hosts: []
# Accept events in the GELF format and forward them to Seq.
gelf:
enabled: false
image:
repository: datalust/sqelf
tag: 2
pullPolicy: IfNotPresent
service:
port: 12201
# GELF can be ingested through either TCP or UDP
protocol: TCP
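# The deployment template also reads an optional gelf.apiKey (exposed to the forwarder as
# SEQ_API_KEY); it is not listed above, so add it at the gelf level if needed, e.g.:
# apiKey: "my-ingestion-api-key"  # hypothetical key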
service:
type: ClusterIP
ingress:
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We recommend uncommenting these and specifying an explicit memory limit that
# suits your workload.
# limits:
# memory: 256Mi
# requests:
# memory: 256Mi
cache:
# The fraction of RAM that the cache should try to fit within. Specifying a larger
# value may allow more events in RAM at the expense of potential instability.
# Setting it to `0` will disable the cache completely.
# 70% (`0.7`) is a good starting point for machines with up to ~8GB of RAM.
targetSize: 0.7
nodeSelector: {}
tolerations: []
affinity: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
persistence:
enabled: true
## The path the volume will be mounted at
path: /data
## The subdirectory of the volume to mount to; useful in dev environments or when sharing one PV across multiple services.
subPath: ""
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
## Seq data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
serviceAccount:
create: false
name:
## Enable RBAC
rbac:
create: false
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
create: false
securityContext:
privileged: true
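# For example, to enforce the bundled PodSecurityPolicy (a sketch; the Role and RoleBinding
# templates are only rendered when both rbac.create and podSecurityPolicy.create are true):
# serviceAccount:
#   create: true
# rbac:
#   create: true
# podSecurityPolicy:
#   create: true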
## Configure probe values
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
startupProbe:
enabled: true
failureThreshold: 30
periodSeconds: 10
|
prometheus-postgres-exporter | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-postgres-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"prometheus-postgres-exporter.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"prometheus-postgres-exporter.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"prometheus-postgres-exporter.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"prometheus-postgres-exporter.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n\n{{/*\nSet DATA_SOURCE_URI environment variable\n*/}}\n{{- define \"prometheus-postgres-exporter.data_source_uri\" -}}\n{{ printf \"%s:%s/%s?sslmode=%s\" .Values.config.datasource.host .Values.config.datasource.port .Values.config.datasource.database .Values.config.datasource.sslmode | quote }}\n{{- end }}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"prometheus-postgres-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-postgres-exporter.name\" . }}\n chart: {{ template \"prometheus-postgres-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n config.yaml: |\n{{ printf .Values.config.queries | indent 4 }}",
"# deployment.yaml\n{{- if and .Values.config.datasource.passwordSecret .Values.config.datasource.password -}}\n{{ fail (printf \"ERROR: only one of .Values.config.datasource.passwordSecret and .Values.config.datasource.password must be defined\") }}\n{{- end -}}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"prometheus-postgres-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-postgres-exporter.name\" . }}\n chart: {{ template \"prometheus-postgres-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"prometheus-postgres-exporter.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"prometheus-postgres-exporter.name\" . }}\n release: {{ .Release.Name }}\n{{- if .Values.podLabels }}\n{{ toYaml .Values.podLabels | trim | indent 8 }}\n{{- end }}\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n{{- if .Values.annotations }}\n{{ toYaml .Values.annotations | indent 8 }}\n{{- end }}\n spec:\n serviceAccountName: {{ template \"prometheus-postgres-exporter.serviceAccountName\" . }}\n containers:\n - name: {{ .Chart.Name }}\n args:\n - \"--extend.query-path=/etc/config.yaml\"\n {{- if .Values.config.disableDefaultMetrics }}\n - \"--disable-default-metrics\"\n {{- end }}\n {{- if .Values.config.disableSettingsMetrics }}\n - \"--disable-settings-metrics\"\n {{- end }}\n {{- if .Values.config.autoDiscoverDatabases }}\n - \"--auto-discover-databases\"\n {{- if .Values.config.excludeDatabases }}\n - \"--exclude-databases\"\n - {{ .Values.config.excludeDatabases | join \",\" }}\n {{- end }}\n {{- end }}\n env:\n {{- if .Values.config.datasourceSecret }}\n - name: DATA_SOURCE_NAME\n valueFrom:\n secretKeyRef:\n name: {{ .Values.config.datasourceSecret.name }}\n key: {{ .Values.config.datasourceSecret.key }}\n {{- else }}\n - name: DATA_SOURCE_URI\n value: {{ template \"prometheus-postgres-exporter.data_source_uri\" . }}\n - name: DATA_SOURCE_USER\n value: {{ .Values.config.datasource.user }}\n - name: DATA_SOURCE_PASS\n valueFrom:\n secretKeyRef:\n {{- if .Values.config.datasource.passwordSecret }}\n name: {{ .Values.config.datasource.passwordSecret.name }}\n key: {{ .Values.config.datasource.passwordSecret.key }}\n {{- else }}\n name: {{ template \"prometheus-postgres-exporter.fullname\" . }}\n key: data_source_password\n {{- end }}\n {{- end }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: http\n containerPort: {{ .Values.service.targetPort }}\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /\n port: http\n readinessProbe:\n httpGet:\n path: /\n port: http\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumeMounts:\n - name: queries\n mountPath: /etc/config.yaml\n subPath: config.yaml\n{{- with .Values.extraContainers }}\n{{ tpl . $ | indent 8 }}\n{{- end }}\n securityContext:\n{{ toYaml .Values.securityContext | indent 8 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n volumes:\n - configMap:\n defaultMode: 420\n name: {{ template \"prometheus-postgres-exporter.fullname\" . 
}}\n name: queries\n{{- with .Values.extraVolumes }}\n{{ tpl . $ | indent 6 }}\n{{- end }}\n",
"# podsecuritypolicy.yaml\n{{- if .Values.rbac.pspEnabled }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"prometheus-postgres-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-postgres-exporter.name\" . }}\n chart: {{ template \"prometheus-postgres-exporter.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'\n apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'\n seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'\n apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'\nspec:\n privileged: false\n allowPrivilegeEscalation: false\n requiredDropCapabilities:\n - ALL\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n rule: 'RunAsAny'\n seLinux:\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'RunAsAny'\n fsGroup:\n rule: 'RunAsAny'\n readOnlyRootFilesystem: false\n{{- end }}\n",
"# role.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n name: {{ template \"prometheus-postgres-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-postgres-exporter.name\" . }}\n chart: {{ template \"prometheus-postgres-exporter.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n{{- if .Values.rbac.pspEnabled }}\nrules:\n- apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames: [{{ template \"prometheus-postgres-exporter.fullname\" . }}]\n{{- end }}\n{{- end }}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n name: {{ template \"prometheus-postgres-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-postgres-exporter.name\" . }}\n chart: {{ template \"prometheus-postgres-exporter.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"prometheus-postgres-exporter.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"prometheus-postgres-exporter.serviceAccountName\" . }}\n{{- end -}}\n",
"# secrets.yaml\n{{- if .Values.config.datasource.password -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"prometheus-postgres-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-postgres-exporter.name\" . }}\n chart: {{ template \"prometheus-postgres-exporter.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ntype: Opaque\ndata:\n data_source_password: {{ .Values.config.datasource.password | b64enc }}\n{{- end -}}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"prometheus-postgres-exporter.fullname\" . }}\n {{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\n labels:\n app: {{ template \"prometheus-postgres-exporter.name\" . }}\n chart: {{ template \"prometheus-postgres-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.service.labels }}\n{{ toYaml .Values.service.labels | trim | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: {{ .Values.service.targetPort }}\n protocol: TCP\n name: {{ .Values.service.name }}\n selector:\n app: {{ template \"prometheus-postgres-exporter.name\" . }}\n release: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"prometheus-postgres-exporter.serviceAccountName\" . }}\n labels:\n app: {{ template \"prometheus-postgres-exporter.name\" . }}\n chart: {{ template \"prometheus-postgres-exporter.chart\" . }} \n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- end -}}\n ",
"# servicemonitor.yaml\n{{- if and ( .Capabilities.APIVersions.Has \"monitoring.coreos.com/v1\" ) ( .Values.serviceMonitor.enabled ) }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n{{- if .Values.serviceMonitor.labels }}\n labels:\n{{ toYaml .Values.serviceMonitor.labels | indent 4}}\n{{- end }}\n name: {{ template \"prometheus-postgres-exporter.fullname\" . }}\n{{- if .Values.serviceMonitor.namespace }}\n namespace: {{ .Values.serviceMonitor.namespace }}\n{{- end }}\nspec:\n endpoints:\n - targetPort: {{ .Values.service.name }}\n{{- if .Values.serviceMonitor.interval }}\n interval: {{ .Values.serviceMonitor.interval }}\n{{- end }}\n{{- if .Values.serviceMonitor.telemetryPath }}\n path: {{ .Values.serviceMonitor.telemetryPath }}\n{{- end }}\n{{- if .Values.serviceMonitor.timeout }}\n scrapeTimeout: {{ .Values.serviceMonitor.timeout }}\n{{- end }}\n jobLabel: {{ template \"prometheus-postgres-exporter.fullname\" . }}\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n selector:\n matchLabels:\n app: {{ template \"prometheus-postgres-exporter.name\" . }}\n release: {{ .Release.Name }}\n{{- end }}\n"
] | # Default values for prometheus-postgres-exporter.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: wrouesnel/postgres_exporter
tag: v0.8.0
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 80
targetPort: 9187
name: http
labels: {}
annotations: {}
serviceMonitor:
# When set to true, a ServiceMonitor is used to configure scraping
enabled: false
# Set the namespace the ServiceMonitor should be deployed in
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set the path to the exporter's telemetry endpoint
# telemetryPath: /metrics
# Set labels for the ServiceMonitor; use this to define your scrape label for the Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s
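# A minimal example, assuming a Prometheus Operator that selects ServiceMonitors labelled
# release: prometheus (adjust to your operator's selector):
# enabled: true
# interval: 30s
# labels:
#   release: prometheus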
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
rbac:
# Specifies whether RBAC resources should be created
create: true
# Specifies whether a PodSecurityPolicy should be created
pspEnabled: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
securityContext: {}
# The securityContext this Pod should use. See https://kubernetes.io/docs/concepts/policy/security-context/ for more.
# runAsUser: 65534
config:
datasource:
# Specify either datasource or datasourceSecret, but not both
host:
user: postgres
# Only one of password and passwordSecret can be specified
password: somepassword
# Specify passwordSecret if the DB password is stored in a secret.
passwordSecret: {}
# Secret name
# name:
# Password key inside secret
# key:
port: "5432"
database: ''
sslmode: disable
datasourceSecret: {}
# Specifies whether the datasource should be sourced from a secret value in the format: postgresql://login:password@hostname:port/dbname?sslmode=disable
# Multiple Postgres databases can be configured with comma-separated postgres connection strings
# Secret name
# name:
# Connection string key inside secret
# key:
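# Illustrative sketch (hypothetical secret name and key); the secret must contain the full connection string:
#   kubectl create secret generic pg-exporter-dsn \
#     --from-literal=dsn='postgresql://login:password@hostname:5432/dbname?sslmode=disable'
# datasourceSecret:
#   name: pg-exporter-dsn
#   key: dsn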
disableDefaultMetrics: false
disableSettingsMetrics: false
autoDiscoverDatabases: false
excludeDatabases: []
# These are the default queries that the exporter will run, extracted from: https://github.com/wrouesnel/postgres_exporter/blob/master/queries.yaml
queries: |-
pg_replication:
query: "SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp())) as lag"
master: true
metrics:
- lag:
usage: "GAUGE"
description: "Replication lag behind master in seconds"
pg_postmaster:
query: "SELECT pg_postmaster_start_time as start_time_seconds from pg_postmaster_start_time()"
master: true
metrics:
- start_time_seconds:
usage: "GAUGE"
description: "Time at which postmaster started"
pg_stat_user_tables:
query: "SELECT current_database() datname, schemaname, relname, seq_scan, seq_tup_read, idx_scan, idx_tup_fetch, n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd, n_live_tup, n_dead_tup, n_mod_since_analyze, COALESCE(last_vacuum, '1970-01-01Z'), COALESCE(last_vacuum, '1970-01-01Z') as last_vacuum, COALESCE(last_autovacuum, '1970-01-01Z') as last_autovacuum, COALESCE(last_analyze, '1970-01-01Z') as last_analyze, COALESCE(last_autoanalyze, '1970-01-01Z') as last_autoanalyze, vacuum_count, autovacuum_count, analyze_count, autoanalyze_count FROM pg_stat_user_tables"
metrics:
- datname:
usage: "LABEL"
description: "Name of current database"
- schemaname:
usage: "LABEL"
description: "Name of the schema that this table is in"
- relname:
usage: "LABEL"
description: "Name of this table"
- seq_scan:
usage: "COUNTER"
description: "Number of sequential scans initiated on this table"
- seq_tup_read:
usage: "COUNTER"
description: "Number of live rows fetched by sequential scans"
- idx_scan:
usage: "COUNTER"
description: "Number of index scans initiated on this table"
- idx_tup_fetch:
usage: "COUNTER"
description: "Number of live rows fetched by index scans"
- n_tup_ins:
usage: "COUNTER"
description: "Number of rows inserted"
- n_tup_upd:
usage: "COUNTER"
description: "Number of rows updated"
- n_tup_del:
usage: "COUNTER"
description: "Number of rows deleted"
- n_tup_hot_upd:
usage: "COUNTER"
description: "Number of rows HOT updated (i.e., with no separate index update required)"
- n_live_tup:
usage: "GAUGE"
description: "Estimated number of live rows"
- n_dead_tup:
usage: "GAUGE"
description: "Estimated number of dead rows"
- n_mod_since_analyze:
usage: "GAUGE"
description: "Estimated number of rows changed since last analyze"
- last_vacuum:
usage: "GAUGE"
description: "Last time at which this table was manually vacuumed (not counting VACUUM FULL)"
- last_autovacuum:
usage: "GAUGE"
description: "Last time at which this table was vacuumed by the autovacuum daemon"
- last_analyze:
usage: "GAUGE"
description: "Last time at which this table was manually analyzed"
- last_autoanalyze:
usage: "GAUGE"
description: "Last time at which this table was analyzed by the autovacuum daemon"
- vacuum_count:
usage: "COUNTER"
description: "Number of times this table has been manually vacuumed (not counting VACUUM FULL)"
- autovacuum_count:
usage: "COUNTER"
description: "Number of times this table has been vacuumed by the autovacuum daemon"
- analyze_count:
usage: "COUNTER"
description: "Number of times this table has been manually analyzed"
- autoanalyze_count:
usage: "COUNTER"
description: "Number of times this table has been analyzed by the autovacuum daemon"
pg_statio_user_tables:
query: "SELECT current_database() datname, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit FROM pg_statio_user_tables"
metrics:
- datname:
usage: "LABEL"
description: "Name of current database"
- schemaname:
usage: "LABEL"
description: "Name of the schema that this table is in"
- relname:
usage: "LABEL"
description: "Name of this table"
- heap_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from this table"
- heap_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in this table"
- idx_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from all indexes on this table"
- idx_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in all indexes on this table"
- toast_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from this table's TOAST table (if any)"
- toast_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in this table's TOAST table (if any)"
- tidx_blks_read:
usage: "COUNTER"
description: "Number of disk blocks read from this table's TOAST table indexes (if any)"
- tidx_blks_hit:
usage: "COUNTER"
description: "Number of buffer hits in this table's TOAST table indexes (if any)"
pg_database:
query: "SELECT pg_database.datname, pg_database_size(pg_database.datname) as size FROM pg_database"
master: true
cache_seconds: 30
metrics:
- datname:
usage: "LABEL"
description: "Name of the database"
- size_bytes:
usage: "GAUGE"
description: "Disk space used by the database"
pg_stat_statements:
query: "SELECT t2.rolname, t3.datname, queryid, calls, total_time / 1000 as total_time_seconds, min_time / 1000 as min_time_seconds, max_time / 1000 as max_time_seconds, mean_time / 1000 as mean_time_seconds, stddev_time / 1000 as stddev_time_seconds, rows, shared_blks_hit, shared_blks_read, shared_blks_dirtied, shared_blks_written, local_blks_hit, local_blks_read, local_blks_dirtied, local_blks_written, temp_blks_read, temp_blks_written, blk_read_time / 1000 as blk_read_time_seconds, blk_write_time / 1000 as blk_write_time_seconds FROM pg_stat_statements t1 join pg_roles t2 on (t1.userid=t2.oid) join pg_database t3 on (t1.dbid=t3.oid)"
master: true
metrics:
- rolname:
usage: "LABEL"
description: "Name of user"
- datname:
usage: "LABEL"
description: "Name of database"
- queryid:
usage: "LABEL"
description: "Query ID"
- calls:
usage: "COUNTER"
description: "Number of times executed"
- total_time_seconds:
usage: "COUNTER"
description: "Total time spent in the statement, in milliseconds"
- min_time_seconds:
usage: "GAUGE"
description: "Minimum time spent in the statement, in milliseconds"
- max_time_seconds:
usage: "GAUGE"
description: "Maximum time spent in the statement, in milliseconds"
- mean_time_seconds:
usage: "GAUGE"
description: "Mean time spent in the statement, in milliseconds"
- stddev_time_seconds:
usage: "GAUGE"
description: "Population standard deviation of time spent in the statement, in milliseconds"
- rows:
usage: "COUNTER"
description: "Total number of rows retrieved or affected by the statement"
- shared_blks_hit:
usage: "COUNTER"
description: "Total number of shared block cache hits by the statement"
- shared_blks_read:
usage: "COUNTER"
description: "Total number of shared blocks read by the statement"
- shared_blks_dirtied:
usage: "COUNTER"
description: "Total number of shared blocks dirtied by the statement"
- shared_blks_written:
usage: "COUNTER"
description: "Total number of shared blocks written by the statement"
- local_blks_hit:
usage: "COUNTER"
description: "Total number of local block cache hits by the statement"
- local_blks_read:
usage: "COUNTER"
description: "Total number of local blocks read by the statement"
- local_blks_dirtied:
usage: "COUNTER"
description: "Total number of local blocks dirtied by the statement"
- local_blks_written:
usage: "COUNTER"
description: "Total number of local blocks written by the statement"
- temp_blks_read:
usage: "COUNTER"
description: "Total number of temp blocks read by the statement"
- temp_blks_written:
usage: "COUNTER"
description: "Total number of temp blocks written by the statement"
- blk_read_time_seconds:
usage: "COUNTER"
description: "Total time the statement spent reading blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)"
- blk_write_time_seconds:
usage: "COUNTER"
description: "Total time the statement spent writing blocks, in milliseconds (if track_io_timing is enabled, otherwise zero)"
nodeSelector: {}
tolerations: []
affinity: {}
annotations: {}
podLabels: {}
# Additional sidecar containers, e.g. for a database proxy, such as Google's cloudsql-proxy
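# A sketch of a Cloud SQL proxy sidecar (hypothetical instance name). The value is a YAML list
# that the Deployment template renders under the pod's containers, e.g.:
# extraContainers: |
#   - name: cloudsql-proxy
#     image: gcr.io/cloudsql-docker/gce-proxy:1.19.1
#     command:
#       - /cloud_sql_proxy
#       - -instances=<project>:<region>:<instance>=tcp:5432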
extraContainers: |
# Additional volumes, e.g. for secrets used in an extraContainer
extraVolumes: |
|
hlf-couchdb | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"hlf-couchdb.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"hlf-couchdb.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"hlf-couchdb.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{- /*\nCredit: @technosophos\nhttps://github.com/technosophos/common-chart/\nlabels.standard prints the standard Helm labels.\nThe standard labels are frequently used in metadata.\n*/ -}}\n{{- define \"labels.standard\" -}}\napp: {{ include \"hlf-couchdb.name\" . }}\nheritage: {{ .Release.Service | quote }}\nrelease: {{ .Release.Name | quote }}\nchart: {{ include \"hlf-couchdb.chart\" . }}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"hlf-couchdb.fullname\" . }}\n labels:\n{{ include \"labels.standard\" . | indent 4 }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ include \"hlf-couchdb.name\" . }}\n release: {{ .Release.Name }}\n # Ensure we allow our pod to be unavailable, so we can upgrade\n strategy:\n rollingUpdate:\n maxUnavailable: 1\n template:\n metadata:\n labels:\n{{ include \"labels.standard\" . | indent 8 }}\n spec:\n volumes:\n - name: data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.existingClaim | default (include \"hlf-couchdb.fullname\" .) }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n envFrom:\n - secretRef:\n name: {{ include \"hlf-couchdb.fullname\" . }}\n volumeMounts:\n - mountPath: /opt/couchdb/data\n name: data\n ports:\n - name: couchdb\n containerPort: 5984\n protocol: TCP\n livenessProbe:\n tcpSocket:\n port: 5984\n initialDelaySeconds: 60\n timeoutSeconds: 5\n failureThreshold: 6\n readinessProbe:\n tcpSocket:\n port: 5984\n initialDelaySeconds: 5\n timeoutSeconds: 3\n periodSeconds: 5\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"hlf-couchdb.fullname\" . -}}\n{{- $ingressPath := .Values.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n{{ include \"labels.standard\" . | indent 4 }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: couchdb\n {{- end }}\n{{- end }}\n",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ include \"hlf-couchdb.fullname\" . }}\n labels:\n{{ include \"labels.standard\" . | indent 4 }}\n{{- if .Values.persistence.annotations }}\n annotations:\n{{ toYaml .Values.persistence.annotations | indent 4 }}\n{{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{- if .Values.persistence.storageClass }}\n {{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n {{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ include \"hlf-couchdb.fullname\" . }}\n labels:\n{{ include \"labels.standard\" . | indent 4 }}\ntype: Opaque\ndata:\n COUCHDB_USERNAME: {{ .Values.couchdbUsername | b64enc | quote }}\n {{ if .Values.couchdbPassword }}\n COUCHDB_PASSWORD: {{ .Values.couchdbPassword | b64enc | quote }}\n {{ else }}\n COUCHDB_PASSWORD: {{ randAlphaNum 24 | b64enc | quote }}\n {{ end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"hlf-couchdb.fullname\" . }}\n labels:\n{{ include \"labels.standard\" . | indent 4 }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: couchdb\n protocol: TCP\n name: couchdb\n selector:\n app: {{ include \"hlf-couchdb.name\" . }}\n release: {{ .Release.Name }}\n"
] | ## Default values for hlf-couchdb.
## This is a YAML-formatted file.
## Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: hyperledger/fabric-couchdb
tag: 0.4.10
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 5984
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
persistence:
enabled: true
annotations: {}
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
accessMode: ReadWriteOnce
size: 1Gi
# existingClaim: ""
##################################
## Further configuration options #
##################################
## Database username
couchdbUsername: "couchdb"
## Database password (default: random 24 character string)
# couchdbPassword:
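## If couchdbPassword is left unset, the secret template generates a random 24-character password;
## it can be read back later with something like (release name and resulting secret name assumed):
## kubectl get secret <release>-hlf-couchdb -o jsonpath='{.data.COUCHDB_PASSWORD}' | base64 --decode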
resources: {}
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity:
## Suggested antiAffinity, as each CouchDB instance should be on a separate Node for resilience
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# app: hlf-couchdb
|