chart_name | templates | values
---|---|---|
gce-ingress | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"gce-ingress.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"gce-ingress.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"gce-ingress.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"gce-ingress.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"gce-ingress.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}",
"# config-map.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"gce-ingress.fullname\" . }}\n labels:\n app: {{ include \"gce-ingress.name\" . }}\n chart: {{ include \"gce-ingress.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n gce.conf: |\n [global]\n token-url = {{ .Values.config.tokenUrl }}\n project-id = {{ .Values.config.projectID }}\n network = {{ .Values.config.network }}\n subnetwork = {{ .Values.config.subnetwork }}\n node-instance-prefix = {{ .Values.config.nodeInstancePrefix }}\n node-tags = {{ .Values.config.nodeTags }}",
"# deployment-backend.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"gce-ingress.fullname\" . }}-backend\n labels:\n app: {{ include \"gce-ingress.name\" . }}\n component: backend\n chart: {{ include \"gce-ingress.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n kubernetes.io/name: \"GLBC\"\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\nspec:\n replicas: {{ .Values.defaultBackend.replicaCount }}\n selector:\n matchLabels:\n app: {{ include \"gce-ingress.name\" . }}\n component: backend\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ include \"gce-ingress.name\" . }}\n release: {{ .Release.Name }}\n component: backend\n spec:\n containers:\n - name: backend\n image: \"{{ .Values.defaultBackend.image.repository }}:{{ .Values.defaultBackend.image.tag }}\"\n imagePullPolicy: {{ .Values.defaultBackend.image.pullPolicy }}\n livenessProbe:\n httpGet:\n path: /healthz\n port: http\n scheme: HTTP\n readinessProbe:\n httpGet:\n path: /healthz\n port: http\n scheme: HTTP\n initialDelaySeconds: 30\n timeoutSeconds: 5\n ports:\n - name: http\n containerPort: 8080\n resources:\n{{ toYaml .Values.defaultBackend.resources | indent 12 }}\n {{- with .Values.defaultBackend.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.defaultBackend.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.defaultBackend.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# deployment-controller.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"gce-ingress.fullname\" . }}\n labels:\n app: {{ include \"gce-ingress.name\" . }}\n chart: {{ include \"gce-ingress.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.controller.replicaCount }}\n selector:\n matchLabels:\n app: {{ include \"gce-ingress.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ include \"gce-ingress.name\" . }}\n release: {{ .Release.Name }}\n spec:\n {{- if .Values.rbac.enabled }}\n serviceAccountName: {{ include \"gce-ingress.fullname\" . }}\n {{- end }}\n terminationGracePeriodSeconds: 600\n hostNetwork: true\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }}\"\n imagePullPolicy: {{ .Values.controller.image.pullPolicy }}\n livenessProbe:\n httpGet:\n path: /healthz\n port: 8086\n scheme: HTTP\n initialDelaySeconds: 30\n # healthz reaches out to GCE\n periodSeconds: 30\n timeoutSeconds: 15\n successThreshold: 1\n failureThreshold: 5\n volumeMounts:\n - name: gce-config-volume\n mountPath: /etc/gce/\n {{- if .Values.secret }}\n - name: google-cloud-key\n mountPath: /var/secrets/google\n env:\n - name: GOOGLE_APPLICATION_CREDENTIALS\n value: /var/secrets/google/key.json\n {{- end }}\n command:\n - sh\n - -c\n - 'exec /glbc --gce-ratelimit=ga.Operations.Get,qps,10,100 --gce-ratelimit=alpha.Operations.Get,qps,10,100 --gce-ratelimit=ga.BackendServices.Get,qps,1.8,1 --gce-ratelimit=ga.HealthChecks.Get,qps,1.8,1 --gce-ratelimit=alpha.HealthChecks.Get,qps,1.8,1 --verbose --default-backend-service={{ .Release.Namespace }}/{{ include \"gce-ingress.fullname\" . }} --sync-period=600s --running-in-cluster=true --use-real-cloud=true --config-file-path=/etc/gce/gce.conf --healthz-port=8086 2>&1'\n resources:\n{{ toYaml .Values.controller.resources | indent 12 }}\n volumes:\n {{- if .Values.secret }}\n - name: google-cloud-key\n secret:\n secretName: {{ .Values.secret }}\n items:\n - key: {{ default \"key.json\" .Values.secretKey }}\n path: key.json\n {{- end }}\n - name: gce-config-volume\n configMap:\n name: {{ include \"gce-ingress.fullname\" . }}\n items:\n - key: gce.conf\n path: gce.conf\n {{- with .Values.controller.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.controller.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.controller.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# rbac.yaml\n{{ if .Values.rbac.create -}}\n{{ if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ include \"gce-ingress.serviceAccountName\" . }}\n{{ end -}}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ include \"gce-ingress.fullname\" . }}\nrules:\n- apiGroups: [\"\"]\n resources: [\"secrets\", \"endpoints\", \"services\", \"pods\", \"nodes\", \"namespaces\", \"configmaps\", \"events\"]\n verbs: [\"get\", \"list\", \"watch\", \"update\", \"create\", \"patch\"]\n- apiGroups: [\"extensions\"]\n resources: [\"ingresses\"]\n verbs: [\"get\", \"list\", \"watch\", \"update\"]\n- apiGroups: [\"extensions\"]\n resources: [\"ingresses/status\"]\n verbs: [\"update\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ include \"gce-ingress.fullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ include \"gce-ingress.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ include \"gce-ingress.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{ end -}}",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"gce-ingress.fullname\" . }}\n labels:\n app: {{ include \"gce-ingress.name\" . }}\n chart: {{ include \"gce-ingress.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n k8s-app: glbc\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\n kubernetes.io/name: \"GLBCDefaultBackend\"\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: http\n protocol: TCP\n name: http\n selector:\n app: {{ include \"gce-ingress.name\" . }}\n component: backend\n release: {{ .Release.Name }}\n\n"
] | # Default values for gce-ingress.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
nameOverride: ""
fullnameOverride: ""
rbac:
  # Specifies whether RBAC resources should be created
  create: true
serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
# gce-ingress needs credentials to log in to GCE. Create a secret whose key
# `key.json` holds a GCE service-account key (in JSON format) with permissions
# to create and modify load balancers.
# Example: your secret should look like this:
# apiVersion: v1
# kind: Secret
# metadata:
#   name: gce-key
# type: Opaque
# data:
#   key.json: <base64-encoded JSON service account key>
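# (Hypothetical usage, not part of the chart: a secret of this shape can be
# created straight from a downloaded service-account key file, e.g.
#   kubectl create secret generic gce-key --from-file=key.json
# where `gce-key` and `key.json` are example names.)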
secret: ~
## If the Google auth key is stored under a different secret key, specify it here
# secretKey: key.json
# gce config, replace values to match your environment
config:
  projectID:
  network:
  subnetwork:
  nodeInstancePrefix:
  nodeTags:
  # tokenUrl should probably be left as nil
  tokenUrl: "nil"
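# Illustrative example of a filled-in config (every value below is a
# placeholder, not a default):
# config:
#   projectID: my-gcp-project
#   network: default
#   subnetwork: default
#   nodeInstancePrefix: gke-my-cluster
#   nodeTags: gke-my-cluster-node
#   tokenUrl: "nil"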
controller:
  replicaCount: 1
  image:
    repository: k8s.gcr.io/ingress-gce-glbc-amd64
    tag: v1.4.0
    pullPolicy: IfNotPresent
  resources: {}
    # requests:
    #   cpu: 10m
    #   memory: 50Mi
  nodeSelector: {}
  tolerations: []
  affinity: {}
defaultBackend:
  replicaCount: 1
  image:
    repository: k8s.gcr.io/defaultbackend
    tag: "1.4"
    pullPolicy: IfNotPresent
  resources: {}
    # limits:
    #   cpu: 10m
    #   memory: 20Mi
    # requests:
    #   cpu: 10m
    #   memory: 20Mi
  nodeSelector: {}
  tolerations: []
  affinity: {}
service:
  type: NodePort
  port: 80
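# (Hypothetical usage with the Helm 2 CLI of this chart's era, assuming the
# chart is published in a repo named `stable`:
#   helm install --name gce stable/gce-ingress \
#     --set config.projectID=my-gcp-project --set secret=gce-key)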
|
prometheus-blackbox-exporter | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-blackbox-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"prometheus-blackbox-exporter.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"prometheus-blackbox-exporter.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"prometheus-blackbox-exporter.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"prometheus-blackbox-exporter.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# configmap.yaml\n{{- if .Values.config }}\napiVersion: v1\nkind: {{ if .Values.secretConfig -}} Secret {{- else -}} ConfigMap {{- end }}\nmetadata:\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" . }}\n{{ if .Values.secretConfig -}} stringData: {{- else -}} data: {{- end }}\n blackbox.yaml: |\n{{ toYaml .Values.config | indent 4 }}\n{{- end }}\n",
"# daemonset.yaml\n{{- if (eq .Values.kind \"DaemonSet\") }}\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" . }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" . }}\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n serviceAccountName: {{ template \"prometheus-blackbox-exporter.serviceAccountName\" . }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 6 }}\n {{- end }}\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n {{- end }}\n {{- end }}\n\n restartPolicy: {{ .Values.restartPolicy }}\n\n {{- if .Values.priorityClassName }}\n priorityClassName: \"{{ .Values.priorityClassName }}\"\n {{- end }}\n containers:\n - name: blackbox-exporter\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n securityContext:\n readOnlyRootFilesystem: {{ .Values.readOnlyRootFilesystem }}\n runAsNonRoot: {{ .Values.runAsNonRoot }}\n runAsUser: {{ .Values.runAsUser }}\n args:\n{{- if .Values.config }}\n - \"--config.file=/config/blackbox.yaml\"\n{{- else }}\n - \"--config.file=/etc/blackbox_exporter/config.yml\"\n{{- end }}\n {{- if .Values.extraArgs }}\n{{ toYaml .Values.extraArgs | indent 12 }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n ports:\n - containerPort: {{ .Values.service.port }}\n name: http\n livenessProbe:\n {{- toYaml .Values.livenessProbe | trim | nindent 12 }}\n readinessProbe:\n {{- toYaml .Values.readinessProbe | trim | nindent 12 }}\n volumeMounts:\n - mountPath: /config\n name: config\n {{- range .Values.extraConfigmapMounts }}\n - name: {{ .name }}\n mountPath: {{ .mountPath }}\n subPath: {{ .subPath | default \"\" }}\n readOnly: {{ .readOnly }}\n {{- end }}\n {{- range .Values.extraSecretMounts }}\n - name: {{ .name }}\n mountPath: {{ .mountPath }}\n subPath: {{ .subPath }}\n readOnly: {{ .readOnly }}\n {{- end }}\n volumes:\n - name: config\n{{- if .Values.secretConfig }}\n secret:\n secretName: {{ template \"prometheus-blackbox-exporter.fullname\" . }}\n{{- else }}\n configMap:\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . 
}}\n{{- end }}\n {{- range .Values.extraConfigmapMounts }}\n - name: {{ .name }}\n configMap:\n name: {{ .configMap }}\n defaultMode: {{ .defaultMode }}\n {{- end }}\n {{- range .Values.extraSecretMounts }}\n - name: {{ .name }}\n secret:\n secretName: {{ .secretName }}\n defaultMode: {{ .defaultMode }}\n {{- end }}\n{{- end }}\n",
"# deployment.yaml\n{{- if (eq .Values.kind \"Deployment\") }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" . }}\nspec:\n replicas: {{ .Values.replicas }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n strategy:\n{{ toYaml .Values.strategy | indent 4 }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" . }}\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n serviceAccountName: {{ template \"prometheus-blackbox-exporter.serviceAccountName\" . }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 6 }}\n {{- end }}\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n {{- end }}\n {{- end }}\n\n restartPolicy: {{ .Values.restartPolicy }}\n\n {{- if .Values.priorityClassName }}\n priorityClassName: \"{{ .Values.priorityClassName }}\"\n {{- end }}\n containers:\n - name: blackbox-exporter\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n securityContext:\n readOnlyRootFilesystem: {{ .Values.readOnlyRootFilesystem }}\n {{- if .Values.allowIcmp }}\n capabilities:\n add: [\"NET_RAW\"]\n {{- else }}\n runAsNonRoot: {{ .Values.runAsNonRoot }}\n runAsUser: {{ .Values.runAsUser }}\n {{- end }}\n args:\n{{- if .Values.config }}\n - \"--config.file=/config/blackbox.yaml\"\n{{- else }}\n - \"--config.file=/etc/blackbox_exporter/config.yml\"\n{{- end }}\n {{- if .Values.extraArgs }}\n{{ toYaml .Values.extraArgs | indent 12 }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n ports:\n - containerPort: {{ .Values.service.port }}\n name: http\n livenessProbe:\n {{- toYaml .Values.livenessProbe | trim | nindent 12 }}\n readinessProbe:\n {{- toYaml .Values.readinessProbe | trim | nindent 12 }}\n volumeMounts:\n - mountPath: /config\n name: config\n {{- range .Values.extraConfigmapMounts }}\n - name: {{ .name }}\n mountPath: {{ .mountPath }}\n subPath: {{ .subPath | default \"\" }}\n readOnly: {{ .readOnly }}\n {{- end }}\n {{- range .Values.extraSecretMounts }}\n - name: {{ .name }}\n mountPath: {{ .mountPath }}\n subPath: {{ .subPath }}\n readOnly: {{ .readOnly }}\n {{- end }}\n volumes:\n - name: config\n{{- if .Values.secretConfig }}\n secret:\n secretName: {{ template \"prometheus-blackbox-exporter.fullname\" . }}\n{{- else }}\n configMap:\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . 
}}\n{{- end }}\n {{- range .Values.extraConfigmapMounts }}\n - name: {{ .name }}\n configMap:\n name: {{ .configMap }}\n defaultMode: {{ .defaultMode }}\n {{- end }}\n {{- range .Values.extraSecretMounts }}\n - name: {{ .name }}\n secret:\n secretName: {{ .secretName }}\n defaultMode: {{ .defaultMode }}\n {{- end }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $serviceName := include \"prometheus-blackbox-exporter.fullname\" . -}}\n{{- $servicePort := .Values.service.port -}}\n{{- $ingressPath := .Values.ingress.path -}}\n{{- if semverCompare \">=1.14.0-0\" .Capabilities.KubeVersion.GitVersion }}\napiVersion: networking.k8s.io/v1beta1\n{{- else }}\napiVersion: extensions/v1beta1\n{{- end }}\nkind: Ingress\nmetadata:\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" . }}\n annotations:\n{{ toYaml .Values.ingress.annotations | indent 4 }}\nspec:\n rules:\n {{- range $host := .Values.ingress.hosts }}\n - host: {{ $host }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n {{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n",
"# networkpolicy.yaml\n{{- if .Values.networkPolicy.enabled }}\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . }}\nspec:\n podSelector:\n matchLabels:\n app.kubernetes.io/instance: {{ $.Release.Name }}\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" $ }}\n ingress:\n{{- if .Values.networkPolicy.allowMonitoringNamespace }}\n - from:\n - namespaceSelector:\n matchLabels:\n name: monitoring\n ports:\n - port: {{ .Values.service.port }}\n protocol: TCP\n{{- else }}\n - {}\n{{- end }}\n policyTypes:\n - Ingress\n{{- end }}\n\n",
"# poddisruptionbudget.yaml\n{{- if .Values.podDisruptionBudget -}}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" . }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" . }}\n{{ toYaml .Values.podDisruptionBudget | indent 2 }}\n{{- end }}\n",
"# podsecuritypolicy.yaml\n{{- if .Values.pspEnabled }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . }}-psp\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" . }}\nspec:\n # Prevents running in privileged mode\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n volumes:\n - configMap\n - secret\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: {{ .Values.readOnlyRootFilesystem }}\n {{- if .Values.allowIcmp }}\n allowedCapabilities: \n - NET_RAW\n {{- end }}\n{{- end }}\n",
"# prometheusrule.yaml\n{{- if .Values.prometheusRule.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . }}\n {{- with .Values.prometheusRule.namespace }}\n namespace: {{ . }}\n {{- end }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" $ }}\n app.kubernetes.io/instance: {{ $.Release.Name }}\n app.kubernetes.io/managed-by: {{ $.Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" $ }}\n {{- with .Values.prometheusRule.additionalLabels -}}\n{{- toYaml . | nindent 4 -}}\n {{- end }}\nspec:\n {{- with .Values.prometheusRule.rules }}\n groups:\n - name: {{ template \"prometheus-blackbox-exporter.name\" $ }}\n rules: {{ tpl (toYaml .) $ | nindent 8 }}\n {{- end }}\n{{- end }}\n",
"# role.yaml\n{{- if .Values.pspEnabled }}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" . }}\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . }}\nrules:\n - apiGroups:\n - policy\n resources:\n - podsecuritypolicies\n resourceNames:\n - {{ template \"prometheus-blackbox-exporter.fullname\" . }}-psp\n verbs:\n - use\n{{- end -}}\n",
"# rolebinding.yaml\n{{- if .Values.pspEnabled }}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" . }}\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"prometheus-blackbox-exporter.serviceAccountName\" . }}\n{{- end -}}\n",
"# service.yaml\nkind: Service\napiVersion: v1\nmetadata:\n name: {{ template \"prometheus-blackbox-exporter.fullname\" . }}\n {{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" . }}\n{{- if .Values.service.labels }}\n{{ toYaml .Values.service.labels | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n protocol: TCP\n{{- if .Values.service.externalIPs }}\n externalIPs:\n{{ toYaml .Values.service.externalIPs | indent 4 }}\n{{- end }}\n selector:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"prometheus-blackbox-exporter.serviceAccountName\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" . }}\n annotations:\n{{ toYaml .Values.serviceAccount.annotations | indent 4 }}\n{{- end -}}\n",
"# servicemonitor.yaml\n{{- if .Values.serviceMonitor.enabled }}\n{{- range .Values.serviceMonitor.targets }}\n---\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ template \"prometheus-blackbox-exporter.fullname\" $ }}-{{ .name }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" $ }}\n app.kubernetes.io/instance: {{ $.Release.Name }}\n app.kubernetes.io/managed-by: {{ $.Release.Service }}\n helm.sh/chart: {{ include \"prometheus-blackbox-exporter.chart\" $ }}\n {{- if or $.Values.serviceMonitor.defaults.labels .labels }}\n {{- toYaml (.labels | default $.Values.serviceMonitor.defaults.labels) | nindent 4 }}\n {{- end }}\nspec:\n endpoints:\n - port: http\n scheme: http\n path: \"/probe\"\n interval: {{ .interval | default $.Values.serviceMonitor.defaults.interval }}\n scrapeTimeout: {{ .scrapeTimeout | default $.Values.serviceMonitor.defaults.scrapeTimeout }}\n params:\n module:\n - {{ .module | default $.Values.serviceMonitor.defaults.module }}\n target:\n - {{ .url }}\n metricRelabelings:\n - targetLabel: instance\n replacement: {{ .url }}\n - targetLabel: target\n replacement: {{ .name }}\n jobLabel: \"{{ $.Release.Name }}\"\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"prometheus-blackbox-exporter.name\" $ }}\n app.kubernetes.io/instance: {{ $.Release.Name }}\n namespaceSelector:\n matchNames:\n - {{ $.Release.Namespace }}\n{{- end }}\n{{- end }}\n"
] | restartPolicy: Always
kind: Deployment
podDisruptionBudget: {}
# maxUnavailable: 0
## Enable pod security policy
pspEnabled: true
strategy:
  rollingUpdate:
    maxSurge: 1
    maxUnavailable: 0
  type: RollingUpdate
image:
  repository: prom/blackbox-exporter
  tag: v0.16.0
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecrets:
  #   - myRegistryKeySecretName
## User to run blackbox-exporter container as
runAsUser: 1000
readOnlyRootFilesystem: true
runAsNonRoot: true
livenessProbe:
  httpGet:
    path: /health
    port: http
readinessProbe:
  httpGet:
    path: /health
    port: http
nodeSelector: {}
tolerations: []
affinity: {}
secretConfig: false
config:
  modules:
    http_2xx:
      prober: http
      timeout: 5s
      http:
        valid_http_versions: ["HTTP/1.1", "HTTP/2"]
        no_follow_redirects: false
        preferred_ip_protocol: "ip4"
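# (Illustrative, not a chart default: blackbox_exporter also ships a plain
# `tcp` prober, so an extra module could sit alongside http_2xx like this:
# config:
#   modules:
#     tcp_connect:
#       prober: tcp
#       timeout: 5s
# Note that ICMP-based modules additionally need `allowIcmp: true` below so
# the container gets the NET_RAW capability.)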
extraConfigmapMounts: []
  # - name: certs-configmap
  #   mountPath: /etc/secrets/ssl/
  #   subPath: certificates.crt  # (optional)
  #   configMap: certs-configmap
  #   readOnly: true
  #   defaultMode: 420
## Additional secret mounts
# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
extraSecretMounts: []
  # - name: secret-files
  #   mountPath: /etc/secrets
  #   secretName: blackbox-secret-files
  #   readOnly: true
  #   defaultMode: 420
allowIcmp: false
resources: {}
  # limits:
  #   memory: 300Mi
  # requests:
  #   memory: 50Mi
priorityClassName: ""
service:
  annotations: {}
  type: ClusterIP
  port: 9115
serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
  annotations: {}
## An Ingress resource can provide name-based virtual hosting and TLS
## termination, among other things, for blackbox-exporter deployments which
## are accessed from outside the Kubernetes cluster.
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
  enabled: false
  hosts: []
  #   - chart-example.local
  path: '/'
  annotations: {}
  #   kubernetes.io/ingress.class: nginx
  #   kubernetes.io/tls-acme: "true"
  tls: []
  # Secrets must be manually created in the namespace.
  #   - secretName: chart-example-tls
  #     hosts:
  #       - chart-example.local
podAnnotations: {}
extraArgs: []
#   - --history.limit=1000
replicas: 1
serviceMonitor:
  ## If true, a ServiceMonitor resource is created for the Prometheus Operator
  ## https://github.com/coreos/prometheus-operator
  ##
  enabled: false
  # Default values that will be used for all ServiceMonitors created by `targets`
  defaults:
    labels: {}
    interval: 30s
    scrapeTimeout: 30s
    module: http_2xx
  targets:
  # - name: example                    # Human-readable name that will appear in Prometheus / AlertManager
  #   url: http://example.com/healthz  # The URL that blackbox will scrape
  #   labels: {}                       # Labels for the ServiceMonitor. Overrides value set in `defaults`
  #   interval: 60s                    # Scraping interval. Overrides value set in `defaults`
  #   scrapeTimeout: 60s               # Scrape timeout. Overrides value set in `defaults`
  #   module: http_2xx                 # Module used for scraping. Overrides value set in `defaults`
## Custom PrometheusRules to be defined
## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
prometheusRule:
  enabled: false
  additionalLabels: {}
  namespace: ""
  rules: []
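  # Illustrative rule (not a chart default; `probe_success` is a standard
  # blackbox_exporter metric, while the alert name and threshold are
  # placeholders):
  # rules:
  #   - alert: BlackboxProbeFailed
  #     expr: probe_success == 0
  #     for: 5m
  #     labels:
  #       severity: warning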
## Network policy for chart
networkPolicy:
  # Enable network policy and allow access from anywhere
  enabled: false
  # Limit access to the monitoring namespace only
  allowMonitoringNamespace: false
|
gangway | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"gangway.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"gangway.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"gangway.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"gangway.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"gangway.name\" . }}\n helm.sh/chart: {{ include \"gangway.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ndata:\n gangway.yaml: |\n {{- .Values.gangway | toYaml | nindent 4 }}\n {{- if .Values.trustedCACert }}\n trustedCAPath: /gangway/rootca.crt\n rootca.crt: |\n {{- .Values.trustedCACert | nindent 4}}\n {{ end }}\n\n\n\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"gangway.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"gangway.name\" . }}\n helm.sh/chart: {{ include \"gangway.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"gangway.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"gangway.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n annotations:\n check/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n check/values: {{ .Files.Get \"../values.yaml\" | sha256sum }}\n{{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | trim | indent 8 }} \n{{- end }}\n spec:\n {{- if ne .Values.gangway.serviceAccountName \"\" }}\n serviceAccountName: {{ .Values.gangway.serviceAccountName }}\n {{- end }}\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.image.pullSecrets | indent 8 }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command:\n - gangway\n - -config\n - /gangway/gangway.yaml\n env:\n{{- if not .Values.env.GANGWAY_SESSION_SECURITY_KEY }}\n - name: GANGWAY_SESSION_SECURITY_KEY\n valueFrom:\n secretKeyRef:\n key: sessionkey\n name: {{ include \"gangway.fullname\" . }}-key\n{{- end }}\n{{- range $k, $v := .Values.env }}\n - name: {{ $k }}\n value: {{ $v }}\n{{- end }}\n ports:\n - name: http\n containerPort: {{ .Values.gangway.port }}\n protocol: TCP\n volumeMounts:\n - name: gangway\n mountPath: /gangway/\n {{- if or (and .Values.tls.certData .Values.tls.keyData) .Values.tls.existingSecret }}\n - name: gangway-tls\n mountPath: /etc/gangway/tls/\n readOnly: true\n {{ end }}\n{{- if .Values.extraVolumeMounts }}\n{{ toYaml .Values.extraVolumeMounts | trim | indent 8 }}\n{{- end }}\n livenessProbe:\n failureThreshold: 3\n httpGet:\n path: {{ .Values.gangway.httpPath }}\n port: {{ .Values.gangway.port }}\n scheme: {{ .Values.livenessProbe.scheme }}\n initialDelaySeconds: 20\n periodSeconds: 60\n successThreshold: 1\n timeoutSeconds: 1\n readinessProbe:\n failureThreshold: 3\n httpGet:\n path: {{ .Values.gangway.httpPath }}\n port: {{ .Values.gangway.port }}\n scheme: {{ .Values.readinessProbe.scheme }}\n periodSeconds: 10\n successThreshold: 1\n timeoutSeconds: 1\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n volumes:\n - name: gangway\n configMap:\n name: {{ include \"gangway.fullname\" . }}\n {{- if and .Values.tls.certData .Values.tls.keyData }}\n - name: gangway-tls\n secret:\n secretName: {{ include \"gangway.fullname\" . }}-tls\n {{ else if .Values.tls.existingSecret }}\n - name: gangway-tls\n secret:\n secretName: {{ .Values.tls.existingSecret }}\n {{ end -}}\n{{- if .Values.extraVolumes }}\n{{ toYaml .Values.extraVolumes | trim | indent 6 }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"gangway.fullname\" . -}}\n{{- $ingressPath := .Values.ingress.path -}}\n{{- if $.Capabilities.APIVersions.Has \"networking.k8s.io/v1beta1\" }}\napiVersion: networking.k8s.io/v1beta1\n{{- else }}\napiVersion: extensions/v1beta1\n{{- end }}\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app.kubernetes.io/name: {{ include \"gangway.name\" . }}\n helm.sh/chart: {{ include \"gangway.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . | quote }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . | quote }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}svc\n servicePort: http\n {{- end }}\n{{- end }}\n",
"# key.yaml\n{{- if not .Values.env.GANGWAY_SESSION_SECURITY_KEY -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ include \"gangway.fullname\" . }}-key\n labels:\n app.kubernetes.io/name: {{ include \"gangway.name\" . }}\n helm.sh/chart: {{ include \"gangway.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ntype: Opaque\ndata:\n sessionkey: {{ ( default ( randAlphaNum 32 ) .Values.gangway.sessionKey ) | b64enc | quote }}\n{{- end -}}",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n # Need to append \"svc\" here because otherwise Kube will make an env var\n # called GANGWAY_PORT with something like \"tcp://100.67.143.54:80\" as a value.\n # The gangway binary then interprets this as a config variable and expects it\n # to hold the int for the port to listen on. Result = bang!\n name: {{ include \"gangway.fullname\" . }}svc\n labels:\n app.kubernetes.io/name: {{ include \"gangway.name\" . }}\n helm.sh/chart: {{ include \"gangway.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- with .Values.service.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: {{ .Values.gangway.port }}\n protocol: TCP\n name: http\n{{- if and (eq \"LoadBalancer\" $.Values.service.type) (.Values.service.loadBalancerIP) }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n{{- end}}\n selector:\n app.kubernetes.io/name: {{ include \"gangway.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n",
"# serviceAccount.yaml\n{{- if ne .Values.gangway.serviceAccountName \"\" }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app.kubernetes.io/name: {{ include \"gangway.name\" . }}\n helm.sh/chart: {{ include \"gangway.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n name: {{ .Values.gangway.serviceAccountName }}\n{{- end }}",
"# ssl.yaml\n{{- if and .Values.tls.certData .Values.tls.keyData -}}\napiVersion: v1\ntype: kubernetes.io/tls\nkind: Secret\nmetadata:\n name: {{ include \"gangway.fullname\" . }}-tls\n labels:\n app.kubernetes.io/name: {{ include \"gangway.name\" . }}\n helm.sh/chart: {{ include \"gangway.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ndata:\n tls.crt: {{ .Values.tls.certData | b64enc }}\n tls.key: {{ .Values.tls.keyData | b64enc }}\n{{- end -}}\n"
] | replicaCount: 1
image:
  repository: gcr.io/heptio-images/gangway
  tag: v3.2.0
  pullPolicy: IfNotPresent
  ## Optional array of imagePullSecrets containing private registry credentials
  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  pullSecrets: []
  # - name: secretName
nameOverride: ""
fullnameOverride: ""
# Specify a CA cert to trust for self-signed certificates at the OAuth2 URLs.
# Be careful to indent one level beyond the trustedCACert key:
# trustedCACert: |-
#   -----BEGIN CERTIFICATE-----
#   ...
#   -----END CERTIFICATE-----
# Add env variables to the pod
env: {}
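# (Hypothetical example: key.yaml only generates a random session-key Secret
# when GANGWAY_SESSION_SECURITY_KEY is absent from `env`, so a fixed key could
# be supplied here instead:
# env:
#   GANGWAY_SESSION_SECURITY_KEY: "<32-byte random string>")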
# Add annotations to the pod
podAnnotations: {}
gangway:
  # The address to listen on. Defaults to 0.0.0.0 to listen on all interfaces.
  # Env var: GANGWAY_HOST
  # host: 0.0.0.0
  serviceAccountName: ""
  # The port to listen on. Defaults to 8080.
  # Env var: GANGWAY_PORT
  port: 8080
  # Should gangway serve TLS instead of plain HTTP? Default: false
  # Env var: GANGWAY_SERVE_TLS
  # serveTLS: false
  # The public cert file (including root and intermediates) to use when serving TLS.
  # Env var: GANGWAY_CERT_FILE
  # certFile: /etc/gangway/tls/tls.crt
  # The private key file when serving TLS.
  # Env var: GANGWAY_KEY_FILE
  # keyFile: /etc/gangway/tls/tls.key
  # The cluster name. Used in the UI and in kubectl config instructions.
  # Env var: GANGWAY_CLUSTER_NAME
  clusterName: "${GANGWAY_CLUSTER_NAME}"
  # OAuth2 URL to start the authorization flow.
  # Env var: GANGWAY_AUTHORIZE_URL
  authorizeURL: "https://${DNS_NAME}/authorize"
  # OAuth2 URL to obtain access tokens.
  # Env var: GANGWAY_TOKEN_URL
  tokenURL: "https://${DNS_NAME}/oauth/token"
  # Endpoint that provides user profile information [optional]. Not all providers
  # will require this.
  # Env var: GANGWAY_AUDIENCE
  audience: "https://${DNS_NAME}/userinfo"
  # Used to specify the scope of the requested OAuth authorization.
  scopes: ["openid", "profile", "email", "offline_access"]
  # Where to redirect back to. This should be a URL where gangway is reachable.
  # Typically this also needs to be registered as part of the OAuth application
  # with the OAuth provider.
  # Env var: GANGWAY_REDIRECT_URL
  redirectURL: "https://${GANGWAY_REDIRECT_URL}/callback"
  # API client ID as indicated by the identity provider
  # Env var: GANGWAY_CLIENT_ID
  clientID: "${GANGWAY_CLIENT_ID}"
  # API client secret as indicated by the identity provider
  # Env var: GANGWAY_CLIENT_SECRET
  clientSecret: "${GANGWAY_CLIENT_SECRET}"
  # Some identity providers accept an empty client secret; this is not
  # generally considered a good idea. If you have to use an empty secret and
  # accept the risks that come with that, then you can set this to true.
  # allowEmptyClientSecret: false
  # The JWT claim to use as the username. This is used in the UI.
  # Default is "nickname". This is combined with the clusterName
  # for the "user" portion of the kubeconfig.
  # Env var: GANGWAY_USERNAME_CLAIM
  usernameClaim: "sub"
  # The API server endpoint used to configure kubectl
  # Env var: GANGWAY_APISERVER_URL
  apiServerURL: "https://${GANGWAY_APISERVER_URL}"
  # The path to find the CA bundle for the API server. Used to configure kubectl.
  # This is typically mounted into the default location for workloads running on
  # a Kubernetes cluster and doesn't need to be set.
  # Env var: GANGWAY_CLUSTER_CA_PATH
  # cluster_ca_path: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
  # The path gangway uses to create URLs (defaults to "")
  # Env var: GANGWAY_HTTP_PATH
  # httpPath: "https://${GANGWAY_HTTP_PATH}"
  # The key to use when encrypting the contents of cookies.
  # You can leave this blank and the chart will generate a random key; however,
  # use that with caution: subsequent upgrades to the deployment will
  # regenerate this key, which will cause gangway to error when attempting to
  # decrypt cookies stored in users' browsers that were encrypted with the old
  # key.
  # TL;DR: the auto-generation is safe in test environments; provide your own
  # key in production.
  # sessionKey:
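  # (Hypothetical usage: generate a stable key once and pass it at install
  # time, e.g. `--set gangway.sessionKey=$(openssl rand -base64 32)`.)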
tls: {}
  # certData: |
  #   -----BEGIN CERTIFICATE-----
  #   ...
  #   -----END CERTIFICATE-----
  # keyData: |
  #   -----BEGIN ENCRYPTED PRIVATE KEY-----
  #   ...
  #   -----END ENCRYPTED PRIVATE KEY-----
  # Name of an existing secret containing `tls.crt` and `tls.key`,
  # mounted on the default TLS path `/etc/gangway/tls`.
  # existingSecret: ""
extraVolumes: []
extraVolumeMounts: []
livenessProbe:
  # HTTP or HTTPS
  scheme: HTTP
readinessProbe:
  # HTTP or HTTPS
  scheme: HTTP
service:
  type: ClusterIP
  port: 80
  # Specifies a loadBalancerIP when using the LoadBalancer service type
  # loadBalancerIP: 192.168.0.51
  annotations: {}
ingress:
  enabled: false
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  # - secretName: chart-example-tls
  #   hosts:
  #     - chart-example.local
resources: {}
  # We usually recommend not to specify default resources and to leave this as
  # a conscious choice for the user. This also increases the chances the chart
  # runs on environments with few resources, such as Minikube. If you do want
  # to specify resources, uncomment the following lines, adjust them as
  # necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
|
kubernetes-dashboard | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"kubernetes-dashboard.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"kubernetes-dashboard.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"kubernetes-dashboard.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"kubernetes-dashboard.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"kubernetes-dashboard.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole-readonly.yaml\n{{- if and .Values.rbac.create .Values.rbac.clusterReadOnlyRole (not .Values.rbac.clusterAdminRole) }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n chart: {{ template \"kubernetes-dashboard.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: \"{{ template \"kubernetes-dashboard.fullname\" . }}-readonly\"\n namespace: {{ .Release.Namespace }}\nrules:\n # Allow Dashboard to get, update and delete Dashboard exclusive secrets.\n - apiGroups:\n - \"\"\n resources:\n - secrets\n resourceNames:\n - kubernetes-dashboard-key-holder\n - {{ template \"kubernetes-dashboard.fullname\" . }}\n verbs:\n - get\n - update\n - delete\n\n # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.\n - apiGroups:\n - \"\"\n resources:\n - configmaps\n resourceNames:\n - kubernetes-dashboard-settings\n verbs:\n - get\n - update\n\n - apiGroups:\n - \"\"\n resources:\n - configmaps\n - endpoints\n - persistentvolumeclaims\n - pods\n - replicationcontrollers\n - replicationcontrollers/scale\n - serviceaccounts\n - services\n - nodes\n - persistentvolumeclaims\n - persistentvolumes\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - bindings\n - events\n - limitranges\n - namespaces/status\n - pods/log\n - pods/status\n - replicationcontrollers/status\n - resourcequotas\n - resourcequotas/status\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - \"\"\n resources:\n - namespaces\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - apps\n resources:\n - daemonsets\n - deployments\n - deployments/scale\n - replicasets\n - replicasets/scale\n - statefulsets\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - autoscaling\n resources:\n - horizontalpodautoscalers\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - batch\n resources:\n - cronjobs\n - jobs\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - extensions\n resources:\n - daemonsets\n - deployments\n - deployments/scale\n - ingresses\n - networkpolicies\n - replicasets\n - replicasets/scale\n - replicationcontrollers/scale\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - policy\n resources:\n - poddisruptionbudgets\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - networking.k8s.io\n resources:\n - networkpolicies\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - storage.k8s.io\n resources:\n - storageclasses\n - volumeattachments\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - rbac.authorization.k8s.io\n resources:\n - clusterrolebindings\n - clusterroles\n - roles\n - rolebindings\n verbs:\n - get\n - list\n - watch\n{{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"kubernetes-dashboard.fullname\" . }}\n{{- if .Values.annotations }}\n annotations:\n{{ toYaml .Values.annotations | indent 4 }}\n{{- end }}\n labels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n chart: {{ template \"kubernetes-dashboard.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.labels }}\n{{ toYaml .Values.labels | indent 4 }}\n{{- end }}\nspec:\n replicas: {{ .Values.replicaCount }}\n strategy:\n rollingUpdate:\n maxSurge: 0\n maxUnavailable: 1\n type: RollingUpdate\n selector:\n matchLabels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n {{- if .Values.podAnnotations }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n labels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n release: {{ .Release.Name }}\n{{- if .Values.podLabels }}\n{{ toYaml .Values.podLabels | indent 8 }}\n{{- end }}\n spec:\n {{- if .Values.securityContext }}\n securityContext:\n{{ toYaml .Values.securityContext | indent 8 }}\n {{- end }}\n serviceAccountName: {{ template \"kubernetes-dashboard.serviceAccountName\" . }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n args:\n{{- if .Values.enableSkipLogin }}\n - --enable-skip-login\n{{- end }} \n{{- if .Values.enableInsecureLogin }}\n - --enable-insecure-login\n{{- else }}\n - --auto-generate-certificates\n{{- end }}\n{{- if .Values.extraArgs }}\n{{ toYaml .Values.extraArgs | indent 10 }}\n{{- end }}\n{{- if .Values.extraEnv }}\n env:\n{{ toYaml .Values.extraEnv | indent 10 }}\n{{- end }}\n ports:\n{{- if .Values.enableInsecureLogin }}\n - name: http\n containerPort: 9090\n protocol: TCP\n{{- else }}\n - name: https\n containerPort: 8443\n protocol: TCP\n{{- end }}\n volumeMounts:\n - name: kubernetes-dashboard-certs\n mountPath: /certs\n # Create on-disk volume to store exec logs\n - mountPath: /tmp\n name: tmp-volume\n livenessProbe:\n httpGet:\n{{- if .Values.enableInsecureLogin }}\n scheme: HTTP\n path: /\n port: 9090\n{{- else }}\n scheme: HTTPS\n path: /\n port: 8443\n{{- end }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n{{- if .Values.dashboardContainerSecurityContext }}\n securityContext:\n{{ toYaml .Values.dashboardContainerSecurityContext | indent 10 }}\n{{- end }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n {{- end }}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.priorityClassName }}\n priorityClassName: \"{{ .Values.priorityClassName }}\"\n {{- end }}\n volumes:\n - name: kubernetes-dashboard-certs\n secret:\n secretName: {{ template \"kubernetes-dashboard.fullname\" . }}\n - name: tmp-volume\n emptyDir: {}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $serviceName := include \"kubernetes-dashboard.fullname\" . -}}\n{{- $servicePort := .Values.service.externalPort -}}\n{{- $paths := .Values.ingress.paths -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"kubernetes-dashboard.fullname\" . }}\n labels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n chart: {{ template \"kubernetes-dashboard.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- range $key, $value := .Values.ingress.labels }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n{{- if .Values.ingress.annotations }}\n annotations:\n{{ toYaml .Values.ingress.annotations | indent 4 }}\n{{- end }}\nspec:\n rules:\n {{- if .Values.ingress.hosts }}\n {{- range $host := .Values.ingress.hosts }}\n - host: {{ $host }}\n http:\n paths:\n {{- range $p := $paths }}\n - path: {{ $p }}\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n {{- end -}}\n {{- else }}\n - http:\n paths:\n {{- range $p := $paths }}\n - path: {{ $p }}\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n {{- end -}}\n {{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n",
"# networkpolicy.yaml\n{{- if .Values.networkPolicy -}}\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n name: {{ template \"kubernetes-dashboard.fullname\" . }}\n labels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n chart: {{ template \"kubernetes-dashboard.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n podSelector:\n matchLabels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n release: {{ .Release.Name }}\n ingress:\n - ports:\n - port: 9090\n protocol: TCP\n{{- end -}}\n",
"# pdb.yaml\n{{- if .Values.podDisruptionBudget.enabled -}}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n labels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n chart: {{ template \"kubernetes-dashboard.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kubernetes-dashboard.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n\nspec:\n {{- if .Values.podDisruptionBudget.minAvailable }}\n minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}\n {{- end }}\n {{- if .Values.podDisruptionBudget.maxUnavailable }}\n maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}\n {{- end }}\n selector:\n matchLabels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n{{- end -}}",
"# role.yaml\n{{- if and .Values.rbac.create (not .Values.rbac.clusterAdminRole) }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n labels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n chart: {{ template \"kubernetes-dashboard.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kubernetes-dashboard.fullname\" . }}\n namespace: {{ .Release.Namespace }}\nrules:\n # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.\n- apiGroups:\n - \"\"\n resources:\n - secrets\n verbs:\n - create\n\n # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n verbs:\n - create\n\n # Allow Dashboard to get, update and delete Dashboard exclusive secrets.\n- apiGroups:\n - \"\"\n resources:\n - secrets\n resourceNames:\n - kubernetes-dashboard-key-holder\n - {{ template \"kubernetes-dashboard.fullname\" . }}\n verbs:\n - get\n - update\n - delete\n\n # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n resourceNames:\n - kubernetes-dashboard-settings\n verbs:\n - get\n - update\n\n # Allow Dashboard to get metrics from heapster.\n- apiGroups:\n - \"\"\n resources:\n - services\n resourceNames:\n - heapster\n verbs:\n - proxy\n- apiGroups:\n - \"\"\n resources:\n - services/proxy\n resourceNames:\n - heapster\n - \"http:heapster:\"\n - \"https:heapster:\"\n verbs:\n - get\n{{- end -}}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create }}\n\n{{- if or .Values.rbac.clusterAdminRole .Values.rbac.clusterReadOnlyRole }}\n# Cluster role binding for clusterAdminRole == true or clusterReadOnlyRole=true\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n chart: {{ template \"kubernetes-dashboard.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kubernetes-dashboard.fullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ if .Values.rbac.clusterAdminRole -}}\ncluster-admin\n{{- else if .Values.rbac.clusterReadOnlyRole -}}\n{{ template \"kubernetes-dashboard.fullname\" . }}-readonly\n{{- end }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"kubernetes-dashboard.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- else -}}\n# Role binding for clusterAdminRole == false and clusterReadOnlyRole=false\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n labels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n chart: {{ template \"kubernetes-dashboard.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kubernetes-dashboard.fullname\" . }}\n namespace: {{ .Release.Namespace }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"kubernetes-dashboard.fullname\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"kubernetes-dashboard.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n{{- end -}}\n",
"# secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n labels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n chart: {{ template \"kubernetes-dashboard.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kubernetes-dashboard.fullname\" . }}\n namespace: {{ .Release.Namespace }}\ntype: Opaque\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n chart: {{ template \"kubernetes-dashboard.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kubernetes-dashboard.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"kubernetes-dashboard.fullname\" . }}\n labels:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n chart: {{ template \"kubernetes-dashboard.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n kubernetes.io/cluster-service: \"true\"\n{{- if .Values.service.labels }}\n{{ toYaml .Values.service.labels | indent 4 }}\n{{- end }}\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.externalPort }}\n{{- if .Values.enableInsecureLogin }}\n targetPort: 9090\n name: \"http\"\n{{- else }}\n targetPort: 8443\n name: \"https\"\n{{- end }}\n{{- if hasKey .Values.service \"nodePort\" }}\n nodePort: {{ .Values.service.nodePort }}\n{{- end }}\n{{- if .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}\n{{- end }}\n selector:\n app: {{ template \"kubernetes-dashboard.name\" . }}\n release: {{ .Release.Name }}\n"
] | # Default values for kubernetes-dashboard
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
image:
repository: k8s.gcr.io/kubernetes-dashboard-amd64
tag: v1.10.1
pullPolicy: IfNotPresent
pullSecrets: []
replicaCount: 1
## Here annotations can be added to the kubernetes dashboard deployment
annotations: {}
## Here labels can be added to the kubernetes dashboard deployment
##
labels: {}
# kubernetes.io/name: "Kubernetes Dashboard"
## Enable the option to skip the login page
enableSkipLogin: false
## Serve application over HTTP without TLS
enableInsecureLogin: false
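## For example, to expose the dashboard over plain HTTP on the service's port 80
## (an illustrative override, not a default; see `service` below):
# enableInsecureLogin: true
# service:
#   externalPort: 80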
## Additional container arguments
##
# extraArgs:
# - --enable-skip-login
# - --enable-insecure-login
# - --system-banner="Welcome to Kubernetes"
## Additional container environment variables
##
extraEnv: []
# - name: SOME_VAR
# value: 'some value'
# Annotations to be added to kubernetes dashboard pods
## Recommended value
# podAnnotations:
# seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
podAnnotations: {}
# Add custom labels to pods
podLabels: {}
## SecurityContext for the kubernetes dashboard container
## Recommended values
# dashboardContainerSecurityContext:
# allowPrivilegeEscalation: false
# readOnlyRootFilesystem: true
## The two values below can be set here or at the pod level (using the securityContext value)
# runAsUser: 1001
# runAsGroup: 2001
dashboardContainerSecurityContext: {}
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## List of node taints to tolerate (requires Kubernetes >= 1.6)
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute"
## Affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# priorityClassName: ""
service:
type: ClusterIP
externalPort: 443
  ## This allows an override of the dashboard service name
## Default: {{ .Chart.Name }}
##
# nameOverride:
  # loadBalancerSourceRanges is a list of allowed CIDR values, which are combined with the service port to
  # set allowed inbound rules on the security group assigned to the load balancer
# loadBalancerSourceRanges: []
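  ## For example, to only accept traffic from an internal network (CIDR illustrative):
  # loadBalancerSourceRanges:
  #   - 10.0.0.0/8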
## Kubernetes Dashboard Service annotations
##
## For GCE ingress, the following annotation is required:
## service.alpha.kubernetes.io/app-protocols: '{"https":"HTTPS"}' if enableInsecureLogin=false
## or
## service.alpha.kubernetes.io/app-protocols: '{"http":"HTTP"}' if enableInsecureLogin=true
annotations: {}
## Here labels can be added to the Kubernetes Dashboard service
##
labels: {}
# kubernetes.io/name: "Kubernetes Dashboard"
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
ingress:
## If true, Kubernetes Dashboard Ingress will be created.
##
enabled: false
## Kubernetes Dashboard Ingress annotations
##
## Add custom labels
# labels:
# key: value
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: 'true'
## If you plan to use TLS backend with enableInsecureLogin set to false
## (default), you need to uncomment the below.
## If you use ingress-nginx < 0.21.0
# nginx.ingress.kubernetes.io/secure-backends: "true"
  ## If you use ingress-nginx >= 0.21.0
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
## Kubernetes Dashboard Ingress paths
##
paths:
- /
# - /*
## Kubernetes Dashboard Ingress hostnames
## Must be provided if Ingress is enabled
##
# hosts:
# - kubernetes-dashboard.domain.com
## Kubernetes Dashboard Ingress TLS configuration
## Secrets must be manually created in the namespace
##
# tls:
# - secretName: kubernetes-dashboard-tls
# hosts:
# - kubernetes-dashboard.domain.com
rbac:
# Specifies whether RBAC resources should be created
create: true
# Specifies whether cluster-admin ClusterRole will be used for dashboard
# ServiceAccount (NOT RECOMMENDED).
clusterAdminRole: false
# Start in ReadOnly mode.
# Only dashboard-related Secrets and ConfigMaps will still be available for writing.
#
# Turn OFF clusterAdminRole to use clusterReadOnlyRole.
#
  # The basic idea of clusterReadOnlyRole, compared to clusterAdminRole,
  # is not to hide all secrets and sensitive data but rather
  # to avoid accidental changes to the cluster outside the standard CI/CD.
  #
  # As with clusterAdminRole, it is NOT RECOMMENDED to use this version in production.
  # Instead you should review the role and remove all potentially sensitive parts such as
  # access to persistentvolumes, pods/log etc.
clusterReadOnlyRole: false
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name:
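  ## For example, to reuse a pre-existing ServiceAccount instead of creating one
  ## (the account name here is hypothetical):
  # create: false
  # name: my-existing-dashboard-sa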
livenessProbe:
# Number of seconds to wait before sending first probe
initialDelaySeconds: 30
# Number of seconds to wait for probe response
timeoutSeconds: 30
podDisruptionBudget:
# https://kubernetes.io/docs/tasks/run-application/configure-pdb/
enabled: false
minAvailable:
maxUnavailable:
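  ## For example, to keep at least one dashboard replica available during
  ## voluntary disruptions (illustrative; also raise replicaCount above 1):
  # enabled: true
  # minAvailable: 1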
## PodSecurityContext for pod level securityContext
##
# securityContext:
# runAsUser: 1001
# runAsGroup: 2001
securityContext: {}
networkPolicy: false
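## For example, setting this to true creates the chart's NetworkPolicy, which
## only admits ingress to the dashboard pods on TCP 9090 (see networkpolicy.yaml above):
# networkPolicy: true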
|
nginx-lego | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"nginx-lego.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 24 -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 24 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"nginx-lego.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 24 -}}\n{{- end -}}\n",
"# default-deployment.yaml\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"nginx-lego.fullname\" . }}-default-backend\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n replicas: {{ .Values.default.replicaCount }}\n template:\n metadata:\n labels:\n app: {{ template \"nginx-lego.fullname\" . }}-default-backend\n spec:\n containers:\n - name: {{ template \"nginx-lego.fullname\" . }}-default-backend\n image: \"{{ .Values.default.image.repository }}:{{ .Values.default.image.tag }}\"\n imagePullPolicy: {{ .Values.default.image.pullPolicy | quote }}\n ports:\n - containerPort: 8080\n resources:\n{{ toYaml .Values.default.resources | indent 10 }}",
"# default-service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"nginx-lego.fullname\" . }}-default-backend\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n type: ClusterIP\n ports:\n - port: 80\n targetPort: 8080\n selector:\n app: {{ template \"nginx-lego.fullname\" . }}-default-backend\n",
"# lego-configmap.yaml\n{{- if .Values.lego.enabled }}\napiVersion: v1\nmetadata:\n name: {{ template \"nginx-lego.fullname\" . }}-lego\ndata:\n # modify this to specify your address\n lego.email: {{ .Values.lego.configmap.email | quote }}\n # configure letencrypt's production api\n lego.url: {{ .Values.lego.configmap.url | quote }}\nkind: ConfigMap\n{{- end }}\n",
"# lego-deployment.yaml\n{{- if .Values.lego.enabled }}\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"nginx-lego.fullname\" . }}-lego\nspec:\n replicas: {{ .Values.lego.replicaCount }}\n template:\n metadata:\n labels:\n app: kube-lego\n spec:\n containers:\n - name: {{ .Chart.Name }}-lego\n image: \"{{ .Values.lego.image.repository }}:{{ .Values.lego.image.tag }}\"\n imagePullPolicy: {{ .Values.lego.image.pullPolicy }}\n ports:\n - containerPort: 8080\n env:\n - name: LEGO_EMAIL\n valueFrom:\n configMapKeyRef:\n name: {{ template \"nginx-lego.fullname\" . }}-lego\n key: lego.email\n - name: LEGO_URL\n valueFrom:\n configMapKeyRef:\n name: {{ template \"nginx-lego.fullname\" . }}-lego\n key: lego.url\n - name: LEGO_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: LEGO_POD_IP\n valueFrom:\n fieldRef:\n fieldPath: status.podIP\n resources:\n{{ toYaml .Values.nginx.resources | indent 10 }}\n{{- end }}\n",
"# nginx-configmap.yaml\napiVersion: v1\ndata:\n proxy-connect-timeout: {{ .Values.nginx.configmap.proxy_connect_timeout | quote }}\n proxy-read-timeout: {{ .Values.nginx.configmap.proxy_read_timeout | quote }}\n proxy-send-imeout: {{ .Values.nginx.configmap.proxy_send_imeout | quote }}\n hsts-include-subdomains: {{ .Values.nginx.configmap.hsts_include_subdomains | quote }}\n body-size: {{ .Values.nginx.configmap.body_size | quote }}\n server-name-hash-bucket-size: {{ .Values.nginx.configmap.server_name_hash_bucket_size | quote }}\n enable-vts-status: {{ .Values.nginx.configmap.enable_vts_status | quote }}\nkind: ConfigMap\nmetadata:\n name: {{ template \"nginx-lego.fullname\" . }}\n",
"# nginx-deployment.yaml\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"nginx-lego.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n replicas: {{ .Values.nginx.replicaCount }}\n template:\n metadata:\n labels:\n app: {{ template \"nginx-lego.fullname\" . }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag }}\"\n imagePullPolicy: {{ .Values.nginx.image.pullPolicy }}\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n ports:\n - containerPort: 80\n - containerPort: 443\n{{- if .Values.nginx.monitoring }}\n - containerPort: 8080\n{{- end }}\n resources:\n{{ toYaml .Values.nginx.resources | indent 10 }}\n args:\n - /nginx-ingress-controller\n - --default-backend-service={{ .Release.Namespace }}/{{ template \"nginx-lego.fullname\" . }}-default-backend\n - --nginx-configmap={{ .Release.Namespace }}/{{ template \"nginx-lego.fullname\" . }}",
"# nginx-monitoring.yaml\n{{- if .Values.nginx.monitoring }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"nginx-lego.fullname\" . }}-monitoring\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n type: ClusterIP\n ports:\n - port: 18080\n name: vts-stats\n selector:\n app: {{ template \"nginx-lego.fullname\" . }}\n{{- end }}",
"# nginx-service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"nginx-lego.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n type: {{ .Values.nginx.service.type | quote }}\n ports:\n - port: 80\n name: http\n - port: 443\n name: https\n selector:\n app: {{ template \"nginx-lego.fullname\" . }}"
] | ## nginx-lego spins up a scalable ingress provider that can also provision SSL certs
## See https://github.com/jetstack/kube-lego/tree/master/examples/nginx for more information on implementation
## Nginx configuration
## ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx#automated-certificate-management-with-kube-lego
##
nginx:
replicaCount: 1
image:
repository: k8s.gcr.io/nginx-ingress-controller
tag: "0.8.3"
pullPolicy: IfNotPresent
service:
type: LoadBalancer
monitoring: false
resources:
limits:
cpu: 1
memory: 2Gi
requests:
cpu: 1
memory: 128Mi
configmap:
proxy_connect_timeout: "30"
proxy_read_timeout: "600"
    proxy_send_timeout: "600"
hsts_include_subdomains: "false"
body_size: "64m"
server_name_hash_bucket_size: "256"
# TODO: figure out how to expose `{nginx_addr}:8080/nginx_status`, on existing service or create new one?
enable_vts_status: "false"
## Default Backend configuration
## To run a different 404 page for the managed domains please see the documentation below
## ref: https://github.com/kubernetes/contrib/tree/master/404-server
##
default:
replicaCount: 1
image:
repository: k8s.gcr.io/defaultbackend
tag: "1.0"
pullPolicy: IfNotPresent
resources:
limits:
cpu: 1
memory: 2Gi
requests:
cpu: 1
memory: 128Mi
## kube-lego configuration
## ref: https://github.com/jetstack/kube-lego
##
lego:
enabled: false
replicaCount: 1
image:
repository: jetstack/kube-lego
tag: "0.1.3"
pullPolicy: IfNotPresent
configmap:
email: "[email protected]"
# Production Let's Encrypt server
# url: "https://acme-v01.api.letsencrypt.org/directory"
# Test Let's Encrypt server
url: "https://acme-staging.api.letsencrypt.org/directory "
resources:
limits:
cpu: 1
memory: 2Gi
requests:
cpu: 1
memory: 128Mi
|
wavefront | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"wavefront.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"wavefront.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"wavefront.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a name for Wavefront Collector\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"wavefront.collector.fullname\" -}}\n{{- printf \"%s-collector\" (include \"wavefront.fullname\" .) | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a name for Wavefront Proxy\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"wavefront.proxy.fullname\" -}}\n{{- printf \"%s-proxy\" (include \"wavefront.fullname\" .) | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"wavefront.collector.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"wavefront.collector.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n\n",
"# api-token-secret.yaml\n{{- if .Values.wavefront.token }}\napiVersion: v1\nkind: Secret\nmetadata:\n labels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" . }}\n helm.sh/chart: {{ template \"wavefront.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io.instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: collector\n name: {{ template \"wavefront.fullname\" . }}\ntype: Opaque\ndata:\n api-token: {{ .Values.wavefront.token | b64enc | quote }}\n{{- end }}\n",
"# collector-cluster-role.yaml\n{{- if and .Values.rbac.create .Values.collector.enabled }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" . }}\n helm.sh/chart: {{ template \"wavefront.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io.instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: collector\n kubernetes.io/bootstrapping: rbac-defaults\n annotations:\n rbac.authorization.kubernetes.io/autoupdate: \"true\"\n name: {{ template \"wavefront.collector.fullname\" . }}\nrules:\n- apiGroups:\n - \"\"\n resources:\n - events\n - namespaces\n - nodes\n - nodes/stats\n - pods\n - services\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n verbs:\n - get\n - update\n - create\n- apiGroups:\n - extensions\n resources:\n - deployments\n verbs:\n - get\n - list\n - watch\n- nonResourceURLs: [\"/metrics\"]\n verbs:\n - get\n{{- end }}\n",
"# collector-config.yaml\n{{- if .Values.collector.enabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n labels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" . }}\n helm.sh/chart: {{ template \"wavefront.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io.instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: collector \n name: {{ template \"wavefront.collector.fullname\" . }}-config\ndata:\n config.yaml: |\n clusterName: {{ .Values.clusterName }} \n enableDiscovery: {{ .Values.collector.discovery.enabled }}\n defaultCollectionInterval: {{ .Values.collector.interval | default \"60s\" }}\n flushInterval: {{ .Values.collector.flushInterval | default \"10s\" }}\n sinkExportDataTimeout: {{ .Values.collector.sinkDelay | default \"20s\" }}\n\n sinks:\n {{- if .Values.collector.useProxy }}\n {{- if .Values.collector.proxyAddress }}\n - proxyAddress: {{ .Values.collector.proxyAddress }}\n {{- else }}\n - proxyAddress: {{ template \"wavefront.proxy.fullname\" . }}:{{ .Values.proxy.port }}\n {{- end }}\n {{- else }}\n - server: {{ .Values.wavefront.url }}\n token: {{ .Values.wavefront.token }}\n {{- end }}\n {{- if .Values.collector.tags }}\n tags:\n{{ tpl (toYaml .Values.collector.tags) . | indent 8 }}\n {{- end }} \n filters:\n # Filter out infrequently used kube-state-metrics.\n metricBlacklist:\n - 'kube.configmap.annotations.gauge'\n - 'kube.configmap.metadata.resource.version.gauge'\n - 'kube.endpoint.*'\n - 'kube.job.owner.gauge'\n - 'kube.job.labels.gauge'\n - 'kube.job.spec.completions.gauge'\n - 'kube.job.spec.parallelism.gauge'\n - 'kube.job.status.start.time.gauge'\n - 'kube.limitrange.*'\n - 'kube.namespace.annotations.gauge'\n - 'kube.persistentvolume.*'\n - 'kube.persistentvolumeclaim.*'\n - 'kube.pod.container.resource.limits.*'\n - 'kube.pod.container.*.reason.gauge'\n - 'kube.pod.owner.gauge'\n - 'kube.pod.start.time.gauge'\n - 'kube.pod.status.scheduled.gauge'\n - 'kube.pod.status.scheduled.time.gauge'\n - 'kube.replicationcontroller.created.gauge'\n - 'kube.replicationcontroller.metadata.generation.gauge'\n - 'kube.replicationcontroller.spec.replicas.gauge'\n - 'kube.resourcequota.*'\n - 'kube.secret.*'\n - 'kube.statefulset.*'\n - 'kube.storageclass.*'\n # Filter out generated labels\n tagExclude:\n - 'label?controller?revision*'\n - 'label?pod?template*'\n - 'annotation_kubectl_kubernetes_io_last_applied_configuration'\n\n sources:\n kubernetes_source:\n {{- if .Values.collector.useReadOnlyPort }} \n url: \n kubeletPort: 10255\n kubeletHttps: false\n {{- else }}\n url: https://kubernetes.default.svc\n kubeletPort: 10250\n kubeletHttps: true\n {{- end }}\n {{- if .Values.serviceAccount.create }}\n useServiceAccount: true\n {{- else }}\n useServiceAccount: false\n {{- end }}\n insecure: true\n prefix: kubernetes.\n filters:\n metricBlacklist:\n - 'kubernetes.sys_container.*'\n - 'kubernetes.node.ephemeral_storage.*' \n\n internal_stats_source:\n prefix: kubernetes.\n\n telegraf_sources:\n - plugins: [] \n\n {{- if .Values.collector.apiServerMetrics }}\n # Kubernetes API Server\n prometheus_sources:\n - url: https://kubernetes.default.svc.cluster.local:443/metrics\n httpConfig:\n bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n tls_config:\n ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n insecure_skip_verify: true\n prefix: kube.apiserver.\n filters:\n metricWhitelist:\n - 'kube.apiserver.apiserver.*'\n - 'kube.apiserver.etcd.*'\n - 'kube.apiserver.process.*' \n 
{{- end }}\n\n {{- if .Values.collector.discovery.enabled }}\n\n discovery:\n {{- if .Values.collector.discovery.annotationPrefix }}\n annotation_prefix: {{ .Values.collector.discovery.annotationPrefix | quote }}\n {{- end }}\n plugins:\n\n\n # auto-discover kube DNS\n - name: kube-dns-discovery\n type: prometheus\n selectors:\n images:\n - '*kube-dns/sidecar*'\n labels:\n k8s-app:\n - kube-dns\n port: 10054\n path: /metrics\n scheme: http\n prefix: kube.dns.\n filters:\n metricWhitelist:\n - 'kube.dns.http.request.duration.microseconds'\n - 'kube.dns.http.request.size.bytes'\n - 'kube.dns.http.requests.total.counter'\n - 'kube.dns.http.response.size.bytes'\n - 'kube.dns.kubedns.dnsmasq.*'\n - 'kube.dns.process.*' \n\n # auto-discover coredns\n - name: coredns-discovery\n type: prometheus\n selectors:\n images:\n - '*coredns:*'\n labels:\n k8s-app:\n - kube-dns\n port: 9153\n path: /metrics\n scheme: http\n prefix: kube.coredns. \n filters:\n metricWhitelist:\n - 'kube.coredns.coredns.cache.*'\n - 'kube.coredns.coredns.dns.request.count.total.counter'\n - 'kube.coredns.coredns.dns.request.duration.seconds'\n - 'kube.coredns.coredns.dns.request.size.bytes'\n - 'kube.coredns.coredns.dns.request.type.count.total.counter'\n - 'kube.coredns.coredns.dns.response.rcode.count.total.counter'\n - 'kube.coredns.coredns.dns.response.size.bytes' \n - 'kube.coredns.process.*' \n\n {{- if .Values.collector.discovery.config }}\n\n # user supplied discovery config\n{{ tpl (toYaml .Values.collector.discovery.config) . | indent 6 }}\n {{- end }}\n {{- end }}\n\n{{- end }}\n",
"# collector-daemonset.yaml\n{{- if and .Values.collector.enabled .Values.collector.useDaemonset }}\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n labels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" . }}\n helm.sh/chart: {{ template \"wavefront.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io.instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: collector\n name: {{ template \"wavefront.collector.fullname\" . }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" .}}\n app.kubernetes.io/component: collector\n template:\n metadata:\n labels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" .}}\n app.kubernetes.io/component: collector\n spec:\n tolerations:\n - effect: NoSchedule\n key: node.alpha.kubernetes.io/role\n operator: Exists\n - effect: NoSchedule\n key: node-role.kubernetes.io/master\n operator: Exists \n serviceAccountName: {{ template \"wavefront.collector.serviceAccountName\" . }}\n containers:\n - name: wavefront-collector\n image: {{ .Values.collector.image.repository }}:{{ .Values.collector.image.tag }}\n imagePullPolicy: {{ .Values.collector.image.pullPolicy }}\n command:\n - /wavefront-collector\n - --daemon=true\n - --config-file=/etc/collector/config.yaml\n {{- if .Values.collector.maxProcs }}\n - --max-procs={{ .Values.collector.maxProcs }}\n {{- end }}\n {{- if .Values.collector.logLevel }}\n - --log-level={{ .Values.collector.logLevel }}\n {{- end }}\n env:\n - name: HOST_PROC\n value: /host/proc\n - name: POD_NODE_NAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: spec.nodeName\n - name: POD_NAMESPACE_NAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.namespace \n ports:\n - containerPort: 8088\n protocol: TCP\n resources:\n{{ toYaml .Values.collector.resources | indent 10 }}\n volumeMounts:\n - name: procfs\n mountPath: /host/proc\n readOnly: true \n - name: config\n mountPath: /etc/collector/\n readOnly: true \n volumes:\n - name: procfs\n hostPath:\n path: /proc\n - name: config\n configMap:\n name: {{ template \"wavefront.collector.fullname\" . }}-config\n{{- end }}\n",
"# collector-deployment.yaml\n{{- if .Values.collector.enabled }}\n{{- if not .Values.collector.useDaemonset }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" . }}\n helm.sh/chart: {{ template \"wavefront.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io.instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: collector\n name: {{ template \"wavefront.collector.fullname\" . }}\nspec:\n replicas: 1\n selector:\n matchLabels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" .}}\n app.kubernetes.io/component: collector\n template:\n metadata:\n labels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" .}}\n app.kubernetes.io/component: collector\n spec:\n serviceAccountName: {{ template \"wavefront.collector.serviceAccountName\" . }}\n containers:\n - name: wavefront-collector\n image: {{ .Values.collector.image.repository }}:{{ .Values.collector.image.tag }}\n imagePullPolicy: {{ .Values.collector.image.pullPolicy }}\n command:\n - /wavefront-collector\n - --daemon=false\n - --config-file=/etc/collector/config.yaml\n {{- if .Values.collector.maxProcs }}\n - --max-procs={{ .Values.collector.maxProcs }}\n {{- end }}\n {{- if .Values.collector.logLevel }}\n - --log-level={{ .Values.collector.logLevel }}\n {{- end }}\n resources:\n{{ toYaml .Values.collector.resources | indent 10 }}\n volumeMounts:\n - name: config\n mountPath: /etc/collector/\n readOnly: true \n - name: ssl-certs\n mountPath: /etc/ssl/certs\n readOnly: true\n volumes:\n - name: config\n configMap:\n name: {{ template \"wavefront.collector.fullname\" . }}-config\n - name: ssl-certs\n hostPath:\n path: /etc/ssl/certs\n{{- end }}\n{{- end }}\n",
"# collector-rbac.yaml\n{{- if and .Values.rbac.create .Values.collector.enabled }}\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n labels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" . }}\n helm.sh/chart: {{ template \"wavefront.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io.instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: collector\n name: {{ template \"wavefront.collector.fullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"wavefront.collector.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"wavefront.collector.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}\n",
"# collector-service-account.yaml\n{{- if and .Values.serviceAccount.create .Values.collector.enabled }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" . }}\n helm.sh/chart: {{ template \"wavefront.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io.instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: collector\n name: {{ template \"wavefront.collector.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}\n",
"# proxy-deployment.yaml\n{{- if .Values.proxy.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" . }}\n helm.sh/chart: {{ template \"wavefront.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io.instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: proxy\n name: {{ template \"wavefront.proxy.fullname\" . }} \nspec:\n replicas: 1\n selector:\n matchLabels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" .}}\n app.kubernetes.io/component: proxy\n template:\n metadata:\n labels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" .}}\n app.kubernetes.io/component: proxy\n spec:\n containers:\n - name: wavefront-proxy\n image: {{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}\n imagePullPolicy: {{ .Values.proxy.image.pullPolicy }}\n env:\n - name: WAVEFRONT_URL\n value: {{ .Values.wavefront.url }}/api\n - name: WAVEFRONT_TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"wavefront.fullname\" . }}\n key: api-token\n - name: WAVEFRONT_PROXY_ARGS\n value: {{ .Values.proxy.args }}\n {{- if .Values.proxy.tracePort }} --traceListenerPorts {{ .Values.proxy.tracePort }}{{- end -}}\n {{- if .Values.proxy.jaegerPort }} --traceJaegerListenerPorts {{ .Values.proxy.jaegerPort }}{{- end -}}\n {{- if .Values.proxy.zipkinPort }} --traceZipkinListenerPorts {{ .Values.proxy.zipkinPort }}{{- end -}}\n {{- if .Values.proxy.traceSamplingRate }} --traceSamplingRate {{ .Values.proxy.traceSamplingRate }}{{- end -}}\n {{- if .Values.proxy.traceSamplingDuration }} --traceSamplingDuration {{ .Values.proxy.traceSamplingDuration }}{{- end -}}\n {{- if .Values.proxy.preprocessor }} --preprocessorConfigFile /etc/wavefront/wavefront-proxy/preprocessor/rules.yaml{{- end -}}\n {{- if .Values.proxy.heap }}\n - name: JAVA_HEAP_USAGE\n value: {{ .Values.proxy.heap | quote }}\n {{- end }}\n ports:\n - containerPort: {{ .Values.proxy.port }}\n protocol: TCP\n {{- if .Values.proxy.tracePort }}\n - containerPort: {{ .Values.proxy.tracePort }}\n protocol: TCP\n {{- end }}\n {{- if .Values.proxy.jaegerPort }}\n - containerPort: {{ .Values.proxy.jaegerPort }}\n protocol: TCP\n {{- end }}\n {{- if .Values.proxy.zipkinPort }}\n - containerPort: {{ .Values.proxy.zipkinPort }}\n protocol: TCP\n {{- end }}\n securityContext:\n privileged: false\n volumeMounts:\n {{- if .Values.proxy.preprocessor }}\n - name: preprocessor\n mountPath: /etc/wavefront/wavefront-proxy/preprocessor\n {{- end }}\n volumes:\n {{- if .Values.proxy.preprocessor }}\n - name: preprocessor\n configMap:\n name: {{ template \"wavefront.proxy.fullname\" . }}-preprocessor\n {{- end }}\n{{- end }}\n",
"# proxy-preprocessor-config.yaml\n{{- if .Values.proxy.preprocessor }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n labels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" . }}\n helm.sh/chart: {{ template \"wavefront.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io.instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: proxy\n name: {{ template \"wavefront.proxy.fullname\" . }}-preprocessor\ndata:\n{{ tpl (toYaml .Values.proxy.preprocessor) . | indent 2 }}\n{{- end }}\n",
"# proxy-service.yaml\n{{- if .Values.proxy.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" . }}\n helm.sh/chart: {{ template \"wavefront.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io.instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: proxy\n name: {{ template \"wavefront.proxy.fullname\" . }}\nspec:\n ports:\n - name: wavefront\n port: {{ .Values.proxy.port }}\n protocol: TCP\n {{- if .Values.proxy.tracePort }}\n - name: wavefront-trace\n port: {{ .Values.proxy.tracePort }}\n protocol: TCP\n {{- end }}\n {{- if .Values.proxy.jaegerPort }}\n - name: jaeger\n port: {{ .Values.proxy.jaegerPort }}\n protocol: TCP\n {{- end }}\n {{- if .Values.proxy.zipkinPort }}\n - name: zipkin\n port: {{ .Values.proxy.zipkinPort }}\n protocol: TCP\n {{- end }}\n selector:\n app.kubernetes.io/name : {{ template \"wavefront.fullname\" .}}\n app.kubernetes.io/component: proxy\n{{ end }}\n"
] | ## Default values for Wavefront
## This is a unique name for the cluster
## All metrics will receive a `cluster` tag with this value
## Required
clusterName: KUBERNETES_CLUSTER_NAME
## Wavefront URL (cluster) and API Token
## Required
wavefront:
url: https://YOUR_CLUSTER.wavefront.com
token: YOUR_API_TOKEN
## Wavefront Collector is responsible for getting all Kubernetes metrics from your cluster.
## It captures Kubernetes resource metrics available from the kubelets
## and also provides auto-discovery capabilities.
collector:
enabled: true
image:
repository: wavefronthq/wavefront-kubernetes-collector
tag: 1.0.3
pullPolicy: IfNotPresent
## If set to true, DaemonSet will be used for the collector.
## If set to false, Deployment will be used for the collector.
## Setting this to true is strongly recommended
useDaemonset: true
## max number of CPUs that can be used simultaneously. Less than 1 for default (number of cores)
# maxProcs: 0
## log level one of: info, debug, or trace. (default info)
# logLevel: info
  ## The interval at which the collector collects metrics. (default 60s)
# interval: 60s
## How often collected data is flushed (default 10s)
# flushInterval: 10s
## Timeout for exporting data (default 20s)
# sinkDelay: 20s
  ## If set to true, will use the unauthenticated read-only port for the kubelet
## If set to false, will use the encrypted full access port for the kubelet (default false)
# useReadOnlyPort: false
## If set to true, metrics will be sent to Wavefront via a Wavefront Proxy.
## When true you must either specify a value for `collector.proxyAddress` or set `proxy.enabled` to true
## If set to false, metrics will be sent to Wavefront via the Direct Ingestion API
useProxy: true
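  ## For example, to send metrics via Direct Ingestion instead of a proxy
  ## (illustrative; requires valid wavefront.url and wavefront.token above):
  # useProxy: false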
## Can be used to specify a specific address for the Wavefront Proxy
## The proxy can be anywhere network reachable including outside of the cluster
## Required if `collector.useProxy` is true and `proxy.enabled` is false
# proxyAddress: wavefront-proxy:2878
## If set to true Kubernetes API Server will also be scraped for metrics (default false)
# apiServerMetrics: false
## Map of tags to apply to all metrics collected by the collector (default empty)
# tags:
## sample tags to include (env, region)
# env: production
# region: us-west-2
## Rules based discovery configuration
## Ref: https://github.com/wavefrontHQ/wavefront-kubernetes-collector/blob/master/docs/discovery.md
discovery:
enabled: true
## When specified, this replaces `prometheus.io` as the prefix for annotations used to
## auto-discover Prometheus endpoints
# annotationPrefix: "wavefront.com"
## Can be used to add additional discovery rules
# config:
## auto-discover a sample prometheus application
# - name: prom-example
# type: prometheus
# selectors:
# labels:
# k8s-app:
# - prom-example
# port: 8080
# path: /metrics
# prefix: kube.prom-example.
# tags:
# alt_name: sample-app
## auto-discover mongodb pods (replace USER:PASSWORD)
# - name: mongodb
# type: telegraf/mongodb
# selectors:
# images:
# - '*mongodb:*'
# port: 27017
# conf: |
# servers = ["mongodb://USER:PASSWORD${host}:${port}"]
# gather_perdb_stats = true
# filters:
# metricBlacklist:
# - 'mongodb.member.status'
# - 'mongodb.state'
# - 'mongodb.db.stats.type'
## auto-discover rabbitmq pods (replace USER and PASSWORD)
# - name: rabbitmq
# type: telegraf/rabbitmq
# selectors:
# images:
# - '*rabbitmq:*'
# port: 15672
# conf: |
# url = "http://${host}:${port}"
# username = "USER"
# password = "PASSWORD"
## Wavefront Collector resource requests and limits
## Make sure to keep requests and limits equal to keep the pods in the Guaranteed QoS class
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources:
requests:
cpu: 200m
memory: 256Mi
limits:
cpu: 200m
memory: 256Mi
## Wavefront Proxy is a metrics forwarder that is used to relay metrics to the Wavefront SaaS service.
## It can receive metrics from the Wavefront Collector as well as other metrics collection services
## within your cluster. The proxy also supports preprocessor rules to allow you to further filter
## and enhance your metric names and tags. Should network connectivity fail between the proxy and the
## Wavefront SaaS service, the proxy will buffer metrics, which will be flushed when connectivity resumes.
## Ref: https://docs.wavefront.com/proxies.html
proxy:
enabled: true
image:
repository: wavefronthq/proxy
tag: 5.7
pullPolicy: IfNotPresent
## The port number the proxy will listen on for metrics in Wavefront data format.
## This is usually 2878
port: 2878
  ## The port number the proxy will listen on for tracing spans in Wavefront trace data format.
## This is usually 30000
# tracePort: 30000
  ## The port number the proxy will listen on for tracing spans in Jaeger data format.
## This is usually 30001
# jaegerPort: 30001
  ## The port number the proxy will listen on for tracing spans in Zipkin data format.
## This is usually 9411
# zipkinPort: 9411
## Sampling rate to apply to tracing spans sent to the proxy.
## This rate is applied to all data formats the proxy is listening on.
## Value should be between 0.0 and 1.0. Default is 1.0
# traceSamplingRate: 0.25
  ## When this is set to a value greater than 0,
  ## spans with a duration greater than or equal to this value will always be sampled.
# traceSamplingDuration: 500
  ## Any configuration property can be passed to the proxy via command line args
  ## in the format: `--<property_name> <value>`. Multiple properties can be specified
  ## separated by whitespace.
## Ref: https://docs.wavefront.com/proxies_configuring.html
# args:
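  ## For example (the property name below is an assumption; check the proxy docs):
  # args: --pushRateLimit 1000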
  ## The proxy is a Java application. By default Java will consume up to 4G of heap memory.
  ## This can be used to override the default. Sets the `-Xmx` command line option for Java
# heap: 1024m
  ## Preprocessor rules are a powerful way to apply filtering or to enhance metrics as they flow
  ## through the proxy. You can configure the rules here. By default a rule to drop Kubernetes
  ## generated labels is applied to remove unnecessary and often noisy tags.
## Ref: https://docs.wavefront.com/proxies_preprocessor_rules.html
# preprocessor:
# rules.yaml: |
# '2878':
# # fix %2F to be a / instead. May be required on EKS.
# - rule : fix-forward-slash
# action : replaceRegex
# scope : pointLine
# search : "%2F"
# replace : "/"
# # replace bad characters ("&", "$", "!", "@") with underscores in the entire point line string
# - rule : replace-badchars
# action : replaceRegex
# scope : pointLine
# search : "[&\\$!@]"
# replace : "_"
## Specifies whether RBAC resources should be created
rbac:
create: true
## Specifies whether a ServiceAccount should be created
serviceAccount:
create: true
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the fullname template
name:
## kube-state-metrics is used to get metrics about the state of the objects in the Kubernetes cluster
## If enabled the kube-state-metrics chart will be installed as a subchart and the collector
## will be configured to capture metrics.
kubeStateMetrics:
enabled: true
|
ghost | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"ghost.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"ghost.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"ghost.mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nGet the user defined LoadBalancerIP for this release.\nNote, returns 127.0.0.1 if using ClusterIP.\n*/}}\n{{- define \"ghost.serviceIP\" -}}\n{{- if eq .Values.service.type \"ClusterIP\" -}}\n127.0.0.1\n{{- else -}}\n{{- .Values.service.loadBalancerIP | default \"\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGets the host to be used for this application.\nIf not using ClusterIP, or if a host or LoadBalancerIP is not defined, the value will be empty.\n*/}}\n{{- define \"ghost.host\" -}}\n{{- if .Values.ghostHost -}}\n{{- $host := printf \"%s%s\" .Values.ghostHost .Values.ghostPath -}}\n{{- default (include \"ghost.serviceIP\" .) $host -}}\n{{- else -}}\n{{- default (include \"ghost.serviceIP\" .) 
\"\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"ghost.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the proper Ghost image name\n*/}}\n{{- define \"ghost.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name to change the volume permissions\n*/}}\n{{- define \"ghost.volumePermissions.image\" -}}\n{{- $registryName := .Values.volumePermissions.image.registry -}}\n{{- $repositoryName := .Values.volumePermissions.image.repository -}}\n{{- $tag := .Values.volumePermissions.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"ghost.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.volumePermissions.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.volumePermissions.image.pullSecrets }}\n - name: {{ . 
}}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"ghost.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"ghost.deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\n{{- if include \"ghost.host\" . -}}\napiVersion: {{ template \"ghost.deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"ghost.fullname\" . }}\n labels:\n app: \"{{ template \"ghost.fullname\" . }}\"\n chart: \"{{ template \"ghost.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n selector:\n matchLabels:\n app: \"{{ template \"ghost.fullname\" . }}\"\n release: {{ .Release.Name | quote }}\n replicas: 1\n template:\n metadata:\n labels:\n app: \"{{ template \"ghost.fullname\" . }}\"\n chart: \"{{ template \"ghost.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n spec:\n {{- if .Values.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.securityContext.fsGroup }}\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- else }}\n initContainers:\n - name: volume-permissions\n image: {{ template \"ghost.volumePermissions.image\" . }}\n imagePullPolicy: \"{{ .Values.volumePermissions.image.pullPolicy }}\"\n command: ['sh', '-c', 'chmod -R g+rwX {{ .Values.persistence.path }}']\n volumeMounts:\n - mountPath: {{ .Values.persistence.path }}\n name: ghost-data\n {{- end }}\n{{- include \"ghost.imagePullSecrets\" . | indent 6 }}\n containers:\n - name: {{ template \"ghost.fullname\" . }}\n image: {{ template \"ghost.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n - name: ALLOW_EMPTY_PASSWORD\n {{- if .Values.allowEmptyPassword }}\n value: \"yes\"\n {{- else }}\n value: \"no\"\n {{- end }}\n - name: MARIADB_HOST\n {{- if .Values.mariadb.enabled }}\n value: {{ template \"ghost.mariadb.fullname\" . }}\n {{- else }}\n value: {{ .Values.externalDatabase.host | quote }}\n {{- end }}\n - name: MARIADB_PORT_NUMBER\n {{- if .Values.mariadb.enabled }}\n value: \"3306\"\n {{- else }}\n value: {{ .Values.externalDatabase.port | quote }}\n {{- end }}\n - name: GHOST_DATABASE_NAME\n {{- if .Values.mariadb.enabled }}\n value: {{ .Values.mariadb.db.name | quote }}\n {{- else }}\n value: {{ .Values.externalDatabase.database | quote }}\n {{- end }}\n - name: GHOST_DATABASE_USER\n {{- if .Values.mariadb.enabled }}\n value: {{ .Values.mariadb.db.user | quote }}\n {{- else }}\n value: {{ .Values.externalDatabase.user | quote }}\n {{- end }}\n - name: GHOST_DATABASE_PASSWORD\n {{- if .Values.mariadb.enabled }}\n valueFrom:\n secretKeyRef:\n name: {{ template \"ghost.mariadb.fullname\" . }}\n key: mariadb-password\n {{- else }}\n value: {{ .Values.externalDatabase.password | quote }}\n {{- end }}\n - name: GHOST_HOST\n value: {{ include \"ghost.host\" . | quote }}\n - name: GHOST_PROTOCOL\n value: {{ .Values.ghostProtocol | quote }}\n - name: GHOST_PORT_NUMBER\n {{- if .Values.ghostPort }}\n value: {{ .Values.ghostPort | quote }}\n {{- else }}\n value: {{ .Values.service.port | quote }}\n {{- end }}\n - name: GHOST_USERNAME\n value: {{ .Values.ghostUsername | quote }}\n - name: GHOST_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"ghost.fullname\" . 
}}\n key: ghost-password\n - name: GHOST_EMAIL\n value: {{ .Values.ghostEmail | quote }}\n - name: BLOG_TITLE\n value: {{ .Values.ghostBlogTitle | quote }}\n {{- if .Values.smtpHost }}\n - name: SMTP_HOST\n value: {{ .Values.smtpHost | quote }}\n {{- end }}\n {{- if .Values.smtpPort }}\n - name: SMTP_PORT\n value: {{ .Values.smtpPort | quote }}\n {{- end }}\n {{- if .Values.smtpUser }}\n - name: SMTP_USER\n value: {{ .Values.smtpUser | quote }}\n {{- end }}\n {{- if .Values.smtpPassword }}\n - name: SMTP_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"ghost.fullname\" . }}\n key: smtp-password\n {{- end }}\n {{- if .Values.smtpFromAddress }}\n - name: SMTP_FROM_ADDRESS\n value: {{ .Values.smtpFromAddress | quote }}\n {{- end }}\n {{- if .Values.smtpService }}\n - name: SMTP_SERVICE\n value: {{ .Values.smtpService | quote }}\n {{- end }}\n ports:\n - name: http\n containerPort: 2368\n {{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: {{ .Values.ghostPath }}\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"ghost.host\" . | quote }}\n {{- if eq .Values.ghostProtocol \"https\" }}\n - name: X-Forwarded-Proto\n value: https\n {{- end }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: {{ .Values.ghostPath }}\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"ghost.host\" . | quote }}\n {{- if eq .Values.ghostProtocol \"https\" }}\n - name: X-Forwarded-Proto\n value: https\n {{- end }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.resources }}\n resources: {{- toYaml .Values.resources | nindent 10 }}\n {{- end }}\n volumeMounts:\n - name: ghost-data\n mountPath: /bitnami/ghost\n volumes:\n - name: ghost-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"ghost.fullname\" . }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n{{- end -}}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"ghost.fullname\" . }}\n labels:\n app: \"{{ template \"ghost.fullname\" . }}\"\n chart: \"{{ template \"ghost.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n annotations:\n {{- if .Values.ingress.certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: {{ template \"ghost.fullname\" $ }}\n servicePort: http\n {{- end }}\n tls:\n {{- range .Values.ingress.hosts }}\n {{- if .tls }}\n - hosts:\n {{- if .tlsHosts }}\n {{- range $host := .tlsHosts }}\n - {{ $host }}\n {{- end }}\n {{- else }}\n - {{ .name }}\n {{- end }}\n secretName: {{ .tlsSecret }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# pvc.yaml\n{{- if .Values.persistence.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"ghost.fullname\" . }}\n labels:\n app: \"{{ template \"ghost.fullname\" . }}\"\n chart: \"{{ template \"ghost.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{ include \"ghost.storageClass\" . }}\n{{- end -}}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"ghost.fullname\" . }}\n labels:\n app: \"{{ template \"ghost.fullname\" . }}\"\n chart: \"{{ template \"ghost.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ntype: Opaque\ndata:\n {{- if .Values.ghostPassword }}\n ghost-password: {{ .Values.ghostPassword | b64enc | quote }}\n {{- else }}\n ghost-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{- end }}\n {{- if .Values.smtpPassword }}\n smtp-password: {{ default \"\" .Values.smtpPassword | b64enc | quote }}\n {{- end }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"ghost.fullname\" . }}\n labels:\n app: \"{{ template \"ghost.fullname\" . }}\"\n chart: \"{{ template \"ghost.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n annotations:\n {{- range $key, $value := .Values.service.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n\nspec:\n type: {{ .Values.service.type }}\n {{- if (or (eq .Values.service.type \"LoadBalancer\") (eq .Values.service.type \"NodePort\")) }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}\n {{- end }}\n {{- if eq .Values.service.type \"LoadBalancer\" }}\n loadBalancerIP: {{ default \"\" .Values.service.loadBalancerIP | quote }}\n {{- end }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n targetPort: http\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n selector:\n app: \"{{ template \"ghost.fullname\" . }}\"\n"
] | ## Global Docker image parameters
## Please note that this will override the image parameters, including those of dependencies, that are configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami Ghost image version
## ref: https://hub.docker.com/r/bitnami/ghost/tags/
##
image:
registry: docker.io
repository: bitnami/ghost
tag: 3.9.0-debian-10-r0
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override ghost.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override ghost.fullname template
##
# fullnameOverride:
## Init containers parameters:
## volumePermissions: Change the owner of the persistent volume mountpoint to runAsUser:fsGroup
##
volumePermissions:
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Ghost protocol, host, port and path to create application URLs
## ref: https://github.com/bitnami/bitnami-docker-ghost#configuration
##
ghostProtocol: http
# ghostHost:
# ghostPort:
ghostPath: /
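## Example (illustrative; blog.example.com is a placeholder host) of serving the
## blog at https://blog.example.com/ instead of the defaults above:
# ghostProtocol: https
# ghostHost: blog.example.com
# ghostPort: 443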
## User of the application
## ref: https://github.com/bitnami/bitnami-docker-ghost#configuration
##
ghostUsername: user@example.com
## Application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/bitnami-docker-ghost#configuration
##
# ghostPassword:
## Admin email
## ref: https://github.com/bitnami/bitnami-docker-ghost#configuration
##
ghostEmail: user@example.com
## Ghost Blog name
## ref: https://github.com/bitnami/bitnami-docker-ghost#environment-variables
##
ghostBlogTitle: User's Blog
## Set to `yes` to allow the container to be started with blank passwords
## ref: https://github.com/bitnami/bitnami-docker-ghost#environment-variables
allowEmptyPassword: "yes"
## SMTP mail delivery configuration
## ref: https://github.com/bitnami/bitnami-docker-ghost#configuration
##
# smtpHost:
# smtpPort:
# smtpUser:
# smtpPassword:
# smtpFromAddress:
# smtpService:
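## Example (illustrative; host and addresses are placeholders) of enabling SMTP
## delivery. Note that smtpPassword is stored in the chart-managed Secret and
## injected via secretKeyRef rather than as a plain environment variable:
# smtpHost: smtp.example.com
# smtpPort: 587
# smtpUser: ghost@example.com
# smtpPassword: some-password
# smtpFromAddress: ghost@example.com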
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe:
enabled: true
initialDelaySeconds: 120
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 6
successThreshold: 1
##
## External database configuration
##
externalDatabase:
## All of these values are only used when mariadb.enabled is set to false
## Database host
host: localhost
  ## Non-root username for the Ghost database
user: bn_ghost
## Database password
password: ""
## Database name
database: bitnami_ghost
## Database port number
port: 3306
##
## MariaDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml
##
mariadb:
  ## Whether to deploy a MariaDB server to satisfy the application's database
  ## requirements. To use an external database, set this to false and configure
  ## the externalDatabase parameters above.
enabled: true
## Disable MariaDB replication
replication:
enabled: false
## Create a database and a database user
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run
##
db:
name: bitnami_ghost
user: bn_ghost
    ## If the password is not specified, MariaDB will generate a random password
##
# password:
## MariaDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run
##
# rootUser:
# password:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
master:
persistence:
enabled: true
## mariadb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Kubernetes configuration
## For Minikube, set this to NodePort; elsewhere, use LoadBalancer
##
service:
type: LoadBalancer
# HTTP Port
port: 80
## loadBalancerIP:
##
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
nodePorts:
http: ""
## Enable client source IP preservation
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## Service annotations done as key:value pairs
annotations:
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## ghost data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
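  ## e.g. to pin a specific class ("standard" is an illustrative class name):
  # storageClass: "standard"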
accessMode: ReadWriteOnce
size: 8Gi
path: /bitnami
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 512Mi
cpu: 300m
## Configure the ingress resource that allows you to access the
## Ghost installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- name: ghost.local
path: /
## Set this to true in order to enable TLS on the ingress record
tls: false
## Optionally specify the TLS hosts for the ingress record
## Useful when the Ingress controller supports www-redirection
## If not specified, the above host name will be used
# tlsHosts:
# - www.ghost.local
# - ghost.local
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: ghost.local-tls
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
  ## The certificate should start with -----BEGIN CERTIFICATE----- and the
  ## key with -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: ghost.local-tls
# key:
# certificate:
## Node selector for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
##
nodeSelector: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
|
fluentd | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"fluentd.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"fluentd.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"fluentd.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"fluentd.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"fluentd.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create -}}\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ include \"fluentd.fullname\" . }}\n labels:\n app: {{ template \"fluentd.name\" . }}\n chart: {{ template \"fluentd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nrules:\n- apiGroups:\n - \"\"\n resources:\n - \"namespaces\"\n - \"pods\"\n verbs:\n - \"get\"\n - \"watch\"\n - \"list\"\n{{- end -}}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create -}}\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"fluentd.fullname\" . }}\n labels:\n app: {{ template \"fluentd.name\" . }}\n chart: {{ template \"fluentd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"fluentd.fullname\" . }}\n namespace: {{ .Release.Namespace }}\nroleRef:\n kind: ClusterRole\n name: {{ template \"fluentd.fullname\" . }}\n apiGroup: rbac.authorization.k8s.io\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"fluentd.fullname\" . }}\n labels:\n app: {{ template \"fluentd.name\" . }}\n chart: {{ template \"fluentd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n{{- range $key, $value := .Values.configMaps }}\n {{ $key }}: |-\n{{ $value | indent 4 }}\n{{- end }}\n{{- if .Values.metrics.enabled }}\n metrics.conf: |\n <source>\n @type prometheus\n port {{ .Values.metrics.service.port }}\n </source>\n\n <source>\n @type prometheus_monitor\n </source>\n\n <source>\n @type prometheus_output_monitor\n </source>\n{{- end }}\n{{- if and (.Values.plugins.enabled) (gt (len .Values.plugins.pluginsList) 0) }}\n install-plugins.sh: |-\n #!/bin/sh\n {{- range $plugin := .Values.plugins.pluginsList }}\n fluent-gem install {{ $plugin }}\n {{- end }}\n exec /run.sh\n{{- end }}\n",
"# deployment.yaml\n{{- $statefulSet := or (.Values.autoscaling.enabled) (.Values.useStatefulSet) -}}\napiVersion: apps/v1\n{{- if $statefulSet }}\nkind: StatefulSet\n{{- else}}\nkind: Deployment\n{{- end}}\nmetadata:\n name: {{ template \"fluentd.fullname\" . }}\n labels:\n app: {{ template \"fluentd.name\" . }}\n chart: {{ template \"fluentd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n{{- if not .Values.autoscaling.enabled }}\n replicas: {{ .Values.replicaCount }}\n{{- end }}\n{{- if .Values.autoscaling.enabled }}\n serviceName: {{ template \"fluentd.name\" . }}\n{{- end }}\n selector:\n matchLabels:\n app: {{ template \"fluentd.name\" . }}\n release: {{ .Release.Name }}\n {{- if and .Values.persistence.enabled (not .Values.autoscaling.enabled) }}\n strategy:\n type: Recreate\n {{- end }}\n template:\n metadata:\n labels:\n app: {{ template \"fluentd.name\" . }}\n release: {{ .Release.Name }}\n{{- with .Values.deployment.labels }}\n{{ toYaml . | indent 8 }}\n{{- end }}\n annotations:\n checksum/configmap: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n {{- if .Values.annotations }}\n {{- toYaml .Values.annotations | nindent 8 }}\n {{- end }}\n spec:\n{{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range $pullSecret := .Values.image.pullSecrets }}\n - name: {{ $pullSecret }}\n {{- end }}\n{{- end }}\n terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n {{- if and (.Values.plugins.enabled) (gt (len .Values.plugins.pluginsList) 0) }}\n command: [\"/bin/sh\", \"-c\", \"/etc/fluent/config.d/install-plugins.sh\"]\n {{- end }}\n env:\n - name: OUTPUT_HOST\n value: {{ .Values.output.host | quote }}\n - name: OUTPUT_PORT\n value: {{ .Values.output.port | quote }}\n - name: OUTPUT_SCHEME\n value: {{ .Values.output.scheme | quote }}\n - name: OUTPUT_SSL_VERSION\n value: {{ .Values.output.sslVersion | quote }}\n - name: OUTPUT_BUFFER_CHUNK_LIMIT\n value: {{ .Values.output.buffer_chunk_limit | quote }}\n - name: OUTPUT_BUFFER_QUEUE_LIMIT\n value: {{ .Values.output.buffer_queue_limit | quote }}\n {{- range $key, $value := .Values.env }}\n - name: {{ $key | quote }}\n value: {{ $value | quote }}\n {{- end }}\n {{- if .Values.extraEnvVars }}\n{{ toYaml .Values.extraEnvVars | indent 10 }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n ports:\n{{- range $port := .Values.service.ports }}\n - name: {{ $port.name }}\n containerPort: {{ $port.containerPort }}\n protocol: {{ $port.protocol }}\n{{- end }}\n{{- if .Values.metrics.enabled }}\n - name: metrics\n containerPort: {{ .Values.metrics.service.port }}\n protocol: TCP\n{{- end }}\n - name: http-input\n containerPort: 9880\n protocol: TCP\n livenessProbe:\n httpGet:\n # Use percent encoding for query param.\n # The value is {\"log\": \"health check\"}.\n # the endpoint itself results in a new fluentd\n # tag 'fluentd.pod-healthcheck'\n path: /fluentd.pod.healthcheck?json=%7B%22log%22%3A+%22health+check%22%7D\n port: 9880\n initialDelaySeconds: 5\n timeoutSeconds: 1\n volumeMounts:\n - name: config-volume-{{ template \"fluentd.fullname\" . 
}}\n mountPath: /etc/fluent/config.d\n - name: buffer\n mountPath: \"/var/log/fluentd-buffers\"\n{{- if .Values.extraVolumeMounts }}\n{{ toYaml .Values.extraVolumeMounts | indent 8 }}\n{{- end }}\n serviceAccountName: {{ template \"fluentd.fullname\" . }}\n volumes:\n - name: config-volume-{{ template \"fluentd.fullname\" . }}\n configMap:\n name: {{ template \"fluentd.fullname\" . }}\n defaultMode: 0777\n {{- if and .Values.persistence.enabled (not $statefulSet) }}\n - name: buffer\n persistentVolumeClaim:\n claimName: {{ template \"fluentd.fullname\" . }}\n {{- else if (not .Values.persistence.enabled) }}\n - name: buffer\n emptyDir: {}\n {{- end }}\n{{- if .Values.extraVolumes }}\n{{ toYaml .Values.extraVolumes | indent 8 }}\n{{- end }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n{{- if and .Values.persistence.enabled ($statefulSet) }}\n volumeClaimTemplates:\n - metadata:\n name: buffer\n spec:\n accessModes: [{{ .Values.persistence.accessMode }}]\n storageClassName: {{ .Values.persistence.storageClass }}\n resources:\n requests:\n storage: {{ .Values.persistence.size }}\n{{- end }}\n",
"# hpa.yaml\n{{- if and .Values.autoscaling.enabled}}\napiVersion: autoscaling/v2beta2\nkind: HorizontalPodAutoscaler\nmetadata:\n name: {{ include \"fluentd.fullname\" . }}\n labels:\n app: {{ template \"fluentd.name\" . }}\n chart: {{ template \"fluentd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n scaleTargetRef:\n apiVersion: apps/v1\n kind: StatefulSet\n name: {{ include \"fluentd.fullname\" . }}\n minReplicas: {{ .Values.autoscaling.minReplicas }}\n maxReplicas: {{ .Values.autoscaling.maxReplicas }}\n metrics:\n {{- toYaml .Values.autoscaling.metrics | nindent 4 }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $serviceName := include \"fluentd.fullname\" . -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"fluentd.fullname\" . }}\n labels:\n app: {{ template \"fluentd.name\" . }}\n chart: {{ template \"fluentd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.ingress.labels }}\n{{ toYaml .Values.ingress.labels | indent 4 }}\n{{- end }}\n{{- if .Values.ingress.annotations }}\n annotations:\n{{ tpl ( toYaml .Values.ingress.annotations | indent 4 ) . }}\n{{- end }}\nspec:\n rules:\n {{- range $host := .Values.ingress.hosts }}\n - http:\n paths:\n - path: {{ $host.path | default \"/\" }}\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $host.servicePort }}\n {{- if (not (empty $host.name)) }}\n host: {{ $host.name }}\n {{- end -}}\n {{- end -}}\n {{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}",
"# pvc.yaml\n{{- $statefulSet := or (.Values.autoscaling.enabled) (.Values.useStatefulSet) -}}\n{{- if and .Values.persistence.enabled (not $statefulSet) -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"fluentd.fullname\" . }}\n labels:\n app: {{ template \"fluentd.name\" . }}\n chart: {{ template \"fluentd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.persistence.annotations }}\n annotations:\n{{ toYaml .Values.persistence.annotations | indent 4 }}\n{{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# role.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n name: {{ template \"fluentd.fullname\" . }}\n labels:\n app: {{ template \"fluentd.name\" . }}\n chart: {{ template \"fluentd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nrules:\n- apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames:\n - {{ template \"fluentd.fullname\" . }}\n{{- end }}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n name: {{ template \"fluentd.fullname\" . }}\n labels:\n app: {{ template \"fluentd.name\" . }}\n chart: {{ template \"fluentd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nroleRef:\n kind: Role\n name: {{ template \"fluentd.fullname\" . }}\n apiGroup: rbac.authorization.k8s.io\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"fluentd.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"fluentd.fullname\" . }}\n labels:\n app: {{ template \"fluentd.name\" . }}\n chart: {{ template \"fluentd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\nspec:\n type: {{ .Values.service.type }}\n {{ if (and (eq .Values.service.type \"LoadBalancer\") (not (empty .Values.service.loadBalancerIP))) }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n {{ end }}\n ports:\n {{- range $port := .Values.service.ports }}\n - name: {{ $port.name }}\n port: {{ $port.containerPort }}\n targetPort: {{ $port.containerPort }}\n protocol: {{ $port.protocol }}\n {{- end }}\n {{ if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePort))) }}\n nodePort: {{ .Values.service.nodePort }}\n {{ end }}\n {{- if .Values.metrics.enabled }}\n - name: metrics\n port: {{ .Values.metrics.service.port }}\n targetPort: metrics\n protocol: TCP\n {{- end }}\n selector:\n app: {{ template \"fluentd.name\" . }}\n release: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ include \"fluentd.serviceAccountName\" . }}\n labels:\n app: {{ template \"fluentd.name\" . }}\n chart: {{ template \"fluentd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- end -}}\n",
"# servicemonitor.yaml\n{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ include \"fluentd.fullname\" . }}\n {{- if .Values.metrics.serviceMonitor.namespace }}\n namespace: {{ .Values.metrics.serviceMonitor.namespace }}\n {{- end }}\n labels:\n app: {{ template \"fluentd.name\" . }}\n chart: {{ template \"fluentd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- if .Values.metrics.serviceMonitor.additionalLabels }}\n{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }}\n {{- end }}\nspec:\n endpoints:\n - port: metrics\n {{- if .Values.metrics.serviceMonitor.interval }}\n interval: {{ .Values.metrics.serviceMonitor.interval }}\n {{- end }}\n {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}\n scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}\n {{- end }}\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n selector:\n matchLabels:\n app: {{ include \"fluentd.name\" . }}\n release: {{ .Release.Name }}\n{{- end }}\n"
] | # Default values for fluentd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: gcr.io/google-containers/fluentd-elasticsearch
tag: v2.4.0
pullPolicy: IfNotPresent
# pullSecrets:
# - secret1
# - secret2
output:
host: elasticsearch-client.default.svc.cluster.local
port: 9200
scheme: http
sslVersion: TLSv1
buffer_chunk_limit: 2M
buffer_queue_limit: 8
env: {}
# Extra Environment Values - allows yaml definitions
extraEnvVars:
# - name: VALUE_FROM_SECRET
# valueFrom:
# secretKeyRef:
# name: secret_name
# key: secret_key
# extraVolumes:
# - name: es-certs
# secret:
# defaultMode: 420
# secretName: es-certs
# extraVolumeMounts:
# - name: es-certs
# mountPath: /certs
# readOnly: true
plugins:
enabled: false
pluginsList: []
service:
annotations: {}
type: ClusterIP
# loadBalancerIP:
# type: NodePort
# nodePort:
# Used to create Service records
ports:
- name: "monitor-agent"
protocol: TCP
containerPort: 24220
metrics:
enabled: false
service:
port: 24231
serviceMonitor:
enabled: false
additionalLabels: {}
# namespace: monitoring
# interval: 30s
# scrapeTimeout: 10s
annotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "24231"
# Pod Labels
deployment:
labels: {}
ingress:
enabled: false
annotations:
kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# # Depending on which version of ingress controller you may need to configure properly - https://kubernetes.github.io/ingress-nginx/examples/rewrite/#rewrite-target
# nginx.ingress.kubernetes.io/rewrite-target: /
labels: []
# If doing TCP or UDP ingress rule don't forget to update your Ingress Controller to accept TCP connections - https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/
hosts:
# - name: "http-input.local"
# protocol: TCP
# servicePort: 9880
# path: /
tls: {}
# Secrets must be manually created in the namespace.
# - secretName: http-input-tls
# hosts:
# - http-input.local
configMaps:
general.conf: |
    # Prevent fluentd from handling records containing its own logs. Otherwise
    # it can lead to an infinite loop, when an error in sending one message generates
    # another message which also fails to be sent, and so on.
<match fluentd.**>
@type null
</match>
# Used for health checking
<source>
@type http
port 9880
bind 0.0.0.0
</source>
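    # A probe or manual check can exercise this endpoint with a percent-encoded
    # JSON payload; this mirrors the liveness probe in the deployment template,
    # e.g. (illustrative):
    #   curl 'http://127.0.0.1:9880/fluentd.pod.healthcheck?json=%7B%22log%22%3A+%22health+check%22%7D'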
    # Emits internal metrics every minute, and also exposes them on port
    # 24220. Useful for determining if an output plugin is retrying/erroring,
    # or determining the buffer queue length.
<source>
@type monitor_agent
bind 0.0.0.0
port 24220
tag fluentd.monitor.metrics
</source>
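    # The monitor_agent API can be queried directly for plugin and buffer state,
    # e.g. (illustrative): curl http://127.0.0.1:24220/api/plugins.json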
system.conf: |-
<system>
root_dir /tmp/fluentd-buffers/
</system>
forward-input.conf: |
<source>
@type forward
port 24224
bind 0.0.0.0
</source>
output.conf: |
<match **>
@id elasticsearch
@type elasticsearch
@log_level info
include_tag_key true
# Replace with the host/port to your Elasticsearch cluster.
host "#{ENV['OUTPUT_HOST']}"
port "#{ENV['OUTPUT_PORT']}"
scheme "#{ENV['OUTPUT_SCHEME']}"
ssl_version "#{ENV['OUTPUT_SSL_VERSION']}"
logstash_format true
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
retry_type exponential_backoff
flush_thread_count 2
flush_interval 5s
retry_forever
retry_max_interval 30
chunk_limit_size "#{ENV['OUTPUT_BUFFER_CHUNK_LIMIT']}"
queue_limit_length "#{ENV['OUTPUT_BUFFER_QUEUE_LIMIT']}"
overflow_action block
</buffer>
</match>
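# The "#{ENV['OUTPUT_HOST']}" style interpolation above is resolved by fluentd's
# embedded Ruby when the config is loaded; the OUTPUT_* variables are injected by
# the deployment template from the `output:` block above. So, for example
# (illustrative), passing `--set output.host=es-client.logging.svc` at install
# time repoints the Elasticsearch output without editing this config.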
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 500m
# memory: 200Mi
# requests:
# cpu: 500m
# memory: 200Mi
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
## Persist data to a persistent volume
persistence:
enabled: false
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
# annotations: {}
accessMode: ReadWriteOnce
size: 10Gi
nodeSelector: {}
tolerations: []
affinity: {}
# Enable autoscaling using HorizontalPodAutoscaler
autoscaling:
enabled: false
minReplicas: 2
maxReplicas: 5
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 60
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 60
# Consider setting a higher value when using in conjunction with autoscaling
# Full description of this field: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#pod-v1-core
terminationGracePeriodSeconds: 30
|
opa | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"opa.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"opa.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{- define \"opa.sarfullname\" -}}\n{{- $name := (include \"opa.fullname\" . | trunc 59 | trimSuffix \"-\") -}}\n{{- printf \"%s-sar\" $name -}}\n{{- end -}}\n\n{{- define \"opa.mgmtfullname\" -}}\n{{- $name := (include \"opa.fullname\" . | trunc 58 | trimSuffix \"-\") -}}\n{{- printf \"%s-mgmt\" $name -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"opa.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nDefine standard labels for frequently used metadata.\n*/}}\n{{- define \"opa.labels.standard\" -}}\napp: {{ template \"opa.fullname\" . }}\nchart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nrelease: \"{{ .Release.Name }}\"\nheritage: \"{{ .Release.Service }}\"\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"opa.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"opa.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{- define \"opa.selfSignedIssuer\" -}}\n{{ printf \"%s-selfsign\" (include \"opa.fullname\" .) }}\n{{- end -}}\n\n{{- define \"opa.rootCAIssuer\" -}}\n{{ printf \"%s-ca\" (include \"opa.fullname\" .) }}\n{{- end -}}\n\n{{- define \"opa.rootCACertificate\" -}}\n{{ printf \"%s-ca\" (include \"opa.fullname\" .) }}\n{{- end -}}\n\n{{- define \"opa.servingCertificate\" -}}\n{{ printf \"%s-webhook-tls\" (include \"opa.fullname\" .) }}\n{{- end -}}\n\n{{/*\nDetect the version of cert manager crd that is installed\nError if CRD is not available\n*/}}\n{{- define \"opa.certManagerApiVersion\" -}}\n{{- if (.Capabilities.APIVersions.Has \"cert-manager.io/v1alpha3\") -}}\ncert-manager.io/v1alpha3\n{{- else if (.Capabilities.APIVersions.Has \"cert-manager.io/v1alpha2\") -}}\ncert-manager.io/v1alpha2\n{{- else if (.Capabilities.APIVersions.Has \"certmanager.k8s.io/v1alpha1\") -}}\ncertmanager.k8s.io/v1alpha1\n{{- else -}}\n{{- fail \"cert-manager CRD does not appear to be installed\" }}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"opa.fullname\" . }}\n labels:\n{{ include \"opa.labels.standard\" . | indent 4 }}\nspec:\n replicas: {{ .Values.replicas }}\n selector:\n matchLabels:\n app: {{ template \"opa.fullname\" . }}\n {{- with .Values.deploymentStrategy }}\n strategy:\n {{- toYaml . | nindent 4 }}\n {{- end }}\n template:\n metadata:\n{{- if or .Values.generateAdmissionControllerCerts .Values.opa }}\n annotations:\n{{- if .Values.generateAdmissionControllerCerts }}\n checksum/certs: {{ include (print $.Template.BasePath \"/webhookconfiguration.yaml\") . | sha256sum }}\n{{- end }}\n{{- if .Values.opa }}\n checksum/config: {{ include (print $.Template.BasePath \"/secrets.yaml\") . | sha256sum }}\n{{- end }}\n{{- end }}\n{{- if .Values.annotations }}\n{{ toYaml .Values.annotations | indent 8 }}\n{{- end }}\n labels:\n app: {{ template \"opa.fullname\" . }}\n name: {{ template \"opa.fullname\" . }}\n spec:\n{{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n {{- range .Values.imagePullSecrets }}\n - name: {{ . }}\n {{- end }}\n{{- end }}\n{{- if .Values.priorityClassName }}\n priorityClassName: {{ .Values.priorityClassName }}\n{{- end }}\n{{- if or .Values.authz.enabled .Values.bootstrapPolicies}}\n initContainers:\n - name: initpolicy\n image: {{ .Values.mgmt.image }}:{{ .Values.mgmt.imageTag }}\n imagePullPolicy: {{ .Values.mgmt.imagePullPolicy }}\n resources:\n{{ toYaml .Values.mgmt.resources | indent 12 }}\n command:\n - /bin/sh\n - -c\n - |\n{{- if .Values.authz.enabled }}\n tr -dc 'A-F0-9' < /dev/urandom | dd bs=1 count=32 2>/dev/null > /bootstrap/mgmt-token\n TOKEN=`cat /bootstrap/mgmt-token`\n cat > /bootstrap/authz.rego <<EOF\n package system.authz\n default allow = false\n # Allow anonymous access to the default policy decision.\n allow { input.path = [\"\"]; input.method = \"POST\" }\n allow { input.path = [\"\"]; input.method = \"GET\" }\n # This is only used for health check in liveness and readiness probe\n allow { input.path = [\"health\"]; input.method = \"GET\" }\n{{- if .Values.prometheus.enabled }}\n # This allows metrics to be scraped by prometheus\n allow { input.path = [\"metrics\"]; input.method = \"GET\" }\n{{- end }}\n allow { input.identity == \"$TOKEN\" }\n EOF\n{{- end }}\n{{- range $policyName, $policy := .Values.bootstrapPolicies }}\n cat > /bootstrap/{{ $policyName }}.rego <<EOF\n{{ $policy | indent 12 }}\n EOF\n{{- end }}\n volumeMounts:\n - name: bootstrap\n mountPath: /bootstrap\n{{- end }}\n{{- if .Values.hostNetwork.enabled }}\n hostNetwork: true\n{{- end }}\n containers:\n - name: opa\n ports:\n - name: https\n containerPort: {{ .Values.port }}\n{{- if .Values.prometheus.enabled }}\n - name: http\n containerPort: {{ .Values.mgmt.port }}\n{{- end }}\n image: {{ .Values.image }}:{{ .Values.imageTag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n args:\n - \"run\"\n - \"--server\"\n{{- if .Values.opa }}\n - \"--config-file=/config/config.yaml\"\n{{- end }}\n - \"--tls-cert-file=/certs/tls.crt\"\n - \"--tls-private-key-file=/certs/tls.key\"\n - \"--addr=0.0.0.0:{{ .Values.port }}\"\n - \"--log-level={{ .Values.logLevel }}\"\n - \"--log-format={{ .Values.logFormat }}\"\n{{- if .Values.authz.enabled }}\n - \"--authentication=token\"\n - \"--authorization=basic\"\n - \"--ignore=.*\"\n{{- end }}\n{{- if .Values.prometheus.enabled }}\n - \"--addr=http://0.0.0.0:{{ .Values.mgmt.port }}\"\n{{- else if .Values.mgmt.enabled }}\n - 
\"--addr=http://127.0.0.1:{{ .Values.mgmt.port }}\"\n{{- end }}\n{{- if or .Values.authz.enabled .Values.bootstrapPolicies }}\n - \"/bootstrap\"\n{{- end }}\n{{- range .Values.extraArgs }}\n - {{ . }}\n{{- end }}\n volumeMounts:\n - name: certs\n readOnly: true\n mountPath: /certs\n{{- if .Values.opa }}\n - name: config\n readOnly: true\n mountPath: /config\n{{- end }}\n{{- if or .Values.authz.enabled .Values.bootstrapPolicies }}\n - name: bootstrap\n readOnly: true\n mountPath: /bootstrap\n{{- end }}\n readinessProbe:\n{{ toYaml .Values.readinessProbe | indent 12 }}\n livenessProbe:\n{{ toYaml .Values.livenessProbe | indent 12 }}\n{{- if .Values.mgmt.enabled }}\n - name: mgmt\n image: {{ .Values.mgmt.image }}:{{ .Values.mgmt.imageTag }}\n imagePullPolicy: {{ .Values.mgmt.imagePullPolicy }}\n resources:\n{{ toYaml .Values.mgmt.resources | indent 12 }}\n args:\n{{- if .Values.authz.enabled }}\n - --opa-auth-token-file=/bootstrap/mgmt-token\n{{- end }}\n - --opa-url=http://127.0.0.1:{{ .Values.mgmt.port }}/v1\n - --replicate-path={{ .Values.mgmt.replicate.path }}\n - --enable-data={{ .Values.mgmt.data.enabled }}\n - --enable-policies={{ .Values.mgmt.configmapPolicies.enabled }}\n{{- if .Values.mgmt.configmapPolicies.enabled }}\n - --policies={{ .Values.mgmt.configmapPolicies.namespaces | join \",\" }}\n - --require-policy-label={{ .Values.mgmt.configmapPolicies.requireLabel }}\n{{- end }}\n{{- range .Values.mgmt.replicate.namespace }}\n - --replicate={{ . }}\n{{- end }}\n{{- range .Values.mgmt.replicate.cluster }}\n - --replicate-cluster={{ . }}\n{{- end }}\n{{- range .Values.mgmt.extraArgs }}\n - {{ . }}\n{{- end }}\n{{- if or .Values.authz.enabled .Values.bootstrapPolicies }}\n volumeMounts:\n - name: bootstrap\n readOnly: true\n mountPath: /bootstrap\n{{- end }}\n{{- end }}\n{{- if .Values.sar.enabled }}\n - name: sarproxy\n image: {{ .Values.sar.image }}:{{ .Values.sar.imageTag }}\n imagePullPolicy: {{ .Values.sar.imagePullPolicy }}\n resources:\n{{ toYaml .Values.sar.resources | indent 12 }}\n command:\n - kubectl\n - proxy\n - --accept-paths=^/apis/authorization.k8s.io/v1/subjectaccessreviews$\n{{- end }}\n{{- if .Values.extraContainers }}\n{{ toYaml .Values.extraContainers | indent 8}}\n{{- end }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n {{- range $key, $val := .Values.securityContext }}\n {{- if ne $key \"enabled\" }}\n {{ $key }}: {{ toYaml $val | nindent 10 }}\n {{- end }}\n {{- end }}\n {{- end }}\n serviceAccountName: {{ template \"opa.serviceAccountName\" .}}\n volumes:\n - name: certs\n secret:\n secretName: {{ template \"opa.fullname\" . }}-cert\n{{- if .Values.opa }}\n - name: config\n secret:\n secretName: {{ template \"opa.fullname\" . }}-config\n{{- end }}\n{{- if or .Values.authz.enabled .Values.bootstrapPolicies}}\n - name: bootstrap\n emptyDir: {}\n{{- if .Values.extraVolumes }}\n{{ toYaml .Values.extraVolumes | indent 8}}\n{{- end }}\n{{- end }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n",
"# mgmt-clusterrole.yaml\n{{- if (and .Values.rbac.create .Values.mgmt.enabled) -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n app: {{ template \"opa.name\" . }}\n chart: {{ template \"opa.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n component: mgmt\n name: {{ template \"opa.mgmtfullname\" . }}\nrules:\n{{ toYaml .Values.rbac.rules.cluster | indent 2 }}\n{{- end -}}\n",
"# mgmt-clusterrolebinding.yaml\n{{- if (and .Values.rbac.create .Values.mgmt.enabled) -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app: {{ template \"opa.name\" . }}\n chart: {{ template \"opa.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n component: mgmt\n name: {{ template \"opa.mgmtfullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"opa.mgmtfullname\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"opa.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# poddisruptionbudget.yaml\n{{- if .Values.podDisruptionBudget.enabled }}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"opa.fullname\" . }}\n labels:\n{{ include \"opa.labels.standard\" . | indent 4 }}\nspec:\n{{- if .Values.podDisruptionBudget.minAvailable }}\n minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}\n{{- end }}\n{{- if .Values.podDisruptionBudget.maxUnavailable }}\n maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}\n{{- end }}\n selector:\n matchLabels:\n app: {{ template \"opa.fullname\" . }}\n{{- end }}\n",
"# sar-clusterrole.yaml\n{{- if (and .Values.rbac.create .Values.sar.enabled) -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n app: {{ template \"opa.name\" . }}\n chart: {{ template \"opa.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n component: sar\n name: {{ template \"opa.sarfullname\" . }}\nrules:\n - apiGroups:\n - \"authorization.k8s.io\"\n resources:\n - subjectaccessreviews\n verbs:\n - create\n{{- end -}}\n",
"# sar-clusterrolebinding.yaml\n{{- if (and .Values.rbac.create .Values.sar.enabled) -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app: {{ template \"opa.name\" . }}\n chart: {{ template \"opa.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n component: sar\n name: {{ template \"opa.sarfullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"opa.sarfullname\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"opa.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# secrets.yaml\n{{- if .Values.opa -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"opa.fullname\" . }}-config\n labels:\n{{ include \"opa.labels.standard\" . | indent 4 }}\ntype: Opaque\ndata:\n config.yaml: {{ toYaml .Values.opa | b64enc }}\n{{- end -}}",
"# service.yaml\nkind: Service\napiVersion: v1\nmetadata:\n name: {{ template \"opa.fullname\" . }}\n labels:\n{{ include \"opa.labels.standard\" . | indent 4 }}\nspec:\n selector:\n app: {{ template \"opa.fullname\" . }}\n ports:\n - name: https\n protocol: TCP\n port: 443\n targetPort: {{ .Values.port }}\n{{- if .Values.prometheus.enabled }}\n - name: http\n port: {{ .Values.mgmt.port }}\n{{- end }}\n{{- if .Values.extraPorts }}\n{{ toYaml .Values.extraPorts | indent 2}}\n{{- end }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"opa.serviceAccountName\" .}}\n labels:\n app: {{ template \"opa.fullname\" . }}\n chart: {{ template \"opa.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- end }}\n",
"# servicemonitor.yaml\n{{- if and (.Capabilities.APIVersions.Has \"monitoring.coreos.com/v1\") .Values.prometheus.enabled .Values.serviceMonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app: {{ template \"opa.name\" . }}\n chart: {{ template \"opa.chart\" . }}\n heritage: {{ .Release.Service }}\n {{- if not .Values.serviceMonitor.additionalLabels.release }}\n release: {{ .Release.Name }}\n {{- end }}\n {{- if .Values.serviceMonitor.additionalLabels }}\n {{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4}}\n {{- end }}\n name: {{ template \"opa.fullname\" . }}\n {{- if .Values.serviceMonitor.namespace }}\n namespace: {{ .Values.serviceMonitor.namespace }}\n {{- end }}\nspec:\n endpoints:\n - port: http\n interval: {{ .Values.serviceMonitor.interval }}\n jobLabel: {{ template \"opa.fullname\" . }}\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n selector:\n matchLabels:\n app: {{ template \"opa.fullname\" . }}\n release: {{ .Release.Name }}\n{{- end }}\n",
"# webhookconfiguration.yaml\n{{- $cn := printf \"%s.%s.svc\" ( include \"opa.fullname\" . ) .Release.Namespace }}\n{{- $ca := genCA \"opa-admission-ca\" 3650 -}}\n{{- $cert := genSignedCert $cn nil nil 3650 $ca -}}\nkind: {{ .Values.admissionControllerKind }}\napiVersion: admissionregistration.k8s.io/v1beta1\nmetadata:\n name: {{ template \"opa.fullname\" . }}\n annotations:\n{{- if .Values.certManager.enabled }}\n certmanager.k8s.io/inject-ca-from: {{ printf \"%s/%s\" .Release.Namespace (include \"opa.rootCACertificate\" .) | quote }}\n cert-manager.io/inject-ca-from: {{ printf \"%s/%s\" .Release.Namespace (include \"opa.rootCACertificate\" .) | quote }}\n{{- end }}\n labels:\n{{ include \"opa.labels.standard\" . | indent 4 }}\nwebhooks:\n - name: webhook.openpolicyagent.org\n{{- with .Values.admissionControllerNamespaceSelector }}\n namespaceSelector:\n{{ toYaml . | indent 6 }}\n{{ end }}\n failurePolicy: {{ .Values.admissionControllerFailurePolicy }}\n rules:\n{{ toYaml .Values.admissionControllerRules | indent 6 }}\n clientConfig:\n{{ if not .Values.certManager.enabled }}\n{{ if .Values.generateAdmissionControllerCerts }}\n caBundle: {{ b64enc $ca.Cert }}\n{{ else }}\n caBundle: {{ b64enc .Values.admissionControllerCA }}\n{{ end }}\n{{ end }}\n service:\n name: {{ template \"opa.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n sideEffects: {{ .Values.admissionControllerSideEffect }}\n{{ if .Values.timeoutSeconds }}\n timeoutSeconds: {{ .Values.timeoutSeconds }}\n{{ end }}\n\n{{ if .Values.certManager.enabled }}\n---\n# Create a selfsigned Issuer, in order to create a root CA certificate for\n# signing webhook serving certificates\napiVersion: {{ include \"opa.certManagerApiVersion\" . }}\nkind: Issuer\nmetadata:\n name: {{ include \"opa.selfSignedIssuer\" . }}\n labels:\n{{ include \"opa.labels.standard\" . | indent 4 }}\nspec:\n selfSigned: {}\n\n---\n# Generate a CA Certificate used to sign certificates for the webhook\napiVersion: {{ include \"opa.certManagerApiVersion\" . }}\nkind: Certificate\nmetadata:\n name: {{ include \"opa.rootCACertificate\" . }}\n labels:\n{{ include \"opa.labels.standard\" . | indent 4 }}\nspec:\n secretName: {{ include \"opa.rootCACertificate\" . }}\n duration: 43800h # 5y\n issuerRef:\n name: {{ include \"opa.selfSignedIssuer\" . }}\n commonName: \"ca.webhook.opa\"\n isCA: true\n\n---\n# Create an Issuer that uses the above generated CA certificate to issue certs\napiVersion: {{ include \"opa.certManagerApiVersion\" . }}\nkind: Issuer\nmetadata:\n name: {{ include \"opa.rootCAIssuer\" . }}\n labels:\n{{ include \"opa.labels.standard\" . | indent 4 }}\nspec:\n ca:\n secretName: {{ include \"opa.rootCACertificate\" . }}\n\n---\n\n# Finally, generate a serving certificate for the webhook to use\napiVersion: {{ include \"opa.certManagerApiVersion\" . }}\nkind: Certificate\nmetadata:\n name: {{ include \"opa.servingCertificate\" . }}\n labels:\n{{ include \"opa.labels.standard\" . | indent 4 }}\nspec:\n secretName: {{ template \"opa.fullname\" . }}-cert\n duration: 8760h # 1y\n issuerRef:\n name: {{ include \"opa.rootCAIssuer\" . }}\n dnsNames:\n - {{ include \"opa.fullname\" . }}\n - {{ include \"opa.fullname\" . }}.{{ .Release.Namespace }}\n - {{ include \"opa.fullname\" . }}.{{ .Release.Namespace }}.svc\n{{ end }}\n{{- if not .Values.certManager.enabled }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"opa.fullname\" . }}-cert\n labels:\n app: {{ template \"opa.fullname\" . 
}}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n{{ if .Values.generateAdmissionControllerCerts }}\n tls.crt: {{ b64enc $cert.Cert }}\n tls.key: {{ b64enc $cert.Key }}\n{{ else }}\n tls.crt: {{ b64enc .Values.admissionControllerCert }}\n tls.key: {{ b64enc .Values.admissionControllerKey }}\n{{ end }}\n{{ end }}\n"
] | # Default values for opa.
# -----------------------
#
# The 'opa' key embeds an OPA configuration file. See https://www.openpolicyagent.org/docs/configuration.html for more details.
# Use 'opa: false' to disable the OPA configuration and rely on configmaps for policy loading.
# See https://www.openpolicyagent.org/docs/latest/kubernetes-admission-control/#3-deploy-opa-on-top-of-kubernetes and the `mgmt.configmapPolicies` section below for more details.
opa:
services:
controller:
url: 'https://www.openpolicyagent.org'
bundles:
quickstart:
service: controller
resource: /bundles/helm-kubernetes-quickstart
default_decision: /helm_kubernetes_quickstart/main
# Setup the webhook using cert-manager
certManager:
enabled: false
# Expose the prometheus scraping endpoint
prometheus:
enabled: false
## ServiceMonitor consumed by prometheus-operator
serviceMonitor:
## If the operator is installed in your cluster, set to true to create a Service Monitor Entry
enabled: false
interval: "15s"
## Namespace in which the service monitor is created
# namespace: monitoring
# Added to the ServiceMonitor object so that prometheus-operator is able to discover it
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
additionalLabels: {}
# Annotations in the deployment template
annotations:
{}
# Bootstrap policies to load upon startup
# Define policies in the form of:
# <policyName> : |-
# <regoBody>
# For example, to mask the entire input body in the decision logs:
# bootstrapPolicies:
# log: |-
# package system.log
# mask["/input"]
bootstrapPolicies: {}
# To enforce mutating policies, change to MutatingWebhookConfiguration.
admissionControllerKind: ValidatingWebhookConfiguration
# To _fail closed_ on failures, change to Fail. During initial testing, we
# recommend leaving the failure policy as Ignore.
admissionControllerFailurePolicy: Ignore
# Adds a namespace selector to the admission controller webhook
admissionControllerNamespaceSelector:
matchExpressions:
- {key: openpolicyagent.org/webhook, operator: NotIn, values: [ignore]}
# SideEffectClass for the webhook, setting to None enables dry-run
admissionControllerSideEffect: Unknown
# To restrict the kinds of operations and resources that are subject to OPA
# policy checks, see the settings below. By default, all resources and
# operations are subject to OPA policy checks.
admissionControllerRules:
- operations: ["*"]
apiGroups: ["*"]
apiVersions: ["*"]
resources: ["*"]
# Controls a PodDisruptionBudget for the OPA pod. Suggested when keeping OPA
# always running for admission control is important
podDisruptionBudget:
enabled: false
minAvailable: 1
# maxUnavailable: 1
# The helm Chart will automatically generate a CA and server certificate for
# the OPA. If you want to supply your own certificates, set the field below to
# false and add the PEM encoded CA certificate and server key pair below.
#
# WARNING: The common name in the server certificate MUST match the
# hostname of the service that exposes the OPA to the apiserver. For example,
# if the service is created in the "default" namespace with name "opa",
# the common name MUST be set to "opa.default.svc".
#
# If the common name is not set correctly, the apiserver will refuse to
# communicate with the OPA.
generateAdmissionControllerCerts: true
admissionControllerCA: ""
admissionControllerCert: ""
admissionControllerKey: ""
authz:
# Disable if you don't want authorization.
# Mostly useful for debugging.
enabled: true
# Use hostNetwork setting on OPA pod
hostNetwork:
enabled: false
# Docker image and tag to deploy.
image: openpolicyagent/opa
imageTag: 0.15.1
imagePullPolicy: IfNotPresent
# One or more secrets to be used when pulling images
imagePullSecrets: []
# - registrySecretName
# Port to which the opa pod will bind itself
# NOTE: If you use a different port, make sure it matches the ones in the readinessProbe
# and livenessProbe
port: 443
extraArgs: []
mgmt:
enabled: true
image: openpolicyagent/kube-mgmt
imageTag: "0.10"
imagePullPolicy: IfNotPresent
  # NOTE: insecure HTTP port used both for mgmt access and Prometheus metrics export
port: 8181
extraArgs: []
resources: {}
data:
enabled: false
configmapPolicies:
    # NOTE: If you use these, remember to update the RBAC rules below to allow
    # permissions to get, list, watch, patch and update configmaps
enabled: false
namespaces: [opa, kube-federation-scheduling-policy]
requireLabel: true
replicate:
    # NOTE: If you use these, remember to update the RBAC rules below to allow
    # permissions to replicate these resources
cluster: []
# - [group/]version/resource
namespace: []
# - [group/]version/resource
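    # e.g. (illustrative): `namespace: [v1/configmaps]` replicates ConfigMaps from
    # watched namespaces, while `cluster: [apps/v1/deployments]` replicates
    # Deployments cluster-wide into OPA's data document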
path: kubernetes
# Log level for OPA ('debug', 'info', 'error') (app default=info)
logLevel: info
# Log format for OPA ('text', 'json') (app default=text)
logFormat: text
# Number of OPA replicas to deploy. OPA maintains an eventually consistent
# cache of policies and data. If you want high availability you can deploy two
# or more replicas.
replicas: 1
# To control how the OPA is scheduled on the cluster, set the affinity,
# tolerations and nodeSelector values below. For example, to deploy OPA onto
# the master nodes, 1 replica per node:
#
# affinity:
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: "app"
# operator: In
# values:
# - opa
# topologyKey: "kubernetes.io/hostname"
# tolerations:
# - key: "node-role.kubernetes.io/master"
# effect: NoSchedule
# operator: Exists
# nodeSelector:
# kubernetes.io/role: "master"
affinity: {}
tolerations: []
nodeSelector: {}
# To control the CPU and memory resource limits and requests for OPA, set the
# field below.
resources: {}
rbac:
# If true, create & use RBAC resources
#
create: true
rules:
cluster: []
# - apiGroups:
# - ""
# resources:
# - namespaces
# verbs:
# - get
# - list
# - watch
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# This proxy allows opa to make Kubernetes SubjectAccessReview checks against the
# Kubernetes API. You can get a rego function at github.com/open-policy-agent/library
sar:
enabled: false
image: lachlanevenson/k8s-kubectl
imageTag: latest
imagePullPolicy: IfNotPresent
resources: {}
# To control the liveness and readiness probes change the fields below.
readinessProbe:
httpGet:
path: /health
scheme: HTTPS
port: 443
initialDelaySeconds: 3
periodSeconds: 5
livenessProbe:
httpGet:
path: /health
scheme: HTTPS
port: 443
initialDelaySeconds: 3
periodSeconds: 5
# Set a priorityClass using priorityClassName
# priorityClassName:
# Timeout for a webhook call in seconds.
# Starting in Kubernetes 1.14 you can set the timeout, and it is
# encouraged to use a small timeout for webhooks. If the webhook call times out,
# the request is handled according to the webhook's failure policy.
# timeoutSeconds: 20
securityContext:
enabled: false
runAsNonRoot: true
runAsUser: 1
deploymentStrategy: {}
# rollingUpdate:
# maxSurge: 1
# maxUnavailable: 0
# type: RollingUpdate
extraContainers: []
## Additional containers to be added to the opa pod.
# - name: example-app
# image: example/example-app:latest
# args:
# - "run"
# - "--port=11811"
# - "--config=/etc/example-app-conf/config.yaml"
# - "--opa-endpoint=https://localhost:443"
# ports:
# - name: http
# containerPort: 11811
# protocol: TCP
# volumeMounts:
# - name: example-app-auth-config
# mountPath: /etc/example-app-conf
extraVolumes: []
## Additional volumes for the opa pod.
# - name: example-app-auth-config
# secret:
# secretName: example-app-auth-config
extraPorts: []
## Additional ports for the opa service. Useful to expose extra container ports.
# - port: 11811
# protocol: TCP
# name: http
# targetPort: http
|
sematext-agent | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"sematext-agent.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"sematext-agent.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"sematext-agent.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"sematext-agent.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"sematext-agent.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"sematext-agent.fullname\" . }}\n labels:\n app: {{ template \"sematext-agent.name\" . }}\n chart: {{ template \"sematext-agent.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nrules:\n- apiGroups:\n - \"\"\n resources:\n - events\n - pods\n - pods/log\n verbs:\n - list\n - get\n - watch\n{{- if or (.Values.containerToken) (.Values.infraToken) }}\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n - nodes\n - secrets\n verbs:\n - list\n - get\n - watch\n- apiGroups:\n - \"\"\n resources:\n - nodes/metrics\n verbs:\n - get\n - create\n- apiGroups:\n - \"\"\n resources:\n - pods\n - configmaps\n verbs:\n - create\n - delete\n - update\n- apiGroups:\n - apps\n resources:\n - deployments\n - replicasets\n verbs:\n - watch\n - list\n - get\n- apiGroups:\n - extensions\n resources:\n - replicasets\n verbs:\n - get\n - watch\n - list\n{{- end }}\n{{- end }}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"sematext-agent.fullname\" . }}\n labels:\n app: {{ template \"sematext-agent.name\" . }}\n chart: {{ template \"sematext-agent.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"sematext-agent.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"sematext-agent.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}\n",
"# configmap-agent.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"sematext-agent.fullname\" . }}-agent\n labels:\n app: {{ template \"sematext-agent.name\" . }}-agent\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n REGION: {{ .Values.region | quote }}\n {{- range $key, $val := .Values.agent.config }}\n {{ $key }}: {{ $val | quote }}\n {{- end }}\n {{- if .Values.serverBaseUrl }}\n SERVER_BASE_URL: {{ default \"\" .Values.serverBaseUrl | quote }}\n {{- end }}\n {{- if .Values.eventsReceiverUrl }}\n EVENTS_RECEIVER_URL: {{ default \"\" .Values.eventsReceiverUrl | quote }}\n {{- end }}\n {{- if .Values.logsReceiverUrl }}\n LOGS_RECEIVER_URL: {{ default \"\" .Values.logsReceiverUrl | quote }}\n {{- end }}\n API_SERVER_PORT: \"{{ .Values.agent.service.port }}\"\n",
"# configmap-logagent-custom-configs.yaml\n{{- if .Values.logagent.customConfigs }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"sematext-agent.fullname\" . }}-logagent-custom-configs\n labels:\n app: {{ template \"sematext-agent.name\" . }}-logagent\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n{{ toYaml .Values.logagent.customConfigs | indent 2 }}\n{{- end }}\n",
"# configmap-logagent.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"sematext-agent.fullname\" . }}-logagent\n labels:\n app: {{ template \"sematext-agent.name\" . }}-logagent\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n REGION: {{ .Values.region | quote }}\n {{- range $key, $val := .Values.logagent.config }}\n {{ $key }}: {{ $val | quote }}\n {{- end }}\n {{- if .Values.logsReceiverUrl }}\n LOGS_RECEIVER_URL: {{ default \"\" .Values.logsReceiverUrl | quote }}\n {{- end }}\n",
"# daemonset.yaml\n{{- if or (.Values.infraToken) (.Values.logsToken) (.Values.containerToken) }}\n{{- if .Capabilities.APIVersions.Has \"apps/v1\" }}\napiVersion: apps/v1\n{{- else }}\napiVersion: extensions/v1beta1\n{{- end }}\nkind: DaemonSet\nmetadata:\n name: {{ template \"sematext-agent.fullname\" . }}\n labels:\n app: {{ template \"sematext-agent.name\" . }}\n chart: {{ template \"sematext-agent.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"sematext-agent.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"sematext-agent.name\" . }}\n release: {{ .Release.Name }}\n annotations:\n checksum/config-agent: {{ include (print $.Template.BasePath \"/configmap-agent.yaml\") . | sha256sum }}\n checksum/config-logagent: {{ include (print $.Template.BasePath \"/configmap-logagent.yaml\") . | sha256sum }}\n checksum/config-logagent-custom-configs: {{ include (print $.Template.BasePath \"/configmap-logagent-custom-configs.yaml\") . | sha256sum }}\n checksum/secret: {{ include (print $.Template.BasePath \"/secret.yaml\") . | sha256sum }}\n spec:\n serviceAccountName: {{ template \"sematext-agent.serviceAccountName\" . }}\n {{- if .Values.priorityClassName }}\n priorityClassName: {{ .Values.priorityClassName | quote }}\n {{- end }}\n containers:\n {{- if or (.Values.containerToken) (.Values.infraToken) }}\n - name: agent\n image: \"{{ .Values.agent.image.repository }}:{{ .Values.agent.image.tag }}\"\n imagePullPolicy: {{ .Values.agent.image.pullPolicy }}\n env:\n - name: INFRA_TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"sematext-agent.fullname\" . }}\n key: infra-token\n - name: CONTAINER_TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"sematext-agent.fullname\" . }}\n key: container-token\n - name: LOGS_TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"sematext-agent.fullname\" . }}\n key: logs-token\n - name: STA_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n envFrom:\n - configMapRef:\n name: {{ template \"sematext-agent.fullname\" . }}-agent\n livenessProbe:\n httpGet:\n path: /health\n port: {{ .Values.agent.service.port }}\n readinessProbe:\n httpGet:\n path: /health\n port: {{ .Values.agent.service.port }}\n volumeMounts:\n - name: hostfs\n mountPath: /hostfs\n readOnly: true\n - name: sysfs\n mountPath: /hostfs/sys\n readOnly: true\n - name: passwd\n mountPath: /etc/passwd\n readOnly: true\n - name: group\n mountPath: /etc/group\n readOnly: true\n - name: debugfs\n mountPath: /sys/kernel/debug\n - name: run\n mountPath: /var/run/\n - name: dev\n mountPath: /hostfs/dev\n readOnly: true\n securityContext:\n privileged: true\n ports:\n - name: http\n containerPort: {{ .Values.agent.service.port }}\n protocol: TCP\n resources:\n{{ toYaml .Values.agent.resources | indent 12 }}\n {{- end }}\n {{- if .Values.logsToken }}\n - name: logagent\n image: \"{{ .Values.logagent.image.repository }}:{{ .Values.logagent.image.tag }}\"\n imagePullPolicy: {{ .Values.logagent.image.pullPolicy }}\n env:\n - name: LOGS_TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"sematext-agent.fullname\" . }}\n key: logs-token\n envFrom:\n - configMapRef:\n name: {{ template \"sematext-agent.fullname\" . 
}}-logagent\n volumeMounts:\n - name: run\n mountPath: /var/run/\n {{- if .Values.logagent.customConfigs }}\n - name: logagent-config-volume\n mountPath: /etc/sematext\n {{- end }}\n {{- if .Values.logagent.extraHostVolumeMounts }}\n {{- range $_, $mount := .Values.logagent.extraHostVolumeMounts }}\n - name: {{ $mount.name }}\n mountPath: {{ $mount.mountPath }}\n {{- end }}\n {{- end }}\n resources:\n{{ toYaml .Values.logagent.resources | indent 12 }}\n {{- end }}\n volumes:\n - name: hostfs\n hostPath:\n path: /\n - name: sysfs\n hostPath:\n path: /sys\n - name: passwd\n hostPath:\n path: /etc/passwd\n - name: group\n hostPath:\n path: /etc/group\n - name: debugfs\n hostPath:\n path: /sys/kernel/debug\n - name: run\n hostPath:\n path: /var/run/\n - name: dev\n hostPath:\n path: /dev\n {{- if .Values.logagent.customConfigs }}\n - name: logagent-config-volume\n configMap:\n name: {{ template \"sematext-agent.fullname\" . }}-logagent-custom-configs\n {{- end }}\n {{- if .Values.logagent.extraHostVolumeMounts }}\n {{- range $_, $mount := .Values.logagent.extraHostVolumeMounts }}\n - name: {{ $mount.name }}\n hostPath:\n path: {{ $mount.hostPath }}\n {{- end }}\n {{- end }}\n {{ if .Values.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.imagePullSecrets }}\n {{- end }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n{{- end }}\n",
"# secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"sematext-agent.fullname\" . }}\n labels:\n app: {{ template \"sematext-agent.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\ntype: Opaque\ndata:\n infra-token: {{ default \"\" .Values.infraToken | b64enc | quote }}\n container-token: {{ default \"\" .Values.containerToken | b64enc | quote }}\n logs-token: {{ default \"\" .Values.logsToken | b64enc | quote }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"sematext-agent.fullname\" . }}\n labels:\n app: {{ template \"sematext-agent.name\" . }}\n chart: {{ template \"sematext-agent.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.agent.service.type }}\n ports:\n - port: {{ .Values.agent.service.port }}\n targetPort: http\n protocol: TCP\n name: http\n selector:\n app: {{ template \"sematext-agent.name\" . }}\n release: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"sematext-agent.serviceAccountName\" . }}\n labels:\n app: {{ template \"sematext-agent.name\" . }}\n chart: {{ template \"sematext-agent.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- end }}\n"
] | agent:
image:
repository: sematext/agent
tag: latest
pullPolicy: Always
service:
port: 80
type: ClusterIP
config:
PIPELINE_CONSOLE_OUTPUT: false
PIPELINE_NULL_OUTPUT: false
API_SERVER_HOST: 0.0.0.0
LOGGING_WRITE_EVENTS: false
LOGGING_REQUEST_TRACKING: false
AUTODISCO_ALLOWED_NAMESPACES: "default"
LOGGING_LEVEL: info
resources: {}
logagent:
image:
repository: sematext/logagent
tag: latest
pullPolicy: Always
config:
LOGSENE_BULK_SIZE: "1000"
LOGSENE_LOG_INTERVAL: "10000"
# Refer to logagent docs for custom config at https://sematext.com/docs/logagent/config-file/
customConfigs: []
# logagent.conf: |-
# options:
# printStats: 60
# suppress: true
# geoipEnabled: true
# diskBufferDir: /tmp/sematext-logagent
# parser:
# patternFiles:
# - /etc/logagent/patterns.yml
# output:
# logsene:
# module: elasticsearch
# url: ${LOGSENE_RECEIVER_URL}
resources: {}
  extraHostVolumeMounts: []
# - name: <mountName>
# hostPath: <hostPath>
# mountPath: <mountPath>
priorityClassName:
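# e.g., assuming a PriorityClass named "high-priority" already exists in the cluster:
# priorityClassName: high-priority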
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# the Infra App token to which most metrics, package, and process data are shipped
infraToken: null
# the Container App token (container metrics are delivered here)
containerToken: null
# the Logs App token used to ship logs
logsToken: null
# for private images
# imagePullSecrets:
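# e.g., assuming a docker-registry secret created beforehand in this namespace:
# imagePullSecrets: my-registry-secret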
region: US
# support for custom URLs
serverBaseUrl: null
eventsReceiverUrl: null
logsReceiverUrl: null
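# e.g., for accounts in the Sematext EU region (verify the receiver hostnames for
# your account in the Sematext docs):
# serverBaseUrl: https://spm-receiver.eu.sematext.com
# logsReceiverUrl: https://logsene-receiver.eu.sematext.com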
tolerations: []
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
|
rookout | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"rookout.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"rookout.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"rookout.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# deployment.yaml\n{{- if .Values.rookout.token }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"rookout.fullname\" . }}\n labels:\n app: {{ template \"rookout.name\" . }}\n chart: {{ template \"rookout.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels: \n app: {{ template \"rookout.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"rookout.name\" . }}\n release: {{ .Release.Name }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n - name: ROOKOUT_TOKEN\n value: {{ .Values.token }}\n - name: ROOKOUT_LISTEN_ALL\n value: {{ .Values.listenAll }}\n - name: ROOKOUT_AGENT_TAGS\n value: {{ .Values.tags }} \n ports:\n - containerPort: 7486\n livenessProbe:\n tcpSocket:\n port: 7486\n initialDelaySeconds: 15\n periodSeconds: 20\n{{- end -}}\n",
"# service.yaml\nkind: Service\napiVersion: v1\nmetadata:\n name: {{ template \"rookout.fullname\" . }}\n labels:\n app: {{ template \"rookout.name\" . }}\n chart: {{ template \"rookout.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n app: {{ template \"rookout.name\" . }}\n release: {{ .Release.Name }}\n\n type: ClusterIP\n ports:\n - protocol: TCP\n port: 7486"
] | image:
repository: rookout/agent
tag: 0.2.3
pullPolicy: IfNotPresent
service:
type: ClusterIP
listenAll: true
tags: ''
rookout:
token: ''
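  # Set this to the token issued for your Rookout organization, e.g.:
  # token: "<your-rookout-token>"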
nodeSelector: {}
tolerations: []
affinity: {}
replicaCount: 1
|
atlantis | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"atlantis.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"atlantis.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"atlantis.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"atlantis.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"atlantis.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nDefines the scheme (http or https) of the Atlantis service\n*/}}\n{{- define \"atlantis.url.scheme\" -}}\n{{- if .Values.tlsSecretName -}}\nhttps\n{{- else -}}\nhttp\n{{- end -}}\n{{- end -}}\n\n{{/*\nDefines the internal kubernetes address to Atlantis\n*/}}\n{{- define \"atlantis.url\" -}}\n{{ template \"atlantis.url.scheme\" . }}://{{ template \"atlantis.fullname\" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.port }}\n{{- end -}}\n\n{{/*\nGenerates secret-webhook name\n*/}}\n{{- define \"atlantis.vcsSecretName\" -}}\n{{- if .Values.vcsSecretName -}}\n {{ .Values.vcsSecretName }}\n{{- else -}}\n {{ template \"atlantis.fullname\" . }}-webhook\n{{- end -}}\n{{- end -}}\n\n{{/*\nGenerates AWS Secret name\n*/}}\n{{- define \"atlantis.awsSecretName\" -}}\n{{- if .Values.awsSecretName -}}\n {{ .Values.awsSecretName }}\n{{- else -}}\n {{ template \"atlantis.fullname\" . }}-aws\n{{- end -}}\n{{- end -}}\n",
"# configmap-config.yaml\n{{- if .Values.config -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"atlantis.fullname\" . }}\n labels:\n app: {{ template \"atlantis.name\" . }}\n chart: {{ template \"atlantis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n atlantis.yaml: |\n{{ .Values.config | indent 4 }}\n{{- end -}}\n",
"# configmap-repo-config.yaml\n{{- if .Values.repoConfig -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"atlantis.fullname\" . }}-repo-config\n labels:\n app: {{ template \"atlantis.name\" . }}\n chart: {{ template \"atlantis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n repos.yaml: |\n{{ .Values.repoConfig | indent 4 }}\n{{- end -}}\n",
"# extra-manifests.yaml\n{{- range .Values.extraManifests }}\n---\n{{ tpl (toYaml .) $ }}\n{{- end }}",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"atlantis.fullname\" . -}}\n{{- if semverCompare \">=1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\napiVersion: networking.k8s.io/v1beta1\n{{- else -}}\napiVersion: extensions/v1beta1\n{{- end }}\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app: {{ template \"atlantis.name\" . }}\n chart: {{ template \"atlantis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.ingress.labels }}\n{{ toYaml .Values.ingress.labels | indent 4 }}\n{{- end }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n{{- end }}\n rules:\n - host: {{ .Values.ingress.host }}\n http:\n paths:\n - path: {{ .Values.ingress.path }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: {{ .Values.service.port }}\n{{- end }}\n",
"# secret-aws.yaml\n{{- if .Values.aws -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"atlantis.fullname\" . }}-aws\n labels:\n app: {{ template \"atlantis.name\" . }}\n chart: {{ template \"atlantis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n{{- if .Values.aws.credentials }}\n credentials: {{ .Values.aws.credentials | b64enc }}\n{{- end }}\n config: {{ .Values.aws.config | b64enc }}\n{{- end -}}\n",
"# secret-gitconfig.yaml\n{{- if .Values.gitconfig}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"atlantis.fullname\" . }}-gitconfig\n labels:\n app: {{ template \"atlantis.name\" . }}\n chart: {{ template \"atlantis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n gitconfig: {{ .Values.gitconfig | b64enc }}\n{{- end }}",
"# secret-service-account.yaml\n{{- $all := . -}}\n{{ range $name, $secret := .Values.serviceAccountSecrets }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ $name }}\n labels:\n app: {{ $name }}\n chart: {{ template \"atlantis.chart\" $all }}\n component: service-account-secret\n heritage: {{ $all.Release.Service }}\n release: {{ $all.Release.Name }}\ndata:\n service-account.json: {{ $secret }}\n---\n{{ end }}",
"# secret-webhook.yaml\n{{- if not .Values.vcsSecretName }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"atlantis.fullname\" . }}-webhook\n labels:\n app: {{ template \"atlantis.name\" . }}\n chart: {{ template \"atlantis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n {{- if .Values.github }}\n github_token: {{ required \"github.token is required if github configuration is specified.\" .Values.github.token | b64enc }}\n github_secret: {{ required \"github.secret is required if github configuration is specified.\" .Values.github.secret | b64enc }}\n {{- end}}\n {{- if .Values.gitlab }}\n gitlab_token: {{ required \"gitlab.token is required if gitlab configuration is specified.\" .Values.gitlab.token | b64enc }}\n gitlab_secret: {{ required \"gitlab.secret is required if gitlab configuration is specified.\" .Values.gitlab.secret | b64enc }}\n {{- end}}\n {{- if .Values.bitbucket }}\n bitbucket_token: {{ required \"bitbucket.token is required if bitbucket configuration is specified.\" .Values.bitbucket.token | b64enc }}\n {{- if .Values.bitbucket.baseURL }}\n bitbucket_secret: {{ required \"bitbucket.secret is required if bitbucket.baseURL is specified.\" .Values.bitbucket.secret | b64enc }}\n {{- end}}\n {{- end }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"atlantis.fullname\" . }}\n labels:\n app: {{ template \"atlantis.name\" . }}\n chart: {{ template \"atlantis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- with .Values.service.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n {{- end }}\nspec:\n{{- if .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}\n{{- end }}\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: 4141\n protocol: TCP\n name: atlantis\n selector:\n app: {{ template \"atlantis.name\" . }}\n release: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"atlantis.serviceAccountName\" . }}\n labels:\n app: {{ template \"atlantis.name\" . }}\n chart: {{ template \"atlantis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.serviceAccount.annotations }}\n annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}\n{{- end }}\n{{- end -}}\n",
"# statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ template \"atlantis.fullname\" . }}\n labels:\n app: {{ template \"atlantis.name\" . }}\n chart: {{ template \"atlantis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.statefulSet.labels }}\n{{ toYaml .Values.statefulSet.labels | indent 4 }}\n{{- end }}\n {{- with .Values.statefulSet.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n {{- end }}\nspec:\n serviceName: {{ template \"atlantis.fullname\" . }}\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"atlantis.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"atlantis.name\" . }}\n release: {{ .Release.Name }}\n{{- if .Values.podTemplate.labels }}\n{{ toYaml .Values.podTemplate.labels | indent 8 }}\n{{- end }}\n {{- with .Values.podTemplate.annotations }}\n annotations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n spec:\n serviceAccountName: {{ template \"atlantis.serviceAccountName\" . }}\n securityContext:\n fsGroup: 1000\n runAsUser: 100\n volumes:\n {{- if .Values.tlsSecretName }}\n - name: tls\n secret:\n secretName: {{ .Values.tlsSecretName }}\n {{- end }}\n {{- range $name, $_ := .Values.serviceAccountSecrets }}\n - name: {{ $name }}-volume\n secret:\n secretName: {{ $name }}\n {{- end }}\n {{- range .Values.googleServiceAccountSecrets }}\n - name: {{ .name }}\n secret:\n secretName: {{ .secretName }}\n {{- end }}\n {{- if .Values.gitconfig }}\n - name: gitconfig-volume\n secret:\n secretName: {{ template \"atlantis.fullname\" . }}-gitconfig\n {{- else if .Values.gitconfigSecretName }}\n - name: gitconfig-volume\n secret:\n secretName: {{ .Values.gitconfigSecretName }}\n {{- end }}\n {{- if or .Values.aws .Values.awsSecretName}}\n - name: aws-volume\n secret:\n secretName: {{ template \"atlantis.awsSecretName\" . }}\n {{- end }}\n {{- if .Values.repoConfig }}\n - name: repo-config\n configMap:\n name: {{ template \"atlantis.fullname\" . }}-repo-config\n {{- end }}\n {{- if .Values.config }}\n - name: config\n configMap:\n name: {{ template \"atlantis.fullname\" . }}\n {{- end }}\n {{- if .Values.extraVolumes }}\n{{ toYaml .Values.extraVolumes | indent 6 }}\n {{- end }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n {{- range .Values.imagePullSecrets }}\n - name: {{ . }}\n {{- end }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n {{- if or .Values.gitconfig .Values.gitconfigSecretName }}\n lifecycle:\n postStart:\n exec:\n command: [\"/bin/sh\", \"-c\", \"cp /etc/secret-gitconfig/gitconfig /home/atlantis/.gitconfig && chown atlantis /home/atlantis/.gitconfig\"]\n {{- end}}\n {{- if .Values.command }}\n command:\n {{- range .Values.command }}\n - {{ . }}\n {{- end }}\n {{- end }}\n args:\n - server\n {{- if .Values.config }}\n - --config=/etc/atlantis/atlantis.yaml\n {{- end }}\n ports:\n - name: atlantis\n containerPort: 4141\n {{- if .Values.loadEnvFromSecrets }}\n envFrom:\n {{- range .Values.loadEnvFromSecrets }}\n - secretRef:\n name: {{ . 
}}\n {{- end }}\n {{- end }}\n env:\n {{- range $key, $value := .Values.environment }}\n - name: {{ $key }}\n value: {{ $value | quote }}\n {{- end }}\n {{- range .Values.environmentSecrets }}\n - name: {{ .name }}\n valueFrom:\n secretKeyRef:\n name: {{ .secretKeyRef.name }}\n key: {{ .secretKeyRef.key }}\n {{- end }}\n {{- if .Values.allowForkPRs }}\n - name: ATLANTIS_ALLOW_FORK_PRS\n value: {{ .Values.allowForkPRs | quote }}\n {{- end }}\n {{- if .Values.disableApplyAll }}\n - name: ATLANTIS_DISABLE_APPLY_ALL\n value: {{ .Values.disableApplyAll | quote }}\n {{- end }}\n {{- if .Values.defaultTFVersion }}\n - name: ATLANTIS_DEFAULT_TF_VERSION\n value: {{ .Values.defaultTFVersion }}\n {{- end }}\n {{- if .Values.logLevel }}\n - name: ATLANTIS_LOG_LEVEL\n value: {{ .Values.logLevel | quote}}\n {{- end }}\n {{- if .Values.tlsSecretName }}\n - name: ATLANTIS_SSL_CERT_FILE\n value: /etc/tls/tls.crt\n - name: ATLANTIS_SSL_KEY_FILE\n value: /etc/tls/tls.key\n {{- end }}\n - name: ATLANTIS_DATA_DIR\n value: /atlantis-data\n - name: ATLANTIS_REPO_WHITELIST\n value: {{ toYaml .Values.orgWhitelist }}\n - name: ATLANTIS_PORT\n value: \"4141\"\n {{- if .Values.repoConfig }}\n - name: ATLANTIS_REPO_CONFIG\n value: /etc/atlantis/repos.yaml\n {{- end }}\n {{- if .Values.atlantisUrl }}\n - name: ATLANTIS_ATLANTIS_URL\n value: {{ .Values.atlantisUrl }}\n {{- else if .Values.ingress.enabled }}\n - name: ATLANTIS_ATLANTIS_URL\n value: http://{{ .Values.ingress.host }}\n {{- end }}\n {{- if .Values.github }}\n - name: ATLANTIS_GH_USER\n value: {{ required \"github.user is required if github configuration is specified.\" .Values.github.user }}\n - name: ATLANTIS_GH_TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"atlantis.vcsSecretName\" . }}\n key: github_token\n - name: ATLANTIS_GH_WEBHOOK_SECRET\n valueFrom:\n secretKeyRef:\n name: {{ template \"atlantis.vcsSecretName\" . }}\n key: github_secret\n {{- if .Values.github.hostname }}\n - name: ATLANTIS_GH_HOSTNAME\n value: {{ .Values.github.hostname }}\n {{- end }}\n {{- end}}\n {{- if .Values.gitlab }}\n - name: ATLANTIS_GITLAB_USER\n value: {{ required \"gitlab.user is required if gitlab configuration is specified.\" .Values.gitlab.user }}\n - name: ATLANTIS_GITLAB_TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"atlantis.vcsSecretName\" . }}\n key: gitlab_token\n - name: ATLANTIS_GITLAB_WEBHOOK_SECRET\n valueFrom:\n secretKeyRef:\n name: {{ template \"atlantis.vcsSecretName\" . }}\n key: gitlab_secret\n {{- if .Values.gitlab.hostname }}\n - name: ATLANTIS_GITLAB_HOSTNAME\n value: {{ .Values.gitlab.hostname }}\n {{- end }}\n {{- end}}\n {{- if .Values.bitbucket }}\n - name: ATLANTIS_BITBUCKET_USER\n value: {{ required \"bitbucket.user is required if bitbucket configuration is specified.\" .Values.bitbucket.user }}\n - name: ATLANTIS_BITBUCKET_TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"atlantis.vcsSecretName\" . }}\n key: bitbucket_token\n {{- if .Values.bitbucket.baseURL }}\n - name: ATLANTIS_BITBUCKET_BASE_URL\n value: {{ .Values.bitbucket.baseURL }}\n - name: ATLANTIS_BITBUCKET_WEBHOOK_SECRET\n valueFrom:\n secretKeyRef:\n name: {{ template \"atlantis.vcsSecretName\" . 
}}\n key: bitbucket_secret\n {{- end }}\n {{- end }}\n {{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /healthz\n port: 4141\n scheme: {{ .Values.livenessProbe.scheme }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /healthz\n port: 4141\n scheme: {{ .Values.readinessProbe.scheme }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n {{- end }}\n volumeMounts:\n - name: atlantis-data\n mountPath: /atlantis-data\n {{- range $name, $_ := .Values.serviceAccountSecrets }}\n - name: {{ $name }}-volume\n readOnly: true\n mountPath: /etc/{{ $name }}\n {{- end }}\n {{- range .Values.googleServiceAccountSecrets }}\n - name: {{ .name }}\n readOnly: true\n mountPath: /var/secrets/{{ .name }}\n {{- end }}\n {{- if or .Values.gitconfig .Values.gitconfigSecretName }}\n - name: gitconfig-volume\n readOnly: true\n mountPath: /etc/secret-gitconfig\n {{- end }}\n {{- if or .Values.aws .Values.awsSecretName }}\n - name: aws-volume\n readOnly: true\n mountPath: /home/atlantis/.aws\n {{- end }}\n {{- if .Values.tlsSecretName }}\n - name: tls\n mountPath: /etc/tls/\n {{- end }}\n {{- if .Values.repoConfig }}\n - name: repo-config\n mountPath: /etc/atlantis/repos.yaml\n subPath: repos.yaml\n readOnly: true\n {{- end }}\n {{- if .Values.config }}\n - name: config\n mountPath: /etc/atlantis/atlantis.yaml\n subPath: atlantis.yaml\n readOnly: true\n {{- end }}\n {{- if .Values.extraVolumeMounts }}\n{{ toYaml .Values.extraVolumeMounts | indent 10 }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n volumeClaimTemplates:\n - metadata:\n name: atlantis-data\n spec:\n accessModes: [\"ReadWriteOnce\"] # Volume should not be shared by multiple nodes.\n {{- if .Values.storageClassName }}\n storageClassName: {{ .Values.storageClassName }} # Storage class of the volume\n {{- end }}\n resources:\n requests:\n # The biggest thing Atlantis stores is the Git repo when it checks it out.\n # It deletes the repo after the pull request is merged.\n storage: {{ .Values.dataStorage }}\n",
"# test-atlantis-configmap.yaml\n{{- if .Values.test.enabled -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"atlantis.fullname\" . }}-tests\ndata:\n run.sh: |-\n @test \"Atlantis UI is available\" {\n ATLANTIS_URL={{ template \"atlantis.url\" . }}\n echo \"Trying Atlantis at: $ATLANTIS_URL\"\n curl $ATLANTIS_URL\n }\n{{- end }}\n",
"# test-atlantis-pod.yaml\n{{- if .Values.test.enabled -}}\napiVersion: v1\nkind: Pod\nmetadata:\n name: \"{{ .Release.Name }}-ui-test-{{ randAlphaNum 5 | lower }}\"\n annotations:\n \"helm.sh/hook\": test-success\nspec:\n initContainers:\n - name: test-framework\n image: dduportal/bats:0.4.0\n command:\n - \"bash\"\n - \"-c\"\n - |\n set -ex\n # copy bats to tools dir\n cp -R /usr/local/libexec/ /tools/bats/\n volumeMounts:\n - mountPath: /tools\n name: tools\n containers:\n - name: {{ .Release.Name }}-ui-test\n image: {{ .Values.test.image }}:{{ .Values.test.imageTag }}\n command: [\"/tools/bats/bats\", \"-t\", \"/tests/run.sh\"]\n volumeMounts:\n - mountPath: /tests\n name: tests\n readOnly: true\n - mountPath: /tools\n name: tools\n volumes:\n - name: tests\n configMap:\n name: {{ template \"atlantis.fullname\" . }}-tests\n - name: tools\n emptyDir: {}\n restartPolicy: Never\n{{- end }}\n"
] | ## -------------------------- ##
# Values to override for your instance.
## -------------------------- ##
## An option to override the Atlantis URL;
## if not using an ingress, set it to the external IP.
# atlantisUrl: http://10.0.0.0
# Replace this with your own repo whitelist:
orgWhitelist: <replace-me>
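# Example whitelist entry (hypothetical org; wildcards are supported by Atlantis):
# orgWhitelist: github.com/myorg/*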
# logLevel: "debug"
# If using GitHub, specify like the following:
# github:
# user: foo
# token: bar
# secret: baz
# GitHub Enterprise only:
# hostname: github.your.org
# (The chart will perform the base64 encoding for you for values that are stored in secrets.)
# If using GitLab, specify like the following:
# gitlab:
# user: foo
# token: bar
# secret: baz
# GitLab Enterprise only:
# hostname: gitlab.your.org
# (The chart will perform the base64 encoding for you for values that are stored in secrets.)
# If using Bitbucket, specify like the following:
# bitbucket:
# user: foo
# token: bar
# Bitbucket Server only:
# secret: baz
# baseURL: https://bitbucket.yourorganization.com
# (The chart will perform the base64 encoding for you for values that are stored in secrets.)
# If managing secrets outside the chart for the webhook, use this variable to reference the secret name
# vcsSecretName: 'mysecret'
# When referencing Terraform modules in private repositories, it may be helpful
# (or even necessary) to use redirection in a .gitconfig like so:
# gitconfig: |
# [url "https://[email protected]"]
# insteadOf = https://github.com
# [url "https://[email protected]"]
# insteadOf = ssh://[email protected]
# [url "https://oauth2:[email protected]"]
# insteadOf = https://gitlab.com
# [url "https://oauth2:[email protected]"]
# insteadOf = ssh://[email protected]
# Source: https://stackoverflow.com/questions/42148841/github-clone-with-oauth-access-token
# If managing secrets outside the chart for the gitconfig, use this variable to reference the secret name
# gitconfigSecretName: 'mygitconfigsecret'
# To specify AWS credentials to be mapped to ~/.aws:
# aws:
# credentials: |
# [default]
# aws_access_key_id=YOUR_ACCESS_KEY_ID
# aws_secret_access_key=YOUR_SECRET_ACCESS_KEY
# region=us-east-1
# config: |
# [profile a_role_to_assume]
# role_arn = arn:aws:iam::123456789:role/service-role/roleToAssume
# source_profile = default
# To reference an already existing Secret object with AWS credentials
# awsSecretName: 'mysecretwithawscreds'
## To be used for mounting credential files (when using the Google provider).
serviceAccountSecrets:
# credentials: <json file as base64 encoded string>
# credentials-staging: <json file as base64 encoded string>
## -------------------------- ##
# Default values for atlantis (override as needed).
## -------------------------- ##
image:
repository: runatlantis/atlantis
tag: v0.14.0
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# imagePullSecrets:
# - myRegistryKeySecretName
## Use Server Side Repo Config,
## ref: https://www.runatlantis.io/docs/server-side-repo-config.html
## Example default configuration
# repoConfig: |
# ---
# repos:
# - id: /.*/
# apply_requirements: []
# workflow: default
# allowed_overrides: []
# allow_custom_workflows: false
# workflows:
# default:
# plan:
# steps: [init, plan]
# apply:
# steps: [apply]
# allowForkPRs enables atlantis to run on fork Pull Requests
allowForkPRs: false
## defaultTFVersion sets the default terraform version to be used in the atlantis server
# defaultTFVersion: 0.12.0
# disableApplyAll disables running `atlantis apply` without any flags
disableApplyAll: false
# We only need to check every 60s since Atlantis is not a high-throughput service.
livenessProbe:
enabled: true
periodSeconds: 60
initialDelaySeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
scheme: HTTP
readinessProbe:
enabled: true
periodSeconds: 60
initialDelaySeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
scheme: HTTP
service:
type: NodePort
port: 80
podTemplate:
annotations: {}
# kube2iam example:
# iam.amazonaws.com/role: role-arn
labels: {}
statefulSet:
annotations: {}
labels: {}
ingress:
enabled: true
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
host: chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
labels: {}
resources:
requests:
memory: 1Gi
cpu: 100m
limits:
memory: 1Gi
cpu: 100m
# Disk space for Atlantis to check out repositories
dataStorage: 5Gi
replicaCount: 1
## test container details
test:
enabled: true
image: lachlanevenson/k8s-kubectl
imageTag: v1.4.8-bash
nodeSelector: {}
tolerations: []
affinity: {}
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Annotations for the Service Account
# Example:
#
# annotations:
# annotation1: value
# annotation2: value
annotations: {}
# tlsSecretName: tls
# Optionally specify additional environment variables to be populated from Kubernetes secrets.
# Useful for passing in TF_VAR_foo or other secret environment variables from Kubernetes secrets.
environmentSecrets: []
# environmentSecrets:
# - name: THE_ENV_VAR
# secretKeyRef:
# name: the_k8s_secret_name
# key: the_key_of_the_value_in_the_secret
# Optionally specify additional Kubernetes secrets to load environment variables from.
# All key-value pairs within these secrets will be set as environment variables.
# Note that any variables set here will be ignored if also defined in the env block of the atlantis statefulset.
# For example, providing ATLANTIS_GH_USER here and defining a value for github.user will result in the github.user value being used.
loadEnvFromSecrets: []
# loadEnvFromSecrets:
# - secret_one
# - secret_two
# Optionally specify google service account credentials as Kubernetes secrets. If you are using the terraform google provider you can specify the credentials as "${file("/var/secrets/some-secret-name/key.json")}".
googleServiceAccountSecrets: []
# googleServiceAccountSecrets:
# - name: some-secret-name
# secretName: the_k8s_secret_name
# Optionally specify additional volumes for the pod.
extraVolumes: []
# extraVolumes:
# - name: some-volume-name
# emptyDir: {}
# Optionally specify additional volume mounts for the container.
extraVolumeMounts: []
# extraVolumeMounts:
# - name: some-volume-name
# mountPath: /path/in/container
extraManifests: []
# extraManifests:
# - apiVersion: cloud.google.com/v1beta1
# kind: BackendConfig
# metadata:
# name: "{{ .Release.Name }}-test"
# spec:
# securityPolicy:
# name: "gcp-cloud-armor-policy-test"
|
prometheus-cloudwatch-exporter | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-cloudwatch-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"prometheus-cloudwatch-exporter.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"prometheus-cloudwatch-exporter.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate serviceAccountName for deployment.\n*/}}\n{{- define \"prometheus-cloudwatch-exporter.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n{{ default (include \"prometheus-cloudwatch-exporter.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n{{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.9-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"apps/v1beta2\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for rbac.\n*/}}\n{{- define \"rbac.apiVersion\" -}}\n{{- if .Capabilities.APIVersions.Has \"rbac.authorization.k8s.io/v1\" }}\n{{- print \"rbac.authorization.k8s.io/v1\" -}}\n{{- else -}}\n{{- print \"rbac.authorization.k8s.io/v1beta1\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for ingress.\n*/}}\n{{- define \"ingress.apiVersion\" -}}\n{{- if .Capabilities.APIVersions.Has \"networking.k8s.io/v1beta1\" }}\n{{- print \"networking.k8s.io/v1beta1\" -}}\n{{- else -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create }}\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: ClusterRole\nmetadata:\n name: {{ template \"prometheus-cloudwatch-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-cloudwatch-exporter.name\" . }}\n chart: {{ template \"prometheus-cloudwatch-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nrules:\n - apiGroups: [\"\"]\n resources: [\"secrets\",\"configmap\"]\n resourceNames: [\"{{ template \"prometheus-cloudwatch-exporter.fullname\" . }}\"]\n verbs: [\"get\"]\n{{- end }}",
"# clusterrolebinding.yaml\n{{ if .Values.rbac.create }}\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"prometheus-cloudwatch-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-cloudwatch-exporter.name\" . }}\n chart: {{ template \"prometheus-cloudwatch-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"prometheus-cloudwatch-exporter.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\nroleRef:\n kind: ClusterRole\n name: {{ template \"prometheus-cloudwatch-exporter.fullname\" . }}\n apiGroup: rbac.authorization.k8s.io\n{{ end }}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"prometheus-cloudwatch-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-cloudwatch-exporter.name\" . }}\n chart: {{ template \"prometheus-cloudwatch-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n config.yml: |\n{{ printf .Values.config | indent 4 }}\n",
"# deployment.yaml\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"prometheus-cloudwatch-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-cloudwatch-exporter.name\" . }}\n chart: {{ template \"prometheus-cloudwatch-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"prometheus-cloudwatch-exporter.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"prometheus-cloudwatch-exporter.name\" . }}\n release: {{ .Release.Name }}\n annotations:\n {{ if .Values.aws.role}}iam.amazonaws.com/role: {{ .Values.aws.role }}{{ end }}\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n checksum/secrets: {{ include (print $.Template.BasePath \"/secrets.yaml\") . | sha256sum }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n {{- if not .Values.aws.role }}\n {{- if .Values.aws.secret.name }}\n env:\n - name: AWS_ACCESS_KEY_ID\n valueFrom:\n secretKeyRef:\n key: access_key\n name: {{ .Values.aws.secret.name }}\n - name: AWS_SECRET_ACCESS_KEY\n valueFrom:\n secretKeyRef:\n key: secret_key\n name: {{ .Values.aws.secret.name }}\n {{- if .Values.aws.secret.includesSessionToken }}\n - name: AWS_SESSION_TOKEN\n valueFrom:\n secretKeyRef:\n key: security_token\n name: {{ .Values.aws.secret.name }}\n {{- end }}\n {{- else if and .Values.aws.aws_secret_access_key .Values.aws.aws_access_key_id }}\n env:\n - name: AWS_ACCESS_KEY_ID\n valueFrom:\n secretKeyRef:\n key: aws_access_key_id\n name: {{ template \"prometheus-cloudwatch-exporter.fullname\" . }}\n - name: AWS_SECRET_ACCESS_KEY\n valueFrom:\n secretKeyRef:\n key: aws_secret_access_key\n name: {{ template \"prometheus-cloudwatch-exporter.fullname\" . }}\n {{- end }}\n {{- end }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n {{- if .Values.command }}\n command: {{ toYaml .Values.command | nindent 12 -}}\n {{- end }}\n ports:\n - name: container-port\n containerPort: {{ .Values.containerPort }}\n protocol: TCP\n livenessProbe:\n httpGet:\n path: {{ .Values.livenessProbe.path }}\n port: container-port\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n readinessProbe:\n httpGet:\n path: {{ .Values.readinessProbe.path }}\n port: container-port\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumeMounts:\n - name: vol-prometheus-cloudwatch-exporter\n mountPath: /config\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.securityContext }}\n securityContext:\n{{ toYaml . 
| indent 8 }}\n {{- end }}\n serviceAccount: {{ template \"prometheus-cloudwatch-exporter.serviceAccountName\" . }}\n serviceAccountName: {{ template \"prometheus-cloudwatch-exporter.serviceAccountName\" . }}\n volumes:\n - configMap:\n defaultMode: 420\n name: {{ template \"prometheus-cloudwatch-exporter.fullname\" . }}\n name: vol-prometheus-cloudwatch-exporter\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"prometheus-cloudwatch-exporter.fullname\" . -}}\n{{- $servicePort := .Values.service.port -}}\n{{- $ingressPath := .Values.ingress.path -}}\napiVersion: {{ template \"ingress.apiVersion\" . }}\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app: {{ template \"prometheus-cloudwatch-exporter.name\" . }}\n chart: {{ template \"prometheus-cloudwatch-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.ingress.labels }}\n{{ toYaml .Values.ingress.labels | indent 4 }}\n{{- end }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . | quote }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: {{ $servicePort }}\n {{- end }}\n{{- end }}\n",
"# prometheusrule.yaml\n{{- if and ( .Capabilities.APIVersions.Has \"monitoring.coreos.com/v1\" ) ( .Values.prometheusRule.enabled ) }}\n{{- $fullName := include \"prometheus-cloudwatch-exporter.fullname\" . -}}\napiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n{{- if .Values.prometheusRule.labels }}\n labels:\n{{ toYaml .Values.prometheusRule.labels | indent 4}}\n{{- end }}\n name: {{ $fullName }}\n{{- if .Values.prometheusRule.namespace }}\n namespace: {{ .Values.prometheusRule.namespace }}\n{{- end }}\nspec:\n groups:\n - name: {{ $fullName }}\n rules:\n {{- toYaml .Values.prometheusRule.rules | nindent 6 }}\n{{- end }}\n",
"# secrets.yaml\n{{- if and (not .Values.aws.role) (not .Values.aws.secret.name) }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"prometheus-cloudwatch-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-cloudwatch-exporter.name\" . }}\n chart: {{ template \"prometheus-cloudwatch-exporter.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ntype: Opaque\ndata:\n {{ if .Values.aws.aws_access_key_id }}\n aws_access_key_id: {{ .Values.aws.aws_access_key_id | b64enc | quote }}\n {{ end }}\n {{ if .Values.aws.aws_secret_access_key }}\n aws_secret_access_key: {{ .Values.aws.aws_secret_access_key | b64enc | quote }}\n {{ end }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"prometheus-cloudwatch-exporter.fullname\" . }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n labels:\n app: {{ template \"prometheus-cloudwatch-exporter.name\" . }}\n chart: {{ template \"prometheus-cloudwatch-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.service.labels }}\n{{ toYaml .Values.service.labels | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: container-port\n protocol: TCP\n name: {{ .Values.service.portName }}\n selector:\n app: {{ template \"prometheus-cloudwatch-exporter.name\" . }}\n release: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"prometheus-cloudwatch-exporter.serviceAccountName\" . }}\n labels:\n app: {{ template \"prometheus-cloudwatch-exporter.name\" . }}\n chart: {{ template \"prometheus-cloudwatch-exporter.chart\" . }} \n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n \n annotations:\n {{- if .Values.serviceAccount.annotations }}\n {{ toYaml .Values.serviceAccount.annotations | indent 4 }}\n {{- end }}\n{{- end }}\n",
"# servicemonitor.yaml\n{{- if and ( .Capabilities.APIVersions.Has \"monitoring.coreos.com/v1\" ) ( .Values.serviceMonitor.enabled ) }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n{{- if .Values.serviceMonitor.labels }}\n labels:\n{{ toYaml .Values.serviceMonitor.labels | indent 4}}\n{{- end }}\n name: {{ template \"prometheus-cloudwatch-exporter.fullname\" . }}\n{{- if .Values.serviceMonitor.namespace }}\n namespace: {{ .Values.serviceMonitor.namespace }}\n{{- end }}\nspec:\n endpoints:\n - targetPort: {{ .Values.service.port }}\n{{- if .Values.serviceMonitor.interval }}\n interval: {{ .Values.serviceMonitor.interval }}\n{{- end }}\n{{- if .Values.serviceMonitor.telemetryPath }}\n path: {{ .Values.serviceMonitor.telemetryPath }}\n{{- end }}\n{{- if .Values.serviceMonitor.timeout }}\n scrapeTimeout: {{ .Values.serviceMonitor.timeout }}\n{{- end }}\n{{- if .Values.serviceMonitor.relabelings }}\n relabelings: \n{{ toYaml .Values.serviceMonitor.relabelings | indent 6 }}\n{{- end }}\n{{- if .Values.serviceMonitor.metricRelabelings }}\n metricRelabelings: \n{{ toYaml .Values.serviceMonitor.metricRelabelings | indent 6 }}\n{{- end }}\n jobLabel: {{ template \"prometheus-cloudwatch-exporter.fullname\" . }}\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n selector:\n matchLabels:\n app: {{ template \"prometheus-cloudwatch-exporter.name\" . }}\n release: {{ .Release.Name }}\n{{- end }}\n"
] | # Default values for prometheus-cloudwatch-exporter.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: prom/cloudwatch-exporter
tag: cloudwatch_exporter-0.8.0
pullPolicy: IfNotPresent
# Example proxy configuration:
# command:
# - 'java'
# - '-Dhttp.proxyHost=proxy.example.com'
# - '-Dhttp.proxyPort=3128'
# - '-Dhttps.proxyHost=proxy.example.com'
# - '-Dhttps.proxyPort=3128'
# - '-jar'
# - '/cloudwatch_exporter.jar'
# - '9106'
# - '/config/config.yml'
command: []
containerPort: 9106
service:
type: ClusterIP
port: 9106
portName: http
annotations: {}
labels: {}
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
aws:
role:
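  # When using kube2iam or kiam, set the IAM role the pod should assume,
  # e.g. (hypothetical role name):
  # role: cloudwatch-exporter-role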
# The name of a pre-created secret in which AWS credentials are stored. When
# set, aws_access_key_id is assumed to be in a field called access_key,
# aws_secret_access_key is assumed to be in a field called secret_key, and the
# session token, if it exists, is assumed to be in a field called
# security_token.
secret:
name:
includesSessionToken: false
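    # e.g., referencing a pre-created secret (hypothetical name):
    # name: cloudwatch-exporter-aws-creds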
# Note: Do not specify aws_access_key_id and aws_secret_access_key if you have already specified role or secret.name above
aws_access_key_id:
aws_secret_access_key:
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# annotations:
# Will add the provided map to the annotations for the created serviceAccount
# e.g.
# annotations:
# eks.amazonaws.com/role-arn: arn:aws:iam::1234567890:role/prom-cloudwatch-exporter-oidc
rbac:
# Specifies whether RBAC resources should be created
create: true
config: |-
# This is the default configuration for prometheus-cloudwatch-exporter
region: eu-west-1
period_seconds: 240
metrics:
- aws_namespace: AWS/ELB
aws_metric_name: HealthyHostCount
aws_dimensions: [AvailabilityZone, LoadBalancerName]
aws_statistics: [Average]
- aws_namespace: AWS/ELB
aws_metric_name: UnHealthyHostCount
aws_dimensions: [AvailabilityZone, LoadBalancerName]
aws_statistics: [Average]
- aws_namespace: AWS/ELB
aws_metric_name: RequestCount
aws_dimensions: [AvailabilityZone, LoadBalancerName]
aws_statistics: [Sum]
- aws_namespace: AWS/ELB
aws_metric_name: Latency
aws_dimensions: [AvailabilityZone, LoadBalancerName]
aws_statistics: [Average]
- aws_namespace: AWS/ELB
aws_metric_name: SurgeQueueLength
aws_dimensions: [AvailabilityZone, LoadBalancerName]
aws_statistics: [Maximum, Sum]
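  # Further metrics can be appended in the same shape; e.g. an RDS metric
  # (hypothetical choice; any CloudWatch namespace/metric/dimensions combination works):
  # - aws_namespace: AWS/RDS
  #   aws_metric_name: CPUUtilization
  #   aws_dimensions: [DBInstanceIdentifier]
  #   aws_statistics: [Average]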
nodeSelector: {}
tolerations: []
affinity: {}
# Configurable health checks against the /-/healthy and /-/ready endpoints
livenessProbe:
path: /-/healthy
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
path: /-/ready
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
serviceMonitor:
# When set to true, a ServiceMonitor is used to configure scraping
enabled: false
# Set the namespace the ServiceMonitor should be deployed in
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set the path to the cloudwatch-exporter telemetry endpoint
# telemetryPath: /metrics
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s
# Set relabelings for the ServiceMonitor; applied to samples before scraping
# relabelings: []
# Set metricRelabelings for the ServiceMonitor; applied to samples before ingestion
# metricRelabelings: []
#
# Example - note the Kubernetes convention of camelCase instead of Prometheus' snake_case
# metricRelabelings:
# - sourceLabels: [dbinstance_identifier]
# action: replace
# replacement: mydbname
# targetLabel: dbname
prometheusRule:
# Specifies whether a PrometheusRule should be created
enabled: false
# Set the namespace the PrometheusRule should be deployed in
# namespace: monitoring
# Set labels for the PrometheusRule, use this to define your scrape label for Prometheus Operator
# labels:
# Example - note the Kubernetes convention of camelCase instead of Prometheus' snake_case
# rules:
# - alert: EBS-Low-BurstBalance
# annotations:
# message: The EBS BurstBalance during the last 10 minutes is lower than 80%.
# expr: aws_ebs_burst_balance_average < 80
# for: 10m
# labels:
# severity: warning
# - alert: EBS-Low-BurstBalance
# annotations:
# message: The EBS BurstBalance during the last 10 minutes is lower than 50%.
# expr: aws_ebs_burst_balance_average < 50
# for: 10m
# labels:
# severity: warning
# - alert: EBS-Low-BurstBalance
# annotations:
# message: The EBS BurstBalance during the last 10 minutes is lower than 30%.
# expr: aws_ebs_burst_balance_average < 30
# for: 10m
# labels:
# severity: critical
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
securityContext:
runAsUser: 65534 # run as nobody user instead of root
|
kuberhealthy | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nSetup a chart name\n*/}}\n{{- define \"kuberhealthy.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for RBAC APIs.\n*/}}\n{{- define \"rbac.apiVersion\" -}}\n{{- if semverCompare \"^1.8-0\" .Capabilities.KubeVersion.GitVersion -}}\n\"rbac.authorization.k8s.io/v1\"\n{{- else -}}\n\"rbac.authorization.k8s.io/v1beta1\"\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: ClusterRole\nmetadata:\n name: {{ template \"kuberhealthy.name\" . }}\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n - namespaces\n - componentstatuses\n - nodes\n verbs:\n - get\n - list\n - watch\n \n",
"# clusterrolebinding.yaml\n---\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"kuberhealthy.name\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"kuberhealthy.name\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"kuberhealthy.name\" . }}\n namespace: {{ .Release.Namespace }}\n \n",
"# configmap.yaml\n{{- if .Values.prometheus.enabled -}}\n{{- if .Values.prometheus.enableAlerting -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n labels:\n app: \"prometheus\"\n prometheus: {{ .Values.prometheus.name }}\n role: alert-rules\n name: {{ template \"kuberhealthy.name\" . }}\ndata:\n kuberhealthy.rules: |-\n groups:\n - name: ./kuberhealthy.rules\n rules:\n - alert: KuberhealthyError\n expr: kuberhealthy_running < 1\n for: 5m\n labels:\n severity: critical\n annotations:\n description: Kuberhealthy is not healthy\n - alert: ClusterUnhealthy\n expr: kuberhealthy_cluster_state < 1\n for: 5m\n labels:\n severity: critical\n annotations:\n description: Kuberhealthy shows that the cluster is not healthy\n{{- end -}}\n{{- end -}}\n",
"# customresourcedefinition.yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: khstates.comcast.github.io\nspec:\n group: comcast.github.io\n version: v1\n scope: Namespaced\n names:\n plural: khstates\n singular: khstate\n kind: KuberhealthyState\n shortNames:\n - khs\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"kuberhealthy.name\" . }}\n labels:\n app: {{ template \"kuberhealthy.name\" . }}\n chart: {{ .Chart.Name }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.deployment.replicas }}\n selector:\n matchLabels:\n app: {{ template \"kuberhealthy.name\" . }}\n release: {{ .Release.Name }}\n strategy:\n rollingUpdate:\n maxSurge: {{ .Values.deployment.maxSurge }}\n maxUnavailable: {{ .Values.deployment.maxUnavailable }}\n type: RollingUpdate\n template:\n metadata:\n {{- if .Values.deployment.podAnnotations }}\n annotations:\n {{- range $key, $value := .Values.deployment.podAnnotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\n {{- if .Values.prometheus.enabled -}}\n {{- if .Values.prometheus.enableScraping -}}\n {{- if not .Values.deployment.podAnnotations }}\n annotations:\n {{- end}}\n prometheus.io/scrape: \"true\"\n prometheus.io/path: \"/metrics\"\n prometheus.io/port: \"8080\"\n {{- end }}\n {{- end }}\n labels:\n app: {{ template \"kuberhealthy.name\" . }}\n chart: {{ .Chart.Name }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n spec:\n serviceAccountName: kuberhealthy\n automountServiceAccountToken: true\n containers:\n - image: {{ .Values.image.repository }}:{{ .Values.image.tag }}\n command: {{ .Values.deployment.command }}\n {{- if .Values.deployment.args }}\n args:\n{{ toYaml .Values.deployment.args | nindent 8 }}\n {{- end }}\n ports:\n - containerPort: 8080\n name: http\n securityContext:\n runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }}\n runAsUser: {{ .Values.securityContext.runAsUser }}\n allowPrivilegeEscalation: {{ .Values.securityContext.allowPrivilegeEscalation }}\n imagePullPolicy: {{ .Values.deployment.imagePullPolicy }}\n livenessProbe:\n failureThreshold: 3\n initialDelaySeconds: 2\n periodSeconds: 4\n successThreshold: 1\n tcpSocket:\n port: 8080\n timeoutSeconds: 1\n name: {{ template \"kuberhealthy.name\" . }}\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n readinessProbe:\n failureThreshold: 3\n initialDelaySeconds: 2\n periodSeconds: 4\n successThreshold: 1\n tcpSocket:\n port: 8080\n timeoutSeconds: 1\n resources:\n requests:\n cpu: {{ .Values.resources.requests.cpu }}\n memory: {{ .Values.resources.requests.memory }}\n restartPolicy: Always\n terminationGracePeriodSeconds: 310\n{{- if .Values.tolerations.master }}\n tolerations:\n - effect: NoSchedule\n key: node-role.kubernetes.io/master\n operator: Exists\n{{- end -}}\n",
"# poddisruptionbudget.yaml\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"kuberhealthy.name\" . }}-pdb\nspec:\n minAvailable: 1\n selector:\n matchLabels:\n app: {{ template \"kuberhealthy.name\" . }}\n chart: {{ .Chart.Name }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n",
"# role.yaml\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: Role\nmetadata:\n name: {{ template \"kuberhealthy.name\" . }}\nrules:\n - apiGroups:\n - apps\n resources:\n - daemonsets\n verbs:\n - create\n - delete\n - deletecollection\n - get\n - list\n - patch\n - update\n - watch\n - apiGroups:\n - extensions\n resources:\n - daemonsets\n verbs:\n - create\n - delete\n - deletecollection\n - get\n - list\n - patch\n - update\n - watch\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - create\n - delete\n - deletecollection\n - get\n - list\n - patch\n - update\n - watch\n - apiGroups:\n - comcast.github.io\n resources:\n - khstates\n verbs:\n - create\n - delete\n - deletecollection\n - get\n - list\n - patch\n - update\n - watch\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - list\n - apiGroups:\n - \"\"\n resources:\n - namespaces\n verbs:\n - get\n - list\n",
"# rolebinding.yaml\napiVersion: {{ template \"rbac.apiVersion\" . }}\nkind: RoleBinding\nmetadata:\n name: {{ template \"kuberhealthy.name\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"kuberhealthy.name\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"kuberhealthy.name\" . }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: {{ template \"kuberhealthy.name\" . }}\n release: {{ .Release.Name }}\n name: {{ template \"kuberhealthy.name\" . }}\n {{- if .Values.service.annotations }}\n annotations:\n {{- range $key, $value := .Values.service.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.externalPort }}\n name: http\n targetPort: http\n selector:\n app: {{ template \"kuberhealthy.name\" . }}\n release: {{ .Release.Name }}\n",
"# serviceaccount.yaml\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"kuberhealthy.name\" . }}\n",
"# servicemonitor.yaml\n{{- if .Values.prometheus.enabled -}}\n{{- if .Values.prometheus.serviceMonitor -}}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app: {{ template \"kuberhealthy.name\" . }}\n chart: {{ .Chart.Name }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n prometheus: {{ .Values.prometheus.name }}\n name: {{ template \"kuberhealthy.name\" . }}\nspec:\n jobLabel: component\n selector:\n matchLabels:\n app: {{ .Chart.Name }}\n release: {{ .Release.Name }}\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n endpoints:\n - port: http\n interval: 15s\n bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token\n{{- end -}}\n{{- end -}}\n"
] | # Default values for kuberhealthy.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
prometheus:
enabled: false
name: "prometheus"
enableScraping: true
serviceMonitor: false
enableAlerting: true
image:
repository: quay.io/comcast/kuberhealthy
tag: v1.0.2
resources:
requests:
cpu: 100m
memory: 80Mi
limits:
cpu: 400m
memory: 200Mi
tolerations:
  # change to true to tolerate the master taint and allow deployment to master nodes
master: false
deployment:
replicas: 2
maxSurge: 0
maxUnavailable: 1
imagePullPolicy: IfNotPresent
podAnnotations: {}
command:
- /app/kuberhealthy
  # use this to override the location of the test image, see: https://github.com/Comcast/kuberhealthy/blob/master/docs/FLAGS.md
# args:
# - -dsPauseContainerImageOverride
# - your-repo/google_containers/pause:0.8.0
securityContext:
runAsNonRoot: true
runAsUser: 999
allowPrivilegeEscalation: false
# Please remember that changing the service type to LoadBalancer
# will expose Kuberhealthy to the internet, which could cause
# error messages shown by Kuberhealthy to be exposed to the
# public internet. It is recommended to create the service
# with ClusterIP, then to manually edit the service in order to
# securely expose the port in an appropriate way for your
# specific environment.
service:
externalPort: 80
type: ClusterIP
annotations: {}
|
heartbeat | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"heartbeat.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"heartbeat.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"heartbeat.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"heartbeat.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"heartbeat.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"heartbeat.fullname\" . }}\n labels:\n app: {{ template \"heartbeat.name\" . }}\n chart: {{ template \"heartbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nrules:\n- apiGroups: [\"\"]\n resources:\n - namespaces\n - pods\n verbs: [\"get\", \"list\", \"watch\"]\n{{- end -}}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"heartbeat.fullname\" . }}\n labels:\n app: {{ template \"heartbeat.name\" . }}\n chart: {{ template \"heartbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"heartbeat.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"heartbeat.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# daemonset.yaml\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: {{ template \"heartbeat.fullname\" . }}\n labels:\n app: {{ template \"heartbeat.name\" . }}\n chart: {{ template \"heartbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"heartbeat.name\" . }}\n release: {{ .Release.Name }}\n minReadySeconds: 10\n updateStrategy:\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n template:\n metadata:\n labels:\n app: {{ template \"heartbeat.name\" . }}\n release: {{ .Release.Name }}\n annotations:\n checksum/secret: {{ toYaml .Values.config | sha256sum }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n args:\n - \"-e\"\n{{- if .Values.plugins }}\n - \"--plugin\"\n - {{ .Values.plugins | join \",\" | quote }}\n{{- end }}\n env:\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: NODE_IP\n valueFrom:\n fieldRef:\n fieldPath: status.hostIP\n{{- range $key, $value := .Values.extraVars }}\n - name: {{ $key }}\n value: {{ $value }}\n{{- end }}\n securityContext:\n runAsUser: 0\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n - name: heartbeat-config\n mountPath: /usr/share/heartbeat/heartbeat.yml\n readOnly: true\n subPath: heartbeat.yml\n - name: data\n mountPath: /usr/share/heartbeat/data\n{{- if .Values.extraVolumeMounts }}\n{{ toYaml .Values.extraVolumeMounts | indent 8 }}\n{{- end }}\n volumes:\n - name: heartbeat-config\n secret:\n secretName: {{ template \"heartbeat.fullname\" . }}\n - name: data\n hostPath:\n path: /var/lib/heartbeat\n type: DirectoryOrCreate\n{{- if .Values.extraVolumes }}\n{{ toYaml .Values.extraVolumes | indent 6 }}\n{{- end }}\n{{- if .Values.priorityClassName }}\n priorityClassName: {{ .Values.priorityClassName }}\n{{- end }}\n terminationGracePeriodSeconds: 60\n{{- if .Values.hostNetwork }}\n hostNetwork: true\n dnsPolicy: ClusterFirstWithHostNet\n{{- end }}\n serviceAccountName: {{ template \"heartbeat.serviceAccountName\" . }}\n{{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n{{- end }}\n{{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n{{- end }}\n",
"# secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"heartbeat.fullname\" . }}\n labels:\n app: {{ template \"heartbeat.name\" . }}\n chart: {{ template \"heartbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n heartbeat.yml: {{ toYaml .Values.config | indent 4 | b64enc }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"heartbeat.serviceAccountName\" . }}\n labels:\n app: {{ template \"heartbeat.name\" . }}\n chart: {{ template \"heartbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- end -}}\n"
] | image:
repository: docker.elastic.co/beats/heartbeat
tag: 6.7.0
pullPolicy: IfNotPresent
config:
# See https://www.elastic.co/guide/en/beats/heartbeat/current/heartbeat-reference-yml.html for reference
heartbeat.monitors:
- type: icmp
schedule: '*/5 * * * * * *'
hosts: ["localhost"]
ipv4: true
timeout: 16s
wait: 1s
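  # Additional monitor types follow the same list shape; a minimal illustrative HTTP check:
  # - type: http
  #   schedule: '@every 10s'
  #   urls: ["http://localhost:9200"]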
processors:
- add_cloud_metadata:
output.file:
path: "/usr/share/heartbeat/data"
filename: heartbeat
rotate_every_kb: 10000
number_of_files: 5
# output.elasticsearch:
# hosts: ["elasticsearch:9200"]
# protocol: "https"
# username: "elastic"
# password: "changeme"
# List of beat plugins
plugins: []
# - kinesis.so
hostNetwork: false
# A map of additional environment variables
extraVars: {}
# test1: "test2"
# Add additional volumes and mounts, for example to read other log files on the host
extraVolumes: []
# - hostPath:
# path: /var/log
# name: varlog
extraVolumeMounts: []
# - name: varlog
# mountPath: /host/var/log
# readOnly: true
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 200Mi
# requests:
# cpu: 100m
# memory: 100Mi
priorityClassName: ""
nodeSelector: {}
tolerations: []
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
|
distributed-jmeter | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"distributed-jmeter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"distributed-jmeter.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"distributed-jmeter.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# jmeter-master-deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"distributed-jmeter.fullname\" . }}-master\n labels:\n app.kubernetes.io/name: {{ include \"distributed-jmeter.name\" . }}\n helm.sh/chart: {{ include \"distributed-jmeter.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/component: master\nspec:\n replicas: {{ .Values.master.replicaCount }}\n strategy:\n type: RollingUpdate\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"distributed-jmeter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/component: master\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"distributed-jmeter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/component: master\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n args:\n - master\n ports:\n - containerPort: 60000\n",
"# jmeter-server-deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"distributed-jmeter.fullname\" . }}-server\n labels:\n app.kubernetes.io/name: {{ include \"distributed-jmeter.name\" . }}\n helm.sh/chart: {{ include \"distributed-jmeter.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/component: server\nspec:\n replicas: {{ .Values.server.replicaCount }}\n strategy:\n type: RollingUpdate\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"distributed-jmeter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/component: server\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"distributed-jmeter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/component: server\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n args: [\"server\"]\n ports:\n - containerPort: 50000\n - containerPort: 1099\n",
"# jmeter-server-service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"distributed-jmeter.fullname\" . }}-server \n labels:\n app.kubernetes.io/name: {{ include \"distributed-jmeter.name\" . }}\n helm.sh/chart: {{ include \"distributed-jmeter.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/component: server\nspec:\n clusterIP: None\n ports:\n - port: 50000\n protocol: TCP\n name: tcp-50000\n - port: 1099\n protocol: TCP\n name: tcp-1099\n selector:\n app.kubernetes.io/name: {{ include \"distributed-jmeter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/component: server\n"
] | # Default values for distributed-jmeter.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
master:
## The number of pods in the master deployment
replicaCount: 1
server:
## The number of pods in the server deployment
replicaCount: 3
image:
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
pullPolicy: IfNotPresent
## The repository and image
## ref: https://hub.docker.com/r/pedrocesarti/jmeter-docker/
repository: "pedrocesarti/jmeter-docker"
## The tag for the image
## ref: https://hub.docker.com/r/pedrocesarti/jmeter-docker/tags/
tag: 3.3
|
gocd | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"gocd.name\" -}}\n{{- default .Chart.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"gocd.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"gocd.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"gocd.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use for agents\n*/}}\n{{- define \"gocd.agentServiceAccountName\" -}}\n{{- if .Values.agent.serviceAccount.reuseTopLevelServiceAccount -}}\n {{ template \"gocd.serviceAccountName\" . }}\n{{- else -}}\n {{ default \"default\" .Values.agent.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# configmap.yaml\n{{- if .Values.server.shouldPreconfigure }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"gocd.fullname\" . }}\n labels:\n app: {{ template \"gocd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ndata:\n preconfigure_server.sh: |-\n #!/bin/bash\n\n SERVICE_ACCOUNT_PATH=/var/run/secrets/kubernetes.io/serviceaccount\n KUBE_TOKEN=$(<${SERVICE_ACCOUNT_PATH}/token)\n\n while true\n do\n status_code=$(curl 'http://localhost:8153/go/api/v1/health' -o /dev/null -w \"%{http_code}\")\n if [ $status_code == 200 ]; then\n break\n fi\n sleep 10\n done\n\n set -e\n\n echo \"checking if server has already been configured\" >> /godata/logs/preconfigure.log\n\n if [ -f /godata/logs/preconfigure_complete.log ]\n then\n echo \"Existing server configuration found in cruise-config.xml. Skipping preconfigure_server scripts.\" >> /godata/logs/preconfigure.log\n exit 0\n fi\n\n echo \"No configuration found in cruise-config.xml. Using default preconfigure_server scripts to configure server\" >> /godata/logs/preconfigure.log\n\n echo \"Trying to configure cluster profile.\" >> /godata/logs/preconfigure.log\n\n (curl --fail -i 'http://localhost:8153/go/api/admin/elastic/cluster_profiles' \\\n -H'Accept: application/vnd.go.cd+json' \\\n -H 'Content-Type: application/json' \\\n -X POST -d '{\n \"id\": \"k8-cluster-profile\",\n \"plugin_id\": \"cd.go.contrib.elasticagent.kubernetes\",\n \"properties\": [\n {\n \"key\": \"go_server_url\",\n \"value\": \"http://{{ template \"gocd.fullname\" . }}-server:{{ .Values.server.service.httpPort }}/go\"\n },\n {\n \"key\": \"kubernetes_cluster_url\",\n \"value\": \"https://'$KUBERNETES_SERVICE_HOST':'$KUBERNETES_SERVICE_PORT_HTTPS'\"\n },\n {\n \"key\": \"namespace\",\n \"value\": \"{{ .Release.Namespace }}\"\n },\n {\n \"key\": \"security_token\",\n \"value\": \"'$KUBE_TOKEN'\"\n }\n ]\n }' >> /godata/logs/preconfigure.log)\n\n echo \"Trying to create an elastic profile now.\" >> /godata/logs/preconfigure.log\n\n (curl --fail -i 'http://localhost:8153/go/api/elastic/profiles' \\\n -H 'Accept: application/vnd.go.cd+json' \\\n -H 'Content-Type: application/json' \\\n -X POST -d '{\n \"id\": \"demo-app\",\n \"cluster_profile_id\": \"k8-cluster-profile\",\n \"properties\": [\n {\n \"key\": \"Image\",\n \"value\": \"gocd/gocd-agent-docker-dind:v{{ .Chart.AppVersion }}\"\n },\n {\n \"key\": \"PodConfiguration\",\n \"value\": \"apiVersion: v1\\nkind: Pod\\nmetadata:\\n name: gocd-agent-{{ `{{ POD_POSTFIX }}` }}\\n labels:\\n app: web\\nspec:\\n serviceAccountName: {{ template \"gocd.agentServiceAccountName\" . 
}}\\n containers:\\n - name: gocd-agent-container-{{ `{{ CONTAINER_POSTFIX }}` }}\\n image: gocd/gocd-agent-docker-dind:v{{ .Chart.AppVersion }}\\n securityContext:\\n privileged: true\"\n },\n {\n \"key\": \"PodSpecType\",\n \"value\": \"yaml\"\n },\n {\n \"key\": \"Privileged\",\n \"value\": \"true\"\n }\n ]\n }' >> /godata/logs/preconfigure.log)\n\n echo \"Trying to creating a hello world pipeline.\" >> /godata/logs/preconfigure.log\n\n (curl --fail -i 'http://localhost:8153/go/api/admin/pipelines' \\\n -H 'Accept: application/vnd.go.cd+json' \\\n -H 'Content-Type: application/json' \\\n -X POST -d '{ \"group\": \"sample\",\n \"pipeline\": {\n \"label_template\": \"${COUNT}\",\n \"name\": \"getting_started_pipeline\",\n \"materials\": [\n {\n \"type\": \"git\",\n \"attributes\": {\n \"url\": \"https://github.com/gocd-contrib/getting-started-repo\",\n \"shallow_clone\": true\n }\n }\n ],\n \"stages\": [\n {\n \"name\": \"default_stage\",\n \"jobs\": [\n {\n \"name\": \"default_job\",\n \"elastic_profile_id\": \"demo-app\",\n \"tasks\": [\n {\n \"type\": \"exec\",\n \"attributes\": {\n \"command\": \"./build\"\n }\n }\n ],\n \"tabs\": [\n {\n \"name\": \"Sample\",\n \"path\": \"my-artifact.html\"\n }\n ],\n \"artifacts\": [\n {\n \"type\": \"build\",\n \"source\": \"my-artifact.html\"\n }\n ]\n }\n ]\n }\n ]\n }\n }' >> /godata/logs/preconfigure.log )\n\n echo \"Done preconfiguring the GoCD server\" > /godata/logs/preconfigure_complete.log\n\n{{- end }}\n",
"# gocd-agent-deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"gocd.fullname\" . }}-agent\n labels:\n app: {{ template \"gocd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n component: agent\n annotations:\n {{- range $key, $value := .Values.agent.annotations.deployment }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n replicas: {{ .Values.agent.replicaCount }}\n {{- if .Values.agent.deployStrategy }}\n strategy:\n{{ toYaml .Values.agent.deployStrategy | indent 4 }}\n {{- end }}\n selector:\n matchLabels:\n app: {{ template \"gocd.name\" . }}\n release: {{ .Release.Name | quote }}\n component: agent\n template:\n metadata:\n labels:\n app: {{ template \"gocd.name\" . }}\n release: {{ .Release.Name | quote }}\n component: agent\n annotations:\n {{- range $key, $value := .Values.agent.annotations.pod }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n spec:\n serviceAccountName: {{ template \"gocd.agentServiceAccountName\" . }}\n securityContext:\n runAsUser: {{ .Values.agent.securityContext.runAsUser }}\n runAsGroup: {{ .Values.agent.securityContext.runAsGroup }}\n fsGroup: {{ .Values.agent.securityContext.fsGroup }}\n {{- if or .Values.agent.persistence.enabled (or .Values.agent.security.ssh.enabled .Values.agent.persistence.extraVolumes) }}\n volumes:\n {{- end }}\n {{- if .Values.agent.persistence.enabled }}\n - name: goagent-vol\n persistentVolumeClaim:\n claimName: {{ .Values.agent.persistence.existingClaim | default (printf \"%s-%s\" (include \"gocd.fullname\" .) \"agent\") }}\n {{- end }}\n {{- if ne (len .Values.agent.persistence.extraVolumes) 0 }}\n{{ toYaml .Values.agent.persistence.extraVolumes | indent 8 }}\n {{- end }}\n {{- if .Values.agent.security.ssh.enabled }}\n - name: ssh-secrets\n secret:\n secretName: {{ .Values.agent.security.ssh.secretName }}\n defaultMode: {{ .Values.agent.security.ssh.defaultMode | default 256 }}\n {{- end }}\n {{- if .Values.agent.initContainers }}\n initContainers:\n{{ toYaml .Values.agent.initContainers | indent 8 }}\n {{- end }}\n containers:\n - name: {{ template \"gocd.name\" . }}-agent\n {{- if .Values.agent.image.tag }}\n image: \"{{ .Values.agent.image.repository }}:{{ .Values.agent.image.tag }}\"\n {{- else }}\n image: \"{{ .Values.agent.image.repository }}:v{{ .Chart.AppVersion }}\"\n {{- end }}\n imagePullPolicy: {{ .Values.agent.image.pullPolicy }}\n resources:\n{{ toYaml .Values.agent.resources | indent 12 }}\n env:\n - name: GO_SERVER_URL\n {{- if .Values.agent.env.goServerUrl }}\n value: {{ .Values.agent.env.goServerUrl }}\n {{- else }}\n value: \"http://{{ template \"gocd.fullname\" . 
}}-server:{{ .Values.server.service.httpPort }}/go\"\n {{- end }}\n\n {{- if .Values.agent.env.agentAutoRegisterKey }}\n - name: AGENT_AUTO_REGISTER_KEY\n value: {{ .Values.agent.env.agentAutoRegisterKey }}\n {{- end }}\n {{- if .Values.agent.env.agentAutoRegisterResources }}\n - name: AGENT_AUTO_REGISTER_RESOURCES\n value: {{ .Values.agent.env.agentAutoRegisterResources }}\n {{- end }}\n {{- if .Values.agent.env.agentAutoRegisterEnvironments }}\n - name: AGENT_AUTO_REGISTER_ENVIRONMENTS\n value: {{ .Values.agent.env.agentAutoRegisterEnvironments }}\n {{- else if .Values.agent.env.agentAutoRegisterEnvironemnts }}\n - name: AGENT_AUTO_REGISTER_ENVIRONMENTS\n value: {{ .Values.agent.env.agentAutoRegisterEnvironemnts }}\n {{- end }}\n {{- if .Values.agent.env.agentAutoRegisterHostname }}\n - name: AGENT_AUTO_REGISTER_HOSTNAME\n value: {{ .Values.agent.env.agentAutoRegisterHostname }}\n {{- end }}\n {{- if .Values.agent.env.goAgentJvmOpts }}\n - name: GOCD_AGENT_JVM_OPTS\n value: {{ .Values.agent.env.goAgentJvmOpts }}\n {{- end }}\n {{- if .Values.agent.env.goAgentBootstrapperJvmArgs }}\n - name: AGENT_BOOTSTRAPPER_JVM_ARGS\n value: {{ .Values.agent.env.goAgentBootstrapperJvmArgs }}\n {{- end }}\n {{- if .Values.agent.env.goAgentBootstrapperArgs }}\n - name: AGENT_BOOTSTRAPPER_ARGS\n value: {{ .Values.agent.env.goAgentBootstrapperArgs }}\n {{- end }}\n {{- if .Values.agent.env.extraEnvVars }}\n{{ toYaml .Values.agent.env.extraEnvVars | indent 12 }}\n {{- end }}\n {{- if .Values.agent.healthCheck.enabled }}\n livenessProbe:\n httpGet:\n path: /health/v1/isConnectedToServer\n port: 8152\n initialDelaySeconds: {{ .Values.agent.healthCheck.initialDelaySeconds }}\n failureThreshold: {{ .Values.agent.healthCheck.failureThreshold }}\n periodSeconds: {{ .Values.agent.healthCheck.periodSeconds }}\n readinessProbe:\n httpGet:\n path: /health/v1/isConnectedToServer\n port: 8152\n initialDelaySeconds: {{ .Values.agent.healthCheck.initialDelaySeconds }}\n {{- end }}\n {{- if or .Values.agent.persistence.enabled (or .Values.agent.security.ssh.enabled .Values.agent.persistence.extraVolumeMounts) }}\n volumeMounts:\n {{- end }}\n {{- if .Values.agent.persistence.enabled }}\n - name: goagent-vol\n mountPath: /home/go\n subPath: {{ .Values.agent.persistence.subpath.homego }}\n - name: {{ .Values.agent.persistence.name.dockerEntryPoint }}\n mountPath: /docker-entrypoint.d\n subPath: {{ .Values.agent.persistence.subpath.dockerEntryPoint }}\n {{- end }}\n {{- if ne (len .Values.agent.persistence.extraVolumeMounts) 0 }}\n{{ toYaml .Values.agent.persistence.extraVolumeMounts | indent 12 }}\n {{- end }}\n {{- if .Values.agent.security.ssh.enabled }}\n - name: ssh-secrets\n readOnly: true\n mountPath: /home/go/.ssh\n {{- end }}\n {{- if or .Values.agent.preStop .Values.agent.postStart }}\n lifecycle:\n {{- if .Values.agent.preStop }}\n preStop:\n exec:\n command:\n{{ toYaml .Values.agent.preStop | indent 18 }}\n {{- end }}\n {{- if .Values.agent.postStart }}\n postStart:\n exec:\n command:\n{{ toYaml .Values.agent.postStart | indent 18 }}\n {{- end }}\n {{- end }}\n securityContext:\n privileged: {{ .Values.agent.privileged }}\n {{- if .Values.agent.terminationGracePeriodSeconds }}\n terminationGracePeriodSeconds: {{ .Values.agent.terminationGracePeriodSeconds }}\n {{- end }}\n restartPolicy: {{ .Values.agent.restartPolicy }}\n {{- if .Values.agent.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.agent.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.agent.affinity }}\n affinity:\n{{ toYaml 
.Values.agent.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.agent.tolerations }}\n tolerations:\n{{ toYaml .Values.agent.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.agent.hostAliases }}\n hostAliases:\n{{ toYaml .Values.agent.hostAliases | indent 8 }}\n {{- end }}\n",
"# gocd-agent-homego-pvc.yaml\n{{- if and .Values.agent.persistence.enabled (not .Values.agent.persistence.existingClaim) -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"gocd.fullname\" . }}-agent\n labels:\n app: {{ template \"gocd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n component: agent\nspec:\n accessModes:\n - {{ .Values.agent.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.agent.persistence.size | quote }}\n{{- if .Values.agent.persistence.storageClass }}\n storageClassName: {{ .Values.agent.persistence.storageClass }}\n{{- else }}\n storageClassName: \"\"\n{{- end }}\n{{- if .Values.agent.persistence.pvSelector }}\n selector:\n{{ toYaml .Values.agent.persistence.pvSelector | indent 4 }}\n{{- end }}\n{{- end }}\n",
"# gocd-ea-cluster-role-binding.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/{{ required \"A valid .Values.rbac.apiVersion entry required!\" .Values.rbac.apiVersion }}\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"gocd.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"gocd.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"gocd.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ .Values.rbac.roleRef | default (printf \"%s\" (include \"gocd.fullname\" .)) }}\n{{ end }}",
"# gocd-ea-cluster-role.yaml\n{{ if and .Values.rbac.create (not .Values.rbac.roleRef) }}\napiVersion: rbac.authorization.k8s.io/{{ required \"A valid .Values.rbac.apiVersion entry required!\" .Values.rbac.apiVersion }}\nkind: ClusterRole\nmetadata:\n name: {{ template \"gocd.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"gocd.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nrules:\n- apiGroups: [\"\"]\n resources:\n - pods\n - pods/log\n verbs: [\"*\"]\n- apiGroups: [\"\"]\n resources:\n - nodes\n verbs: [\"get\", \"list\"]\n- apiGroups: [\"\"]\n resources:\n - events\n verbs: [\"list\", \"watch\"]\n- apiGroups: [\"\"]\n resources:\n - namespaces\n verbs: [\"get\"]\n{{ end }}\n",
"# gocd-ea-service-account.yaml\n{{ if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"gocd.serviceAccountName\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"gocd.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\n{{ end }}\n",
"# gocd-server-deployment.yaml\n{{- if .Values.server.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"gocd.fullname\" . }}-server\n labels:\n app: {{ template \"gocd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n component: server\n annotations:\n {{- range $key, $value := .Values.server.annotations.deployment }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n replicas: 1\n strategy:\n type: Recreate\n selector:\n matchLabels:\n app: {{ template \"gocd.name\" . }}\n release: {{ .Release.Name | quote }}\n component: server\n template:\n metadata:\n labels:\n app: {{ template \"gocd.name\" . }}\n release: {{ .Release.Name | quote }}\n component: server\n annotations:\n {{- range $key, $value := .Values.server.annotations.pod }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n spec:\n securityContext:\n runAsUser: {{ .Values.server.securityContext.runAsUser }}\n runAsGroup: {{ .Values.server.securityContext.runAsGroup }}\n fsGroup: {{ .Values.server.securityContext.fsGroup }}\n serviceAccountName: {{ template \"gocd.serviceAccountName\" . }}\n {{- if or .Values.server.shouldPreconfigure (or .Values.server.persistence.enabled (or .Values.server.security.ssh.enabled .Values.server.persistence.extraVolumes)) }}\n volumes:\n {{- end }}\n {{- if .Values.server.shouldPreconfigure }}\n - name: config-vol\n configMap:\n name: {{ template \"gocd.fullname\" . }}\n {{- end }}\n {{- if .Values.server.persistence.enabled }}\n - name: goserver-vol\n persistentVolumeClaim:\n claimName: {{ .Values.server.persistence.existingClaim | default (printf \"%s-%s\" (include \"gocd.fullname\" .) \"server\") }}\n {{- end }}\n {{- if ne (len .Values.server.persistence.extraVolumes) 0 }}\n{{ toYaml .Values.server.persistence.extraVolumes | indent 8 }}\n {{- end }}\n {{- if .Values.server.security.ssh.enabled }}\n - name: ssh-secrets\n secret:\n secretName: {{ .Values.server.security.ssh.secretName }}\n defaultMode: {{ .Values.server.security.ssh.defaultMode | default 256 }}\n {{- end }}\n {{- if .Values.server.initContainers }}\n initContainers:\n{{ toYaml .Values.server.initContainers | indent 8 }}\n {{- end }}\n containers:\n {{- if .Values.server.sidecarContainers }}\n{{ toYaml .Values.server.sidecarContainers | indent 8 }}\n {{- end }}\n - name: {{ template \"gocd.name\" . 
}}-server\n {{- if .Values.server.image.tag }}\n image: \"{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}\"\n {{- else }}\n image: \"{{ .Values.server.image.repository }}:v{{ .Chart.AppVersion }}\"\n {{- end }}\n imagePullPolicy: {{ .Values.server.image.pullPolicy }}\n env:\n {{- if .Values.server.env.goServerJvmOpts }}\n - name: GOCD_SERVER_JVM_OPTS\n value: {{ .Values.server.env.goServerJvmOpts }}\n {{- end }}\n {{- if .Values.server.env.extraEnvVars }}\n{{ toYaml .Values.server.env.extraEnvVars | indent 12 }}\n {{- end }}\n ports:\n - containerPort: 8153\n livenessProbe:\n httpGet:\n path: /go/api/v1/health\n port: 8153\n initialDelaySeconds: {{ .Values.server.healthCheck.initialDelaySeconds }}\n periodSeconds: {{ .Values.server.healthCheck.periodSeconds }}\n failureThreshold: {{ .Values.server.healthCheck.failureThreshold }}\n readinessProbe:\n httpGet:\n path: /go/api/v1/health\n port: 8153\n initialDelaySeconds: {{ .Values.server.healthCheck.initialDelaySeconds }}\n periodSeconds: {{ .Values.server.healthCheck.periodSeconds }}\n failureThreshold: {{ .Values.server.healthCheck.failureThreshold }}\n {{- if or .Values.server.shouldPreconfigure (or .Values.server.persistence.enabled (or .Values.server.security.ssh.enabled .Values.server.persistence.extraVolumeMounts)) }}\n volumeMounts:\n {{- end }}\n {{- if .Values.server.shouldPreconfigure }}\n - name: config-vol\n mountPath: /preconfigure_server.sh\n subPath: preconfigure_server.sh\n {{- end }}\n {{- if .Values.server.persistence.enabled }}\n - name: goserver-vol\n mountPath: /godata\n subPath: {{ .Values.server.persistence.subpath.godata }}\n - name: goserver-vol\n mountPath: /home/go\n subPath: {{ .Values.server.persistence.subpath.homego }}\n - name: {{ .Values.server.persistence.name.dockerEntryPoint }}\n mountPath: /docker-entrypoint.d\n subPath: {{ .Values.server.persistence.subpath.dockerEntryPoint }}\n {{- end }}\n {{- if ne (len .Values.server.persistence.extraVolumeMounts) 0 }}\n{{ toYaml .Values.server.persistence.extraVolumeMounts | indent 12 }}\n {{- end }}\n {{- if .Values.server.security.ssh.enabled }}\n - name: ssh-secrets\n readOnly: true\n mountPath: /home/go/.ssh\n {{- end }}\n {{- if or .Values.server.shouldPreconfigure .Values.server.preStop }}\n lifecycle:\n {{- if .Values.server.shouldPreconfigure}}\n postStart:\n exec:\n command:\n{{ toYaml .Values.server.preconfigureCommand | indent 18 }}\n {{- end }}\n {{- if .Values.server.preStop}}\n preStop:\n exec:\n command:\n{{ toYaml .Values.server.preStop | indent 18 }}\n {{- end }}\n {{- end }}\n resources:\n{{ toYaml .Values.server.resources | indent 12 }}\n {{- if .Values.server.terminationGracePeriodSeconds }}\n terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }}\n {{- end }}\n restartPolicy: {{ .Values.server.restartPolicy }}\n {{- if .Values.server.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.server.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.server.affinity }}\n affinity:\n{{ toYaml .Values.server.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.server.tolerations }}\n tolerations:\n{{ toYaml .Values.server.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.server.hostAliases }}\n hostAliases:\n{{ toYaml .Values.server.hostAliases | indent 8 }}\n {{- end }}\n{{- end}}\n",
"# gocd-server-pvc.yaml\n{{- if .Values.server.enabled }}\n{{- if and .Values.server.persistence.enabled (not .Values.server.persistence.existingClaim) -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"gocd.fullname\" . }}-server\n labels:\n app: {{ template \"gocd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n component: server\nspec:\n accessModes:\n - {{ .Values.server.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.server.persistence.size | quote }}\n{{- if .Values.server.persistence.storageClass }}\n{{- if (eq \"-\" .Values.server.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: {{ .Values.server.persistence.storageClass }}\n{{- end }}\n{{- end }}\n{{- if .Values.server.persistence.pvSelector }}\n selector:\n{{ toYaml .Values.server.persistence.pvSelector | indent 4 }}\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# gocd-test.yaml\napiVersion: v1\nkind: Pod\nmetadata:\n name: \"{{ template \"gocd.fullname\" . }}-test-{{ randAlphaNum 5 | lower }}\"\n labels:\n app: {{ template \"gocd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n annotations:\n \"helm.sh/hook\": test-success\nspec:\n initContainers:\n - name: \"test-framework\"\n image: \"dduportal/bats:0.4.0\"\n command:\n - \"bash\"\n - \"-c\"\n - |\n set -ex\n # copy bats to tools dir\n cp -R /usr/local/libexec/ /tools/bats/\n volumeMounts:\n - mountPath: /tools\n name: tools\n containers:\n - name: {{ template \"gocd.name\" . }}-ui-test\n image: \"gocddev/gocd-helm-build:v0.1.0\"\n command: [\"/tools/bats/bats\", \"-t\", \"/tests/run.sh\"]\n volumeMounts:\n - mountPath: /tests\n name: tests\n readOnly: true\n - mountPath: /tools\n name: tools\n volumes:\n - name: tests\n configMap:\n name: {{ template \"gocd.fullname\" . }}-tests\n - name: tools\n emptyDir: {}\n restartPolicy: Never\n",
"# ingress.yaml\n{{- if .Values.server.enabled }}\n{{- if .Values.server.ingress.enabled -}}\napiVersion: networking.k8s.io/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"gocd.fullname\" . }}-server\n labels:\n app: {{ template \"gocd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n component: server\n annotations:\n {{- range $key, $value := .Values.server.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n {{- if .Values.server.ingress.hosts }}\n {{ $dot := .}}\n rules:\n {{- range $host := .Values.server.ingress.hosts }}\n - host: {{ $host | quote }}\n http:\n paths:\n - backend:\n serviceName: {{ template \"gocd.fullname\" $dot }}-server\n servicePort: {{ $dot.Values.server.service.httpPort }}\n {{- end }}\n {{- else }}\n backend:\n serviceName: {{ template \"gocd.fullname\" . }}-server\n servicePort: {{ .Values.server.service.httpPort }}\n {{- end -}}\n {{- if .Values.server.ingress.tls }}\n tls:\n{{ toYaml .Values.server.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n",
"# service.yaml\n{{- if .Values.server.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"gocd.fullname\" . }}-server\n annotations:\n {{- range $key, $value := .Values.server.service.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n labels:\n app: {{ template \"gocd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n component: server\nspec:\n type: {{ .Values.server.service.type }}\n {{ if .Values.server.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n {{- range .Values.server.service.loadBalancerSourceRanges }}\n - {{ . }}\n {{- end }}\n {{ end }}\n ports:\n - port: {{ .Values.server.service.httpPort }}\n targetPort: 8153\n {{- if .Values.server.service.nodeHttpPort }}\n nodePort: {{ .Values.server.service.nodeHttpPort }}\n {{- end}}\n protocol: TCP\n name: http\n selector:\n app: {{ template \"gocd.name\" . }}\n release: {{ .Release.Name | quote }}\n component: server\n{{- end -}}\n",
"# test-config.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"gocd.fullname\" . }}-tests\n labels:\n app: {{ template \"gocd.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ndata:\n run.sh: |-\n @test \"Testing GoCD UI is accessible\" {\n {{- if .Values.agent.goServerUrl }}\n curl --connect-timeout 10 --retry 12 --retry-delay 10 --retry-max-time {{ .Values.server.healthCheck.initialDelaySeconds }} {{ .Values.agent.goServerUrl }}/auth/login\n {{- else }}\n curl --connect-timeout 10 --retry 12 --retry-delay 10 --retry-max-time {{ .Values.server.healthCheck.initialDelaySeconds }} \"http://{{ template \"gocd.fullname\" . }}-server:{{ .Values.server.service.httpPort }}/go/auth/login\"\n {{- end }}\n }\n\n @test \"Testing GoCD application is accessible through service\" {\n curl --retry 2 --retry-delay 10 --retry-max-time {{ .Values.server.healthCheck.initialDelaySeconds }} http://{{ template \"gocd.fullname\" . }}-server:{{ .Values.server.service.httpPort }}/go\n }"
] | # Default values for gocd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
rbac:
# Specifies whether rbac resources must be created.
create: true
  # The API version to use while creating the rbac resources. Use `kubectl api-versions | grep rbac` to find which api versions are supported for your cluster.
apiVersion: v1
# Create a cluster role binding with the existing role, do not create a new one. If left blank, a new cluster role is created.
roleRef:
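  # e.g. (illustrative) reuse the built-in cluster-admin ClusterRole instead of creating a new one:
  # roleRef: cluster-admin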
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
# If create is false and a name is not specified, the default service account is used for the cluster role binding.
name:
server:
# server.enabled is the toggle to run GoCD Server. Change to false for Agent Only Deployment.
enabled: true
# server.annotations is the annotations for the GoCD Server Deployment and Pod spec.
annotations:
deployment:
# iam.amazonaws.com/role: arn:aws:iam::xxx:role/my-custom-role
pod:
# iam.amazonaws.com/role: arn:aws:iam::xxx:role/my-custom-role
# Specify security settings for GoCD Server Pod
securityContext:
# Specify the container user for the GoCD server pod
runAsUser: 1000
# Specify the container group for the GoCD server pod
runAsGroup: 0
# Specify the container supplementary group for the GoCD server pod
fsGroup: 0
  # server.shouldPreconfigure is used to invoke a script to preconfigure the elastic agent profile and the plugin settings in the GoCD server.
  # Note: If this value is set to true, then the serviceAccount.name is configured for the GoCD server pod. The service account token is mounted as a secret and is used in the lifecycle hook.
# Note: An attempt to preconfigure the GoCD server is made. There are cases where the pre-configuration can fail and the GoCD server starts with an empty config.
shouldPreconfigure: true
preconfigureCommand:
- "/bin/bash"
- "/preconfigure_server.sh"
# server.preStop - array of commands to use in the server pre-stop lifecycle hook
# preStop:
# - "/bin/bash"
# - "/backup_and_stop.sh"
# server.terminationGracePeriodSeconds is the optional duration in seconds the gocd server pod needs to terminate gracefully.
# Note: SIGTERM is issued immediately after the pod deletion request is sent. If the pod doesn't terminate, k8s waits for terminationGracePeriodSeconds before issuing SIGKILL.
# server.terminationGracePeriodSeconds: 60
image:
# server.image.repository is the GoCD Server image name
repository: "gocd/gocd-server"
# server.image.tag is the GoCD Server image's tag
tag:
# server.image.pullPolicy is the GoCD Server image's pull policy
pullPolicy: "IfNotPresent"
## Configure GoCD server resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# requests:
# memory: 512Mi
# cpu: 300m
# limits:
# cpu: 100m
# memory: 1024Mi
# Sidecar containers that runs alongside GoCD server.
# https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/
sidecarContainers: []
# - name: sidecar-container
# image: sidecar-image:latest
# volumeMounts:
# - name: goserver-vol
# mountPath: /godata
# specify init containers, e.g. to prepopulate home directories etc
initContainers: []
# - name: download-kubectl
# image: "ellerbrock/alpine-bash-curl-ssl:latest"
# imagePullPolicy: "IfNotPresent"
# volumeMounts:
# - name: kubectl
# mountPath: /download
# workingDir: /download
# command: ["/bin/bash"]
# args:
# - "-c"
# - 'curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x ./kubectl'
# specify restart policy for server
restartPolicy: Always
  ## Node labels for assigning GoCD server pods to specific nodes
  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
nodeSelector: {}
## Affinity for assigning pods to specific nodes
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
affinity: {}
## Tolerations for allowing pods to be scheduled on nodes with matching taints
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  tolerations: []
healthCheck:
    # server.healthCheck.initialDelaySeconds is the initial delay in seconds before the health checks start
initialDelaySeconds: 90
# server.healthCheck.periodSeconds is the health check interval duration
periodSeconds: 15
# server.healthCheck.failureThreshold is the number of unsuccessful attempts made to the GoCD server health check endpoint before the container is restarted (for liveness) or marked as unready (for readiness)
failureThreshold: 10
env:
# server.env.goServerJvmOpts is a list of JVM options, which needs to be provided to the GoCD Server, typically prefixed with -D unless otherwise stated.
# Example: "-Xmx4096mb -Dfoo=bar"
goServerJvmOpts:
# server.env.extraEnvVars is the list of environment variables passed to GoCD Server
extraEnvVars:
- name: GOCD_PLUGIN_INSTALL_kubernetes-elastic-agents
value: https://github.com/gocd/kubernetes-elastic-agents/releases/download/v3.4.0-196/kubernetes-elastic-agent-3.4.0-196.jar
- name: GOCD_PLUGIN_INSTALL_docker-registry-artifact-plugin
value: https://github.com/gocd/docker-registry-artifact-plugin/releases/download/v1.1.0-104/docker-registry-artifact-plugin-1.1.0-104.jar
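    # Further plugins can be installed the same way: the GoCD server image downloads the
    # .jar at the URL of any env var named GOCD_PLUGIN_INSTALL_<plugin-id> during startup.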
service:
# server.service.type is the GoCD Server service type
type: "NodePort"
# server.service.httpPort is the GoCD Server HTTP port
httpPort: 8153
# Provide the nodeHttpPort and nodeHttpsPort if you want the service to be exposed on specific ports. Without this, random node ports will be assigned.
# server.service.nodeHttpPort is the GoCD Server Service Node HTTP port
nodeHttpPort:
    # server.service.nodeHttpsPort is the GoCD Server Service Node HTTPS port
nodeHttpsPort:
annotations:
## When using LoadBalancer service type, use the following AWS certificate from ACM
## https://aws.amazon.com/documentation/acm/
# service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:eu-west-1:123456789:certificate/abc123-abc123-abc123-abc123"
# service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "https"
# service.beta.kubernetes.io/aws-load-balancer-backend-port: "https"
## When using LoadBalancer service type, whitelist these source IP ranges
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
# loadBalancerSourceRanges:
# - 192.168.1.10/32
ingress:
# server.ingress.enabled is the toggle to enable/disable GoCD Server Ingress
enabled: true
# server.ingress.hosts is used to create an Ingress record.
# hosts:
# - ci.example.com
annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
tls:
# - secretName: ci-example-tls
# hosts:
# - ci.example.com
persistence:
# server.persistence.enabled is the toggle for server volume persistence.
enabled: true
accessMode: "ReadWriteOnce"
# The storage space that should be claimed from the persistent volume
size: 2Gi
# If defined, storageClassName: <storageClass>
# If set to "-", storageClassName: "", which disables dynamic provisioning
    # If undefined (the default) or set to null, no storageClassName spec is
    # set, choosing the 'standard' storage class available with the default provisioner (gce-pd on GKE, hostpath on minikube, etc).
# storageClass: "-"
# A manually managed Persistent Volume and Claim
    # If defined, the PVC must be created manually before the volume will be bound
existingClaim:
# To choose a suitable persistent volume from available static persistent volumes, selectors are used.
pvSelector:
# matchLabels:
# volume-type: ssd
name:
      # server.persistence.name.dockerEntryPoint is the name of the volume mounted at /docker-entrypoint.d/ on the server
dockerEntryPoint: goserver-vol
# "" for the volume root
subpath:
# godata is where the config, db, plugins are stored
godata: godata
# homego can be used for storing and mounting secrets
homego: homego
# custom entrypoint scripts that should be run before starting the GoCD server inside the container.
dockerEntryPoint: scripts
# server.persistence.extraVolumes additional server volumes
extraVolumes: []
# - name: gocd-server-init-scripts
# configMap:
# name: gocd-server-init-scripts
# defaultMode: 0755
# - name: github-key
# secret:
# secretName: github-key
# defaultMode: 0744
# server.persistence.extraVolumeMounts additional server volumeMounts
extraVolumeMounts: []
# - name: github-key
# mountPath: /etc/config/keys/
# readOnly: true
# - name: gocd-server-init-scripts
# mountPath: /docker-entrypoint.d/
# server.hostAliases allows the modification of the hosts file inside a container
hostAliases:
# - ip: "192.168.1.10"
# hostnames:
# - "example.com"
# - "www.example.com"
security:
ssh:
# server.security.ssh.enabled is the toggle to enable/disable mounting of ssh secret on GoCD server pods
enabled: false
# server.security.ssh.secretName specifies the name of the k8s secret object that contains the ssh key and known hosts
secretName: gocd-server-ssh
# server.security.ssh.defaultMode specifies the permission of the files in ~/.ssh directory
defaultMode:
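# Illustrative only: 0400 keeps the mounted key files owner-read-only, which ssh expects for private keys.
# defaultMode: 0400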
agent:
# specifies overrides for agent-specific service account creation
serviceAccount:
# specifies whether the top-level service account (also used by the server) should be reused as the service account for GoCD agents
reuseTopLevelServiceAccount: false
# if reuseTopLevelServiceAccount is false, this field specifies the name of an existing service account to be associated with GoCD agents.
# If this field is empty, the service account "default" will be used.
name:
# agent.annotations is the annotations for the GoCD Agent Deployment and Pod Spec
annotations:
deployment:
# iam.amazonaws.com/role: arn:aws:iam::xxx:role/my-custom-role
pod:
# iam.amazonaws.com/role: arn:aws:iam::xxx:role/my-custom-role
# Specify security settings for GoCD Agent Pod
securityContext:
# Specify the container user for all the GoCD agent pods
runAsUser: 1000
# Specify the container group for all the GoCD agent pods
runAsGroup: 0
# Specify the container supplementary group for all the GoCD agent pods
fsGroup: 0
# agent.replicaCount is the GoCD Agent replica count; it specifies the number of GoCD agents to run
replicaCount: 0
# agent.preStop - array of command and arguments to run in the agent pre-stop lifecycle hook
# preStop:
# - "/bin/bash"
# - "/disable_and_stop.sh"
# agent.postStart - array of command and arguments to run in agent post-start lifecycle hook
# postStart:
# - "/bin/bash"
# - "/agent_startup.sh"
# agent.deployStrategy is the strategy explained in detail at https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
# agent.terminationGracePeriodSeconds is the optional duration in seconds the gocd agent pods need to terminate gracefully.
# Note: SIGTERM is issued immediately after the pod deletion request is sent. If the pod doesn't terminate, k8s waits for terminationGracePeriodSeconds before issuing SIGKILL.
# agent.terminationGracePeriodSeconds: 60
deployStrategy: {}
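# A minimal sketch of a rolling-update strategy (values are illustrative, not chart defaults):
# deployStrategy:
#   type: RollingUpdate
#   rollingUpdate:
#     maxSurge: 1
#     maxUnavailable: 0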
image:
# agent.image.repository is the GoCD Agent image name
repository: "gocd/gocd-agent-alpine-3.9"
# agent.image.tag is the GoCD Agent image's tag
tag:
# agent.image.pullPolicy is the GoCD Agent image's pull policy
pullPolicy: "IfNotPresent"
env:
# agent.env.goServerUrl is the GoCD Server URL
goServerUrl:
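# Illustrative only; the hostname below is an assumed release/service name.
# goServerUrl: "https://my-release-gocd-server:8154/go"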
# agent.env.agentAutoRegisterKey is the GoCD Agent auto-register key
agentAutoRegisterKey:
# agent.env.agentAutoRegisterResources is the GoCD Agent auto-register resources
agentAutoRegisterResources:
# agent.env.agentAutoRegisterEnvironemnts is the GoCD Agent auto-register Environments
# deprecated because of a typo. Use agent.env.agentAutoRegisterEnvironments instead
agentAutoRegisterEnvironemnts:
# agent.env.agentAutoRegisterEnvironments is the GoCD Agent auto-register Environments
agentAutoRegisterEnvironments:
# agent.env.agentAutoRegisterHostname is the GoCD Agent auto-register hostname
agentAutoRegisterHostname:
# agent.env.goAgentJvmOpts is the GoCD Agent JVM options
goAgentJvmOpts:
# agent.env.goAgentBootstrapperArgs is the GoCD Agent bootstrapper args
goAgentBootstrapperArgs:
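# Illustrative only: one commonly used bootstrapper flag; consult the GoCD agent bootstrapper docs for the full list.
# goAgentBootstrapperArgs: "-sslVerificationMode NONE"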
# agent.env.goAgentBootstrapperJvmArgs is the GoCD Agent bootstrapper JVM args
goAgentBootstrapperJvmArgs:
# agent.env.extraEnvVars is the list of environment variables passed to GoCD Agent
extraEnvVars:
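# Illustrative only (name/value are placeholders), mirroring the server-side extraEnvVars format:
# extraEnvVars:
#   - name: MY_ENV_VAR
#     value: "some-value"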
persistence:
# agent.persistence.enabled is the toggle for agent volume persistence. Change to true if a persistent volume is available and configured manually.
enabled: false
accessMode: "ReadWriteOnce"
size: 1Gi
# If defined, storageClassName: <storageClass>
# If set to "-", storageClassName: "", which disables dynamic provisioning
# If undefined (the default) or set to null, no storageClassName spec is
# set, choosing 'standard' storage class available with the default provisioner (gce-pd on GKE, hostpath on minikube, etc.).
# storageClass: "-"
# A manually managed Persistent Volume and Claim
# If defined, the PVC must be created manually before the volume will be bound
existingClaim:
pvSelector:
# matchLabels:
# app: godata-gocd-agent
name:
# agent.persistence.name.dockerEntryPoint name of the volume mounted at /docker-entrypoint.d/ on the agent
dockerEntryPoint: goagent-vol
# "" for the volume root
subpath:
homego: homego
dockerEntryPoint: scripts
# agent.persistence.extraVolumes additional agent volumes
extraVolumes: []
# - name: gocd-agent-init-scripts
# configMap:
# name: gocd-agent-init-scripts
# defaultMode: 0755
# - name: github-key
# secret:
# secretName: github-key
# defaultMode: 0744
# agent.persistence.extraVolumeMounts additional agent volumeMounts
extraVolumeMounts: []
# - name: github-key
# mountPath: /etc/config/keys/
# readOnly: true
# - name: gocd-agent-init-scripts
# mountPath: /docker-entrypoint.d/
# specify init containers, e.g. to prepopulate home directories, etc.
initContainers: []
# - name: download-kubectl
# image: "ellerbrock/alpine-bash-curl-ssl:latest"
# imagePullPolicy: "IfNotPresent"
# volumeMounts:
# - name: kubectl
# mountPath: /download
# workingDir: /download
# command: ["/bin/bash"]
# args:
# - "-c"
# - 'curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x ./kubectl'
# specify restart policy for agents
restartPolicy: Always
# agent.privileged is needed for running Docker-in-Docker (DinD) agents
privileged: false
healthCheck:
# agent.healthCheck.enabled is the toggle for GoCD agent health checks
enabled: false
# agent.healthCheck.initialDelaySeconds is the initial delay in seconds before the health checks start
initialDelaySeconds: 60
# agent.healthCheck.periodSeconds is the health check interval duration
periodSeconds: 60
# agent.healthCheck.failureThreshold is the health check failure threshold of the GoCD agent
failureThreshold: 60
security:
ssh:
# agent.security.ssh.enabled is the toggle to enable/disable mounting of ssh secret on GoCD agent pods
enabled: false
# agent.security.ssh.secretName specifies the name of the k8s secret object that contains the ssh key and known hosts
secretName: gocd-agent-ssh
# agent.security.ssh.defaultMode specifies the permission of the files in ~/.ssh directory
defaultMode:
## Configure GoCD agent resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# requests:
# memory: 512Mi
# cpu: 300m
# limits:
# cpu: 100m
# memory: 1024Mi
# agent.hostAliases allows the modification of the hosts file inside a container
hostAliases:
# - ip: "192.168.1.10"
# hostnames:
# - "example.com"
# - "www.example.com"
## Node labels for GoCD agent pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
## Affinity for assigning pods to specific nodes
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
affinity: {}
## Tolerations for allowing pods to be scheduled on nodes with matching taints
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
|
katafygio | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"katafygio.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"katafygio.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"katafygio.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"katafygio.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"katafygio.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{- /*\nCredit: @technosophos\nhttps://github.com/technosophos/common-chart/\nlabels.standard prints the standard Helm labels.\nThe standard labels are frequently used in metadata.\n*/ -}}\n{{- define \"katafygio.labels.standard\" -}}\napp: {{ template \"katafygio.name\" . }}\nheritage: {{ .Release.Service | quote }}\nrelease: {{ .Release.Name | quote }}\nchart: {{ .Chart.Name }}-{{ .Chart.Version }}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"katafygio.fullname\" . }}\n labels:\n{{ include \"katafygio.labels.standard\" . | indent 4 }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"katafygio.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n{{ include \"katafygio.labels.standard\" . | indent 8 }}\n spec:\n serviceAccountName: {{ template \"katafygio.serviceAccountName\" . }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command:\n - /usr/bin/katafygio\n - --local-dir={{ .Values.localDir }}\n - --healthcheck-port={{ .Values.healthcheckPort }}\n {{- if .Values.gitUrl }}\n - --git-url={{ .Values.gitUrl }}\n {{- end }}\n {{- if .Values.filter }}\n - --filter={{ .Values.filter }}\n {{- end }}\n {{- if .Values.logLevel }}\n - --log-level={{ .Values.logLevel }}\n {{- end }}\n {{- if .Values.logOutput }}\n - --log-output={{ .Values.logOutput }}\n {{- end }}\n {{- if .Values.logServer }}\n - --log-server={{ .Values.logServer }}\n {{- end }}\n {{- if .Values.resyncInterval }}\n - --resync-interval={{ .Values.resyncInterval }}\n {{- end }}\n {{- if .Values.noGit }}\n - --no-git\n {{- end }}\n {{- if .Values.excludeKind }}\n {{- range .Values.excludeKind }}\n - --exclude-kind={{ . }}\n {{- end }}\n {{- end }}\n {{- if .Values.excludeObject }}\n {{- range .Values.excludeObject }}\n - --exclude-object={{ . }}\n {{- end }}\n {{- end }}\n ports:\n - name: http\n containerPort: {{ .Values.healthcheckPort }}\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /health\n port: http\n readinessProbe:\n httpGet:\n path: /health\n port: http\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n volumes:\n - name: katafygio-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.existingClaim | default (include \"katafygio.fullname\" .) }}\n {{- else }}\n emptyDir: {}\n {{- end -}}\n",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"katafygio.fullname\" . }}\n labels:\n{{ include \"katafygio.labels.standard\" . | indent 4 }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# rbac.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"katafygio.fullname\" . }}\n labels:\n{{ include \"katafygio.labels.standard\" . | indent 4 }}\nrules:\n - apiGroups: [\"*\"]\n resources: [\"*\"]\n verbs:\n - get\n - list\n - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"katafygio.fullname\" . }}\n labels:\n{{ include \"katafygio.labels.standard\" . | indent 4 }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"katafygio.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"katafygio.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}\n",
"# service-account.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"katafygio.serviceAccountName\" . }}\n labels:\n{{ include \"katafygio.labels.standard\" . | indent 4 }}\n{{- end }}\n"
] | # Default values for the katafygio chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# gitUrl (optional) is a remote git repository that Katafygio can clone, and where
# it can push changes. If gitUrl is not defined, Katafygio will still maintain a
# pod-local git repository, which can be on a persistent volume (see the persistence section below).
# gitUrl: https://user:[email protected]/myorg/myrepos.git
# noGit disables git versioning when true (it will only keep an unversioned local dump up to date).
noGit: false
# healthcheckPort is the TCP port Katafygio listens on for health check requests.
healthcheckPort: 8080
# logLevel can be info, warning, error, or fatal.
logLevel: warning
# logOutput can be stdout, stderr, or syslog.
logOutput: stdout
# logServer (optional) provides the address of a remote syslog server.
# logServer: "localhost:514"
# filter is an (optional) label selector used to restrict backups to selected objects.
# filter: "app in (foo, bar)"
# excludeKind is an array of Kubernetes object kinds to exclude from backups.
excludeKind:
- replicaset
- endpoints
- event
# excludeObject is an array of specific Kubernetes objects to exclude from dumps
# (the format is: objectkind:namespace/objectname).
# excludeObject:
# - "configmap:kube-system/leader-elector"
# resyncInterval is the interval (in seconds) between full catch-up resyncs
# (to catch possibly missed events). Set to 0 to disable resyncs.
resyncInterval: 300
# localDir is the path where we'll dump and commit cluster objects.
localDir: "/var/lib/katafygio/data"
# persistence for the localDir dump directory. Note that configuring gitUrl
# is another way to achieve persistence.
persistence:
enabled: true
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
accessMode: ReadWriteOnce
size: 1Gi
# existingClaim: ""
# rbac allows enabling or disabling the RBAC role and binding. Katafygio needs
# read-only access to all Kubernetes API groups and resources.
rbac:
# Specifies whether RBAC resources should be created
create: true
# serviceAccount is used to provide a dedicated serviceAccount when using RBAC
# (or to fall back to the namespace's "default" SA if name is left empty).
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
image:
repository: bpineau/katafygio
tag: v0.8.1
pullPolicy: IfNotPresent
# resources define the deployment's cpu and memory resources.
# Katafygio only needs about 50Mi of memory as a baseline, and more depending
# on the cluster's content. For instance, on a 45-node cluster with about 2k
# pods and 1k services, Katafygio uses about 250Mi.
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
replicaCount: 1
nodeSelector: {}
tolerations: []
affinity: {}
|
moodle | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"moodle.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"moodle.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"moodle.mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"moodle.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the proper Moodle image name\n*/}}\n{{- define \"moodle.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the metrics image)\n*/}}\n{{- define \"moodle.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"moodle.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range 
.Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"moodle.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"moodle.deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: {{ template \"moodle.deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"moodle.fullname\" . }}\n labels:\n app: {{ template \"moodle.fullname\" . }}\n chart: {{ template \"moodle.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"moodle.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"moodle.fullname\" . }}\n chart: {{ template \"moodle.chart\" . }}\n release: \"{{ .Release.Name }}\"\n{{- if or .Values.podAnnotations .Values.metrics.enabled }}\n annotations:\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n {{- if .Values.metrics.podAnnotations }}\n{{ toYaml .Values.metrics.podAnnotations | indent 8 }}\n {{- end }}\n{{- end }}\n spec:\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n{{- include \"moodle.imagePullSecrets\" . | indent 6 }}\n hostAliases:\n - ip: \"127.0.0.1\"\n hostnames:\n - \"status.localhost\"\n containers:\n - name: moodle\n image: {{ template \"moodle.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n - name: ALLOW_EMPTY_PASSWORD\n value: {{ .Values.allowEmptyPassword | quote }}\n {{- if .Values.mariadb.enabled }}\n - name: MARIADB_HOST\n value: {{ template \"moodle.mariadb.fullname\" . }}\n - name: MARIADB_PORT_NUMBER\n value: \"3306\"\n - name: MOODLE_DATABASE_NAME\n value: {{ .Values.mariadb.db.name | quote }}\n - name: MOODLE_DATABASE_USER\n value: {{ .Values.mariadb.db.user | quote }}\n - name: MOODLE_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"moodle.mariadb.fullname\" . }}\n key: mariadb-password\n {{- else }}\n - name: MARIADB_HOST\n value: {{ .Values.externalDatabase.host | quote }}\n - name: MARIADB_PORT_NUMBER\n value: {{ .Values.externalDatabase.port | quote }}\n - name: MOODLE_DATABASE_NAME\n value: {{ .Values.externalDatabase.database | quote }}\n - name: MOODLE_DATABASE_USER\n value: {{ .Values.externalDatabase.user | quote }}\n - name: MOODLE_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n key: db-password\n {{- end }}\n - name: MOODLE_SKIP_INSTALL\n value: {{ default \"no\" .Values.moodleSkipInstall | quote }}\n - name: MOODLE_USERNAME\n value: {{ default \"\" .Values.moodleUsername | quote }}\n - name: MOODLE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"moodle.fullname\" . }}\n key: moodle-password\n - name: MOODLE_EMAIL\n value: {{ default \"\" .Values.moodleEmail | quote }}\n - name: SMTP_HOST\n value: {{ default \"\" .Values.smtpHost | quote }}\n - name: SMTP_PORT\n value: {{ default \"\" .Values.smtpPort | quote }}\n - name: SMTP_USER\n value: {{ default \"\" .Values.smtpUser | quote }}\n - name: SMTP_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"moodle.fullname\" . 
}}\n key: smtp-password\n - name: SMTP_PROTOCOL\n value: {{ default \"\" .Values.smtpProtocol | quote }}\n ports:\n - name: http\n containerPort: 80\n - name: https\n containerPort: 443\n {{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /login/index.php\n port: http\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /login/index.php\n port: http\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n - name: moodle-data\n mountPath: /bitnami\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"moodle.metrics.image\" . }}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n command: [ '/bin/apache_exporter', '-scrape_uri', 'http://status.localhost:80/server-status/?auto']\n ports:\n - name: metrics\n containerPort: 9117\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 15\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 5\n timeoutSeconds: 1\n resources:\n {{ toYaml .Values.metrics.resources | indent 10 }}\n{{- end }}\n volumes:\n - name: moodle-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.existingClaim | default (include \"moodle.fullname\" . ) }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n",
"# externaldb-secrets.yaml\n{{- if not .Values.mariadb.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n labels:\n app: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n db-password: {{ default \"\" .Values.externalDatabase.password | b64enc | quote }}\n{{- end }}",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\n{{- range .Values.ingress.hosts }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: \"{{- printf \"%s-%s\" .name $.Release.Name | trunc 63 | trimSuffix \"-\" -}}\"\n labels:\n app: {{ template \"moodle.fullname\" $ }}\n chart: {{ template \"moodle.chart\" $ }}\n release: \"{{ $.Release.Name }}\"\n heritage: \"{{ $.Release.Service }}\"\n annotations:\n {{- if .certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- range $key, $value := .annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: {{ template \"moodle.fullname\" $ }}\n servicePort: 80\n{{- if .tls }}\n tls:\n - hosts:\n - {{ .name }}\n secretName: {{ .tlsSecret }}\n{{- end }}\n---\n{{- end }}\n{{- end }}\n\n\n\n\n",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"moodle.fullname\" . }}\n labels:\n app: {{ template \"moodle.fullname\" . }}\n chart: {{ template \"moodle.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{ include \"moodle.storageClass\" . }}\n{{- end -}}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"moodle.fullname\" . }}\n labels:\n app: {{ template \"moodle.fullname\" . }}\n chart: {{ template \"moodle.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n {{ if .Values.moodlePassword }}\n moodle-password: {{ default \"\" .Values.moodlePassword | b64enc | quote }}\n {{ else }}\n moodle-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n smtp-password: {{ default \"\" .Values.smtpPassword | b64enc | quote }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"moodle.fullname\" . }}\n labels:\n app: {{ template \"moodle.fullname\" . }}\n chart: {{ template \"moodle.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.service.type }}\n {{- if (or (eq .Values.service.type \"LoadBalancer\") (eq .Values.service.type \"NodePort\")) }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}\n {{- end }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n targetPort: http\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n - name: https\n port: {{ .Values.service.httpsPort }}\n targetPort: https\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.https)))}}\n nodePort: {{ .Values.service.nodePorts.https }}\n {{- end }}\n selector:\n app: {{ template \"moodle.fullname\" . }}\n"
] | ## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami Moodle image version
## ref: https://hub.docker.com/r/bitnami/moodle/tags/
##
image:
registry: docker.io
repository: bitnami/moodle
tag: 3.8.2-debian-10-r0
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override moodle.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override moodle.fullname template
##
# fullnameOverride:
## Skip Moodle installation wizard. Useful for migrations and restoring from SQL dump
## ref: https://github.com/bitnami/bitnami-docker-moodle#configuration
##
moodleSkipInstall: "no"
## User of the application
## ref: https://github.com/bitnami/bitnami-docker-moodle#configuration
##
moodleUsername: user
## Application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/bitnami-docker-moodle#configuration
##
# moodlePassword:
## Admin email
## ref: https://github.com/bitnami/bitnami-docker-moodle#configuration
moodleEmail: [email protected]
## Set to `yes` to allow the container to be started with blank passwords
## ref: https://github.com/bitnami/bitnami-docker-moodle#environment-variables
allowEmptyPassword: "yes"
##
## External database configuration
##
externalDatabase:
## Database host
host:
## Database port
port: 3306
## Database user
user: bn_moodle
## Database password
password:
## Database name
database: bitnami_moodle
## SMTP mail delivery configuration
## ref: https://github.com/bitnami/bitnami-docker-moodle/#smtp-configuration
# smtpHost:
# smtpPort:
# smtpUser:
# smtpPassword:
# smtpProtocol:
##
## MariaDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml
##
mariadb:
## Whether to deploy a mariadb server to satisfy the application's database requirements. To use an external database, set this to false and configure the externalDatabase parameters
enabled: true
## Disable MariaDB replication
replication:
enabled: false
## Create a database and a database user
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run
##
db:
name: bitnami_moodle
user: bn_moodle
## If the password is not specified, mariadb will generate a random password
##
# password:
## MariaDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run
##
# rootUser:
# password:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
master:
persistence:
enabled: true
## mariadb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 8Gi
## Define affinity for the pod
## Sometimes required when persistent volumes are defined externally
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: node-role.kubernetes.io/master
# operator: In
# values:
# - machine01
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# preference:
# matchExpressions:
# - key: another-node-label-key
# operator: In
# values:
# - another-node-label-value
#
# resources:
# requests:
# memory: 768Mi
# cpu: 750m
## Kubernetes configuration
## For minikube, set this to NodePort; when using an ingress, ClusterIP; elsewhere use LoadBalancer
##
service:
type: LoadBalancer
# HTTP Port
port: 80
# HTTPS Port
httpsPort: 443
##
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
## https: <to set explicitly, choose port between 30000-32767>
nodePorts:
http: ""
https: ""
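## Illustrative only; ports are assumptions within the default NodePort range
# nodePorts:
#   http: "30080"
#   https: "30443"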
## Enable client source IP preservation
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## Configure the ingress resource that allows you to access the
## Moodle installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- name: moodle.local
## Set this to true in order to enable TLS on the ingress record
## A side effect of this will be that the backend moodle service will be connected at port 443
tls: false
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: moodle.local-tls
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: moodle.local-tls
# key:
# certificate:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## moodle data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
# existingClaim: ""
## Define affinity for the moodle pod
## Sometimes required when persistent volumes are defined externally
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: node-role.kubernetes.io/master
# operator: In
# values:
# - machine01
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 1
# preference:
# matchExpressions:
# - key: another-node-label-key
# operator: In
# values:
# - another-node-label-value
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 512Mi
cpu: 300m
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
initialDelaySeconds: 600
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 6
successThreshold: 1
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.7.0-debian-10-r39
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter pod annotations and labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9117"
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
|
collabora-code | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"collabora-code.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"collabora-code.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"collabora-code.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"collabora-code.fullname\" . }}\ndata:\n DONT_GEN_SSL_CERT: \"{{ .Values.collabora.DONT_GEN_SSL_CERT }}\"\n dictionaries: {{ .Values.collabora.dictionaries }}\n domain: {{ .Values.collabora.domain }}\n extra_params: {{ .Values.collabora.extra_params }}\n server_name: {{ .Values.collabora.server_name }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"collabora-code.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"collabora-code.name\" . }}\n helm.sh/chart: {{ include \"collabora-code.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n strategy:\n type: {{ .Values.strategy }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"collabora-code.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"collabora-code.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n - name: DONT_GEN_SSL_CERT\n valueFrom:\n configMapKeyRef:\n name: {{ include \"collabora-code.fullname\" . }}\n key: DONT_GEN_SSL_CERT\n - name: dictionaries\n valueFrom:\n configMapKeyRef:\n name: {{ include \"collabora-code.fullname\" . }}\n key: dictionaries\n - name: domain\n valueFrom:\n configMapKeyRef:\n name: {{ include \"collabora-code.fullname\" . }}\n key: domain\n - name: extra_params\n valueFrom:\n configMapKeyRef:\n name: {{ include \"collabora-code.fullname\" . }}\n key: extra_params\n - name: server_name\n valueFrom:\n configMapKeyRef:\n name: {{ include \"collabora-code.fullname\" . }}\n key: server_name\n - name: username\n valueFrom:\n secretKeyRef:\n name: {{ include \"collabora-code.fullname\" . }}\n key: username\n - name: password\n valueFrom:\n secretKeyRef:\n name: {{ include \"collabora-code.fullname\" . }}\n key: password\n {{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n failureThreshold: 3\n httpGet:\n path: {{ .Values.livenessProbe.path }}\n port: http\n scheme: {{ .Values.livenessProbe.scheme }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n failureThreshold: 3\n httpGet:\n path: {{ .Values.readinessProbe.path }}\n port: http\n scheme: {{ .Values.readinessProbe.scheme }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n {{- end }}\n ports:\n - name: http\n containerPort: 9980\n protocol: TCP\n resources:\n {{- toYaml .Values.resources | nindent 12 }}\n securityContext:\n {{- toYaml .Values.securitycontext | nindent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"collabora-code.fullname\" . -}}\n{{- $ingressPaths := .Values.ingress.paths -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app.kubernetes.io/name: {{ include \"collabora-code.name\" . }}\n helm.sh/chart: {{ include \"collabora-code.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n {{- with .Values.ingress.annotations }}\n annotations:\n {{- toYaml . | nindent 4 }}\n {{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . | quote }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . | quote }}\n http:\n paths:\n {{- range $ingressPaths }}\n - path: {{ . }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: http\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ include \"collabora-code.fullname\" . }}\ndata:\n username: {{ .Values.collabora.username | b64enc }}\n password: {{ .Values.collabora.password | b64enc }}",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"collabora-code.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"collabora-code.name\" . }}\n helm.sh/chart: {{ include \"collabora-code.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: http\n protocol: TCP\n name: http\n selector:\n app.kubernetes.io/name: {{ include \"collabora-code.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n",
"# test-connection.yaml\napiVersion: v1\nkind: Pod\nmetadata:\n name: \"{{ include \"collabora-code.fullname\" . }}-test-connection\"\n labels:\n app.kubernetes.io/name: {{ include \"collabora-code.name\" . }}\n helm.sh/chart: {{ include \"collabora-code.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n annotations:\n \"helm.sh/hook\": test-success\nspec:\n containers:\n - name: wget\n image: busybox\n command: ['wget']\n args: ['{{ include \"collabora-code.fullname\" . }}:{{ .Values.service.port }}']\n restartPolicy: Never\n"
] | # Default values for collabora-code.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: collabora/code
tag: 4.0.3.1
pullPolicy: IfNotPresent
strategy: Recreate
nameOverride: ""
fullnameOverride: ""
service:
type: ClusterIP
port: 9980
ingress:
enabled: false
annotations: {}
paths: []
hosts: []
tls: []
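# Illustrative only (hostnames and secret name are placeholders), matching the shape the ingress template expects:
# hosts:
#   - collabora.example.com
# paths:
#   - /
# tls:
#   - secretName: collabora-example-tls
#     hosts:
#       - collabora.example.com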
collabora:
DONT_GEN_SSL_CERT: true
domain: nextcloud\\.domain
extra_params: --o:ssl.termination=true --o:ssl.enable=false
server_name: collabora\.domain
password: examplepass
username: admin
dictionaries: de_DE en_GB en_US es_ES fr_FR it nl pt_BR pt_PT ru
securitycontext:
allowPrivilegeEscalation: true
capabilities:
add:
- MKNOD
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
livenessProbe:
enabled: true
initialDelaySeconds: 30
timeoutSeconds: 2
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
scheme: HTTP
path: /
readinessProbe:
enabled: true
initialDelaySeconds: 30
timeoutSeconds: 2
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
scheme: HTTP
path: /
|
suitecrm | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"suitecrm.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"suitecrm.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"suitecrm.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"suitecrm.mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nGet the user defined LoadBalancerIP for this release.\nNote, returns 127.0.0.1 if using ClusterIP.\n*/}}\n{{- define \"serviceIP\" -}}\n{{- if eq .Values.service.type \"ClusterIP\" -}}\n127.0.0.1\n{{- else -}}\n{{- .Values.service.loadBalancerIP | default \"\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGets the host to be used for this application.\nIf not using ClusterIP, or if a host or LoadBalancerIP is not defined, the value will be empty.\n*/}}\n{{- define \"host\" -}}\n{{- $host := index .Values (printf \"%sHost\" .Chart.Name) | default \"\" -}}\n{{- default (include \"serviceIP\" .) 
$host -}}\n{{- end -}}\n\n{{/*\nReturn the proper SuiteCRM image name\n*/}}\n{{- define \"suitecrm.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the metrics image)\n*/}}\n{{- define \"suitecrm.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"suitecrm.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . 
}}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"suitecrm.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"suitecrm.deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\n{{- if include \"host\" . -}}\napiVersion: {{ template \"suitecrm.deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"suitecrm.fullname\" . }}\n labels:\n app: {{ template \"suitecrm.name\" . }}\n chart: \"{{ template \"suitecrm.chart\" . }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"suitecrm.name\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"suitecrm.name\" . }}\n release: \"{{ .Release.Name }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n{{- if or .Values.podAnnotations .Values.metrics.enabled }}\n annotations:\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n {{- if .Values.metrics.podAnnotations }}\n{{ toYaml .Values.metrics.podAnnotations | indent 8 }}\n {{- end }}\n{{- end }}\n spec:\n{{- include \"suitecrm.imagePullSecrets\" . | indent 6 }}\n hostAliases:\n - ip: \"127.0.0.1\"\n hostnames:\n - \"status.localhost\"\n containers:\n - name: {{ template \"suitecrm.fullname\" . }}\n image: {{ template \"suitecrm.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n - name: ALLOW_EMPTY_PASSWORD\n value: {{ .Values.allowEmptyPassword | quote }}\n - name: SUITECRM_VALIDATE_USER_IP\n value: {{ .Values.suitecrmValidateUserIP | quote }}\n {{- if .Values.mariadb.enabled }}\n - name: MARIADB_HOST\n value: {{ template \"suitecrm.mariadb.fullname\" . }}\n - name: MARIADB_PORT_NUMBER\n value: \"3306\"\n - name: SUITECRM_DATABASE_NAME\n value: {{ .Values.mariadb.db.name | quote }}\n - name: SUITECRM_DATABASE_USER\n value: {{ .Values.mariadb.db.user | quote }}\n - name: SUITECRM_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"suitecrm.mariadb.fullname\" . }}\n key: mariadb-password\n {{- else }}\n - name: MARIADB_HOST\n value: {{ .Values.externalDatabase.host | quote }}\n - name: MARIADB_PORT_NUMBER\n value: {{ .Values.externalDatabase.port | quote }}\n - name: SUITECRM_DATABASE_NAME\n value: {{ .Values.externalDatabase.database | quote }}\n - name: SUITECRM_DATABASE_USER\n value: {{ .Values.externalDatabase.user | quote }}\n - name: SUITECRM_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n key: db-password\n {{- end }}\n - name: SUITECRM_HOST\n{{- $port:=.Values.service.port | toString }}\n value: \"{{ include \"host\" . }}:{{- if ne $port \"80\" }}:{{ .Values.service.port }}{{ end }}\"\n - name: SUITECRM_USERNAME\n value: {{ .Values.suitecrmUsername | quote }}\n - name: SUITECRM_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"suitecrm.fullname\" . }}\n key: suitecrm-password\n - name: SUITECRM_EMAIL\n value: {{ .Values.suitecrmEmail | quote }}\n - name: SUITECRM_LAST_NAME\n value: {{ .Values.suitecrmLastName | quote }}\n - name: SUITECRM_SMTP_HOST\n value: {{ .Values.suitecrmSmtpHost | quote }}\n - name: SUITECRM_SMTP_PORT\n value: {{ .Values.suitecrmSmtpPort | quote }}\n - name: SUITECRM_SMTP_USER\n value: {{ .Values.suitecrmSmtpUser | quote }}\n - name: SUITECRM_SMTP_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"suitecrm.fullname\" . 
}}\n key: suitecrm-smtp-password\n - name: SUITECRM_SMTP_PROTOCOL\n value: {{ .Values.suitecrmSmtpProtocol | quote }}\n ports:\n - name: http\n containerPort: 80\n - name: https\n containerPort: 443\n livenessProbe:\n httpGet:\n path: /index.php\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"host\" . | quote }}\n initialDelaySeconds: 300\n readinessProbe:\n httpGet:\n path: /index.php\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"host\" . | quote }}\n initialDelaySeconds: 60\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n - name: suitecrm-data\n mountPath: /bitnami/suitecrm\n subPath: suitecrm\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"suitecrm.metrics.image\" . }}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n command: [ '/bin/apache_exporter', '-scrape_uri', 'http://status.localhost:80/server-status/?auto']\n ports:\n - name: metrics\n containerPort: 9117\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 15\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 5\n timeoutSeconds: 1\n resources:\n{{ toYaml .Values.metrics.resources | indent 12 }}\n{{- end }}\n volumes:\n - name: suitecrm-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template \"suitecrm.fullname\" . }}-suitecrm{{- end }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n{{- end -}}\n",
"# externaldb-secrets.yaml\n{{- if not .Values.mariadb.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n labels:\n app: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n chart: \"{{ template \"suitecrm.chart\" . }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n db-password: {{ default \"\" .Values.externalDatabase.password | b64enc | quote }}\n{{- end }}",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"suitecrm.fullname\" . }}\n labels:\n app: \"{{ template \"suitecrm.fullname\" . }}\"\n chart: \"{{ template \"suitecrm.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n annotations:\n {{- if .Values.ingress.certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: {{ template \"suitecrm.fullname\" $ }}\n servicePort: http\n {{- end }}\n tls:\n {{- range .Values.ingress.hosts }}\n {{- if .tls }}\n - hosts:\n {{- if .tlsHosts }}\n {{- range $host := .tlsHosts }}\n - {{ $host }}\n {{- end }}\n {{- else }}\n - {{ .name }}\n {{- end }}\n secretName: {{ .tlsSecret }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"suitecrm.fullname\" . }}\n labels:\n app: {{ template \"suitecrm.name\" . }}\n chart: \"{{ template \"suitecrm.chart\" . }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n {{ if .Values.suitecrmPassword }}\n suitecrm-password: {{ .Values.suitecrmPassword | b64enc | quote }}\n {{ else }}\n suitecrm-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n suitecrm-smtp-password: {{ default \"\" .Values.suitecrmSmtpPassword | b64enc | quote }}\n",
"# suitecrm-pvc.yaml\n{{- if .Values.persistence.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"suitecrm.fullname\" . }}-suitecrm\n labels:\n app: {{ template \"suitecrm.name\" . }}\n chart: \"{{ template \"suitecrm.chart\" . }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{ include \"suitecrm.storageClass\" . }}\n{{- end -}}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"suitecrm.fullname\" . }}\n labels:\n app: {{ template \"suitecrm.name\" . }}\n chart: \"{{ template \"suitecrm.chart\" . }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.service.type }}\n {{- if (and (eq .Values.service.type \"LoadBalancer\") (not (empty .Values.service.loadBalancerIP))) }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n {{- end }}\n {{- if (or (eq .Values.service.type \"LoadBalancer\") (eq .Values.service.type \"NodePort\")) }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}\n {{- end }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n targetPort: http\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n - name: https\n port: {{ .Values.service.httpsPort }}\n targetPort: https\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.https)))}}\n nodePort: {{ .Values.service.nodePorts.https }}\n {{- end }}\n selector:\n app: {{ template \"suitecrm.name\" . }}\n release: \"{{ .Release.Name }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n"
] | ## Global Docker image parameters
## Please note that this will override the image parameters, including those of dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami SuiteCRM image version
## ref: https://hub.docker.com/r/bitnami/suitecrm/tags/
##
image:
registry: docker.io
repository: bitnami/suitecrm
tag: 7.11.12-debian-10-r18
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override suitecrm.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override suitecrm.fullname template
##
# fullnameOverride:
## SuiteCRM host to create application URLs
## ref: https://github.com/bitnami/bitnami-docker-suitecrm#configuration
##
# suitecrmHost:
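## Example (hostname is hypothetical):
# suitecrmHost: suitecrm.example.com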
## SuiteCRM validate user IP
## ref: https://github.com/bitnami/bitnami-docker-suitecrm#configuration
##
suitecrmValidateUserIP: "no"
## User of the application
## ref: https://github.com/bitnami/bitnami-docker-suitecrm#configuration
##
suitecrmUsername: user
## Application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/bitnami-docker-suitecrm#configuration
##
# suitecrmPassword:
## Admin email
## ref: https://github.com/bitnami/bitnami-docker-suitecrm#configuration
##
suitecrmEmail: user@example.com
## Admin last name
## ref: https://github.com/bitnami/bitnami-docker-suitecrm#configuration
##
suitecrmLastName: Name
## Set to `yes` to allow the container to be started with blank passwords
## ref: https://github.com/bitnami/bitnami-docker-suitecrm#environment-variables
allowEmptyPassword: "yes"
##
## External database configuration
##
externalDatabase:
## Database host
host:
## Database port
port: 3306
## Database user
user: bn_suitecrm
## Database password
password:
## Database name
database: bitnami_suitecrm
## SMTP mail delivery configuration
## ref: https://github.com/bitnami/bitnami-docker-suitecrm/#smtp-configuration
##
# suitecrmSmtpHost:
# suitecrmSmtpPort:
# suitecrmSmtpUser:
# suitecrmSmtpPassword:
# suitecrmSmtpProtocol:
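## Illustrative SMTP settings (all values hypothetical; the Bitnami image typically accepts "ssl" or "tls" for the protocol):
# suitecrmSmtpHost: smtp.example.com
# suitecrmSmtpPort: 587
# suitecrmSmtpUser: notifications@example.com
# suitecrmSmtpPassword: my-smtp-password
# suitecrmSmtpProtocol: tls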
##
## MariaDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml
##
mariadb:
## Whether to deploy a MariaDB server to satisfy the application's database requirements. To use an external database, set this to false and configure the externalDatabase parameters
enabled: true
## Disable MariaDB replication
replication:
enabled: false
## Create a database and a database user
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run
##
db:
name: bitnami_suitecrm
user: bn_suitecrm
## If the password is not specified, MariaDB will generate a random password
##
# password:
## MariaDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run
##
# rootUser:
# password:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
master:
persistence:
enabled: true
## mariadb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
service:
type: LoadBalancer
# HTTP Port
port: 80
# HTTPS Port
httpsPort: 443
## loadBalancerIP for the SuiteCRM Service (optional, cloud specific)
## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
##
## loadBalancerIP:
##
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
## https: <to set explicitly, choose port between 30000-32767>
nodePorts:
http: ""
https: ""
## Enable client source IP preservation
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## suitecrm data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
accessMode: ReadWriteOnce
size: 8Gi
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# requests:
# memory: 512Mi
# cpu: 300m
## Configure the ingress resource that allows you to access the
## SuiteCRM installation. Set up the URL via the hosts list below.
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- name: suitecrm.local
path: /
## Set this to true in order to enable TLS on the ingress record
tls: false
## Optionally specify the TLS hosts for the ingress record
## Useful when the Ingress controller supports www-redirection
## If not specified, the above host name will be used
# tlsHosts:
# - www.suitecrm.local
# - suitecrm.local
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: suitecrm.local-tls
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: suitecrm.local-tls
# key:
# certificate:
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.7.0-debian-10-r37
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter pod Annotation and Labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9117"
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
|
bookstack | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"bookstack.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"bookstack.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"bookstack.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"bookstack.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"bookstack.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"bookstack.mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# app-secrets.yaml\n{{- if .Values.app.key }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ printf \"%s-%s\" .Release.Name \"app\" }}\n labels:\n app: {{ printf \"%s-%s\" .Release.Name \"app\" }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n app-key: {{ .Values.app.key | b64enc | quote }}\n{{- end }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"bookstack.fullname\" . }}\n labels:\n app: {{ template \"bookstack.name\" . }}\n chart: {{ template \"bookstack.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"bookstack.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"bookstack.name\" . }}\n release: {{ .Release.Name }}\n spec:\n serviceAccountName: {{ template \"bookstack.serviceAccountName\" . }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: http\n containerPort: 80\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /\n port: http\n readinessProbe:\n httpGet:\n path: /\n port: http\n env:\n {{- if .Values.ldap.enabled }}\n - name: AUTH_METHOD\n value: ldap\n - name: LDAP_SERVER\n value: {{ .Values.ldap.server | quote }}\n - name: LDAP_BASE_DN\n value: {{ .Values.ldap.base_dn | quote }}\n - name: LDAP_DN\n value: {{ .Values.ldap.dn | quote }}\n - name: LDAP_PASS\n value: {{ .Values.ldap.pass | quote }}\n - name: LDAP_USER_FILTER\n value: {{ .Values.ldap.userFilter | quote }}\n - name: LDAP_VERSION\n value: {{ .Values.ldap.version | quote }}\n {{- end }}\n {{- if .Values.mariadb.enabled }}\n - name: DB_HOST\n value: {{ template \"bookstack.mariadb.fullname\" . }}\n - name: DB_DATABASE\n value: {{ .Values.mariadb.db.name | quote }}\n - name: DB_USERNAME\n value: {{ .Values.mariadb.db.user | quote }}\n - name: DB_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"bookstack.mariadb.fullname\" . }}\n key: mariadb-password\n {{- else }}\n - name: DB_HOST\n value: {{ .Values.externalDatabase.host | quote }}\n - name: DB_DATABASE\n value: {{ .Values.externalDatabase.database | quote }}\n - name: DB_USERNAME\n value: {{ .Values.externalDatabase.user | quote }}\n - name: DB_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n key: db-password\n {{- end }}\n {{- if .Values.app.key }}\n - name: APP_KEY\n valueFrom:\n secretKeyRef:\n name: {{ printf \"%s-%s\" .Release.Name \"app\" }}\n key: app-key\n {{- end }}\n {{- range $key, $value := .Values.env }}\n - name: {{ $key | upper | replace \".\" \"_\" }}\n value: {{ $value | quote }}\n {{- end }}\n volumeMounts:\n - name: uploads\n mountPath: /var/www/bookstack/public/uploads\n - name: storage\n mountPath: /var/www/bookstack/storage/uploads\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumes:\n - name: uploads\n {{- if .Values.persistence.uploads.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.uploads.existingClaim | default (printf \"%s-%s\" (include \"bookstack.fullname\" .) \"uploads\") }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n - name: storage\n {{- if .Values.persistence.storage.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.storage.existingClaim | default (printf \"%s-%s\" (include \"bookstack.fullname\" .) \"storage\") }}\n {{- else }}\n emptyDir: {}\n {{- end -}}\n",
"# externaldb-secrets.yaml\n{{- if not .Values.mariadb.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n labels:\n app: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n db-password: {{ default \"\" .Values.externalDatabase.password | b64enc | quote }}\n{{- end }}",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"bookstack.fullname\" . -}}\n{{- $ingressPath := .Values.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app: {{ template \"bookstack.name\" . }}\n chart: {{ template \"bookstack.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: http\n {{- end }}\n{{- end }}\n",
"# podsecuritypolicy.yaml\n{{- if .Values.podSecurityPolicy.enabled }}\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"bookstack.fullname\" . }}\n labels:\n app: {{ template \"bookstack.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n privileged: false\n allowPrivilegeEscalation: false\n volumes:\n - 'configMap'\n - 'downwardAPI'\n - 'emptyDir'\n - 'persistentVolumeClaim'\n - 'secret'\n - 'projected'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n rule: 'RunAsAny'\n seLinux:\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'RunAsAny'\n fsGroup:\n rule: 'RunAsAny'\n readOnlyRootFilesystem: false\n{{- end }}\n",
"# role.yaml\n{{- if .Values.rbac.create }}\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"bookstack.fullname\" . }}\n labels:\n app: {{ template \"bookstack.name\" . }}\n chart: {{ template \"bookstack.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nrules:\n- apiGroups: [\"\"]\n resources: [\"endpoints\"]\n verbs: [\"get\"]\n{{- if .Values.podSecurityPolicy.enabled }}\n- apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames: [{{ template \"bookstack.fullname\" . }}]\n{{- end }}\n{{- end }}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create }}\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"bookstack.fullname\" . }}\n labels:\n app: {{ template \"bookstack.name\" . }}\n chart: {{ template \"bookstack.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"bookstack.serviceAccountName\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"bookstack.fullname\" . }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"bookstack.fullname\" . }}\n labels:\n app: {{ template \"bookstack.name\" . }}\n chart: {{ template \"bookstack.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: http\n protocol: TCP\n name: http\n selector:\n app: {{ template \"bookstack.name\" . }}\n release: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{ if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"bookstack.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"bookstack.serviceAccountName\" . }}\n{{- end -}}\n",
"# storage-pvc.yaml\n{{- if and .Values.persistence.storage.enabled (not .Values.persistence.storage.existingClaim) -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"bookstack.fullname\" . }}-storage\n labels:\n app: {{ template \"bookstack.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.storage.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.storage.size | quote }}\n{{- if .Values.persistence.storage.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storage.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storage.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# uploads-pvc.yaml\n{{- if and .Values.persistence.uploads.enabled (not .Values.persistence.uploads.existingClaim) -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"bookstack.fullname\" . }}-uploads\n labels:\n app: {{ template \"bookstack.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.uploads.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.uploads.size | quote }}\n{{- if .Values.persistence.uploads.storageClass }}\n{{- if (eq \"-\" .Values.persistence.uploads.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.uploads.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n"
] | # Default values for bookstack.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: solidnerd/bookstack
tag: 0.27.5
pullPolicy: IfNotPresent
app:
# Laravel APP_KEY. Generate one with `php artisan key:generate` and put here if you want a static key.
key:
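  ## Example of the format `php artisan key:generate` produces (placeholder, not a real key):
  # key: base64:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=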
env: {}
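## Extra environment variables for the BookStack container. The deployment
## template upper-cases each key and replaces dots with underscores, e.g. a
## hypothetical entry:
# env:
#   app.url: https://bookstack.example.com  # rendered as APP_URL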
##
## External database configuration
##
externalDatabase:
## Database host
host:
## Database port
port: 3306
## Database user
user: bookstack
## Database password
password:
## Database name
database: bookstack
##
## MariaDB chart configuration
##
mariadb:
## Whether to deploy a MariaDB server to satisfy the application's database requirements. To use an external database, set this to false and configure the externalDatabase parameters
enabled: true
## Disable MariaDB replication
replication:
enabled: false
## Create a database and a database user
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run
##
db:
name: bookstack
user: bookstack
## If the password is not specified, MariaDB will generate a random password
##
# password:
## MariaDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run
##
# rootUser:
# password:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
master:
persistence:
enabled: false
## mariadb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
service:
type: ClusterIP
port: 80
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
## For RBAC support:
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
# Persistence for the public/uploads folder
uploads:
enabled: true
## uploads data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
# Persistence for the public/storage folder
storage:
enabled: true
## storage data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- bookstack-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases the chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
## Enable LDAP authentication. See https://www.bookstackapp.com/docs/admin/ldap-auth/ for details on how to set it up.
ldap:
enabled: false
server:
base_dn:
dn:
pass:
userFilter:
version:
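  ## Illustrative values only (hostnames and DNs are hypothetical; see the BookStack docs above):
  # server: ldap.example.com:389
  # base_dn: ou=People,dc=example,dc=com
  # dn: cn=search,dc=example,dc=com
  # userFilter: (&(uid=${user}))
  # version: 3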
|
signalfx-agent | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"signalfx-agent.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"signalfx-agent.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"signalfx-agent.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"signalfx-agent.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"signalfx-agent.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGet namespace to deploy agent and its dependencies.\n*/}}\n{{- define \"signalfx-agent.namespace\" -}}\n {{- default .Release.Namespace .Values.namespace -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"signalfx-agent.fullname\" . }}\n labels:\n app: {{ template \"signalfx-agent.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n chart: {{ template \"signalfx-agent.chart\" . }}\nrules:\n- apiGroups:\n - \"\"\n resources:\n - events\n - namespaces\n - namespaces/status\n - nodes\n - nodes/spec\n - pods\n - pods/status\n - replicationcontrollers\n - replicationcontrollers/status\n - services\n - resourcequotas\n # Only need to be able to view secrets if using k8s annotation\n # agent.signalfx.com/configWithSecret.*. You can also whitelist specific\n # secrets for finer-grain permission sets.\n {{- if .Values.permitReadingSecrets }}\n - secrets\n {{- end }}\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n verbs:\n - get\n - update\n - create\n- apiGroups:\n - \"\"\n resources:\n - nodes/stats\n verbs:\n - get\n - list\n # We need create because kubelet takes a POST for the stat query\n - create\n- apiGroups:\n - apps\n resources:\n - daemonsets\n - deployments\n - replicasets\n - statefulsets\n verbs:\n - get\n - list\n - watch\n- apiGroups:\n - extensions\n resources:\n - daemonsets\n - deployments\n - replicasets\n verbs:\n - get\n - list\n - watch\n{{- with .Values.rbac.customRules }}\n{{ toYaml . }}\n{{- end }}\n{{- end -}}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"signalfx-agent.fullname\" . }}\n labels:\n app: {{ template \"signalfx-agent.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n chart: {{ template \"signalfx-agent.chart\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"signalfx-agent.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"signalfx-agent.serviceAccountName\" . }}\n namespace: {{ template \"signalfx-agent.namespace\" . }}\n{{- end -}}\n",
"# configmap.yaml\n{{- if and .Values.clusterName .Values.signalFxAccessToken -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"signalfx-agent.fullname\" . }}\n namespace: {{ template \"signalfx-agent.namespace\" . }}\n labels:\n app: {{ template \"signalfx-agent.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n chart: {{ template \"signalfx-agent.chart\" . }}\ndata:\n agent.yaml: |\n{{- if .Values.agentConfig }}\n{{ toYaml .Values.agentConfig | indent 4 }}\n{{- else }}\n signalFxAccessToken: ${SFX_ACCESS_TOKEN}\n\n intervalSeconds: {{ .Values.metricIntervalSeconds }}\n\n logging:\n level: {{ .Values.logLevel | default \"info\" }}\n\n globalDimensions:\n kubernetes_cluster: {{ .Values.clusterName }}\n\n sendMachineID: false\n\n observers:\n - type: k8s-api\n {{- if .Values.apiServerSkipVerify }}\n kubernetesAPI:\n skipVerify: true\n {{- end }}\n\n monitors:\n - type: collectd/cpu\n - type: collectd/cpufreq\n - type: collectd/df\n hostFSPath: /hostfs\n - type: collectd/disk\n - type: collectd/interface\n - type: collectd/load\n - type: collectd/memory\n - type: collectd/protocols\n - type: collectd/signalfx-metadata\n procFSPath: /hostfs/proc\n - type: host-metadata\n etcPath: /hostfs/etc\n procFSPath: /hostfs/proc\n - type: collectd/uptime\n - type: collectd/vmem\n\n - type: collectd/processes\n processes:\n - collectd\n - signalfx-agent\n\n - type: kubelet-stats\n {{- if .Values.containerStatsIntervalSeconds }}\n intervalSeconds: {{ .Values.containerStatsIntervalSeconds }}\n {{- end }}\n {{- if .Values.kubeletAPI }}\n kubeletAPI:\n {{ toYaml .Values.kubeletAPI | indent 8 | trim }}\n {{- end }}\n\n {{ if .Values.gatherClusterMetrics -}}\n # Collects k8s cluster-level metrics\n - type: kubernetes-cluster\n useNodeName: true\n {{- end }}\n\n {{ if .Values.gatherDockerMetrics -}}\n - type: docker-container-stats\n dockerURL: unix:///var/run/docker.sock\n excludedImages:\n - '*pause-amd64*'\n labelsToDimensions:\n io.kubernetes.container.name: container_spec_name\n io.kubernetes.pod.name: kubernetes_pod_name\n io.kubernetes.pod.uid: kubernetes_pod_uid\n io.kubernetes.pod.namespace: kubernetes_namespace\n metricsToExclude:\n - metricNames:\n - 'cpu.usage*'\n - 'cpu.percent'\n - 'memory.usage*'\n - 'memory.percent'\n - 'blkio.io_service_bytes_recursive.*'\n negated: true\n {{- end }}\n\n {{ range .Values.monitors -}}\n - type: {{ .type }}\n {{- with .discoveryRule }}\n discoveryRule: {{ . }}\n {{- end }}\n {{- if (omit . \"type\" \"discoveryRule\") }}\n {{ toYaml (omit . \"type\" \"discoveryRule\") | indent 6 | trim }}\n {{- end }}\n\n {{ end }}\n\n collectd:\n readThreads: {{ .Values.readThreads | default 5 }}\n writeQueueLimitHigh: {{ .Values.writeQueueLimitHigh | default 500000 }}\n writeQueueLimitLow: {{ .Values.writeQueueLimitLow | default 400000 }}\n timeout: {{ .Values.timeout | default 40 }}\n logLevel: {{ .Values.logLevel | default \"notice\" }}\n\n metricsToExclude:\n {{- if .Values.metricNamesToExclude }}\n - metricNames:\n {{- range .Values.metricNamesToExclude }}\n - \"{{ . }}\"\n {{- end }}\n {{- end }}\n {{- with .Values.metricsToExclude }}\n{{ toYaml . | indent 6 }}\n {{- end }}\n{{- end }}\n{{- end -}}\n",
"# daemonset.yaml\n{{- if and .Values.clusterName .Values.signalFxAccessToken -}}\napiVersion: apps/v1beta2\nkind: DaemonSet\nmetadata:\n name: {{ template \"signalfx-agent.fullname\" . }}\n namespace: {{ template \"signalfx-agent.namespace\" . }}\n labels:\n app: {{ template \"signalfx-agent.name\" . }}\n version: {{ .Values.agentVersion }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n chart: {{ template \"signalfx-agent.chart\" . }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"signalfx-agent.name\" . }}\n release: {{ .Release.Name }}\n updateStrategy:\n rollingUpdate:\n maxUnavailable: {{ .Values.rollingUpdateMaxUnavailable | default 1 }}\n type: RollingUpdate\n template:\n metadata:\n labels:\n app: {{ template \"signalfx-agent.name\" . }}\n version: {{ .Values.agentVersion }}\n release: {{ .Release.Name }}\n {{ with .Values.extraPodLabels -}}\n {{ toYaml . | indent 8 | trim }}\n {{- end }}\n spec:\n # Use host network so we can access kubelet directly\n hostNetwork: true\n restartPolicy: Always\n serviceAccountName: {{ template \"signalfx-agent.serviceAccountName\" . }}\n {{ with .Values.image.pullSecret -}}\n imagePullSecrets:\n - name: {{ . }}\n {{- end }}\n tolerations:\n {{ if .Values.runOnMaster -}}\n - effect: NoSchedule\n key: node.alpha.kubernetes.io/role\n operator: Exists\n - effect: NoSchedule\n key: node-role.kubernetes.io/master\n operator: Exists\n {{- end }}\n {{ range .Values.tolerations -}}\n - {{ toYaml . | indent 8 | trim }}\n {{ end }}\n containers:\n - name: signalfx-agent\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag | default .Values.agentVersion }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command:\n - /bin/signalfx-agent\n volumeMounts:\n - mountPath: /etc/signalfx\n name: config\n - mountPath: /etc/machine-id\n name: machine-id\n readOnly: true\n - mountPath: /hostfs\n name: hostfs\n readOnly: true\n - mountPath: /var/run/docker.sock\n name: docker\n readOnly: true\n env:\n - name: SFX_ACCESS_TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"signalfx-agent.fullname\" . }}\n key: access-token\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: MY_NODE_NAME\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: spec.nodeName\n - name: MY_NAMESPACE\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.namespace\n volumes:\n - name: config\n configMap:\n name: {{ template \"signalfx-agent.fullname\" . }}\n - name: hostfs\n hostPath:\n path: /\n - name: docker\n hostPath:\n path: /var/run/docker.sock\n - name: machine-id\n hostPath:\n path: /etc/machine-id\n{{- end -}}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"signalfx-agent.fullname\" . }}\n namespace: {{ template \"signalfx-agent.namespace\" . }}\n labels:\n app: {{ template \"signalfx-agent.name\" . }}\n chart: {{ template \"signalfx-agent.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ntype: Opaque\ndata:\n access-token: {{ .Values.signalFxAccessToken | b64enc | quote }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"signalfx-agent.name\" . }}\n chart: {{ template \"signalfx-agent.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"signalfx-agent.serviceAccountName\" . }}\n namespace: {{ template \"signalfx-agent.namespace\" . }}\n{{- end -}}\n"
] | # Version of the signalfx-agent to deploy. This will be the default for the
# docker image tag if not overridden with imageTag
agentVersion: 3.6.1
# The access token for SignalFx. REQUIRED
signalFxAccessToken: ""
# Docker image configuration
image:
# Image pull policy for the agent pod
pullPolicy: IfNotPresent
# The docker image to use for the agent
repository: quay.io/signalfx/signalfx-agent
# tag defaults to the agentVersion but can be overridden
tag:
# pullSecret is not needed for our standard image
pullSecret:
# How many agent pods can be unavailable at a time when rolling out a new
# version of the agent
rollingUpdateMaxUnavailable: 1
# Namespace to deploy agent in (Optional: Will default to release namespace)
namespace:
# RBAC config for the agent
rbac:
create: true
# You might need custom rules if you are pulling secrets to configure
# monitors.
customRules: []
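  ## For example, a rule letting the agent read one specific secret
  ## (sketch; the secret name is hypothetical):
  # customRules:
  #   - apiGroups: [""]
  #     resources: ["secrets"]
  #     verbs: ["get"]
  #     resourceNames: ["my-monitor-credentials"]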
# Service account config for the agent pods
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# This adds some tolerations to the pods that the DaemonSet creates that
# should allow the agent to run on the master nodes of newer versions of K8s
# that are deployed with standard master taints (see daemonset.yaml). If you
# need custom tolerations, see the 'tolerations' config option below.
runOnMaster: true
# You can specify additional tolerations for the pods that the DaemonSet
# creates.
tolerations: []
# Extra labels to put on agent pods. Values must be strings per the k8s label
# schema.
extraPodLabels: {}
# You can specify a custom agent config file with the agentConfig value. If
# you specify this, all of the options below will no longer be applicable
# since they are used to render a default config (see configmap.yaml template).
agentConfig:
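## A minimal sketch of a full override (the monitors shown are illustrative):
# agentConfig:
#   signalFxAccessToken: ${SFX_ACCESS_TOKEN}
#   intervalSeconds: 10
#   observers:
#     - type: k8s-api
#   monitors:
#     - type: collectd/cpu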
# clusterName must be provided. It is an arbitrary value that identifies this
# K8s cluster in SignalFx. This will be the value of the 'kubernetes_cluster'
# dimension on every metric sent by the agent.
clusterName:
# How frequently to send metrics by default in the agent. This can be
# overridden by individual monitors.
metricIntervalSeconds: 10
# The log level of the agent. Valid values are 'debug', 'info', 'warn', and
# 'error'. Info is a good default and won't be too spammy. Note that 'debug'
# may dump sensitive values in the provided configuration so use with care.
logLevel: info
# Whether to ignore TLS validation issues when connecting to the main K8s API
# server. This should almost never need to be set to true since the CA cert is
# provided with the service account token automatically by K8s.
apiServerSkipVerify: false
# Additional options for connecting to the Kubelet. These options are
# equivalent to what is under the 'kubeletAPI' key of the 'kubelet-stats'
# monitor. By default, the agent tries to use its service account if kubelet
# authentication is required.
kubeletAPI:
authType: serviceAccount
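  ## Assuming the monitor also honors `skipVerify` here, a sketch for kubelets
  ## with self-signed certs (use with care):
  # kubeletAPI:
  #   authType: serviceAccount
  #   skipVerify: true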
# Any values put in this object correspond to the 'collectd' config block of
# the agent config
collectd: {}
# How often to send cAdvisor-based container metrics. Defaults to whatever is
# in metricIntervalSeconds.
containerStatsIntervalSeconds:
# If true, K8s cluster-level metrics will be collected (e.g. pod counts,
# deployment status, etc). The agents will decide amongst themselves which
# instance should send the metrics so that they are only sent once.
gatherClusterMetrics: true
# Enables the docker-container-stats monitor with some specific config that
# causes it to send container stats from Docker with certain dimensions taken from
# container labels, which makes it easy to correlate metrics between cAdvisor and
# Docker. Note that Docker metrics are not sent for pause containers by
# default.
gatherDockerMetrics: true
# A list of metric names that are collected by monitors but are not to be sent
# to SignalFx. This default set includes a lot of highly specific or duplicated
# cAdvisor metrics that cause a large increase in DPM while providing little value for most
# customers.
metricNamesToExclude:
- container_cpu_user_seconds_total
- container_cpu_system_seconds_total
- container_cpu_utilization_per_core
- container_fs_reads_total
- container_fs_sector_reads_total
- container_fs_reads_merged_total
- container_fs_read_seconds_total
- container_fs_writes_total
- container_fs_sector_writes_total
- container_fs_writes_merged_total
- container_fs_write_seconds_total
- container_fs_io_current
- container_fs_io_time_seconds_total
- container_fs_io_time_weighted_seconds_total
- container_last_seen
- container_tasks_state
- pod_network_receive_packets_total
- pod_network_receive_packets_dropped_total
- pod_network_transmit_packets_total
- pod_network_transmit_packets_dropped_total
- machine_cpu_frequency_khz
# A list of monitor configurations to include in the agent config. These
# values correspond exactly to what goes under 'monitors' in the agent config.
# The following are a set of monitors with discovery rules that should cover
# many standard deployments. Most users will want to override this with their
# own monitors and discovery rules.
monitors:
- type: collectd/activemq
discoveryRule: container_image =~ "activemq" && private_port == 1099
- type: collectd/apache
discoveryRule: container_image =~ "apache" && private_port == 80
- type: collectd/cassandra
discoveryRule: container_image =~ "cassandra" && private_port == 7199
- type: collectd/consul
discoveryRule: container_image =~ "consul" && private_port == 8500
- type: collectd/elasticsearch
discoveryRule: container_image =~ "elasticsearch" && port == 9200
- type: collectd/etcd
discoveryRule: container_image =~ "etcd" && port == 2379
# REQUIRED
clusterName: my-cluster
- type: collectd/haproxy
discoveryRule: container_image =~ "haproxy" && port == 9000
- type: collectd/kafka
discoveryRule: container_image =~ "kafka" && private_port == 9092
- type: collectd/memcached
discoveryRule: container_image =~ "memcache" && private_port == 11211
- type: collectd/mongodb
discoveryRule: container_image =~ "mongo" && private_port == 27017
# REQUIRED
databases:
- mydatabase
- type: collectd/mysql
discoveryRule: container_image =~ "mysql" && private_port == 3306
# REQUIRED
username: admin
databases:
- name: mydb
- type: collectd/nginx
discoveryRule: container_image =~ "nginx" && private_port == 80
- type: collectd/rabbitmq
discoveryRule: container_image =~ "rabbitmq" && private_port == 15672
- type: collectd/redis
discoveryRule: container_image =~ "redis" && private_port == 6379
- type: collectd/spark
discoveryRule: container_image =~ "spark" && private_port == 8080
isMaster: true
collectApplicationMetrics: true
clusterType: Standalone
- type: collectd/spark
discoveryRule: container_image =~ "spark" && private_port >= 8081
isMaster: false
clusterType: Standalone
- type: collectd/zookeeper
discoveryRule: container_image =~ "zookeeper" && private_port == 2181
|
jasperreports | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"jasperreports.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"jasperreports.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"jasperreports.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"jasperreports.mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the proper Jasper Reports image name\n*/}}\n{{- define \"jasperreports.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"jasperreports.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if .Values.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if .Values.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . 
}}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"jasperreports.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"jasperreports.deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: {{ template \"jasperreports.deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"jasperreports.fullname\" . }}\n labels:\n app: \"{{ template \"jasperreports.fullname\" . }}\"\n chart: \"{{ template \"jasperreports.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n selector:\n matchLabels:\n app: \"{{ template \"jasperreports.fullname\" . }}\"\n release: {{ .Release.Name | quote }}\n template:\n metadata:\n labels:\n app: \"{{ template \"jasperreports.fullname\" . }}\"\n chart: \"{{ template \"jasperreports.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n spec:\n{{- include \"jasperreports.imagePullSecrets\" . | indent 6 }}\n containers:\n - name: {{ template \"jasperreports.fullname\" . }}\n image: {{ template \"jasperreports.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n - name: ALLOW_EMPTY_PASSWORD\n value: {{ .Values.allowEmptyPassword | quote }}\n {{- if .Values.mariadb.enabled }}\n - name: MARIADB_HOST\n value: {{ template \"jasperreports.mariadb.fullname\" . }}\n - name: MARIADB_PORT_NUMBER\n value: \"3306\"\n - name: JASPERREPORTS_DATABASE_NAME\n value: {{ .Values.mariadb.db.name | quote }}\n - name: JASPERREPORTS_DATABASE_USER\n value: {{ .Values.mariadb.db.user | quote }}\n - name: JASPERREPORTS_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"jasperreports.mariadb.fullname\" . }}\n key: mariadb-password\n {{- else }}\n - name: MARIADB_HOST\n value: {{ .Values.externalDatabase.host | quote }}\n - name: MARIADB_PORT_NUMBER\n value: {{ .Values.externalDatabase.port | quote }}\n - name: JASPERREPORTS_DATABASE_NAME\n value: {{ .Values.externalDatabase.database | quote }}\n - name: JASPERREPORTS_DATABASE_USER\n value: {{ .Values.externalDatabase.user | quote }}\n - name: JASPERREPORTS_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n key: db-password\n {{- end }}\n - name: JASPERREPORTS_USERNAME\n value: {{ .Values.jasperreportsUsername | quote }}\n - name: JASPERREPORTS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"jasperreports.fullname\" . }}\n key: jasperreports-password\n - name: JASPERREPORTS_EMAIL\n value: {{ .Values.jasperreportsEmail | quote }}\n {{- if .Values.smtpHost }}\n - name: SMTP_HOST\n value: {{ .Values.smtpHost | quote }}\n {{- end }}\n {{- if .Values.smtpPort }}\n - name: SMTP_PORT\n value: {{ .Values.smtpPort | quote }}\n {{- end }}\n {{- if .Values.smtpEmail }}\n - name: SMTP_EMAIL\n value: {{ .Values.smtpEmail| quote }}\n {{- end }}\n {{- if .Values.smtpUser }}\n - name: SMTP_USER\n value: {{ .Values.smtpUser | quote }}\n {{- end }}\n {{- if .Values.smtpPassword }}\n - name: SMTP_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"jasperreports.fullname\" . 
}}\n key: smtp-password\n {{- end }}\n {{- if .Values.smtpProtocol }}\n - name: SMTP_PROTOCOL\n value: {{ .Values.smtpProtocol | quote }}\n {{- end }}\n ports:\n - name: http\n containerPort: 8080\n livenessProbe:\n httpGet:\n path: /jasperserver/login.html\n port: http\n initialDelaySeconds: 300\n timeoutSeconds: 5\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /jasperserver/login.html\n port: http\n initialDelaySeconds: 30\n timeoutSeconds: 3\n periodSeconds: 5\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n - name: jasperreports-data\n mountPath: /bitnami/jasperreports\n volumes:\n - name: jasperreports-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"jasperreports.fullname\" . }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# externaldb-secrets.yaml\n{{- if not .Values.mariadb.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n labels:\n app: \"{{ template \"jasperreports.fullname\" . }}\"\n chart: \"{{ template \"jasperreports.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ntype: Opaque\ndata:\n db-password: {{ default \"\" .Values.externalDatabase.password | b64enc | quote }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"jasperreports.fullname\" . }}\n labels:\n app: \"{{ template \"jasperreports.fullname\" . }}\"\n chart: \"{{ template \"jasperreports.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n annotations:\n {{- if .Values.ingress.certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: {{ template \"jasperreports.fullname\" $ }}\n servicePort: http\n {{- end }}\n tls:\n {{- range .Values.ingress.hosts }}\n {{- if .tls }}\n - hosts:\n {{- if .tlsHosts }}\n {{- range $host := .tlsHosts }}\n - {{ $host }}\n {{- end }}\n {{- else }}\n - {{ .name }}\n {{- end }}\n secretName: {{ .tlsSecret }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# pvc.yaml\n{{- if .Values.persistence.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"jasperreports.fullname\" . }}\n labels:\n app: \"{{ template \"jasperreports.fullname\" . }}\"\n chart: \"{{ template \"jasperreports.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{ include \"jasperreports.storageClass\" . }}\n{{- end -}}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"jasperreports.fullname\" . }}\n labels:\n app: \"{{ template \"jasperreports.fullname\" . }}\"\n chart: \"{{ template \"jasperreports.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ntype: Opaque\ndata:\n {{- if .Values.jasperreportsPassword }}\n jasperreports-password: {{ default \"\" .Values.jasperreportsPassword | b64enc | quote }}\n {{- else }}\n jasperreports-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{- end }}\n {{- if .Values.smtpPassword }}\n smtp-password: {{ default \"\" .Values.smtpPassword | b64enc | quote }}\n {{- end }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"jasperreports.fullname\" . }}\n labels:\n app: \"{{ template \"jasperreports.fullname\" . }}\"\n chart: \"{{ template \"jasperreports.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n type: {{ .Values.service.type }}\n {{- if (or (eq .Values.service.type \"LoadBalancer\") (eq .Values.service.type \"NodePort\")) }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}\n {{- end }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n targetPort: http\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n selector:\n app: \"{{ template \"jasperreports.fullname\" . }}\"\n"
] | ## Global Docker image parameters
## Please note that this will override the image parameters, including those of dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami JasperReports image version
## ref: https://hub.docker.com/r/bitnami/jasperreports/tags/
##
image:
registry: docker.io
repository: bitnami/jasperreports
tag: 7.2.0-debian-10-r24
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override jasperreports.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override jasperreports.fullname template
##
# fullnameOverride:
## User of the application
## ref: https://github.com/bitnami/bitnami-docker-jasperreports#configuration
##
jasperreportsUsername: user
## Application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/bitnami-docker-jasperreports#configuration
##
# jasperreportsPassword:
#
## Application mail
## ref: https://github.com/bitnami/bitnami-docker-jasperreports#configuration
##
jasperreportsEmail: [email protected]
## Set to `yes` to allow the container to be started with blank passwords
## ref: https://github.com/bitnami/bitnami-docker-jasperreports#environment-variables
allowEmptyPassword: "yes"
##
## External database configuration
##
externalDatabase:
## Database host
host:
## Database port
port: 3306
## Database user
user: bn_jasperreports
## Database password
password:
## Database name
database: bitnami_jasperreports
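## Illustrative only: to point the chart at the external database above, disable the
## bundled MariaDB and pass the connection details, e.g.
## helm install stable/jasperreports --set mariadb.enabled=false,externalDatabase.host=mydb.example.com,externalDatabase.password=secret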
## SMTP mail delivery configuration
## ref: https://github.com/bitnami/bitnami-docker-jasperreports#smtp-configuration
##
# smtpHost:
# smtpPort:
# smtpEmail:
# smtpUser:
# smtpPassword:
# smtpProtocol:
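## Illustrative example (placeholder values, not defaults):
# smtpHost: smtp.example.com
# smtpPort: "587"
# smtpEmail: [email protected]
# smtpUser: reports-mailer
# smtpPassword: change-me
# smtpProtocol: tls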
##
## MariaDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml
##
mariadb:
## Whether to deploy a MariaDB server to satisfy the application's database requirements. To use an external database, set this to false and configure the externalDatabase parameters
enabled: true
## Disable MariaDB replication
replication:
enabled: false
## Create a database and a database user
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run
##
db:
name: bitnami_jasperreports
user: bn_jasperreports
## If the password is not specified, MariaDB will generate a random password
##
# password:
## MariaDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run
##
# rootUser:
# password:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
master:
persistence:
enabled: true
## mariadb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Kubernetes configuration
## For minikube, set this to NodePort, elsewhere use LoadBalancer
##
service:
type: LoadBalancer
# HTTP Port
port: 80
##
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
## https: <to set explicitly, choose port between 30000-32767>
nodePorts:
http: ""
## Enable client source IP preservation
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
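## Note: "Cluster" spreads traffic across all nodes but obscures the client source IP;
## "Local" preserves the client IP at the cost of potentially uneven spreading.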
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## database data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 512Mi
cpu: 300m
## Configure the ingress resource that allows you to access the
## JasperReports installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- name: jasperreports.local
path: /
## Set this to true in order to enable TLS on the ingress record
tls: false
## Optionally specify the TLS hosts for the ingress record
## Useful when the Ingress controller supports www-redirection
## If not specified, the above host name will be used
# tlsHosts:
# - www.jasperreports.local
# - jasperreports.local
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: jasperreports.local-tls
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## the certificate should start with -----BEGIN CERTIFICATE----- and the
## key with -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: jasperreports.local-tls
# key:
# certificate:
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
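## Illustrative pod anti-affinity example (matchLabels must match the labels the
## chart sets on the pod, which are based on the release name):
# podAntiAffinity:
#   preferredDuringSchedulingIgnoredDuringExecution:
#     - weight: 5
#       podAffinityTerm:
#         topologyKey: "kubernetes.io/hostname"
#         labelSelector:
#           matchLabels:
#             app: my-release-jasperreports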
|
dex | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"dex.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"dex.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"dex.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCommon labels\n*/}}\n{{- define \"dex.labels\" -}}\napp.kubernetes.io/name: {{ include \"dex.name\" . }}\nhelm.sh/chart: {{ include \"dex.chart\" . }}\napp.kubernetes.io/instance: {{ .Release.Name }}\n{{- if .Chart.AppVersion }}\napp.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n{{- end }}\napp.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"dex.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"dex.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if and .Values.rbac.create (not .Values.crd.present) }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels:\n{{ include \"dex.labels\" . | indent 4 }}\n name: {{ template \"dex.fullname\" . }}\nrules:\n- apiGroups: [\"dex.coreos.com\"] # API group created by dex\n resources: [\"*\"]\n verbs: [\"*\"]\n- apiGroups: [\"apiextensions.k8s.io\"]\n resources: [\"customresourcedefinitions\"]\n verbs: [\"create\"] # To manage its own resources, dex must be able to create customresourcedefinitions\n{{- end -}}\n",
"# clusterrolebinding.yaml\n{{- if and .Values.rbac.create (not .Values.crd.present) }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n{{ include \"dex.labels\" . | indent 4 }}\n name: {{ template \"dex.fullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"dex.fullname\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"dex.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# config-openssl.yaml\n{{- if and .Values.grpc .Values.certs.grpc.create }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n labels:\n{{ include \"dex.labels\" . | indent 4 }}\n name: {{ template \"dex.fullname\" . }}-openssl-config\ndata:\n openssl.conf: |\n{{ .Files.Get \"config/openssl.conf\" | indent 4 }}\n{{- end }}\n",
"# deployment.yaml\n{{ $fullname := include \"dex.fullname\" . }}\n{{ $httpsTlsBuiltName := printf \"%s-tls\" $fullname }}\n{{ $httpsTlsSecretName := default $httpsTlsBuiltName .Values.certs.web.secret.tlsName }}\n{{ $grpcTlsServerBuiltName := printf \"%s-server-tls\" $fullname }}\n{{ $grpcTlsServerSecretName := default $grpcTlsServerBuiltName .Values.certs.grpc.secret.serverTlsName }}\n{{ $grpcCaBuiltName := printf \"%s-ca\" $fullname }}\n{{ $grpcCaSecretName := default $grpcCaBuiltName .Values.certs.grpc.secret.caName }}\n\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"dex.fullname\" . }}\n labels:\n{{ include \"dex.labels\" . | indent 4 }}\n app.kubernetes.io/component: dex\nspec:\n replicas: {{ .Values.replicas }}\n strategy:\n rollingUpdate:\n maxSurge: 0\n maxUnavailable: 1\n type: RollingUpdate\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"dex.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/component: dex\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"dex.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/component: dex\n {{- if .Values.podLabels }}\n{{ toYaml .Values.podLabels | indent 8 }}\n {{- end }}\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/secret.yaml\") . | sha256sum }}\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n{{- if .Values.priorityClassName }}\n priorityClassName: \"{{ .Values.priorityClassName }}\"\n{{- end }}\n{{- if .Values.initContainers }}\n initContainers:\n {{- toYaml .Values.initContainers | nindent 8 }}\n{{- end }}\n serviceAccountName: {{ template \"dex.serviceAccountName\" . }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 10 }}\n{{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.affinity }}\n affinity:\n{{ toYaml . 
| indent 8 }}\n {{- end }}\n {{- if .Values.securityContext }}\n securityContext: {{ toYaml .Values.securityContext | nindent 8 }}\n {{- end }}\n containers:\n - name: main\n image: \"{{ .Values.image }}:{{ .Values.imageTag }}\"\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n command:\n - /usr/local/bin/dex\n - serve\n - /etc/dex/cfg/config.yaml\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n ports:\n - name: {{ if .Values.https }}https{{ else }}http{{ end }}\n containerPort: {{ .Values.ports.web.containerPort }}\n protocol: TCP\n {{- if .Values.grpc }}\n - name: grpc\n containerPort: {{ .Values.ports.grpc.containerPort }}\n protocol: TCP\n {{- end }}\n {{- if .Values.telemetry }}\n - name: telemetry\n containerPort: {{ .Values.ports.telemetry.containerPort }}\n protocol: TCP\n {{- end }}\n{{- if and (not .Values.https) .Values.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: {{ .Values.livenessProbe.httpPath }}\n port: {{ if .Values.https }}https{{ else }}http{{ end }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n{{- end }}\n{{- if and (not .Values.https) .Values.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: {{ .Values.readinessProbe.httpPath }}\n port: {{ if .Values.https }}https{{ else }}http{{ end }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n{{- end }}\n env:\n{{ toYaml .Values.env | indent 10 }}\n volumeMounts:\n - mountPath: /etc/dex/cfg\n name: config\n{{- if .Values.https }}\n - mountPath: /etc/dex/tls/https/server\n name: https-tls\n{{- end }}\n{{- if .Values.grpc }}\n - mountPath: /etc/dex/tls/grpc/server\n name: grpc-tls-server\n - mountPath: /etc/dex/tls/grpc/ca\n name: grpc-tls-ca\n{{- end }}\n{{- if ne (len .Values.extraVolumeMounts) 0 }}\n{{ toYaml .Values.extraVolumeMounts | indent 8 }}\n{{- end }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.imagePullSecrets | indent 8 }}\n {{- end }}\n volumes:\n - secret:\n defaultMode: 420\n items:\n - key: config.yaml\n path: config.yaml\n secretName: {{ template \"dex.fullname\" . }}\n name: config\n{{- if .Values.https }}\n - name: https-tls\n secret:\n defaultMode: 420\n secretName: {{ $httpsTlsSecretName | quote }}\n{{- end }}\n{{- if .Values.grpc }}\n - name: grpc-tls-server\n secret:\n defaultMode: 420\n secretName: {{ $grpcTlsServerSecretName | quote }}\n - name: grpc-tls-ca\n secret:\n defaultMode: 420\n secretName: {{ $grpcCaSecretName| quote }}\n{{- end }}\n{{- if ne (len .Values.extraVolumes) 0 }}\n{{ toYaml .Values.extraVolumes | indent 6 }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"dex.fullname\" . -}}\n{{- $servicePort := .Values.ports.web.servicePort -}}\n{{- $ingressPath := .Values.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n{{ include \"dex.labels\" . | indent 4 }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . | quote }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . | quote }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: {{ $servicePort }}\n {{- end }}\n{{- end }}\n",
"# job-grpc-certs.yaml\n{{- if and .Values.grpc .Values.certs.grpc.create }}\n{{ $fullname := include \"dex.fullname\" . }}\n{{ $tlsServerBuiltName := printf \"%s-server-tls\" $fullname }}\n{{ $tlsServerSecretName := default $tlsServerBuiltName .Values.certs.grpc.secret.serverTlsName }}\n{{ $tlsClientBuiltName := printf \"%s-client-tls\" $fullname }}\n{{ $tlsClientSecretName := default $tlsClientBuiltName .Values.certs.grpc.secret.clientTlsName }}\n{{ $caBuiltName := printf \"%s-ca\" $fullname }}\n{{ $caName := default $caBuiltName .Values.certs.grpc.secret.caName }}\n{{ $openSslConfigName := printf \"%s-openssl-config\" $fullname }}\n{{ $local := dict \"i\" 0 }}\napiVersion: batch/v1\nkind: Job\nmetadata:\n annotations:\n \"helm.sh/hook\": post-install\n \"helm.sh/hook-weight\": \"2\"\n \"helm.sh/hook-delete-policy\": hook-succeeded\n name: {{ $fullname }}-grpc-certs\n labels:\n{{ include \"dex.labels\" . | indent 4 }}\n app.kubernetes.io/component: \"job-grpc-certs\"\nspec:\n activeDeadlineSeconds: {{ .Values.certs.grpc.activeDeadlineSeconds }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"dex.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/component: \"job-grpc-certs\"\n{{- if .Values.certs.grpc.pod.annotations }}\n annotations:\n{{ toYaml .Values.certs.grpc.pod.annotations | trim | indent 8 }}\n{{- end }}\n spec:\n {{- if .Values.certs.securityContext.enabled }}\n securityContext:\n runAsUser: {{ .Values.certs.securityContext.runAsUser }}\n fsGroup: {{ .Values.certs.securityContext.fsGroup }}\n {{- end }}\n serviceAccountName: {{ template \"dex.serviceAccountName\" . }}\n nodeSelector:\n{{ toYaml .Values.certs.grpc.pod.nodeSelector | indent 8 }}\n{{- with .Values.certs.grpc.pod.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.certs.grpc.pod.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n{{- end }}\n restartPolicy: OnFailure\n containers:\n - name: main\n image: \"{{ .Values.certs.image }}:{{ .Values.certs.imageTag }}\"\n imagePullPolicy: {{ .Values.certs.imagePullPolicy }}\n env:\n - name: HOME\n value: /tmp\n workingDir: /tmp\n command:\n - /bin/bash\n - -exc\n - |\n export CONFIG=/etc/dex/tls/grpc/openssl.conf;\n cat << EOF > san.cnf\n {{- $_ := set $local \"i\" 1 }}\n {{- range .Values.certs.grpc.altNames }}\n DNS.{{ $local.i }}:{{ . }}\n {{- $_ := set $local \"i\" ( add1 $local.i ) }}\n {{- end }}\n {{- $_ := set $local \"i\" 1 }}\n {{- range .Values.certs.grpc.altIPs }}\n IP.{{ $local.i }}:{{ . 
}}\n {{- $_ := set $local \"i\" ( add1 $local.i ) }}\n {{- end }}\n EOF\n export SAN=$(cat san.cnf | paste -sd \",\" -)\n\n # Creating basic files/directories\n mkdir -p {certs,crl,newcerts}\n touch index.txt\n touch index.txt.attr\n echo 1000 > serial\n # CA private key (unencrypted)\n openssl genrsa -out ca.key 4096;\n # Certificate Authority (self-signed certificate)\n openssl req -config $CONFIG -new -x509 -days 3650 -sha256 -key ca.key -extensions v3_ca -out ca.crt -subj \"/CN=grpc-ca\";\n # Server private key (unencrypted)\n openssl genrsa -out server.key 2048;\n # Server certificate signing request (CSR)\n openssl req -config $CONFIG -new -sha256 -key server.key -out server.csr -subj \"/CN=grpc-server\";\n # Certificate Authority signs CSR to grant a certificate\n openssl ca -batch -config $CONFIG -extensions server_cert -days 365 -notext -md sha256 -in server.csr -out server.crt -cert ca.crt -keyfile ca.key;\n # Client private key (unencrypted)\n openssl genrsa -out client.key 2048;\n # Signed client certificate signing request (CSR)\n openssl req -config $CONFIG -new -sha256 -key client.key -out client.csr -subj \"/CN=grpc-client\";\n # Certificate Authority signs CSR to grant a certificate\n openssl ca -batch -config $CONFIG -extensions usr_cert -days 365 -notext -md sha256 -in client.csr -out client.crt -cert ca.crt -keyfile ca.key;\n # Remove CSR's\n rm *.csr;\n\n # Cleanup the existing configmap and secrets\n kubectl delete configmap {{ $caName }} --namespace {{ .Release.Namespace }} || true\n kubectl delete secret {{ $caName }} {{ $tlsServerSecretName }} {{ $tlsClientSecretName }} --namespace {{ .Release.Namespace }} || true\n kubectl create configmap {{ $caName }} --namespace {{ .Release.Namespace }} --from-file=ca.crt;\n # Store all certficates in secrets\n kubectl create secret tls {{ $caName }} --namespace {{ .Release.Namespace }} --cert=ca.crt --key=ca.key;\n kubectl create secret tls {{ $tlsServerSecretName }} --namespace {{ .Release.Namespace }} --cert=server.crt --key=server.key;\n kubectl create secret tls {{ $tlsClientSecretName }} --namespace {{ .Release.Namespace }} --cert=client.crt --key=client.key;\n volumeMounts:\n - name: openssl-config\n mountPath: /etc/dex/tls/grpc\n volumes:\n - name: openssl-config\n configMap:\n name: {{ $openSslConfigName }}\n{{- end }}\n",
"# job-web-certs.yaml\n{{- if .Values.certs.web.create }}\n{{ $fullname := include \"dex.fullname\" . }}\n{{ $tlsBuiltName := printf \"%s-tls\" $fullname }}\n{{ $tlsSecretName := default $tlsBuiltName .Values.certs.web.secret.tlsName }}\n{{ $caBuiltName := printf \"%s-ca\" $fullname }}\n{{ $caName := default $caBuiltName .Values.certs.web.secret.caName }}\n{{ $local := dict \"i\" 0 }}\napiVersion: batch/v1\nkind: Job\nmetadata:\n annotations:\n \"helm.sh/hook\": post-install\n \"helm.sh/hook-weight\": \"1\"\n \"helm.sh/hook-delete-policy\": hook-succeeded\n name: {{ $fullname }}-web-certs\n labels:\n{{ include \"dex.labels\" . | indent 4 }}\n app.kubernetes.io/component: \"job-web-certs\"\nspec:\n activeDeadlineSeconds: {{ .Values.certs.web.activeDeadlineSeconds }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"dex.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/component: \"job\"\n{{- if .Values.certs.web.pod.annotations }}\n annotations:\n{{ toYaml .Values.certs.web.pod.annotations | trim | indent 8 }}\n{{- end }}\n spec:\n {{- if .Values.certs.securityContext.enabled }}\n securityContext:\n runAsUser: {{ .Values.certs.securityContext.runAsUser }}\n fsGroup: {{ .Values.certs.securityContext.fsGroup }}\n {{- end }}\n serviceAccountName: {{ template \"dex.serviceAccountName\" . }}\n nodeSelector:\n{{ toYaml .Values.certs.web.pod.nodeSelector | indent 8 }}\n{{- with .Values.certs.web.pod.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.certs.web.pod.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n{{- end }}\n restartPolicy: OnFailure\n containers:\n - name: main\n image: \"{{ .Values.certs.image }}:{{ .Values.certs.imageTag }}\"\n imagePullPolicy: {{ .Values.certs.imagePullPolicy }}\n env:\n - name: HOME\n value: /tmp\n workingDir: /tmp\n command:\n - /bin/bash\n - -exc\n - |\n cat << EOF > req.cnf\n [req]\n req_extensions = v3_req\n distinguished_name = req_distinguished_name\n\n [req_distinguished_name]\n\n [ v3_req ]\n basicConstraints = CA:FALSE\n keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n subjectAltName = @alt_names\n\n [alt_names]\n {{- $_ := set $local \"i\" 1 }}\n {{- range .Values.certs.web.altNames }}\n DNS.{{ $local.i }} = {{ . }}\n {{- $_ := set $local \"i\" ( add1 $local.i ) }}\n {{- end }}\n {{- $_ := set $local \"i\" 1 }}\n {{- range .Values.certs.web.altIPs }}\n IP.{{ $local.i }} = {{ . 
}}\n {{- $_ := set $local \"i\" ( add1 $local.i ) }}\n {{- end }}\n EOF\n\n openssl genrsa -out ca-key.pem 2048;\n openssl req -x509 -new -nodes -key ca-key.pem -days {{ .Values.certs.web.caDays }} -out ca.pem -subj \"/CN=dex-ca\";\n\n openssl genrsa -out key.pem 2048;\n openssl req -new -key key.pem -out csr.pem -subj \"/CN=dex\" -config req.cnf;\n openssl x509 -req -in csr.pem -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out cert.pem -days {{ .Values.certs.web.certDays }} -extensions v3_req -extfile req.cnf;\n\n kubectl delete configmap {{ $caName | quote }} --namespace {{ .Release.Namespace }} || true\n kubectl delete secret {{ $caName | quote }} {{ $tlsSecretName }} --namespace {{ .Release.Namespace }} || true\n\n kubectl create configmap {{ $caName | quote }} --namespace {{ .Release.Namespace }} --from-file dex-ca.pem=ca.pem;\n kubectl create secret tls {{ $caName | quote }} --namespace {{ .Release.Namespace }} --cert=ca.pem --key=ca-key.pem;\n kubectl create secret tls {{ $tlsSecretName }} --namespace {{ .Release.Namespace }} --cert=cert.pem --key=key.pem;\n{{- if .Values.inMiniKube }}\n cp -a ca.pem /var/lib/localkube/oidc.pem\n volumeMounts:\n - mountPath: /var/lib/localkube\n name: localkube\n volumes:\n - name: localkube\n hostPath:\n path: /var/lib/localkube\n{{- end }}\n{{- end }}\n",
"# poddisruptionbudget.yaml\n{{- if .Values.podDisruptionBudget -}}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"dex.fullname\" . }}\n labels:\n{{ include \"dex.labels\" . | indent 4 }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"dex.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n{{ toYaml .Values.podDisruptionBudget | indent 2 }}\n{{- end -}}\n",
"# role.yaml\n{{- if .Values.rbac.create }}\n{{- if or .Values.certs.grpc.create .Values.certs.web.create .Values.crd.present }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n labels:\n{{ include \"dex.labels\" . | indent 4 }}\n name: {{ template \"dex.fullname\" . }}\n namespace: {{ .Release.Namespace }}\nrules:\n{{- if .Values.crd.present }}\n- apiGroups: [\"dex.coreos.com\"] # API group created by dex\n resources: [\"*\"]\n verbs: [\"*\"]\n{{- end -}}\n{{- if or .Values.certs.grpc.create .Values.certs.web.create }}\n- apiGroups: [\"\"]\n resources: [\"configmaps\", \"secrets\"]\n verbs: [\"create\", \"delete\"]\n{{- end -}}\n{{- end -}}\n{{- end -}}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create }}\n{{- if or .Values.certs.grpc.create .Values.certs.web.create .Values.crd.present }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n labels:\n{{ include \"dex.labels\" . | indent 4 }}\n name: {{ template \"dex.fullname\" . }}\n namespace: {{ .Release.Namespace }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"dex.fullname\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"dex.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n{{- end -}}\n",
"# secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n labels:\n{{ include \"dex.labels\" . | indent 4 }}\n name: {{ template \"dex.fullname\" . }}\nstringData:\n config.yaml: |-\n {{- with .Values.config }}\n issuer: {{ .issuer }}\n storage:\n{{ toYaml .storage | indent 6 }}\n logger:\n{{ toYaml .logger | indent 6 }}\n web:\n {{- if $.Values.https }}\n https: {{ $.Values.config.web.address }}:{{ $.Values.ports.web.containerPort }}\n tlsCert: {{ .web.tlsCert }}\n tlsKey: {{ .web.tlsKey }}\n {{- else }}\n http: {{ $.Values.config.web.address }}:{{ $.Values.ports.web.containerPort }}\n {{- end }}\n {{- if .web.allowedOrigins }}\n allowedOrigins:\n {{- range $allowedOrigin := .web.allowedOrigins }}\n - {{ $allowedOrigin | quote }}\n {{- end }} \n {{- end }}\n {{- if $.Values.grpc }}\n grpc:\n addr: {{ $.Values.config.grpc.address }}:{{ $.Values.ports.grpc.containerPort }}\n tlsCert: {{ .grpc.tlsCert }}\n tlsKey: {{ .grpc.tlsKey }}\n tlsClientCA: {{ .grpc.tlsClientCA }}\n {{- end }}\n {{- if $.Values.telemetry }}\n telemetry:\n http: {{ $.Values.config.web.address }}:{{ $.Values.ports.telemetry.containerPort }}\n {{- end }}\n {{- if .connectors }}\n connectors:\n{{ toYaml .connectors | indent 4 }}\n {{- end }}\n oauth2: {{ toYaml .oauth2 | nindent 6 }}\n {{- if .staticClients }}\n staticClients:\n{{ toYaml .staticClients | indent 4 }}\n {{- end }}\n enablePasswordDB: {{ .enablePasswordDB }}\n {{- if .staticPasswords }}\n staticPasswords:\n{{ toYaml .staticPasswords | indent 4 }}\n {{- end }}\n {{- if .expiry }}\n expiry:\n{{ toYaml .expiry | indent 6 }}\n {{- end }}\n {{- if .frontend }}\n frontend: {{ toYaml .frontend | nindent 6 }}\n {{- end }}\n {{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"dex.fullname\" . }}\n labels:\n{{ include \"dex.labels\" . | indent 4 }}\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type}}\n sessionAffinity: None\n ports:\n - name: {{ if .Values.https }}https{{ else }}http{{ end }}\n targetPort: {{ if .Values.https }}https{{ else }}http{{ end }}\n{{- if eq \"NodePort\" .Values.service.type }}\n nodePort: {{ .Values.ports.web.nodePort }}\n{{- end }}\n port: {{ .Values.ports.web.servicePort }}\n{{- if .Values.grpc }}\n - name: grpc\n targetPort: grpc\n {{- if eq \"NodePort\" .Values.service.type }}\n nodePort: {{ .Values.ports.grpc.nodePort }}\n {{- end }}\n port: {{ .Values.ports.grpc.servicePort }}\n{{- end }}\n{{- if .Values.telemetry }}\n - name: telemetry\n targetPort: telemetry\n {{- if eq \"NodePort\" .Values.service.type }}\n nodePort: {{ .Values.ports.telemetry.nodePort }}\n {{- end }}\n port: {{ .Values.ports.telemetry.servicePort }}\n{{- end }}\n{{- if hasKey .Values.service \"externalIPs\" }}\n externalIPs:\n{{ toYaml .Values.service.externalIPs | indent 4 }}\n{{- end }}\n{{- if hasKey .Values.service \"loadBalancerIP\" }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n{{- end }}\n selector:\n app.kubernetes.io/name: {{ include \"dex.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n{{ include \"dex.labels\" . | indent 4 }}\n name: {{ template \"dex.serviceAccountName\" . }}\n{{- end -}}\n"
] | # Default values for dex
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
image: quay.io/dexidp/dex
imageTag: "v2.24.0"
imagePullPolicy: "IfNotPresent"
imagePullSecrets: {}
inMiniKube: false
nodeSelector: {}
podLabels: {}
podAnnotations: {}
priorityClassName: ""
initContainers: []
tolerations: []
# - key: CriticalAddonsOnly
# operator: Exists
# - key: foo
# operator: Equal
# value: bar
# effect: NoSchedule
securityContext:
# Rejecting containers trying to run with root privileges
# runAsNonRoot: true
# Preventing privilege escalation to root privileges
# allowPrivilegeEscalation: false
# Set the user ID used to run the container
# runAsUser: 1001
# Set the primary group ID used to run all processes within any container of the pod
# runAsGroup: 1001
# Set the group ID associated with the container
# fsGroup: 1001
replicas: 1
# resources:
# limits:
# cpu: 100m
# memory: 50Mi
# requests:
# cpu: 100m
# memory: 50Mi
# grpc support
grpc: true
# https termination by dex itself
https: false
# expose Prometheus metrics?
telemetry: false
ports:
web:
containerPort: 5556
# for service.type: NodePort
nodePort: 32000
servicePort: 32000
# Relevant only when grpc support is enabled
grpc:
containerPort: 5000
# for service.type: NodePort
nodePort: 35000
servicePort: 35000
telemetry:
containerPort: 5558
# for service.type: NodePort
nodePort: 37000
servicePort: 37000
livenessProbe:
enabled: true
initialDelaySeconds: 1
failureThreshold: 1
httpPath: "/healthz"
periodSeconds: 10
timeoutSeconds: 1
readinessProbe:
enabled: true
initialDelaySeconds: 1
failureThreshold: 1
httpPath: "/healthz"
periodSeconds: 10
timeoutSeconds: 1
service:
type: ClusterIP
# Override IP for the Service Type: LoadBalancer.
# This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created.
# loadBalancerIP: 127.0.0.1
annotations: {}
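# Illustrative example: annotate the Service, e.g. for an internal GCP load balancer
# annotations:
#   cloud.google.com/load-balancer-type: "Internal"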
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- dex.example.com
tls: []
# - secretName: dex-example-tls
# hosts:
# - dex.example.com
extraVolumes: []
extraVolumeMounts: []
certs:
securityContext:
enabled: true
runAsUser: 65534
fsGroup: 65534
image: gcr.io/google_containers/kubernetes-dashboard-init-amd64
imageTag: "v1.0.0"
imagePullPolicy: "IfNotPresent"
# Section below is relevant only when https termination is enabled
web:
create: true
activeDeadlineSeconds: 300
caDays: 10000
certDays: 10000
altNames:
- dex.example.com
altIPs: {}
secret:
tlsName: dex-web-server-tls
caName: dex-web-server-ca
pod:
annotations: {}
nodeSelector: {}
tolerations: []
# - key: CriticalAddonsOnly
# operator: Exists
# - key: foo
# operator: Equal
# value: bar
# effect: NoSchedule
affinity: {}
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 5
# podAffinityTerm:
# topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# app: {{ template "dex.name" . }}
# release: "{{ .Release.Name }}"
# Section below is relevant only when grpc support is enabled
grpc:
create: true
activeDeadlineSeconds: 300
altNames:
- dex.example.com
altIPs: {}
secret:
serverTlsName: dex-grpc-server-tls
clientTlsName: dex-grpc-client-tls
caName: dex-grpc-ca
pod:
annotations: {}
nodeSelector: {}
tolerations: []
# - key: CriticalAddonsOnly
# operator: Exists
# - key: foo
# operator: Equal
# value: bar
# effect: NoSchedule
affinity: {}
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 5
# podAffinityTerm:
# topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# app: {{ template "dex.name" . }}
# release: "{{ .Release.Name }}"
env: []
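# Illustrative example of extra environment variables passed to the dex container:
# env:
#   - name: HTTPS_PROXY
#     value: "http://proxy.example.com:3128"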
rbac:
# Specifies whether RBAC resources should be created
create: true
crd:
# Specifies whether dex's CRDs are already present (if not, a ClusterRole and ClusterRoleBinding
# will be created so that dex can create them). Depends on rbac.create
present: false
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
affinity: {}
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 5
# podAffinityTerm:
# topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# app: {{ template "dex.name" . }}
# release: "{{ .Release.Name }}"
podDisruptionBudget: {}
# maxUnavailable: 1
config:
issuer: http://dex.example.com:8080
storage:
type: kubernetes
config:
inCluster: true
logger:
level: debug
web:
# port is taken from ports section above
address: 0.0.0.0
tlsCert: /etc/dex/tls/https/server/tls.crt
tlsKey: /etc/dex/tls/https/server/tls.key
allowedOrigins: []
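# Illustrative example: allow CORS requests from a browser-based client
# allowedOrigins:
#   - "https://app.example.com"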
# Section below is relevant only when grpc support is enabled
grpc:
# port is taken from ports section above
address: 127.0.0.1
tlsCert: /etc/dex/tls/grpc/server/tls.crt
tlsKey: /etc/dex/tls/grpc/server/tls.key
tlsClientCA: /etc/dex/tls/grpc/ca/tls.crt
connectors: []
# - type: github
# id: github
# name: GitHub
# config:
# clientID: xxxxxxxxxxxxxxx
# clientSecret: yyyyyyyyyyyyyyyyyyyyy
# redirectURI: https://dex.minikube.local:5556/callback
# org: kubernetes
oauth2:
alwaysShowLoginScreen: false
skipApprovalScreen: true
# expiry:
# signingKeys: "6h"
# idTokens: "24h"
# staticClients:
# - id: example-app
# redirectURIs:
# - 'http://192.168.42.219:31850/oauth2/callback'
# name: 'Example App'
# secret: ZXhhbXBsZS1hcHAtc2VjcmV0
#
enablePasswordDB: true
# staticPasswords:
# - email: "[email protected]"
# # bcrypt hash of the string "password"
# hash: "$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W"
# username: "admin"
# userID: "08a8684b-db88-4b73-90a9-3cd1661f5466"
# frontend:
# logoURL: https://example.com/yourlogo.png
|
hlf-ord | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"hlf-ord.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"hlf-ord.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"hlf-ord.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{- /*\nCredit: @technosophos\nhttps://github.com/technosophos/common-chart/\nlabels.standard prints the standard Helm labels.\nThe standard labels are frequently used in metadata.\n*/ -}}\n{{- define \"labels.standard\" -}}\napp: {{ include \"hlf-ord.name\" . }}\nheritage: {{ .Release.Service | quote }}\nrelease: {{ .Release.Name | quote }}\nchart: {{ include \"hlf-ord.chart\" . }}\n{{- end -}}\n",
"# configmap--ord.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"hlf-ord.fullname\" . }}--ord\n labels:\n{{ include \"labels.standard\" . | indent 4 }}\ndata:\n ## Location where fabric-ca-client configuration is saved\n FABRIC_CA_CLIENT_HOME: /var/hyperledger/fabric-ca-client\n ## Orderer defaults\n ORDERER_CFG_PATH: /var/hyperledger/config\n ORDERER_GENERAL_LEDGERTYPE: file\n ORDERER_FILELEDGER_LOCATION: /var/hyperledger/ledger\n ORDERER_GENERAL_BATCHTIMEOUT: 1s\n ORDERER_GENERAL_BATCHSIZE_MAXMESSAGECOUNT: \"10\"\n ORDERER_GENERAL_MAXWINDOWSIZE: \"1000\"\n ORDERER_GENERAL_ORDERERTYPE: {{ .Values.ord.type | quote }}\n ORDERER_GENERAL_LISTENADDRESS: 0.0.0.0\n ORDERER_GENERAL_LISTENPORT: \"7050\"\n ORDERER_GENERAL_LOGLEVEL: debug\n ORDERER_GENERAL_LOCALMSPDIR: /var/hyperledger/msp\n ORDERER_GENERAL_LOCALMSPID: {{ .Values.ord.mspID | quote }}\n ORDERER_GENERAL_GENESISMETHOD: file\n ORDERER_GENERAL_GENESISFILE: /hl_config/genesis/genesis.block\n ORDERER_GENERAL_GENESISPROFILE: initial\n ORDERER_GENERAL_TLS_ENABLED: {{ .Values.ord.tls.server.enabled | quote }}\n ORDERER_GENERAL_TLS_CERTIFICATE: \"/var/hyperledger/tls/server/pair/tls.crt\"\n ORDERER_GENERAL_TLS_PRIVATEKEY: \"/var/hyperledger/tls/server/pair/tls.key\"\n ORDERER_GENERAL_TLS_ROOTCAS: \"/var/hyperledger/tls/server/cert/cacert.pem\"\n ORDERER_GENERAL_TLS_CLIENTAUTHREQUIRED: {{ .Values.ord.tls.client.enabled | quote }}\n # This is fixed prior to starting the orderer\n ORDERER_GENERAL_TLS_CLIENTROOTCAS: \"/var/hyperledger/tls/client/cert/*\"\n GODEBUG: \"netdns=go\"\n ADMIN_MSP_PATH: /var/hyperledger/admin_msp\n ##############\n # Operations #\n ##############\n {{- if eq .Values.ord.metrics.provider \"prometheus\" }}\n ORDERER_OPERATIONS_LISTENADDRESS: 0.0.0.0:9443\n {{- end }}\n {{- if eq .Values.ord.metrics.provider \"statsd\" }}\n ORDERER_OPERATIONS_LISTENADDRESS: 127.0.0.1:8125\n {{- end }}\n ###########\n # Metrics #\n ###########\n ORDERER_METRICS_PROVIDER: {{ .Values.ord.metrics.provider | quote }}\n {{- if eq .Values.ord.metrics.provider \"statsd\" }}\n ORDERER_METRICS_STATSD_NETWORK: {{ .Values.ord.metrics.statsd.network | quote }}\n ORDERER_METRICS_STATSD_ADDRESS: {{ .Values.ord.metrics.statsd.address | quote }}\n ORDERER_METRICS_STATSD_WRITEINTERVAL: {{ .Values.ord.metrics.statsd.WriteInterval | quote }}\n ORDERER_METRICS_STATSD_PREFIX: {{ .Values.ord.metrics.statsd.prefix | quote }}\n {{- end }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"hlf-ord.fullname\" . }}\n labels:\n{{ include \"labels.standard\" . | indent 4 }}\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: {{ include \"hlf-ord.name\" . }}\n release: {{ .Release.Name }}\n # Ensure we allow our pod to be unavailable, so we can upgrade\n strategy:\n rollingUpdate:\n maxUnavailable: 1\n template:\n metadata:\n labels:\n{{ include \"labels.standard\" . | indent 8 }}\n spec:\n volumes:\n - name: data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.existingClaim | default (include \"hlf-ord.fullname\" .) }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- if .Values.secrets.ord.cert }}\n - name: id-cert\n secret:\n secretName: {{ .Values.secrets.ord.cert }}\n {{- end }}\n {{- if .Values.secrets.ord.key }}\n - name: id-key\n secret:\n secretName: {{ .Values.secrets.ord.key }}\n {{- end }}\n {{- if .Values.secrets.ord.caCert }}\n - name: cacert\n secret:\n secretName: {{ .Values.secrets.ord.caCert }}\n {{- end }}\n {{- if .Values.secrets.ord.intCaCert }}\n - name: intcacert\n secret:\n secretName: {{ .Values.secrets.ord.intCaCert }}\n {{- end }}\n {{- if .Values.secrets.ord.tls }}\n - name: tls\n secret:\n secretName: {{ .Values.secrets.ord.tls }}\n {{- end }}\n {{- if .Values.secrets.ord.tlsRootCert }}\n - name: tls-rootcert\n secret:\n secretName: {{ .Values.secrets.ord.tlsRootCert }}\n {{- end }}\n {{- if .Values.secrets.ord.tlsClientRootCert }}\n - name: tls-clientrootcert\n secret:\n secretName: {{ .Values.secrets.ord.tlsClientRootCert }}\n {{- end }}\n {{- if .Values.secrets.genesis }}\n - name: genesis\n secret:\n secretName: {{ .Values.secrets.genesis }}\n {{- end }}\n {{- if .Values.secrets.adminCert }}\n - name: admin-cert\n secret:\n secretName: {{ .Values.secrets.adminCert }}\n {{- end }}\n {{- if .Values.secrets.caServerTls }}\n - name: ca-server-tls\n secret:\n secretName: {{ .Values.secrets.caServerTls }}\n {{- end }}\n containers:\n - name: orderer\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: ord-port\n containerPort: 7050\n protocol: TCP\n {{- if eq .Values.ord.metrics.provider \"prometheus\" }}\n - name: metrics\n containerPort: 9443\n protocol: TCP\n {{- end }}\n livenessProbe:\n exec:\n command:\n - ls\n - /var/hyperledger\n initialDelaySeconds: 5\n periodSeconds: 5\n # TODO: Improve readiness probe (ideally `ps aux | awk '$11==\"orderer\"'`)\n readinessProbe:\n exec:\n command:\n - ls\n - /var/hyperledger\n initialDelaySeconds: 15\n command:\n - bash\n - -c\n - |\n\n while [ ! -d /var/hyperledger/admin_msp/admincerts ] || [ -z \"$(ls -A /var/hyperledger/admin_msp/admincerts)\" ];\n do\n echo \"\\033[0;31m /var/hyperledger/admin_msp/admincerts must contain Ord admin certificates files \\033[0m\"\n sleep 60\n done\n\n while [ ! -d /hl_config/genesis ] || [ -z \"$(ls -A /hl_config/genesis)\" ];\n do\n echo \"\\033[0;31m /hl_config/genesis must contain Genesis transaction \\033[0m\"\n sleep 60\n done\n\n while [ ! 
-d ${ORDERER_GENERAL_LOCALMSPDIR}/signcerts ];\n do\n echo \"\\033[0;31m ${ORDERER_GENERAL_LOCALMSPDIR}/signcerts directory must exist \\033[0m\"\n sleep 60\n done\n\n echo \"\\033[0;32m Create ${ORDERER_FILELEDGER_LOCATION} directory to avoid restarts \\033[0m\"\n mkdir -p ${ORDERER_FILELEDGER_LOCATION}\n mkdir -p ${ORDERER_FILELEDGER_LOCATION}/index\n\n if [ $ORDERER_GENERAL_TLS_CLIENTAUTHREQUIRED ]\n then\n export ORDERER_GENERAL_TLS_CLIENTROOTCAS=$(echo $(ls $ORDERER_GENERAL_TLS_CLIENTROOTCAS) | echo \"[$(sed 's/ /,/g')]\")\n fi\n\n echo \">\\033[0;35m orderer \\033[0m\"\n orderer\n envFrom:\n {{- if .Values.secrets.ord.cred }}\n - secretRef:\n # Environmental variables CA_USERNAME and CA_PASSWORD\n name: {{ .Values.secrets.ord.cred }}\n {{- end }}\n - configMapRef:\n name: {{ include \"hlf-ord.fullname\" . }}--ord\n volumeMounts:\n - mountPath: /var/hyperledger\n name: data\n {{- if .Values.secrets.ord.cert }}\n - mountPath: /var/hyperledger/msp/signcerts\n name: id-cert\n {{- end }}\n {{- if .Values.secrets.ord.key }}\n - mountPath: /var/hyperledger/msp/keystore\n name: id-key\n {{- end }}\n {{- if .Values.secrets.ord.caCert }}\n - mountPath: /var/hyperledger/msp/cacerts\n name: cacert\n - mountPath: /var/hyperledger/admin_msp/cacerts\n name: cacert\n {{- end }}\n {{- if .Values.secrets.ord.intCaCert }}\n - mountPath: /var/hyperledger/msp/intermediatecerts\n name: intcacert\n - mountPath: /var/hyperledger/admin_msp/intermediatecerts\n name: intcacert\n {{- end }}\n {{- if .Values.secrets.ord.tls }}\n - mountPath: /var/hyperledger/tls/server/pair\n name: tls\n {{- end }}\n {{- if .Values.secrets.ord.tlsRootCert }}\n - mountPath: /var/hyperledger/tls/server/cert\n name: tls-rootcert\n {{- end }}\n {{- if .Values.secrets.ord.tlsClientRootCert }}\n - mountPath: /var/hyperledger/tls/client/cert\n name: tls-clientrootcert\n {{- end }}\n {{- if .Values.secrets.genesis }}\n - mountPath: /hl_config/genesis\n name: genesis\n {{- end }}\n {{- if .Values.secrets.adminCert }}\n - mountPath: /var/hyperledger/admin_msp/admincerts\n name: admin-cert\n - mountPath: /var/hyperledger/admin_msp/signcerts\n name: admin-cert\n - mountPath: /var/hyperledger/msp/admincerts\n name: admin-cert\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"hlf-ord.fullname\" . -}}\n{{- $ingressPath := .Values.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n{{ include \"labels.standard\" . | indent 4 }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: grpc\n {{- end }}\n{{- end }}\n",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ include \"hlf-ord.fullname\" . }}\n labels:\n{{ include \"labels.standard\" . | indent 4 }}\n{{- if .Values.persistence.annotations }}\n annotations:\n{{ toYaml .Values.persistence.annotations | indent 4 }}\n{{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{- if .Values.persistence.storageClass }}\n {{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n {{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"hlf-ord.fullname\" . }}\n labels:\n{{ include \"labels.standard\" . | indent 4 }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: 7050\n protocol: TCP\n name: grpc\n {{- if eq .Values.ord.metrics.provider \"prometheus\" }}\n - port: {{ .Values.service.portMetrics }}\n targetPort: 9443\n protocol: TCP\n name: metrics\n {{- end }}\n selector:\n app: {{ include \"hlf-ord.name\" . }}\n release: {{ .Release.Name }}\n"
] | ## Default values for hlf-ord.
## This is a YAML-formatted file.
## Declare variables to be passed into your templates.
image:
repository: hyperledger/fabric-orderer
tag: 1.4.3
pullPolicy: IfNotPresent
service:
# ClusterIP or LoadBalancer
type: ClusterIP
port: 7050
portMetrics: 9443
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# nginx.ingress.kubernetes.io/ssl-redirect: "true"
# nginx.ingress.kubernetes.io/backend-protocol: "GRPC"
# certmanager.k8s.io/cluster-issuer: "letsencrypt-staging"
path: /
hosts:
- hlf-ord.local
tls: []
# - secretName: hlf-ord-tls
# hosts:
# - hlf-ord.local
persistence:
enabled: true
annotations: {}
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: ""
accessMode: ReadWriteOnce
size: 1Gi
# existingClaim: ""
##################################
## Orderer configuration options #
##################################
ord:
## Type of Orderer, `solo` or `kafka`
type: solo
## MSP ID of the Orderer
mspID: OrdererMSP
# TLS
tls:
server:
enabled: "false"
client:
enabled: "false"
metrics:
provider: "disabled"
statsd:
network: "udp"
address: "127.0.0.1:8125"
WriteInterval: "30s"
prefix: ""
secrets:
## These secrets should contain the Orderer crypto materials and credentials
ord: {}
## Credentials, saved under keys 'CA_USERNAME' and 'CA_PASSWORD'
# cred: hlf--ord1-cred
## Certificate, saved under key 'cert.pem'
# cert: hlf--ord1-idcert
## Key, saved under 'key.pem'
# key: hlf--ord1-idkey
## CA Cert, saved under 'cacert.pem'
# caCert: hlf--ord1-cacert
## Intermediate CA Cert (optional), saved under 'intermediatecacert.pem'
# intCaCert: hlf--ord1-caintcert
## TLS secret, saved under keys 'tls.crt' and 'tls.key' (to conform with K8S nomenclature)
# tls: hlf--ord1-tls
## TLS root CA certificate saved under key 'cert.pem'
# tlsRootCert: hlf--ord-tlsrootcert
## TLS client root CA certificates saved under any names (as there may be multiple)
# tlsClientRootCert: hlf--peer-tlsrootcert
## This should contain "genesis" block derived from a configtx.yaml
## configtxgen -profile OrdererGenesis -outputBlock genesis.block
# genesis: hlf--genesis
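## e.g. (illustrative): kubectl create secret generic hlf--genesis --from-file=genesis.block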
## This should contain the Certificate of the Orderer Organisation admin
## This is necessary to successfully run the orderer
# adminCert: hlf--ord-admincert
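## e.g. (illustrative): kubectl create secret generic hlf--ord-admincert --from-file=cert.pem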
resources: {}
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases the chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
## Suggested antiAffinity, as each Orderer should be on a separate Node for resilience
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - topologyKey: "kubernetes.io/hostname"
# labelSelector:
# matchLabels:
# app: hlf-ord
|
namerd | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"namerd.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"namerd.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# config.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n labels:\n app: {{ template \"namerd.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\n name: {{ template \"namerd.fullname\" . }}-config\ndata:\n config.yaml: |-\n admin:\n ip: 0.0.0.0\n port: {{ .Values.service.adminPort }}\n storage:\n kind: io.l5d.k8s\n experimental: true\n namers:\n - kind: io.l5d.k8s\n host: 127.0.0.1\n port: 8001\n interfaces:\n - kind: io.l5d.thriftNameInterpreter\n ip: 0.0.0.0\n port: {{ .Values.service.syncPort }}\n - kind: io.l5d.httpController\n ip: 0.0.0.0\n port: {{ .Values.service.apiPort }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"namerd.fullname\" . }}\n labels:\n app: {{ template \"namerd.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"namerd.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"namerd.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n spec:\n volumes:\n - name: {{ template \"namerd.fullname\" . }}-config\n configMap:\n name: \"{{ template \"namerd.fullname\" . }}-config\"\n containers:\n - name: {{ template \"namerd.fullname\" . }}\n image: \"{{ .Values.namerd.image.repository }}\"\n imagePullPolicy: {{ default \"\" .Values.namerd.image.pullPolicy | quote }}\n args:\n - /io.buoyant/namerd/config/config.yaml\n ports:\n - name: sync\n containerPort: {{ .Values.service.syncPort }}\n - name: api\n containerPort: {{ .Values.service.apiPort }}\n volumeMounts:\n - name: \"{{ template \"namerd.fullname\" . }}-config\"\n mountPath: \"/io.buoyant/namerd/config\"\n readOnly: true\n resources:\n{{ toYaml .Values.namerd.resources | indent 12 }}\n - name: kubectl\n image: \"{{ .Values.kubectl.image.repository }}\"\n imagePullPolicy: {{ default \"\" .Values.kubectl.image.pullPolicy | quote }}\n args:\n - \"proxy\"\n - \"-p\"\n - \"8001\"\n resources:\n{{ toYaml .Values.kubectl.resources | indent 12 }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"namerd.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n type: {{ .Values.service.type }}\n ports:\n - name: sync\n port: {{ .Values.service.syncPort }}\n - name: api\n port: {{ .Values.service.apiPort }}\n - name: admin\n port: {{ .Values.service.adminPort }}\n selector:\n app: {{ template \"namerd.fullname\" . }}\n",
"# tpr.yaml\nmetadata:\n name: d-tab.l5d.io\napiVersion: extensions/v1beta1\nkind: ThirdPartyResource\ndescription: stores dtabs used by Buoyant's `namerd` service\nversions:\n - name: v1alpha1 # Do not change this value as it hardcoded in Namerd and doesn't work with other value.\n"
] | # Default values for namerd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 3
namerd:
image:
repository: buoyantio/namerd:0.9.1
pullPolicy: IfNotPresent
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 0
memory: 512Mi
kubectl:
image:
repository: buoyantio/kubectl:v1.4.0
pullPolicy: IfNotPresent
resources:
# limits:
# cpu: 10m
# memory: 32Mi
requests:
cpu: 0
memory: 32Mi
service:
type: ClusterIP
syncPort: 4100
apiPort: 4180
adminPort: 9991
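# How these ports map to the interfaces defined in config.yaml:
#   syncPort  -> io.l5d.thriftNameInterpreter
#   apiPort   -> io.l5d.httpController
#   adminPort -> the admin interface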
|
couchdb | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"couchdb.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"couchdb.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- printf \"%s-%s\" .Values.fullnameOverride .Chart.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nIn the event that we create both a headless service and a traditional one,\nensure that the latter gets a unique name.\n*/}}\n{{- define \"couchdb.svcname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- printf \"%s-svc-%s\" .Values.fullnameOverride .Chart.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-svc-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a random string if the supplied key does not exist\n*/}}\n{{- define \"couchdb.defaultsecret\" -}}\n{{- if . -}}\n{{- . | b64enc | quote -}}\n{{- else -}}\n{{- randAlphaNum 20 | b64enc | quote -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nLabels used to define Pods in the CouchDB statefulset\n*/}}\n{{- define \"couchdb.ss.selector\" -}}\napp: {{ template \"couchdb.name\" . }}\nrelease: {{ .Release.Name }}\n{{- end -}}\n\n{{/*\nGenerates a comma delimited list of nodes in the cluster\n*/}}\n{{- define \"couchdb.seedlist\" -}}\n{{- $nodeCount := min 5 .Values.clusterSize | int }}\n {{- range $index0 := until $nodeCount -}}\n {{- $index1 := $index0 | add1 -}}\n {{ $.Values.erlangFlags.name }}@{{ template \"couchdb.fullname\" $ }}-{{ $index0 }}.{{ template \"couchdb.fullname\" $ }}.{{ $.Release.Namespace }}.svc.{{ $.Values.dns.clusterDomainSuffix }}{{ if ne $index1 $nodeCount }},{{ end }}\n {{- end -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"couchdb.fullname\" . }}\n labels:\n app: {{ template \"couchdb.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n heritage: {{ .Release.Service | quote }}\n release: {{ .Release.Name | quote }}\ndata:\n inifile: |\n {{ range $section, $settings := .Values.couchdbConfig -}}\n {{ printf \"[%s]\" $section }}\n {{ range $key, $value := $settings -}}\n {{ printf \"%s = %s\" $key ($value | toString) }}\n {{ end }}\n {{ end }}\n\n seedlistinifile: |\n [cluster]\n seedlist = {{ template \"couchdb.seedlist\" . }}\n",
"# headless.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"couchdb.fullname\" . }}\n labels:\n app: {{ template \"couchdb.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n clusterIP: None\n publishNotReadyAddresses: true\n ports:\n - name: couchdb\n port: 5984\n selector:\n{{ include \"couchdb.ss.selector\" . | indent 4 }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $serviceName := include \"couchdb.fullname\" . -}}\n{{- $servicePort := .Values.service.externalPort -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"couchdb.fullname\" . }}\n labels:\n app: {{ template \"couchdb.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n annotations:\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range $host := .Values.ingress.hosts }}\n - host: {{ $host }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n {{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n",
"# secrets.yaml\n{{- if .Values.createAdminSecret -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"couchdb.fullname\" . }}\n labels:\n app: {{ template \"couchdb.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n adminUsername: {{ template \"couchdb.defaultsecret\" .Values.adminUsername }}\n adminPassword: {{ template \"couchdb.defaultsecret\" .Values.adminPassword }}\n cookieAuthSecret: {{ template \"couchdb.defaultsecret\" .Values.cookieAuthSecret }}\n{{- end -}}\n",
"# service.yaml\n{{- if .Values.service.enabled -}}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"couchdb.svcname\" . }}\n labels:\n app: {{ template \"couchdb.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\nspec:\n ports:\n - port: {{ .Values.service.externalPort }}\n protocol: TCP\n targetPort: 5984\n type: {{ .Values.service.type }}\n selector:\n{{ include \"couchdb.ss.selector\" . | indent 4 }}\n{{- end -}}\n",
"# statefulset.yaml\napiVersion: apps/v1beta2\nkind: StatefulSet\nmetadata:\n name: {{ template \"couchdb.fullname\" . }}\n labels:\n app: {{ template \"couchdb.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.clusterSize }}\n serviceName: {{ template \"couchdb.fullname\" . }}\n podManagementPolicy: {{ .Values.podManagementPolicy }}\n selector:\n matchLabels:\n{{ include \"couchdb.ss.selector\" . | indent 6 }}\n template:\n metadata:\n labels:\n{{ include \"couchdb.ss.selector\" . | indent 8 }}\n spec:\n {{- if .Values.schedulerName }}\n schedulerName: \"{{ .Values.schedulerName }}\"\n {{- end }}\n initContainers:\n - name: init-copy\n image: \"{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}\"\n imagePullPolicy: {{ .Values.initImage.pullPolicy }}\n command: ['sh','-c','cp /tmp/chart.ini /default.d; cp /tmp/seedlist.ini /default.d; ls -lrt /default.d;']\n volumeMounts:\n - name: config\n mountPath: /tmp/\n - name: config-storage\n mountPath: /default.d\n containers:\n - name: couchdb\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: couchdb\n containerPort: 5984\n - name: epmd\n containerPort: 4369\n - containerPort: 9100\n env:\n{{- if not .Values.allowAdminParty }}\n - name: COUCHDB_USER\n valueFrom:\n secretKeyRef:\n name: {{ template \"couchdb.fullname\" . }}\n key: adminUsername\n - name: COUCHDB_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"couchdb.fullname\" . }}\n key: adminPassword\n - name: COUCHDB_SECRET\n valueFrom:\n secretKeyRef:\n name: {{ template \"couchdb.fullname\" . }}\n key: cookieAuthSecret\n{{- end }}\n - name: ERL_FLAGS\n value: \"{{ range $k, $v := .Values.erlangFlags }} -{{ $k }} {{ $v }} {{ end }}\"\n livenessProbe:\n{{- if .Values.couchdbConfig.chttpd.require_valid_user }}\n exec:\n command:\n - sh\n - -c\n - curl -G --silent --fail -u ${COUCHDB_USER}:${COUCHDB_PASSWORD} http://localhost:5984/\n{{- else }}\n httpGet:\n path: /\n port: 5984\n{{- end }}\n readinessProbe:\n{{- if .Values.couchdbConfig.chttpd.require_valid_user }}\n exec:\n command:\n - sh\n - -c\n - curl -G --silent --fail -u ${COUCHDB_USER}:${COUCHDB_PASSWORD} http://localhost:5984/_up\n{{- else }}\n httpGet:\n path: /_up\n port: 5984\n{{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumeMounts:\n - name: config-storage\n mountPath: /opt/couchdb/etc/default.d\n - name: database-storage\n mountPath: /opt/couchdb/data\n{{- if .Values.enableSearch }}\n - name: clouseau\n image: \"{{ .Values.searchImage.repository }}:{{ .Values.searchImage.tag }}\"\n imagePullPolicy: {{ .Values.searchImage.pullPolicy }}\n volumeMounts:\n - name: database-storage\n mountPath: /opt/couchdb-search/data\n{{- end }}\n{{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n{{- end }}\n{{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n{{- end }}\n volumes:\n - name: config-storage\n emptyDir: {}\n - name: config\n configMap:\n name: {{ template \"couchdb.fullname\" . }}\n items:\n - key: inifile\n path: chart.ini\n - key: seedlistinifile\n path: seedlist.ini\n\n{{- if not .Values.persistentVolume.enabled }}\n - name: database-storage\n emptyDir: {}\n{{- else }}\n volumeClaimTemplates:\n - metadata:\n name: database-storage\n labels:\n app: {{ template \"couchdb.name\" . 
}}\n release: {{ .Release.Name }}\n spec:\n accessModes:\n {{- range .Values.persistentVolume.accessModes }}\n - {{ . | quote }}\n {{- end }}\n resources:\n requests:\n storage: {{ .Values.persistentVolume.size | quote }}\n {{- if .Values.persistentVolume.storageClass }}\n {{- if (eq \"-\" .Values.persistentVolume.storageClass) }}\n storageClassName: \"\"\n {{- else }}\n storageClassName: \"{{ .Values.persistentVolume.storageClass }}\"\n {{- end }}\n {{- end }}\n{{- end }}\n"
] | ## clusterSize is the initial size of the CouchDB cluster.
clusterSize: 3
## If allowAdminParty is enabled the cluster will start up without any database
## administrator account; i.e., all users will be granted administrative
## access. Otherwise, the system will look for a Secret called
## <ReleaseName>-couchdb containing `adminUsername`, `adminPassword` and
## `cookieAuthSecret` keys. See the `createAdminSecret` flag.
## ref: https://kubernetes.io/docs/concepts/configuration/secret/
allowAdminParty: false
## If createAdminSecret is enabled a Secret called <ReleaseName>-couchdb will
## be created containing auto-generated credentials. Users who prefer to set
## these values themselves have a couple of options:
##
## 1) The `adminUsername`, `adminPassword`, and `cookieAuthSecret` can be
## defined directly in the chart's values. Note that all of a chart's values
## are currently stored in plaintext in a ConfigMap in the tiller namespace.
##
## 2) This flag can be disabled and a Secret with the required keys can be
## created ahead of time.
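## A minimal sketch of option 2, assuming the release is named "my-release"
## (the Secret must then be called my-release-couchdb):
##
##   kubectl create secret generic my-release-couchdb \
##     --from-literal=adminUsername=admin \
##     --from-literal=adminPassword=changeme \
##     --from-literal=cookieAuthSecret=changeme-too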
createAdminSecret: true
adminUsername: admin
# adminPassword: this_is_not_secure
# cookieAuthSecret: neither_is_this
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## The storage volume used by each Pod in the StatefulSet. If a
## persistentVolume is not enabled, the Pods will use `emptyDir` ephemeral
## local storage. Setting the storageClass attribute to "-" disables dynamic
## provisioning of Persistent Volumes; leaving it unset will invoke the default
## provisioner.
persistentVolume:
enabled: false
accessModes:
- ReadWriteOnce
size: 10Gi
# storageClass: "-"
## The CouchDB image
image:
repository: couchdb
tag: 2.3.1
pullPolicy: IfNotPresent
## Experimental integration with Lucene-powered fulltext search
searchImage:
repository: kocolosk/couchdb-search
tag: 0.1.2
pullPolicy: IfNotPresent
## Flip this flag to include the Search container in each Pod
enableSearch: false
initImage:
repository: busybox
tag: latest
pullPolicy: Always
## CouchDB is happy to spin up cluster nodes in parallel, but if you encounter
## problems you can try setting podManagementPolicy to the StatefulSet default
## `OrderedReady`
podManagementPolicy: Parallel
## To better tolerate Node failures, we can use podAntiAffinity to prevent the
## Kubernetes scheduler from assigning more than one Pod of the CouchDB
## StatefulSet to the same Node.
affinity:
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: "app"
# operator: In
# values:
# - couchdb
# topologyKey: "kubernetes.io/hostname"
## A StatefulSet requires a headless Service to establish the stable network
## identities of the Pods, and that Service is created automatically by this
## chart without any additional configuration. The Service block below refers
## to a second Service that governs how clients connect to the CouchDB cluster.
service:
# annotations:
enabled: true
type: ClusterIP
externalPort: 5984
## An Ingress resource can provide name-based virtual hosting and TLS
## termination among other things for CouchDB deployments which are accessed
## from outside the Kubernetes cluster.
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
hosts:
- chart-example.local
annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
tls:
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
## Optional resource requests and limits for the CouchDB container
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources: {}
# requests:
# cpu: 100m
# memory: 128Mi
# limits:
# cpu: 56
# memory: 256Gi
## erlangFlags is a map of flags passed to the Erlang VM via the ERL_FLAGS
## environment variable. The `name` and `setcookie` flags are the minimum
## required to establish connectivity between cluster nodes.
## ref: http://erlang.org/doc/man/erl.html#init_flags
erlangFlags:
name: couchdb
setcookie: monster
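## With the defaults above, the chart renders the container environment roughly
## as: ERL_FLAGS=" -name couchdb -setcookie monster "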
## couchdbConfig will override default CouchDB configuration settings.
## The contents of this map are reformatted into a .ini file laid down
## by a ConfigMap object.
## ref: http://docs.couchdb.org/en/latest/config/index.html
couchdbConfig:
# cluster:
# q: 8 # Create 8 shards for each database
chttpd:
bind_address: any
    # chttpd.require_valid_user disables all anonymous requests to port 5984
    # when set to true.
require_valid_user: false
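## The map above is laid down as an ini file by the chart's ConfigMap; with the
## defaults it renders as:
##   [chttpd]
##   bind_address = any
##   require_valid_user = false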
# Kubernetes local cluster domain.
# This is used to generate FQDNs for peers when joining the CouchDB cluster.
dns:
clusterDomainSuffix: cluster.local
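## With the defaults above, and assuming a release named "my-release" deployed
## to the "default" namespace, the generated seedlist entry for node 0 is:
##   couchdb@my-release-couchdb-0.my-release-couchdb.default.svc.cluster.local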
|
docker-registry | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"docker-registry.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"docker-registry.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"docker-registry.fullname\" . }}-config\n labels:\n app: {{ template \"docker-registry.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ndata:\n config.yml: |-\n{{ toYaml .Values.configData | indent 4 }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"docker-registry.fullname\" . }}\n labels:\n app: {{ template \"docker-registry.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"docker-registry.name\" . }}\n release: {{ .Release.Name }}\n replicas: {{ .Values.replicaCount }}\n{{- if .Values.updateStrategy }}\n strategy:\n{{ toYaml .Values.updateStrategy | indent 4 }}\n{{- end }}\n minReadySeconds: 5\n template:\n metadata:\n labels:\n app: {{ template \"docker-registry.name\" . }}\n release: {{ .Release.Name }}\n {{- if .Values.podLabels }}\n{{ toYaml .Values.podLabels | indent 8 }}\n {{- end }}\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n{{- if $.Values.podAnnotations }}\n{{ toYaml $.Values.podAnnotations | indent 8 }}\n{{- end }}\n spec:\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.imagePullSecrets | indent 8 }}\n {{- end }}\n{{- if .Values.priorityClassName }}\n priorityClassName: \"{{ .Values.priorityClassName }}\"\n{{- end }}\n{{- if .Values.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.securityContext.fsGroup }}\n runAsUser: {{ .Values.securityContext.runAsUser }}\n{{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command:\n - /bin/registry\n - serve\n - /etc/docker/registry/config.yml\n ports:\n - containerPort: 5000\n livenessProbe:\n httpGet:\n{{- if .Values.tlsSecretName }}\n scheme: HTTPS\n{{- end }}\n path: /\n port: 5000\n readinessProbe:\n httpGet:\n{{- if .Values.tlsSecretName }}\n scheme: HTTPS\n{{- end }}\n path: /\n port: 5000\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n env:\n{{- if .Values.secrets.htpasswd }}\n - name: REGISTRY_AUTH\n value: \"htpasswd\"\n - name: REGISTRY_AUTH_HTPASSWD_REALM\n value: \"Registry Realm\"\n - name: REGISTRY_AUTH_HTPASSWD_PATH\n value: \"/auth/htpasswd\"\n{{- end }}\n - name: REGISTRY_HTTP_SECRET\n valueFrom:\n secretKeyRef:\n name: {{ template \"docker-registry.fullname\" . }}-secret\n key: haSharedSecret\n{{- if .Values.tlsSecretName }}\n - name: REGISTRY_HTTP_TLS_CERTIFICATE\n value: /etc/ssl/docker/tls.crt\n - name: REGISTRY_HTTP_TLS_KEY\n value: /etc/ssl/docker/tls.key\n{{- end }}\n{{- if eq .Values.storage \"filesystem\" }}\n - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY\n value: \"/var/lib/registry\"\n{{- else if eq .Values.storage \"azure\" }}\n - name: REGISTRY_STORAGE_AZURE_ACCOUNTNAME\n valueFrom:\n secretKeyRef:\n name: {{ template \"docker-registry.fullname\" . }}-secret\n key: azureAccountName\n - name: REGISTRY_STORAGE_AZURE_ACCOUNTKEY\n valueFrom:\n secretKeyRef:\n name: {{ template \"docker-registry.fullname\" . }}-secret\n key: azureAccountKey\n - name: REGISTRY_STORAGE_AZURE_CONTAINER\n valueFrom:\n secretKeyRef:\n name: {{ template \"docker-registry.fullname\" . }}-secret\n key: azureContainer\n{{- else if eq .Values.storage \"s3\" }}\n {{- if and .Values.secrets.s3.secretKey .Values.secrets.s3.accessKey }}\n - name: REGISTRY_STORAGE_S3_ACCESSKEY\n valueFrom:\n secretKeyRef:\n name: {{ template \"docker-registry.fullname\" . }}-secret\n key: s3AccessKey\n - name: REGISTRY_STORAGE_S3_SECRETKEY\n valueFrom:\n secretKeyRef:\n name: {{ template \"docker-registry.fullname\" . 
}}-secret\n key: s3SecretKey\n {{- end }}\n - name: REGISTRY_STORAGE_S3_REGION\n value: {{ required \".Values.s3.region is required\" .Values.s3.region }}\n {{- if .Values.s3.regionEndpoint }}\n - name: REGISTRY_STORAGE_S3_REGIONENDPOINT\n value: {{ .Values.s3.regionEndpoint }}\n {{- end }}\n - name: REGISTRY_STORAGE_S3_BUCKET\n value: {{ required \".Values.s3.bucket is required\" .Values.s3.bucket }}\n {{- if .Values.s3.encrypt }}\n - name: REGISTRY_STORAGE_S3_ENCRYPT\n value: {{ .Values.s3.encrypt | quote }}\n {{- end }}\n {{- if .Values.s3.secure }}\n - name: REGISTRY_STORAGE_S3_SECURE\n value: {{ .Values.s3.secure | quote }}\n {{- end }}\n{{- else if eq .Values.storage \"swift\" }}\n - name: REGISTRY_STORAGE_SWIFT_AUTHURL\n value: {{ required \".Values.swift.authurl is required\" .Values.swift.authurl }}\n - name: REGISTRY_STORAGE_SWIFT_USERNAME\n valueFrom:\n secretKeyRef:\n name: {{ template \"docker-registry.fullname\" . }}-secret\n key: swiftUsername\n - name: REGISTRY_STORAGE_SWIFT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"docker-registry.fullname\" . }}-secret\n key: swiftPassword\n - name: REGISTRY_STORAGE_SWIFT_CONTAINER\n value: {{ required \".Values.swift.container is required\" .Values.swift.container }}\n{{- end }}\n{{- if .Values.persistence.deleteEnabled }}\n - name: REGISTRY_STORAGE_DELETE_ENABLED\n value: \"true\"\n{{- end }}\n volumeMounts:\n{{- if .Values.secrets.htpasswd }}\n - name: auth\n mountPath: /auth\n readOnly: true\n{{- end }}\n{{- if eq .Values.storage \"filesystem\" }}\n - name: data\n mountPath: /var/lib/registry/\n{{- end }}\n - name: \"{{ template \"docker-registry.fullname\" . }}-config\"\n mountPath: \"/etc/docker/registry\"\n{{- if .Values.tlsSecretName }}\n - mountPath: /etc/ssl/docker\n name: tls-cert\n readOnly: true\n{{- end }}\n{{- with .Values.extraVolumeMounts }}\n {{- toYaml . | nindent 12 }}\n{{- end }}\n{{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n{{- end }}\n{{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n{{- end }}\n{{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n{{- end }}\n volumes:\n{{- if .Values.secrets.htpasswd }}\n - name: auth\n secret:\n secretName: {{ template \"docker-registry.fullname\" . }}-secret\n items:\n - key: htpasswd\n path: htpasswd\n{{- end }}\n{{- if eq .Values.storage \"filesystem\" }}\n - name: data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template \"docker-registry.fullname\" . }}{{- end }}\n {{- else }}\n emptyDir: {}\n {{- end -}}\n{{- end }}\n - name: {{ template \"docker-registry.fullname\" . }}-config\n configMap:\n name: {{ template \"docker-registry.fullname\" . }}-config\n{{- if .Values.tlsSecretName }}\n - name: tls-cert\n secret:\n secretName: {{ .Values.tlsSecretName }}\n{{- end }}\n{{- with .Values.extraVolumes }}\n {{- toYaml . | nindent 8 }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $serviceName := include \"docker-registry.fullname\" . -}}\n{{- $servicePort := .Values.service.port -}}\n{{- $path := .Values.ingress.path -}}\napiVersion: {{- if .Capabilities.APIVersions.Has \"networking.k8s.io/v1beta1\" }} networking.k8s.io/v1beta1 {{- else }} extensions/v1beta1 {{- end }}\nkind: Ingress\nmetadata:\n name: {{ template \"docker-registry.fullname\" . }}\n labels:\n app: {{ template \"docker-registry.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.ingress.labels }}\n{{ toYaml .Values.ingress.labels | indent 4 }}\n{{- end }}\n annotations:\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range $host := .Values.ingress.hosts }}\n - host: {{ $host }}\n http:\n paths:\n - path: {{ $path }}\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n {{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n",
"# poddisruptionbudget.yaml\n{{- if .Values.podDisruptionBudget -}}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"docker-registry.fullname\" . }}\n labels:\n app: {{ template \"docker-registry.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"docker-registry.name\" . }}\n release: {{ .Release.Name }}\n{{ toYaml .Values.podDisruptionBudget | indent 2 }}\n{{- end -}}\n",
"# pvc.yaml\n{{- if .Values.persistence.enabled }}\n{{- if not .Values.persistence.existingClaim -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"docker-registry.fullname\" . }}\n labels:\n app: {{ template \"docker-registry.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"docker-registry.fullname\" . }}-secret\n labels:\n app: {{ template \"docker-registry.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ntype: Opaque\ndata:\n {{- if .Values.secrets.htpasswd }}\n htpasswd: {{ .Values.secrets.htpasswd | b64enc }}\n {{- end }}\n {{- if .Values.secrets.haSharedSecret }}\n haSharedSecret: {{ .Values.secrets.haSharedSecret | b64enc | quote }}\n {{- else }}\n haSharedSecret: {{ randAlphaNum 16 | b64enc | quote }}\n {{- end }}\n \n {{- if eq .Values.storage \"azure\" }}\n {{- if and .Values.secrets.azure.accountName .Values.secrets.azure.accountKey .Values.secrets.azure.container }}\n azureAccountName: {{ .Values.secrets.azure.accountName | b64enc | quote }}\n azureAccountKey: {{ .Values.secrets.azure.accountKey | b64enc | quote }}\n azureContainer: {{ .Values.secrets.azure.container | b64enc | quote }}\n {{- end }}\n {{- else if eq .Values.storage \"s3\" }}\n {{- if and .Values.secrets.s3.secretKey .Values.secrets.s3.accessKey }}\n s3AccessKey: {{ .Values.secrets.s3.accessKey | b64enc | quote }}\n s3SecretKey: {{ .Values.secrets.s3.secretKey | b64enc | quote }}\n {{- end }}\n {{- else if eq .Values.storage \"swift\" }}\n {{- if and .Values.secrets.swift.username .Values.secrets.swift.password }}\n swiftUsername: {{ .Values.secrets.swift.username | b64enc | quote }}\n swiftPassword: {{ .Values.secrets.swift.password | b64enc | quote }}\n {{- end }}\n {{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"docker-registry.fullname\" . }}\n labels:\n app: {{ template \"docker-registry.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n{{- if (and (eq .Values.service.type \"ClusterIP\") (not (empty .Values.service.clusterIP))) }}\n clusterIP: {{ .Values.service.clusterIP }}\n{{- end }}\n{{- if (and (eq .Values.service.type \"LoadBalancer\") (not (empty .Values.service.loadBalancerIP))) }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n{{- end }}\n{{- if (and (eq .Values.service.type \"LoadBalancer\") (not (empty .Values.service.loadBalancerSourceRanges))) }}\n loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }}\n{{- end }}\n ports:\n - port: {{ .Values.service.port }}\n protocol: TCP\n name: {{ .Values.service.name }}\n targetPort: 5000\n{{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePort))) }}\n nodePort: {{ .Values.service.nodePort }}\n{{- end }}\n selector:\n app: {{ template \"docker-registry.name\" . }}\n release: {{ .Release.Name }}\n"
] | # Default values for docker-registry.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
updateStrategy:
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 1
# maxUnavailable: 0
podAnnotations: {}
podLabels: {}
image:
repository: registry
tag: 2.7.1
pullPolicy: IfNotPresent
# imagePullSecrets:
# - name: docker
service:
name: registry
type: ClusterIP
# clusterIP:
port: 5000
# nodePort:
# loadBalancerIP:
# loadBalancerSourceRanges:
annotations: {}
# foo.io/bar: "true"
ingress:
enabled: false
path: /
# Used to create an Ingress record.
hosts:
- chart-example.local
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
tls:
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not specifying default resources and leaving this as a conscious
# choice for the user. This also increases the chances that charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
persistence:
accessMode: 'ReadWriteOnce'
enabled: false
size: 10Gi
# storageClass: '-'
# Set the storage backend to use: filesystem, azure, s3 or swift
storage: filesystem
# Set this to name of secret for tls certs
# tlsSecretName: registry.docker.example.com
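# A minimal sketch of creating such a secret (hypothetical file names):
#   kubectl create secret tls registry.docker.example.com --cert=tls.crt --key=tls.key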
secrets:
haSharedSecret: ""
htpasswd: ""
# Secrets for Azure
# azure:
# accountName: ""
# accountKey: ""
# container: ""
# Secrets for S3 access and secret keys
# s3:
# accessKey: ""
# secretKey: ""
# Secrets for Swift username and password
# swift:
# username: ""
# password: ""
# Options for s3 storage type:
# s3:
# region: us-east-1
# regionEndpoint: s3.us-east-1.amazonaws.com
# bucket: my-bucket
# encrypt: false
# secure: true
# Options for swift storage type:
# swift:
# authurl: http://swift.example.com/
# container: my-container
configData:
version: 0.1
log:
fields:
service: registry
storage:
cache:
blobdescriptor: inmemory
http:
addr: :5000
headers:
X-Content-Type-Options: [nosniff]
health:
storagedriver:
enabled: true
interval: 10s
threshold: 3
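## configData above is written verbatim to /etc/docker/registry/config.yml by
## the chart's ConfigMap, and the container starts with
## `/bin/registry serve /etc/docker/registry/config.yml`.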
securityContext:
enabled: true
runAsUser: 1000
fsGroup: 1000
priorityClassName: ""
podDisruptionBudget: {}
# maxUnavailable: 1
# minAvailable: 2
nodeSelector: {}
affinity: {}
tolerations: []
extraVolumeMounts: []
## Additional volumeMounts for the registry container.
# - mountPath: /secret-data
# name: cloudfront-pem-secret
# readOnly: true
extraVolumes: []
## Additional volumes for the pod.
# - name: cloudfront-pem-secret
# secret:
# secretName: cloudfront-credentials
# items:
# - key: cloudfront.pem
# path: cloudfront.pem
# mode: 511
|
distributed-tensorflow | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"distributed-tensorflow.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"distributed-tensorflow.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"distributed-tensorflow.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# config.yaml\n{{- $workerNum := .Values.worker.number -}}\n{{- $workerPort := .Values.worker.port -}}\n{{- $psNum := .Values.ps.number -}}\n{{- $psPort := .Values.ps.port -}}\n{{- $tfService := include \"distributed-tensorflow.fullname\" . }}\n{{- $releaseName := .Release.Name -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"distributed-tensorflow.fullname\" . }}\n labels:\n heritage: {{ .Release.Service | quote }}\n release: {{ .Release.Name | quote }}\n chart: {{ template \"distributed-tensorflow.chart\" . }}\n app: {{ template \"distributed-tensorflow.name\" . }}\ndata:\n ps.hostList: \"\n {{- range $i, $none := until (int $psNum) }}\n {{- if gt $i 0}},{{- end }}{{ $releaseName }}-ps-{{ $i }}.{{ $tfService }}-ps:{{ $psPort }}\n {{- end }}\"\n worker.hostList: \"\n {{- range $i, $none := until (int $workerNum) }}\n {{- if gt $i 0}},{{- end }}{{ $releaseName }}-worker-{{ $i }}.{{ $tfService }}-worker:{{ $workerPort }}\n {{- end }}\"",
"# service-ps.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"distributed-tensorflow.fullname\" . }}-ps\n labels:\n app: {{ template \"distributed-tensorflow.name\" . }}\n chart: {{ template \"distributed-tensorflow.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n clusterIP: None\n ports:\n - port: {{ .Values.ps.port }}\n targetPort: {{ .Values.ps.port }}\n protocol: TCP\n name: ps\n selector:\n app: {{ template \"distributed-tensorflow.name\" . }}\n release: {{ .Release.Name }}\n role: ps\n",
"# service-worker.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"distributed-tensorflow.fullname\" . }}-worker\n labels:\n app: {{ template \"distributed-tensorflow.name\" . }}\n chart: {{ template \"distributed-tensorflow.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n clusterIP: None\n ports:\n - port: {{ .Values.worker.port }}\n targetPort: {{ .Values.worker.port }}\n protocol: TCP\n name: worker\n selector:\n app: {{ template \"distributed-tensorflow.name\" . }}\n release: {{ .Release.Name }}\n role: worker\n",
"# statefulset-ps.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ .Release.Name }}-ps\n labels:\n app: {{ template \"distributed-tensorflow.name\" . }}\n chart: {{ template \"distributed-tensorflow.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n role: ps\nspec:\n selector:\n matchLabels:\n app: {{ template \"distributed-tensorflow.name\" . }}\n release: {{ .Release.Name }}\n role: ps\n serviceName: {{ template \"distributed-tensorflow.fullname\" . }}-ps\n podManagementPolicy: {{ .Values.ps.podManagementPolicy }}\n replicas: {{.Values.ps.number}}\n template:\n metadata:\n labels:\n app: {{ template \"distributed-tensorflow.name\" . }}\n chart: {{ template \"distributed-tensorflow.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n role: ps\n spec:\n{{- if .Values.volumes }}\n volumes:\n{{ toYaml .Values.volumes | indent 6 }}\n{{- end }}\n containers:\n - name: ps\n image: \"{{ .Values.ps.image.repository }}:{{ .Values.ps.image.tag }}\"\n imagePullPolicy: {{ .Values.ps.image.pullPolicy }}\n command:\n - \"python\"\n - \"train_distributed.py\"\n{{- if .Values.hyperparams.learningrate }}\n - --learning_rate\n - \"{{ .Values.hyperparams.learningrate }}\"\n{{- end }}\n{{- if .Values.hyperparams.batchsize }}\n - --batch_size\n - \"{{ .Values.hyperparams.batchsize }}\"\n{{- end }}\n{{- if .Values.hyperparams.trainsteps }}\n - --train_steps\n - \"{{ .Values.hyperparams.trainsteps }}\"\n{{- end }}\n{{- if .Values.hyperparams.datadir }}\n - --data_dir\n - \"{{ .Values.hyperparams.datadir }}\"\n{{- end }}\n{{- if .Values.hyperparams.logdir }}\n - --log_dir\n - \"{{ .Values.hyperparams.logdir }}\"\n{{- end }}\n{{- if .Values.hyperparams.hiddenunits }}\n - --hidden_units\n - \"{{ .Values.hyperparams.hiddenunits }}\"\n{{- end }}\n env:\n - name: WORKER_HOSTS\n valueFrom:\n configMapKeyRef:\n name: {{ template \"distributed-tensorflow.fullname\" . }}\n key: worker.hostList\n - name: PS_HOSTS\n valueFrom:\n configMapKeyRef:\n name: {{ template \"distributed-tensorflow.fullname\" . }}\n key: ps.hostList\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: JOB_NAME\n value: ps\n {{- if .Values.ps.env }}\n {{- range $key, $value := .Values.ps.env }}\n - name: \"{{ $key }}\"\n value: \"{{ $value }}\"\n {{- end }}\n {{- end }}\n{{- if .Values.ps.privileged }}\n securityContext:\n privileged: true\n{{- end }}\n ports:\n - containerPort: {{ .Values.ps.port }}\n{{- if .Values.volumeMounts }}\n volumeMounts:\n{{ toYaml .Values.volumeMounts | indent 8 }}\n{{- end }}\n{{- if .Values.ps.resources }}\n resources:\n{{ toYaml .Values.ps.resources | indent 10 }}\n{{- end }}\n",
"# statefulset-worker.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ .Release.Name }}-worker\n labels:\n app: {{ template \"distributed-tensorflow.name\" . }}\n chart: {{ template \"distributed-tensorflow.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n role: worker\nspec:\n selector:\n matchLabels:\n app: {{ template \"distributed-tensorflow.name\" . }}\n release: {{ .Release.Name }}\n role: worker\n serviceName: {{ template \"distributed-tensorflow.fullname\" . }}-worker\n podManagementPolicy: {{ .Values.worker.podManagementPolicy }}\n replicas: {{.Values.worker.number}}\n template:\n metadata:\n labels:\n app: {{ template \"distributed-tensorflow.name\" . }}\n chart: {{ template \"distributed-tensorflow.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n role: worker\n spec:\n{{- if .Values.volumes }}\n volumes:\n{{ toYaml .Values.volumes | indent 6 }}\n{{- end }}\n containers:\n - name: worker\n image: \"{{ .Values.worker.image.repository }}:{{ .Values.worker.image.tag }}\"\n imagePullPolicy: {{ .Values.worker.image.pullPolicy }}\n command:\n - \"python\"\n - \"train_distributed.py\"\n{{- if gt (int .Values.worker.gpuCount) 0 }}\n - --num_gpus\n - \"{{ .Values.worker.gpuCount }}\"\n{{- end }}\n{{- if .Values.hyperparams.learningrate }}\n - --learning_rate\n - \"{{ .Values.hyperparams.learningrate }}\"\n{{- end }}\n{{- if .Values.hyperparams.batchsize }}\n - --batch_size\n - \"{{ .Values.hyperparams.batchsize }}\"\n{{- end }}\n{{- if .Values.hyperparams.trainsteps }}\n - --train_steps\n - \"{{ .Values.hyperparams.trainsteps }}\"\n{{- end }}\n{{- if .Values.hyperparams.datadir }}\n - --data_dir\n - \"{{ .Values.hyperparams.datadir }}\"\n{{- end }}\n{{- if .Values.hyperparams.logdir }}\n - --log_dir\n - \"{{ .Values.hyperparams.logdir }}\"\n{{- end }}\n{{- if .Values.hyperparams.hiddenunits }}\n - --hidden_units\n - \"{{ .Values.hyperparams.hiddenunits }}\"\n{{- end }}\n env:\n - name: WORKER_HOSTS\n valueFrom:\n configMapKeyRef:\n name: {{ template \"distributed-tensorflow.fullname\" . }}\n key: worker.hostList\n - name: PS_HOSTS\n valueFrom:\n configMapKeyRef:\n name: {{ template \"distributed-tensorflow.fullname\" . }}\n key: ps.hostList\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: JOB_NAME\n value: worker\n {{- if .Values.worker.env }}\n {{- range $key, $value := .Values.worker.env }}\n - name: \"{{ $key }}\"\n value: \"{{ $value }}\"\n {{- end }}\n {{- end }}\n ports:\n - containerPort: {{ .Values.worker.port }}\n{{- if .Values.volumeMounts }}\n volumeMounts:\n{{ toYaml .Values.volumeMounts | indent 10 }}\n{{- end }}\n{{- if gt (int .Values.worker.gpuCount) 0 }}\n resources:\n limits:\n nvidia.com/gpu: {{- .Values.worker.gpuCount }}\n request:\n nvidia.com/gpu: {{- .Values.worker.gpuCount }}\n{{- end }}\n"
] | # Default values for distributed-tensorflow.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
worker:
number: 2
podManagementPolicy: Parallel
image:
repository: dysproz/distributed-tf
tag: 1.7.0
pullPolicy: IfNotPresent
port: 9000
ps:
number: 2
podManagementPolicy: Parallel
image:
repository: dysproz/distributed-tf
tag: 1.7.0
pullPolicy: IfNotPresent
port: 8000
# Hyperparameters passed as flags to train_distributed.py
hyperparams:
batchsize: 20
learningrate: 0.001
trainsteps: 0
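# With the defaults above, and assuming a release named "my-release", the
# ConfigMap renders worker.hostList roughly as:
#   my-release-worker-0.my-release-distributed-tensorflow-worker:9000,my-release-worker-1.my-release-distributed-tensorflow-worker:9000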
|
grafana | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"grafana.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"grafana.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"grafana.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account\n*/}}\n{{- define \"grafana.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"grafana.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{- define \"grafana.serviceAccountNameTest\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (print (include \"grafana.fullname\" .) \"-test\") .Values.serviceAccount.nameTest }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.nameTest }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nAllow the release namespace to be overridden for multi-namespace deployments in combined charts\n*/}}\n{{- define \"grafana.namespace\" -}}\n {{- if .Values.namespaceOverride -}}\n {{- .Values.namespaceOverride -}}\n {{- else -}}\n {{- .Release.Namespace -}}\n {{- end -}}\n{{- end -}}\n\n{{/*\nCommon labels\n*/}}\n{{- define \"grafana.labels\" -}}\nhelm.sh/chart: {{ include \"grafana.chart\" . }}\n{{ include \"grafana.selectorLabels\" . }}\n{{- if or .Chart.AppVersion .Values.image.tag }}\napp.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }}\n{{- end }}\napp.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end -}}\n\n{{/*\nSelector labels\n*/}}\n{{- define \"grafana.selectorLabels\" -}}\napp.kubernetes.io/name: {{ include \"grafana.name\" . }}\napp.kubernetes.io/instance: {{ .Release.Name }}\n{{- end -}}\n",
"# _pod.tpl\n\n{{- define \"grafana.pod\" -}}\n{{- if .Values.schedulerName }}\nschedulerName: \"{{ .Values.schedulerName }}\"\n{{- end }}\nserviceAccountName: {{ template \"grafana.serviceAccountName\" . }}\n{{- if .Values.securityContext }}\nsecurityContext:\n{{ toYaml .Values.securityContext | indent 2 }}\n{{- end }}\n{{- if .Values.hostAliases }}\nhostAliases:\n{{ toYaml .Values.hostAliases | indent 2 }}\n{{- end }}\n{{- if .Values.priorityClassName }}\npriorityClassName: {{ .Values.priorityClassName }}\n{{- end }}\n{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.sidecar.notifiers.enabled .Values.extraInitContainers) }}\ninitContainers:\n{{- end }}\n{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }}\n - name: init-chown-data\n {{- if .Values.initChownData.image.sha }}\n image: \"{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}\"\n {{- else }}\n image: \"{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}\"\n {{- end }}\n imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }}\n securityContext:\n runAsUser: 0\n command: [\"chown\", \"-R\", \"{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }}\", \"/var/lib/grafana\"]\n resources:\n{{ toYaml .Values.initChownData.resources | indent 6 }}\n volumeMounts:\n - name: storage\n mountPath: \"/var/lib/grafana\"\n{{- if .Values.persistence.subPath }}\n subPath: {{ .Values.persistence.subPath }}\n{{- end }}\n{{- end }}\n{{- if .Values.dashboards }}\n - name: download-dashboards\n {{- if .Values.downloadDashboardsImage.sha }}\n image: \"{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}\"\n {{- else }}\n image: \"{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}\"\n {{- end }}\n imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }}\n command: [\"/bin/sh\"]\n args: [ \"-c\", \"mkdir -p /var/lib/grafana/dashboards/default && /bin/sh /etc/grafana/download_dashboards.sh\" ]\n resources:\n{{ toYaml .Values.downloadDashboards.resources | indent 6 }}\n env:\n{{- range $key, $value := .Values.downloadDashboards.env }}\n - name: \"{{ $key }}\"\n value: \"{{ $value }}\"\n{{- end }}\n volumeMounts:\n - name: config\n mountPath: \"/etc/grafana/download_dashboards.sh\"\n subPath: download_dashboards.sh\n - name: storage\n mountPath: \"/var/lib/grafana\"\n{{- if .Values.persistence.subPath }}\n subPath: {{ .Values.persistence.subPath }}\n{{- end }}\n {{- range .Values.extraSecretMounts }}\n - name: {{ .name }}\n mountPath: {{ .mountPath }}\n readOnly: {{ .readOnly }}\n {{- end }}\n{{- end }}\n{{- if .Values.sidecar.datasources.enabled }}\n - name: {{ template \"grafana.name\" . 
}}-sc-datasources\n {{- if .Values.sidecar.image.sha }}\n image: \"{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}\"\n {{- else }}\n image: \"{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}\"\n {{- end }}\n imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}\n env:\n - name: METHOD\n value: LIST\n - name: LABEL\n value: \"{{ .Values.sidecar.datasources.label }}\"\n - name: FOLDER\n value: \"/etc/grafana/provisioning/datasources\"\n - name: RESOURCE\n value: \"both\"\n {{- if .Values.sidecar.enableUniqueFilenames }}\n - name: UNIQUE_FILENAMES\n value: \"{{ .Values.sidecar.enableUniqueFilenames }}\"\n {{- end }}\n {{- if .Values.sidecar.datasources.searchNamespace }}\n - name: NAMESPACE\n value: \"{{ .Values.sidecar.datasources.searchNamespace }}\"\n {{- end }}\n {{- if .Values.sidecar.skipTlsVerify }}\n - name: SKIP_TLS_VERIFY\n value: \"{{ .Values.sidecar.skipTlsVerify }}\"\n {{- end }}\n resources:\n{{ toYaml .Values.sidecar.resources | indent 6 }}\n volumeMounts:\n - name: sc-datasources-volume\n mountPath: \"/etc/grafana/provisioning/datasources\"\n{{- end}}\n{{- if .Values.sidecar.notifiers.enabled }}\n - name: {{ template \"grafana.name\" . }}-sc-notifiers\n {{- if .Values.sidecar.image.sha }}\n image: \"{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}\"\n {{- else }}\n image: \"{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}\"\n {{- end }}\n imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}\n env:\n - name: METHOD\n value: LIST\n - name: LABEL\n value: \"{{ .Values.sidecar.notifiers.label }}\"\n - name: FOLDER\n value: \"/etc/grafana/provisioning/notifiers\"\n - name: RESOURCE\n value: \"both\"\n {{- if .Values.sidecar.enableUniqueFilenames }}\n - name: UNIQUE_FILENAMES\n value: \"{{ .Values.sidecar.enableUniqueFilenames }}\"\n {{- end }}\n {{- if .Values.sidecar.notifiers.searchNamespace }}\n - name: NAMESPACE\n value: \"{{ .Values.sidecar.notifiers.searchNamespace }}\"\n {{- end }}\n {{- if .Values.sidecar.skipTlsVerify }}\n - name: SKIP_TLS_VERIFY\n value: \"{{ .Values.sidecar.skipTlsVerify }}\"\n {{- end }}\n resources:\n{{ toYaml .Values.sidecar.resources | indent 6 }}\n volumeMounts:\n - name: sc-notifiers-volume\n mountPath: \"/etc/grafana/provisioning/notifiers\"\n{{- end}}\n{{- if .Values.extraInitContainers }}\n{{ toYaml .Values.extraInitContainers | indent 2 }}\n{{- end }}\n{{- if .Values.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end}}\n{{- end }}\ncontainers:\n{{- if .Values.sidecar.dashboards.enabled }}\n - name: {{ template \"grafana.name\" . }}-sc-dashboard\n {{- if .Values.sidecar.image.sha }}\n image: \"{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}\"\n {{- else }}\n image: \"{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}\"\n {{- end }}\n imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }}\n env:\n - name: METHOD\n value: {{ .Values.sidecar.dashboards.watchMethod }}\n - name: LABEL\n value: \"{{ .Values.sidecar.dashboards.label }}\"\n - name: FOLDER\n value: \"{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }}\"\n - name: RESOURCE\n value: \"both\"\n {{- if .Values.sidecar.enableUniqueFilenames }}\n - name: UNIQUE_FILENAMES\n value: \"{{ .Values.sidecar.enableUniqueFilenames }}\"\n {{- end }}\n {{- if .Values.sidecar.dashboards.searchNamespace }}\n - name: NAMESPACE\n value: \"{{ .Values.sidecar.dashboards.searchNamespace }}\"\n {{- end }}\n {{- if .Values.sidecar.skipTlsVerify }}\n - name: SKIP_TLS_VERIFY\n value: \"{{ .Values.sidecar.skipTlsVerify }}\"\n {{- end }}\n resources:\n{{ toYaml .Values.sidecar.resources | indent 6 }}\n volumeMounts:\n - name: sc-dashboard-volume\n mountPath: {{ .Values.sidecar.dashboards.folder | quote }}\n{{- end}}\n - name: {{ .Chart.Name }}\n {{- if .Values.image.sha }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}\"\n {{- else }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n {{- end }}\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n {{- if .Values.command }}\n command:\n {{- range .Values.command }}\n - {{ . }}\n {{- end }}\n {{- end}}\n volumeMounts:\n - name: config\n mountPath: \"/etc/grafana/grafana.ini\"\n subPath: grafana.ini\n {{- if .Values.ldap.enabled }}\n - name: ldap\n mountPath: \"/etc/grafana/ldap.toml\"\n subPath: ldap.toml\n {{- end }}\n {{- range .Values.extraConfigmapMounts }}\n - name: {{ .name }}\n mountPath: {{ .mountPath }}\n subPath: {{ .subPath | default \"\" }}\n readOnly: {{ .readOnly }}\n {{- end }}\n - name: storage\n mountPath: \"/var/lib/grafana\"\n{{- if .Values.persistence.subPath }}\n subPath: {{ .Values.persistence.subPath }}\n{{- end }}\n{{- if .Values.dashboards }}\n{{- range $provider, $dashboards := .Values.dashboards }}\n{{- range $key, $value := $dashboards }}\n{{- if (or (hasKey $value \"json\") (hasKey $value \"file\")) }}\n - name: dashboards-{{ $provider }}\n mountPath: \"/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json\"\n subPath: \"{{ $key }}.json\"\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end -}}\n{{- if .Values.dashboardsConfigMaps }}\n{{- range (keys .Values.dashboardsConfigMaps | sortAlpha) }}\n - name: dashboards-{{ . }}\n mountPath: \"/var/lib/grafana/dashboards/{{ . 
}}\"\n{{- end }}\n{{- end }}\n{{- if .Values.datasources }}\n - name: config\n mountPath: \"/etc/grafana/provisioning/datasources/datasources.yaml\"\n subPath: datasources.yaml\n{{- end }}\n{{- if .Values.notifiers }}\n - name: config\n mountPath: \"/etc/grafana/provisioning/notifiers/notifiers.yaml\"\n subPath: notifiers.yaml\n{{- end }}\n{{- if .Values.dashboardProviders }}\n - name: config\n mountPath: \"/etc/grafana/provisioning/dashboards/dashboardproviders.yaml\"\n subPath: dashboardproviders.yaml\n{{- end }}\n{{- if .Values.sidecar.dashboards.enabled }}\n - name: sc-dashboard-volume\n mountPath: {{ .Values.sidecar.dashboards.folder | quote }}\n{{ if .Values.sidecar.dashboards.SCProvider }}\n - name: sc-dashboard-provider\n mountPath: \"/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml\"\n subPath: provider.yaml\n{{- end}}\n{{- end}}\n{{- if .Values.sidecar.datasources.enabled }}\n - name: sc-datasources-volume\n mountPath: \"/etc/grafana/provisioning/datasources\"\n{{- end}}\n{{- if .Values.sidecar.notifiers.enabled }}\n - name: sc-notifiers-volume\n mountPath: \"/etc/grafana/provisioning/notifiers\"\n{{- end}}\n {{- range .Values.extraSecretMounts }}\n - name: {{ .name }}\n mountPath: {{ .mountPath }}\n readOnly: {{ .readOnly }}\n subPath: {{ .subPath | default \"\" }}\n {{- end }}\n {{- range .Values.extraVolumeMounts }}\n - name: {{ .name }}\n mountPath: {{ .mountPath }}\n subPath: {{ .subPath | default \"\" }}\n readOnly: {{ .readOnly }}\n {{- end }}\n {{- range .Values.extraEmptyDirMounts }}\n - name: {{ .name }}\n mountPath: {{ .mountPath }}\n {{- end }}\n ports:\n - name: {{ .Values.service.portName }}\n containerPort: {{ .Values.service.port }}\n protocol: TCP\n - name: {{ .Values.podPortName }}\n containerPort: 3000\n protocol: TCP\n env:\n {{- if not .Values.env.GF_SECURITY_ADMIN_USER }}\n - name: GF_SECURITY_ADMIN_USER\n valueFrom:\n secretKeyRef:\n name: {{ .Values.admin.existingSecret | default (include \"grafana.fullname\" .) }}\n key: {{ .Values.admin.userKey | default \"admin-user\" }}\n {{- end }}\n {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) }}\n - name: GF_SECURITY_ADMIN_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ .Values.admin.existingSecret | default (include \"grafana.fullname\" .) }}\n key: {{ .Values.admin.passwordKey | default \"admin-password\" }}\n {{- end }}\n {{- if .Values.plugins }}\n - name: GF_INSTALL_PLUGINS\n valueFrom:\n configMapKeyRef:\n name: {{ template \"grafana.fullname\" . }}\n key: plugins\n {{- end }}\n {{- if .Values.smtp.existingSecret }}\n - name: GF_SMTP_USER\n valueFrom:\n secretKeyRef:\n name: {{ .Values.smtp.existingSecret }}\n key: {{ .Values.smtp.userKey | default \"user\" }}\n - name: GF_SMTP_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ .Values.smtp.existingSecret }}\n key: {{ .Values.smtp.passwordKey | default \"password\" }}\n {{- end }}\n {{- range $key, $value := .Values.envValueFrom }}\n - name: {{ $key | quote }}\n valueFrom:\n{{ toYaml $value | indent 10 }}\n {{- end }}\n{{- range $key, $value := .Values.env }}\n - name: \"{{ $key }}\"\n value: \"{{ $value }}\"\n{{- end }}\n {{- if .Values.envFromSecret }}\n envFrom:\n - secretRef:\n name: {{ tpl .Values.envFromSecret . }}\n {{- end }}\n {{- if .Values.envRenderSecret }}\n envFrom:\n - secretRef:\n name: {{ template \"grafana.fullname\" . 
}}-env\n {{- end }}\n livenessProbe:\n{{ toYaml .Values.livenessProbe | indent 6 }}\n readinessProbe:\n{{ toYaml .Values.readinessProbe | indent 6 }}\n resources:\n{{ toYaml .Values.resources | indent 6 }}\n{{- with .Values.extraContainers }}\n{{ tpl . $ | indent 2 }}\n{{- end }}\n{{- with .Values.nodeSelector }}\nnodeSelector:\n{{ toYaml . | indent 2 }}\n{{- end }}\n{{- with .Values.affinity }}\naffinity:\n{{ toYaml . | indent 2 }}\n{{- end }}\n{{- with .Values.tolerations }}\ntolerations:\n{{ toYaml . | indent 2 }}\n{{- end }}\nvolumes:\n - name: config\n configMap:\n name: {{ template \"grafana.fullname\" . }}\n{{- range .Values.extraConfigmapMounts }}\n - name: {{ .name }}\n configMap:\n name: {{ .configMap }}\n{{- end }}\n {{- if .Values.dashboards }}\n {{- range (keys .Values.dashboards | sortAlpha) }}\n - name: dashboards-{{ . }}\n configMap:\n name: {{ template \"grafana.fullname\" $ }}-dashboards-{{ . }}\n {{- end }}\n {{- end }}\n {{- if .Values.dashboardsConfigMaps }}\n {{ $root := . }}\n {{- range $provider, $name := .Values.dashboardsConfigMaps }}\n - name: dashboards-{{ $provider }}\n configMap:\n name: {{ tpl $name $root }}\n {{- end }}\n {{- end }}\n {{- if .Values.ldap.enabled }}\n - name: ldap\n secret:\n {{- if .Values.ldap.existingSecret }}\n secretName: {{ .Values.ldap.existingSecret }}\n {{- else }}\n secretName: {{ template \"grafana.fullname\" . }}\n {{- end }}\n items:\n - key: ldap-toml\n path: ldap.toml\n {{- end }}\n{{- if and .Values.persistence.enabled (eq .Values.persistence.type \"pvc\") }}\n - name: storage\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.existingClaim | default (include \"grafana.fullname\" .) }}\n{{- else if and .Values.persistence.enabled (eq .Values.persistence.type \"statefulset\") }}\n# nothing\n{{- else }}\n - name: storage\n emptyDir: {}\n{{- end -}}\n{{- if .Values.sidecar.dashboards.enabled }}\n - name: sc-dashboard-volume\n emptyDir: {}\n{{- if .Values.sidecar.dashboards.SCProvider }}\n - name: sc-dashboard-provider\n configMap:\n name: {{ template \"grafana.fullname\" . }}-config-dashboards\n{{- end }}\n{{- end }}\n{{- if .Values.sidecar.datasources.enabled }}\n - name: sc-datasources-volume\n emptyDir: {}\n{{- end -}}\n{{- if .Values.sidecar.notifiers.enabled }}\n - name: sc-notifiers-volume\n emptyDir: {}\n{{- end -}}\n{{- range .Values.extraSecretMounts }}\n - name: {{ .name }}\n secret:\n secretName: {{ .secretName }}\n defaultMode: {{ .defaultMode }}\n{{- end }}\n{{- range .Values.extraVolumeMounts }}\n - name: {{ .name }}\n persistentVolumeClaim:\n claimName: {{ .existingClaim }}\n{{- end }}\n{{- range .Values.extraEmptyDirMounts }}\n - name: {{ .name }}\n emptyDir: {}\n{{- end -}}\n{{- if .Values.extraContainerVolumes }}\n{{ toYaml .Values.extraContainerVolumes | indent 2 }}\n{{- end }}\n{{- end }}\n",
"# clusterrole.yaml\n{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }}\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n{{- with .Values.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\n name: {{ template \"grafana.fullname\" . }}-clusterrole\n{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraClusterRoleRules) }}\nrules:\n{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }}\n- apiGroups: [\"\"] # \"\" indicates the core API group\n resources: [\"configmaps\", \"secrets\"]\n verbs: [\"get\", \"watch\", \"list\"]\n{{- end}}\n{{- with .Values.rbac.extraClusterRoleRules }}\n{{ toYaml . | indent 0 }}\n{{- end}}\n{{- else }}\nrules: []\n{{- end}}\n{{- end}}\n",
"# clusterrolebinding.yaml\n{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }}\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"grafana.fullname\" . }}-clusterrolebinding\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n{{- with .Values.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"grafana.serviceAccountName\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\nroleRef:\n kind: ClusterRole\n name: {{ template \"grafana.fullname\" . }}-clusterrole\n apiGroup: rbac.authorization.k8s.io\n{{- end -}}\n",
"# configmap-dashboard-provider.yaml\n{{- if .Values.sidecar.dashboards.enabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n{{- with .Values.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\n name: {{ template \"grafana.fullname\" . }}-config-dashboards\n namespace: {{ template \"grafana.namespace\" . }}\ndata:\n provider.yaml: |-\n apiVersion: 1\n providers:\n - name: '{{ .Values.sidecar.dashboards.provider.name }}'\n orgId: {{ .Values.sidecar.dashboards.provider.orgid }}\n folder: '{{ .Values.sidecar.dashboards.provider.folder }}'\n type: {{ .Values.sidecar.dashboards.provider.type }}\n disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }}\n allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }}\n options:\n path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}\n{{- end}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"grafana.fullname\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\ndata:\n{{- if .Values.plugins }}\n plugins: {{ join \",\" .Values.plugins }}\n{{- end }}\n grafana.ini: |\n{{- range $key, $value := index .Values \"grafana.ini\" }}\n [{{ $key }}]\n {{- range $elem, $elemVal := $value }}\n {{ $elem }} = {{ $elemVal }}\n {{- end }}\n{{- end }}\n\n{{- if .Values.datasources }}\n{{ $root := . }}\n {{- range $key, $value := .Values.datasources }}\n {{ $key }}: |\n{{ tpl (toYaml $value | indent 4) $root }}\n {{- end -}}\n{{- end -}}\n\n{{- if .Values.notifiers }}\n {{- range $key, $value := .Values.notifiers }}\n {{ $key }}: |\n{{ toYaml $value | indent 4 }}\n {{- end -}}\n{{- end -}}\n\n{{- if .Values.dashboardProviders }}\n {{- range $key, $value := .Values.dashboardProviders }}\n {{ $key }}: |\n{{ toYaml $value | indent 4 }}\n {{- end -}}\n{{- end -}}\n\n{{- if .Values.dashboards }}\n download_dashboards.sh: |\n #!/usr/bin/env sh\n set -euf\n {{- if .Values.dashboardProviders }}\n {{- range $key, $value := .Values.dashboardProviders }}\n {{- range $value.providers }}\n mkdir -p {{ .options.path }}\n {{- end }}\n {{- end }}\n {{- end }}\n\n {{- range $provider, $dashboards := .Values.dashboards }}\n {{- range $key, $value := $dashboards }}\n {{- if (or (hasKey $value \"gnetId\") (hasKey $value \"url\")) }}\n curl -skf \\\n --connect-timeout 60 \\\n --max-time 60 \\\n {{- if not $value.b64content }}\n -H \"Accept: application/json\" \\\n -H \"Content-Type: application/json;charset=UTF-8\" \\\n {{ end }}\n {{- if $value.url -}}\"{{ $value.url }}\"{{- else -}}\"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download\"{{- end -}}{{ if $value.datasource }} | sed '/-- .* --/! s/\"datasource\":.*,/\"datasource\": \"{{ $value.datasource }}\",/g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \\\n > \"/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json\"\n {{- end -}}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# dashboards-json-configmap.yaml\n{{- if .Values.dashboards }}\n{{ $files := .Files }}\n{{- range $provider, $dashboards := .Values.dashboards }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"grafana.fullname\" $ }}-dashboards-{{ $provider }}\n namespace: {{ template \"grafana.namespace\" $ }}\n labels:\n {{- include \"grafana.labels\" $ | nindent 4 }}\n dashboard-provider: {{ $provider }}\n{{- if $dashboards }}\ndata:\n{{- $dashboardFound := false }}\n{{- range $key, $value := $dashboards }}\n{{- if (or (hasKey $value \"json\") (hasKey $value \"file\")) }}\n{{- $dashboardFound = true }}\n{{ print $key | indent 2 }}.json:\n{{- if hasKey $value \"json\" }}\n |-\n{{ $value.json | indent 6 }}\n{{- end }}\n{{- if hasKey $value \"file\" }}\n{{ toYaml ( $files.Get $value.file ) | indent 4}}\n{{- end }}\n{{- end }}\n{{- end }}\n{{- if not $dashboardFound }}\n {}\n{{- end }}\n{{- end }}\n---\n{{- end }}\n\n{{- end }}\n",
"# deployment.yaml\n{{ if (or (not .Values.persistence.enabled) (eq .Values.persistence.type \"pvc\")) }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"grafana.fullname\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n{{- if .Values.labels }}\n{{ toYaml .Values.labels | indent 4 }}\n{{- end }}\n{{- with .Values.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n replicas: {{ .Values.replicas }}\n selector:\n matchLabels:\n {{- include \"grafana.selectorLabels\" . | nindent 6 }}\n{{- with .Values.deploymentStrategy }}\n strategy:\n{{ toYaml . | trim | indent 4 }}\n{{- end }}\n template:\n metadata:\n labels:\n {{- include \"grafana.selectorLabels\" . | nindent 8 }}\n{{- with .Values.podLabels }}\n{{ toYaml . | indent 8 }}\n{{- end }}\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n checksum/dashboards-json-config: {{ include (print $.Template.BasePath \"/dashboards-json-configmap.yaml\") . | sha256sum }}\n checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath \"/configmap-dashboard-provider.yaml\") . | sha256sum }}\n{{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }}\n checksum/secret: {{ include (print $.Template.BasePath \"/secret.yaml\") . | sha256sum }}\n{{- end }}\n{{- if .Values.envRenderSecret }}\n checksum/secret-env: {{ include (print $.Template.BasePath \"/secret-env.yaml\") . | sha256sum }}\n{{- end }}\n{{- with .Values.podAnnotations }}\n{{ toYaml . | indent 8 }}\n{{- end }}\n spec:\n {{- include \"grafana.pod\" . | nindent 6 }}\n{{- end }}\n",
"# headless-service.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type \"statefulset\")}}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"grafana.fullname\" . }}-headless\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n{{- with .Values.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n clusterIP: None\n selector:\n {{- include \"grafana.selectorLabels\" . | nindent 4 }}\n type: ClusterIP\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"grafana.fullname\" . -}}\n{{- $servicePort := .Values.service.port -}}\n{{- $ingressPath := .Values.ingress.path -}}\n{{- $extraPaths := .Values.ingress.extraPaths -}}\n{{- if .Capabilities.APIVersions.Has \"networking.k8s.io/v1beta1\" }}\napiVersion: networking.k8s.io/v1beta1\n{{ else }}\napiVersion: extensions/v1beta1\n{{ end -}}\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n{{- if .Values.ingress.labels }}\n{{ toYaml .Values.ingress.labels | indent 4 }}\n{{- end }}\n {{- if .Values.ingress.annotations }}\n annotations:\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ tpl $value $ | quote }}\n {{- end }}\n {{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n{{- end }}\n rules:\n {{- if .Values.ingress.hosts }}\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n{{ if $extraPaths }}\n{{ toYaml $extraPaths | indent 10 }}\n{{- end }}\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: {{ $servicePort }}\n {{- end }}\n {{- else }}\n - http:\n paths:\n - backend:\n serviceName: {{ $fullName }}\n servicePort: {{ $servicePort }}\n {{- if $ingressPath }}\n path: {{ $ingressPath }}\n {{- end }}\n {{- end -}}\n{{- end }}\n",
"# poddisruptionbudget.yaml\n{{- if .Values.podDisruptionBudget }}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"grafana.name\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n{{- if .Values.labels }}\n{{ toYaml .Values.labels | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.podDisruptionBudget.minAvailable }}\n minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}\n{{- end }}\n{{- if .Values.podDisruptionBudget.maxUnavailable }}\n maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}\n{{- end }}\n selector:\n matchLabels:\n {{- include \"grafana.selectorLabels\" . | nindent 6 }}\n{{- end }}\n",
"# podsecuritypolicy.yaml\n{{- if .Values.rbac.pspEnabled }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"grafana.fullname\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'\n seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'\n {{- if .Values.rbac.pspUseAppArmor }}\n apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'\n apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'\n {{- end }}\nspec:\n privileged: false\n allowPrivilegeEscalation: false\n requiredDropCapabilities:\n # Default set from Docker, without DAC_OVERRIDE or CHOWN\n - FOWNER\n - FSETID\n - KILL\n - SETGID\n - SETUID\n - SETPCAP\n - NET_BIND_SERVICE\n - NET_RAW\n - SYS_CHROOT\n - MKNOD\n - AUDIT_WRITE\n - SETFCAP\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n rule: 'RunAsAny'\n seLinux:\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'RunAsAny'\n fsGroup:\n rule: 'RunAsAny'\n readOnlyRootFilesystem: false\n{{- end }}\n",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type \"pvc\")}}\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: {{ template \"grafana.fullname\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n {{- with .Values.persistence.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n {{- end }}\n {{- with .Values.persistence.finalizers }}\n finalizers:\n{{ toYaml . | indent 4 }}\n {{- end }}\nspec:\n accessModes:\n {{- range .Values.persistence.accessModes }}\n - {{ . | quote }}\n {{- end }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{- if .Values.persistence.storageClassName }}\n storageClassName: {{ .Values.persistence.storageClassName }}\n {{- end -}}\n{{- end -}}\n",
"# role.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: Role\nmetadata:\n name: {{ template \"grafana.fullname\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n{{- with .Values.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\n{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraRoleRules))) }}\nrules:\n{{- if .Values.rbac.pspEnabled }}\n- apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames: [{{ template \"grafana.fullname\" . }}]\n{{- end }}\n{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }}\n- apiGroups: [\"\"] # \"\" indicates the core API group\n resources: [\"configmaps\", \"secrets\"]\n verbs: [\"get\", \"watch\", \"list\"]\n{{- end }}\n{{- with .Values.rbac.extraRoleRules }}\n{{ toYaml . | indent 0 }}\n{{- end}}\n{{- else }}\nrules: []\n{{- end }}\n{{- end }}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n name: {{ template \"grafana.fullname\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n{{- with .Values.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"grafana.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"grafana.serviceAccountName\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n{{- end -}}\n",
"# secret-env.yaml\n{{- if .Values.envRenderSecret }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"grafana.fullname\" . }}-env\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\ntype: Opaque\ndata:\n{{- range $key, $val := .Values.envRenderSecret }}\n {{ $key }}: {{ $val | b64enc | quote }}\n{{- end -}}\n{{- end }}\n",
"# secret.yaml\n{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"grafana.fullname\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\ntype: Opaque\ndata:\n {{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }}\n admin-user: {{ .Values.adminUser | b64enc | quote }}\n {{- if .Values.adminPassword }}\n admin-password: {{ .Values.adminPassword | b64enc | quote }}\n {{- else }}\n admin-password: {{ randAlphaNum 40 | b64enc | quote }}\n {{- end }}\n {{- end }}\n {{- if not .Values.ldap.existingSecret }}\n ldap-toml: {{ .Values.ldap.config | b64enc | quote }}\n {{- end }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"grafana.fullname\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n{{- if .Values.service.labels }}\n{{ toYaml .Values.service.labels | indent 4 }}\n{{- end }}\n{{- with .Values.service.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if (or (eq .Values.service.type \"ClusterIP\") (empty .Values.service.type)) }}\n type: ClusterIP\n {{- if .Values.service.clusterIP }}\n clusterIP: {{ .Values.service.clusterIP }}\n {{end}}\n{{- else if eq .Values.service.type \"LoadBalancer\" }}\n type: {{ .Values.service.type }}\n {{- if .Values.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n {{- end }}\n {{- if .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}\n {{- end -}}\n{{- else }}\n type: {{ .Values.service.type }}\n{{- end }}\n{{- if .Values.service.externalIPs }}\n externalIPs:\n{{ toYaml .Values.service.externalIPs | indent 4 }}\n{{- end }}\n ports:\n - name: {{ .Values.service.portName }}\n port: {{ .Values.service.port }}\n protocol: TCP\n targetPort: {{ .Values.service.targetPort }}\n{{ if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePort))) }}\n nodePort: {{.Values.service.nodePort}}\n{{ end }}\n {{- if .Values.extraExposePorts }}\n {{- tpl (toYaml .Values.extraExposePorts) . | indent 4 }}\n {{- end }}\n selector:\n {{- include \"grafana.selectorLabels\" . | nindent 4 }}\n\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n{{- with .Values.serviceAccount.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\n name: {{ template \"grafana.serviceAccountName\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n{{- end }}\n",
"# statefulset.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type \"statefulset\")}}\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ template \"grafana.fullname\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n{{- with .Values.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n replicas: {{ .Values.replicas }}\n selector:\n matchLabels:\n {{- include \"grafana.selectorLabels\" . | nindent 6 }}\n serviceName: {{ template \"grafana.fullname\" . }}-headless\n template:\n metadata:\n labels:\n {{- include \"grafana.selectorLabels\" . | nindent 8 }}\n{{- with .Values.podLabels }}\n{{ toYaml . | indent 8 }}\n{{- end }}\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n checksum/dashboards-json-config: {{ include (print $.Template.BasePath \"/dashboards-json-configmap.yaml\") . | sha256sum }}\n checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath \"/configmap-dashboard-provider.yaml\") . | sha256sum }}\n{{- if not .Values.admin.existingSecret }}\n checksum/secret: {{ include (print $.Template.BasePath \"/secret.yaml\") . | sha256sum }}\n{{- end }}\n{{- with .Values.podAnnotations }}\n{{ toYaml . | indent 8 }}\n{{- end }}\n spec:\n {{- include \"grafana.pod\" . | nindent 6 }}\n volumeClaimTemplates:\n - metadata:\n name: storage\n spec:\n accessModes: {{ .Values.persistence.accessModes }}\n storageClassName: {{ .Values.persistence.storageClassName }}\n resources:\n requests:\n storage: {{ .Values.persistence.size }} \n{{- end }}\n",
"# test-configmap.yaml\n{{- if .Values.testFramework.enabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"grafana.fullname\" . }}-test\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\ndata:\n run.sh: |-\n @test \"Test Health\" {\n url=\"http://{{ template \"grafana.fullname\" . }}/api/health\"\n\n code=$(wget --server-response --spider --timeout 10 --tries 1 ${url} 2>&1 | awk '/^ HTTP/{print $2}')\n [ \"$code\" == \"200\" ]\n }\n{{- end }}\n",
"# test-podsecuritypolicy.yaml\n{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"grafana.fullname\" . }}-test\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\nspec:\n allowPrivilegeEscalation: true\n privileged: false\n hostNetwork: false\n hostIPC: false\n hostPID: false\n fsGroup:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n runAsUser:\n rule: RunAsAny\n volumes:\n - configMap\n - downwardAPI\n - emptyDir\n - projected\n - secret\n{{- end }}\n",
"# test-role.yaml\n{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: {{ template \"grafana.fullname\" . }}-test\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\nrules:\n- apiGroups: ['policy']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames: [{{ template \"grafana.fullname\" . }}-test]\n{{- end }}\n",
"# test-rolebinding.yaml\n{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: {{ template \"grafana.fullname\" . }}-test\n namespace: {{ template \"grafana.namespace\" . }}\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"grafana.fullname\" . }}-test\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"grafana.serviceAccountNameTest\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n{{- end }}\n",
"# test-serviceaccount.yaml\n{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n name: {{ template \"grafana.serviceAccountNameTest\" . }}\n namespace: {{ template \"grafana.namespace\" . }}\n{{- end }}\n",
"# test.yaml\n{{- if .Values.testFramework.enabled }}\napiVersion: v1\nkind: Pod\nmetadata:\n name: {{ template \"grafana.fullname\" . }}-test\n labels:\n {{- include \"grafana.labels\" . | nindent 4 }}\n annotations:\n \"helm.sh/hook\": test-success\n namespace: {{ template \"grafana.namespace\" . }}\nspec:\n serviceAccountName: {{ template \"grafana.serviceAccountNameTest\" . }}\n {{- if .Values.testFramework.securityContext }}\n securityContext: {{ toYaml .Values.testFramework.securityContext | nindent 4 }}\n {{- end }}\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n {{- end}}\n {{- end }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 4 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 4 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 4 }}\n {{- end }}\n containers:\n - name: {{ .Release.Name }}-test\n image: \"{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}\"\n imagePullPolicy: \"{{ .Values.testFramework.imagePullPolicy}}\"\n command: [\"/opt/bats/bin/bats\", \"-t\", \"/tests/run.sh\"]\n volumeMounts:\n - mountPath: /tests\n name: tests\n readOnly: true\n volumes:\n - name: tests\n configMap:\n name: {{ template \"grafana.fullname\" . }}-test\n restartPolicy: Never\n{{- end }}\n"
] | rbac:
create: true
pspEnabled: true
pspUseAppArmor: true
namespaced: false
extraRoleRules: []
# - apiGroups: []
# resources: []
# verbs: []
extraClusterRoleRules: []
# - apiGroups: []
# resources: []
# verbs: []
serviceAccount:
create: true
name:
nameTest:
# annotations:
replicas: 1
## See `kubectl explain poddisruptionbudget.spec` for more details
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget: {}
# minAvailable: 1
# maxUnavailable: 1
## See `kubectl explain deployment.spec.strategy` for more details
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
deploymentStrategy:
type: RollingUpdate
readinessProbe:
httpGet:
path: /api/health
port: 3000
livenessProbe:
httpGet:
path: /api/health
port: 3000
initialDelaySeconds: 60
timeoutSeconds: 30
failureThreshold: 10
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName: "default-scheduler"
image:
repository: grafana/grafana
tag: 7.1.1
sha: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
  # - myRegistryKeySecretName
testFramework:
enabled: true
image: "bats/bats"
tag: "v1.1.0"
imagePullPolicy: IfNotPresent
securityContext: {}
securityContext:
runAsUser: 472
runAsGroup: 472
fsGroup: 472
extraConfigmapMounts: []
# - name: certs-configmap
# mountPath: /etc/grafana/ssl/
# subPath: certificates.crt # (optional)
# configMap: certs-configmap
# readOnly: true
extraEmptyDirMounts: []
# - name: provisioning-notifiers
# mountPath: /etc/grafana/provisioning/notifiers
## Assign a PriorityClassName to pods if set
# priorityClassName:
downloadDashboardsImage:
repository: curlimages/curl
tag: 7.70.0
sha: ""
pullPolicy: IfNotPresent
downloadDashboards:
env: {}
resources: {}
## Pod Annotations
# podAnnotations: {}
## Pod Labels
# podLabels: {}
podPortName: grafana
## Deployment annotations
# annotations: {}
## Expose the Grafana service to be accessed from outside the cluster (LoadBalancer service)
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
type: ClusterIP
port: 80
targetPort: 3000
  # targetPort: 4181 # to be used with a proxy extraContainer
annotations: {}
labels: {}
portName: service
extraExposePorts: []
# - name: keycloak
# port: 8080
# targetPort: 8080
# type: ClusterIP
# overrides pod.spec.hostAliases in the grafana deployment's pods
hostAliases: []
# - ip: "1.2.3.4"
# hostnames:
# - "my.host.com"
ingress:
enabled: false
# Values can be templated
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
path: /
hosts:
- chart-example.local
  ## Extra paths to prepend to every host configuration. This is useful when working with annotation-based services.
extraPaths: []
# - path: /*
# backend:
# serviceName: ssl-redirect
# servicePort: use-annotation
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
extraInitContainers: []
## Enable and specify containers in extraContainers. This is meant to allow adding an authentication proxy to a Grafana pod
extraContainers: |
# - name: proxy
# image: quay.io/gambol99/keycloak-proxy:latest
# args:
# - -provider=github
# - -client-id=
# - -client-secret=
# - -github-org=<ORG_NAME>
# - -email-domain=*
# - -cookie-secret=
# - -http-address=http://0.0.0.0:4181
# - -upstream-url=http://127.0.0.1:3000
# ports:
# - name: proxy-web
# containerPort: 4181
## Volumes that can be used in init containers but will not be mounted to deployment pods
extraContainerVolumes: []
# - name: volume-from-secret
# secret:
# secretName: secret-to-mount
# - name: empty-dir-volume
# emptyDir: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
type: pvc
enabled: false
# storageClassName: default
accessModes:
- ReadWriteOnce
size: 10Gi
# annotations: {}
finalizers:
- kubernetes.io/pvc-protection
# subPath: ""
# existingClaim:
initChownData:
## If false, data ownership will not be reset at startup
  ## This allows the Grafana server to be run with an arbitrary user
##
enabled: true
## initChownData container image
##
image:
repository: busybox
tag: "1.31.1"
sha: ""
pullPolicy: IfNotPresent
## initChownData resource requests and limits
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# Administrator credentials when not using an existing secret (see below)
adminUser: admin
# adminPassword: strongpassword
# Use an existing secret for the admin user.
admin:
existingSecret: ""
userKey: admin-user
passwordKey: admin-password
## Define command to be executed at startup by grafana container
## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/)
## Default is "run.sh" as defined in grafana's Dockerfile
# command:
# - "sh"
# - "/run.sh"
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Extra environment variables that will be passed to deployment pods
env: {}
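## Illustrative example; the value is a placeholder (GF_SERVER_ROOT_URL is a
## standard Grafana configuration environment variable):
# env:
#   GF_SERVER_ROOT_URL: https://grafana.example.com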
## "valueFrom" environment variable references that will be added to deployment pods
## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core
## Renders in container spec as:
## env:
## ...
## - name: <key>
## valueFrom:
## <value rendered as YAML>
envValueFrom: {}
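## Illustrative example (secret name and key are placeholders); each entry is
## rendered as an env var with the given valueFrom source, as described above:
# envValueFrom:
#   MY_API_TOKEN:
#     secretKeyRef:
#       name: my-secret
#       key: api-token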
## The name of a secret in the same kubernetes namespace which contains values to be added to the environment
## This can be useful for auth tokens, etc. Value is templated.
envFromSecret: ""
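## Illustrative example; the secret name is a placeholder and, since the value
## is templated, may use template syntax:
# envFromSecret: "{{ .Release.Name }}-env-secret"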
## Sensitive environment variables that will be rendered as a new secret object
## This can be useful for auth tokens, etc
envRenderSecret: {}
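## Illustrative example (key and value are placeholders); entries are rendered
## into a chart-managed Secret and injected into the pod via envFrom:
# envRenderSecret:
#   GF_DATABASE_PASSWORD: s3cr3t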
## Additional grafana server secret mounts
# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
extraSecretMounts: []
# - name: secret-files
# mountPath: /etc/secrets
# secretName: grafana-secret-files
# readOnly: true
# subPath: ""
## Additional grafana server volume mounts
# Defines additional volume mounts.
extraVolumeMounts: []
# - name: extra-volume
# mountPath: /mnt/volume
# readOnly: true
# existingClaim: volume-claim
## Pass the plugins you want installed as a list.
##
plugins: []
# - digrich-bubblechart-panel
# - grafana-clock-panel
## Configure grafana datasources
## ref: http://docs.grafana.org/administration/provisioning/#datasources
##
datasources: {}
# datasources.yaml:
# apiVersion: 1
# datasources:
# - name: Prometheus
# type: prometheus
# url: http://prometheus-prometheus-server
# access: proxy
# isDefault: true
## Configure notifiers
## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels
##
notifiers: {}
# notifiers.yaml:
# notifiers:
# - name: email-notifier
# type: email
# uid: email1
# # either:
# org_id: 1
# # or
# org_name: Main Org.
# is_default: true
# settings:
# addresses: [email protected]
# delete_notifiers:
## Configure grafana dashboard providers
## ref: http://docs.grafana.org/administration/provisioning/#dashboards
##
## `path` must be /var/lib/grafana/dashboards/<provider_name>
##
dashboardProviders: {}
# dashboardproviders.yaml:
# apiVersion: 1
# providers:
# - name: 'default'
# orgId: 1
# folder: ''
# type: file
# disableDeletion: false
# editable: true
# options:
# path: /var/lib/grafana/dashboards/default
## Configure grafana dashboard to import
## NOTE: To use dashboards you must also enable/configure dashboardProviders
## ref: https://grafana.com/dashboards
##
## Dashboards per provider; use the provider name as key.
##
dashboards: {}
# default:
# some-dashboard:
# json: |
# $RAW_JSON
# custom-dashboard:
# file: dashboards/custom-dashboard.json
# prometheus-stats:
# gnetId: 2
# revision: 2
# datasource: Prometheus
# local-dashboard:
# url: https://example.com/repository/test.json
# local-dashboard-base64:
# url: https://example.com/repository/test-b64.json
# b64content: true
## Reference to external ConfigMap per provider. Use the provider name as key and the ConfigMap name as value.
## A provider's dashboards must be defined either by external ConfigMaps or in values.yaml, but not both.
## ConfigMap data example:
##
## data:
## example-dashboard.json: |
## RAW_JSON
##
dashboardsConfigMaps: {}
# default: ""
## Grafana's primary configuration
## NOTE: values in the map will be converted to ini format
## ref: http://docs.grafana.org/installation/configuration/
##
grafana.ini:
paths:
data: /var/lib/grafana/data
logs: /var/log/grafana
plugins: /var/lib/grafana/plugins
provisioning: /etc/grafana/provisioning
analytics:
check_for_updates: true
log:
mode: console
grafana_net:
url: https://grafana.net
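## For reference, the chart's ConfigMap template renders the map above into
## grafana.ini as ini sections, e.g.:
## [analytics]
## check_for_updates = true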
## Grafana authentication can be enabled with the following values in grafana.ini
# server:
# The full public-facing URL you use in the browser, used for redirects and emails
# root_url:
# https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana
# auth.github:
# enabled: false
# allow_sign_up: false
# scopes: user:email,read:org
# auth_url: https://github.com/login/oauth/authorize
# token_url: https://github.com/login/oauth/access_token
# api_url: https://github.com/user
# team_ids:
# allowed_organizations:
# client_id:
# client_secret:
## LDAP authentication can be enabled with the following values in grafana.ini
## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
# auth.ldap:
# enabled: true
# allow_sign_up: true
# config_file: /etc/grafana/ldap.toml
## Grafana's LDAP configuration
## Templated by the template in _helpers.tpl
## NOTE: To enable LDAP, grafana.ini must be configured with auth.ldap.enabled
## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
## ref: http://docs.grafana.org/installation/ldap/#configuration
ldap:
enabled: false
# `existingSecret` is a reference to an existing secret containing the ldap configuration
# for Grafana in a key `ldap-toml`.
existingSecret: ""
# `config` is the content of `ldap.toml` that will be stored in the created secret
config: ""
# config: |-
# verbose_logging = true
# [[servers]]
# host = "my-ldap-server"
# port = 636
# use_ssl = true
# start_tls = false
# ssl_skip_verify = false
# bind_dn = "uid=%s,ou=users,dc=myorg,dc=com"
## Grafana's SMTP configuration
## NOTE: To enable, grafana.ini must be configured with smtp.enabled
## ref: http://docs.grafana.org/installation/configuration/#smtp
smtp:
# `existingSecret` is a reference to an existing secret containing the smtp configuration
# for Grafana.
existingSecret: ""
userKey: "user"
passwordKey: "password"
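## Illustrative grafana.ini snippet to enable SMTP (the host is a placeholder):
# grafana.ini:
#   smtp:
#     enabled: true
#     host: smtp.example.com:587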
## Sidecars that collect ConfigMaps with the specified label and store the included files in the respective folders
## Requires at least Grafana 5 to work and can't be used together with the dashboardProviders, datasources, and dashboards parameters
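## Illustrative ConfigMap that the dashboards sidecar would pick up (name and
## JSON content are placeholders; the label must match `sidecar.dashboards.label`):
# apiVersion: v1
# kind: ConfigMap
# metadata:
#   name: my-dashboard
#   labels:
#     grafana_dashboard: "1"
# data:
#   my-dashboard.json: |-
#     { "title": "My dashboard", "panels": [] }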
sidecar:
image:
repository: kiwigrid/k8s-sidecar
tag: 0.1.151
sha: ""
imagePullPolicy: IfNotPresent
resources: {}
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 50m
# memory: 50Mi
  # Set skipTlsVerify to true to skip TLS verification for Kubernetes API calls
# skipTlsVerify: true
enableUniqueFilenames: false
dashboards:
enabled: false
SCProvider: true
# label that the configmaps with dashboards are marked with
label: grafana_dashboard
# folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
folder: /tmp/dashboards
    # The default folder name; if set, a subfolder is created under `folder` and dashboards are put there instead
defaultFolderName: null
# If specified, the sidecar will search for dashboard config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
# provider configuration that lets grafana manage the dashboards
provider:
# name of the provider, should be unique
name: sidecarProvider
# orgid as configured in grafana
orgid: 1
# folder in which the dashboards should be imported in grafana
folder: ''
# type of the provider
type: file
      # disableDelete to activate an import-only behaviour
disableDelete: false
# allow updating provisioned dashboards from the UI
allowUiUpdates: false
datasources:
enabled: false
# label that the configmaps with datasources are marked with
label: grafana_datasource
# If specified, the sidecar will search for datasource config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
notifiers:
enabled: false
# label that the configmaps with notifiers are marked with
label: grafana_notifier
# If specified, the sidecar will search for notifier config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: null
## Override the deployment namespace
##
namespaceOverride: ""
|
etcd-operator | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"etcd-operator.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"etcd-operator.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s-%s\" .Release.Name $name .Values.etcdOperator.name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{- define \"etcd-backup-operator.name\" -}}\n{{- default .Chart.Name .Values.backupOperator.name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"etcd-backup-operator.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s-%s\" .Release.Name $name .Values.backupOperator.name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{- define \"etcd-restore-operator.name\" -}}\n{{- default .Chart.Name .Values.restoreOperator.name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"etcd-restore-operator.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s-%s\" .Release.Name $name .Values.restoreOperator.name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the etcd-operator service account to use\n*/}}\n{{- define \"etcd-operator.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"etcd-operator.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# backup-etcd-crd.yaml\n{{- if .Values.customResources.createBackupCRD }}\n---\napiVersion: \"etcd.database.coreos.com/v1beta2\"\nkind: \"EtcdBackup\"\nmetadata:\n name: {{ template \"etcd-backup-operator.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: {{ template \"etcd-backup-operator.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n annotations:\n \"helm.sh/hook\": \"post-install\"\n \"helm.sh/hook-delete-policy\": \"before-hook-creation\"\nspec:\n clusterName: {{ .Values.etcdCluster.name }}\n{{ toYaml .Values.backupOperator.spec | indent 2 }}\n{{- end}}\n",
"# backup-operator-deployment.yaml\n{{- if .Values.deployments.backupOperator }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"etcd-backup-operator.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: {{ template \"etcd-backup-operator.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"etcd-backup-operator.fullname\" . }}\n release: {{ .Release.Name }}\n replicas: {{ .Values.backupOperator.replicaCount }}\n template:\n metadata:\n name: {{ template \"etcd-backup-operator.fullname\" . }}\n labels:\n app: {{ template \"etcd-backup-operator.fullname\" . }}\n release: {{ .Release.Name }}\n spec:\n {{- if .Values.backupOperator.priorityClassName }}\n priorityClassName: {{ .Values.backupOperator.priorityClassName }}\n {{- end }}\n serviceAccountName: {{ template \"etcd-operator.serviceAccountName\" . }}\n containers:\n - name: {{ .Values.backupOperator.name }}\n image: \"{{ .Values.backupOperator.image.repository }}:{{ .Values.backupOperator.image.tag }}\"\n imagePullPolicy: {{ .Values.backupOperator.image.pullPolicy }}\n command:\n - etcd-backup-operator\n{{- range $key, $value := .Values.backupOperator.commandArgs }}\n - \"--{{ $key }}={{ $value }}\"\n{{- end }}\n env:\n - name: MY_POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n resources:\n limits:\n cpu: {{ .Values.backupOperator.resources.cpu }}\n memory: {{ .Values.backupOperator.resources.memory }}\n requests:\n cpu: {{ .Values.backupOperator.resources.cpu }}\n memory: {{ .Values.backupOperator.resources.memory }}\n {{- if .Values.backupOperator.nodeSelector }}\n nodeSelector: {{ toYaml .Values.backupOperator.nodeSelector | nindent 8 }}\n {{- end }}\n {{- if .Values.backupOperator.securityContext }}\n securityContext: {{ toYaml .Values.backupOperator.securityContext | nindent 8 }}\n {{- end }}\n {{- if .Values.backupOperator.tolerations }}\n tolerations: {{ toYaml .Values.backupOperator.tolerations | nindent 8 }}\n {{- end }}\n{{- end }}\n",
"# crds.yaml\n{{- if .Values.deployments.etcdOperator }}\n{{- range $path, $bytes := .Files.Glob \"crds/*.yaml\" }}\n{{ $.Files.Get $path }}\n---\n{{- end }}\n{{- end }}\n",
"# etcd-cluster-crd.yaml\n{{- if .Values.customResources.createEtcdClusterCRD }}\n---\napiVersion: \"etcd.database.coreos.com/v1beta2\"\nkind: \"EtcdCluster\"\nmetadata:\n name: {{ .Values.etcdCluster.name }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: {{ template \"etcd-operator.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n size: {{ .Values.etcdCluster.size }}\n version: \"{{ .Values.etcdCluster.version }}\"\n repository: \"{{ .Values.etcdCluster.image.repository }}\"\n pod:\n{{ toYaml .Values.etcdCluster.pod | indent 4 }}\n {{- if .Values.etcdCluster.enableTLS }}\n TLS:\n{{ toYaml .Values.etcdCluster.tls | indent 4 }}\n {{- end }}\n{{- end }}\n",
"# operator-cluster-role.yaml\n{{- if .Values.rbac.create }}\n---\napiVersion: rbac.authorization.k8s.io/{{ .Values.rbac.apiVersion }}\nkind: ClusterRole\nmetadata:\n name: {{ template \"etcd-operator.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: {{ template \"etcd-operator.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nrules:\n- apiGroups:\n - etcd.database.coreos.com\n resources:\n - etcdclusters\n - etcdbackups\n - etcdrestores\n verbs:\n - \"*\"\n- apiGroups:\n - apiextensions.k8s.io\n resources:\n - customresourcedefinitions\n verbs:\n - \"*\"\n- apiGroups:\n - \"\"\n resources:\n - pods\n - services\n - endpoints\n - persistentvolumeclaims\n - events\n verbs:\n - \"*\"\n- apiGroups:\n - apps\n resources:\n - deployments\n verbs:\n - \"*\"\n- apiGroups:\n - \"\"\n resources:\n - secrets\n verbs:\n - get\n{{- end }}\n",
"# operator-clusterrole-binding.yaml\n{{- if and .Values.rbac.create .Values.deployments.etcdOperator }}\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/{{ required \"A valid .Values.rbac.apiVersion entry required!\" .Values.rbac.apiVersion }}\nmetadata:\n name: {{ template \"etcd-operator.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: {{ template \"etcd-operator.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"etcd-operator.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"etcd-operator.fullname\" . }}\n{{- end }}\n",
"# operator-deployment.yaml\n{{- if .Values.deployments.etcdOperator }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"etcd-operator.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: {{ template \"etcd-operator.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"etcd-operator.fullname\" . }}\n release: {{ .Release.Name }}\n replicas: {{ .Values.etcdOperator.replicaCount }}\n template:\n metadata:\n name: {{ template \"etcd-operator.fullname\" . }}\n labels:\n app: {{ template \"etcd-operator.fullname\" . }}\n release: {{ .Release.Name }}\n annotations: {{ toYaml .Values.etcdOperator.podAnnotations | nindent 8}}\n spec:\n {{- if .Values.etcdOperator.priorityClassName }}\n priorityClassName: {{ .Values.etcdOperator.priorityClassName }}\n {{- end }}\n serviceAccountName: {{ template \"etcd-operator.serviceAccountName\" . }}\n containers:\n - name: {{ template \"etcd-operator.fullname\" . }}\n image: \"{{ .Values.etcdOperator.image.repository }}:{{ .Values.etcdOperator.image.tag }}\"\n imagePullPolicy: {{ .Values.etcdOperator.image.pullPolicy }}\n command:\n - etcd-operator\n{{- range $key, $value := .Values.etcdOperator.commandArgs }}\n - \"--{{ $key }}={{ $value }}\"\n{{- end }}\n env:\n - name: MY_POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n resources:\n limits:\n cpu: {{ .Values.etcdOperator.resources.cpu }}\n memory: {{ .Values.etcdOperator.resources.memory }}\n requests:\n cpu: {{ .Values.etcdOperator.resources.cpu }}\n memory: {{ .Values.etcdOperator.resources.memory }}\n {{- if .Values.etcdOperator.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /readyz\n port: 8080\n initialDelaySeconds: {{ .Values.etcdOperator.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.etcdOperator.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.etcdOperator.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.etcdOperator.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.etcdOperator.livenessProbe.failureThreshold }}\n {{- end}}\n {{- if .Values.etcdOperator.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /readyz\n port: 8080\n initialDelaySeconds: {{ .Values.etcdOperator.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.etcdOperator.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.etcdOperator.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.etcdOperator.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.etcdOperator.readinessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.etcdOperator.nodeSelector }}\n nodeSelector: {{ toYaml .Values.etcdOperator.nodeSelector | nindent 8 }}\n {{- end }}\n {{- if .Values.etcdOperator.securityContext }}\n securityContext: {{ toYaml .Values.etcdOperator.securityContext | nindent 8 }}\n {{- end }}\n {{- if .Values.etcdOperator.tolerations }}\n tolerations: {{ toYaml .Values.etcdOperator.tolerations | nindent 8 }}\n {{- end }}\n{{- end }}\n",
"# operator-service-account.yaml\n{{- if and .Values.serviceAccount.create .Values.deployments.etcdOperator }}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"etcd-operator.serviceAccountName\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: {{ template \"etcd-operator.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nimagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}\n{{- end }}\n",
"# restore-etcd-crd.yaml\n{{- if .Values.customResources.createRestoreCRD }}\n---\napiVersion: \"etcd.database.coreos.com/v1beta2\"\nkind: \"EtcdRestore\"\nmetadata:\n # An EtcdCluster with the same name will be created\n name: {{ .Values.etcdCluster.name }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: {{ template \"etcd-restore-operator.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n annotations:\n \"helm.sh/hook\": \"post-install\"\n \"helm.sh/hook-delete-policy\": \"before-hook-creation\"\nspec:\n clusterSpec:\n size: {{ .Values.etcdCluster.size }}\n baseImage: \"{{ .Values.etcdCluster.image.repository }}\"\n version: {{ .Values.etcdCluster.image.tag }}\n pod:\n{{ toYaml .Values.etcdCluster.pod | indent 6 }}\n {{- if .Values.etcdCluster.enableTLS }}\n TLS:\n{{ toYaml .Values.etcdCluster.tls | indent 6 }}\n {{- end }}\n{{ toYaml .Values.restoreOperator.spec | indent 2 }}\n{{- end}}\n",
"# restore-operator-deployment.yaml\n{{- if .Values.deployments.restoreOperator }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"etcd-restore-operator.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: {{ template \"etcd-restore-operator.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"etcd-restore-operator.name\" . }}\n release: {{ .Release.Name }}\n replicas: {{ .Values.restoreOperator.replicaCount }}\n template:\n metadata:\n name: {{ template \"etcd-restore-operator.fullname\" . }}\n labels:\n app: {{ template \"etcd-restore-operator.name\" . }}\n release: {{ .Release.Name }}\n spec:\n {{- if .Values.restoreOperator.priorityClassName }}\n priorityClassName: {{ .Values.restoreOperator.priorityClassName }}\n {{- end }}\n serviceAccountName: {{ template \"etcd-operator.serviceAccountName\" . }}\n containers:\n - name: {{ .Values.restoreOperator.name }}\n image: \"{{ .Values.restoreOperator.image.repository }}:{{ .Values.restoreOperator.image.tag }}\"\n imagePullPolicy: {{ .Values.restoreOperator.image.pullPolicy }}\n ports:\n - containerPort: {{ .Values.restoreOperator.port }}\n command:\n - etcd-restore-operator\n{{- range $key, $value := .Values.restoreOperator.commandArgs }}\n - \"--{{ $key }}={{ $value }}\"\n{{- end }}\n env:\n - name: MY_POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: SERVICE_ADDR\n value: \"{{ .Values.restoreOperator.name }}:{{ .Values.restoreOperator.port }}\"\n resources:\n limits:\n cpu: {{ .Values.restoreOperator.resources.cpu }}\n memory: {{ .Values.restoreOperator.resources.memory }}\n requests:\n cpu: {{ .Values.restoreOperator.resources.cpu }}\n memory: {{ .Values.restoreOperator.resources.memory }}\n {{- if .Values.restoreOperator.nodeSelector }}\n nodeSelector: {{ toYaml .Values.restoreOperator.nodeSelector | nindent 8 }}\n {{- end }}\n {{- if .Values.restoreOperator.securityContext }}\n securityContext: {{ toYaml .Values.restoreOperator.securityContext | nindent 8 }}\n {{- end }}\n {{- if .Values.restoreOperator.tolerations }}\n tolerations: {{ toYaml .Values.restoreOperator.tolerations | nindent 8 }}\n {{- end }}\n{{- end }}\n",
"# restore-operator-service.yaml\n{{- if .Values.deployments.restoreOperator }}\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ .Values.restoreOperator.name }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: {{ template \"etcd-restore-operator.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n ports:\n - protocol: TCP\n name: http-etcd-restore-port\n port: {{ .Values.restoreOperator.port }}\n selector:\n app: {{ template \"etcd-restore-operator.name\" . }}\n release: {{ .Release.Name }}\n{{- end }}\n"
] | # Default values for etcd-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"
## Install Default RBAC roles and bindings
rbac:
create: true
apiVersion: v1
## Service account name and whether to create it
serviceAccount:
create: true
name:
# Select what to deploy
deployments:
etcdOperator: true
  # one-time deployment; delete once completed.
# Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/backup-operator.md
backupOperator: true
  # one-time deployment; delete once completed.
# Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/walkthrough/restore-operator.md
restoreOperator: true
# Creates custom resources; not all are required.
# You could use `helm template --values <values.yaml> --name release_name ...`
# and create the resources yourself to deploy on your cluster later.
customResources:
createEtcdClusterCRD: false
createBackupCRD: false
createRestoreCRD: false
# etcdOperator
etcdOperator:
priorityClassName: ""
name: etcd-operator
replicaCount: 1
image:
repository: quay.io/coreos/etcd-operator
tag: v0.9.4
pullPolicy: Always
resources:
cpu: 100m
memory: 128Mi
## Node labels for etcd-operator pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
## additional command arguments go here; will be translated to `--key=value` form
## e.g., analytics: true
commandArgs: {}
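  ## Illustrative example (flag and value are placeholders); each entry is
  ## rendered into the container command as `--key=value`:
  # commandArgs:
  #   analytics: "false"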
tolerations: []
## Configurable health checks against the /readyz endpoint that etcd-operator exposes
readinessProbe:
enabled: false
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
livenessProbe:
enabled: false
initialDelaySeconds: 0
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
# backup spec
backupOperator:
priorityClassName: ""
name: etcd-backup-operator
replicaCount: 1
image:
repository: quay.io/coreos/etcd-operator
tag: v0.9.4
pullPolicy: Always
resources:
cpu: 100m
memory: 128Mi
spec:
storageType: S3
s3:
s3Bucket:
awsSecret:
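  ## Illustrative values for the spec above (bucket and secret names are
  ## placeholders); `awsSecret` names a Kubernetes Secret holding AWS credentials:
  # s3:
  #   s3Bucket: my-etcd-backups
  #   awsSecret: aws-credentials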
## Node labels for etcd pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
## additional command arguments go here; will be translated to `--key=value` form
## e.g., analytics: true
commandArgs: {}
securityContext: {}
tolerations: []
# restore spec
restoreOperator:
priorityClassName: ""
name: etcd-restore-operator
replicaCount: 1
image:
repository: quay.io/coreos/etcd-operator
tag: v0.9.4
pullPolicy: Always
port: 19999
resources:
cpu: 100m
memory: 128Mi
spec:
s3:
# The format of "path" must be: "<s3-bucket-name>/<path-to-backup-file>"
# e.g: "etcd-snapshot-bucket/v1/default/example-etcd-cluster/3.2.10_0000000000000001_etcd.backup"
path:
awsSecret:
## Node labels for etcd pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
## additional command arguments go here; will be translated to `--key=value` form
## e.g., analytics: true
commandArgs: {}
securityContext: {}
tolerations: []
## etcd-cluster specific values
etcdCluster:
name: etcd-cluster
size: 3
version: 3.2.25
image:
repository: quay.io/coreos/etcd
tag: v3.2.25
pullPolicy: Always
enableTLS: false
# TLS configs
tls:
static:
member:
peerSecret: etcd-peer-tls
serverSecret: etcd-server-tls
operatorSecret: etcd-client-tls
## etcd cluster pod specific values
## Ref: https://github.com/coreos/etcd-operator/blob/master/doc/user/spec_examples.md#three-members-cluster-with-resource-requirement
pod:
busyboxImage: busybox:1.28.0-glibc
    ## Anti-affinity for etcd pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
antiAffinity: false
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
## Node labels for etcd pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
securityContext: {}
tolerations: []
|
signalsciences | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"signalsciences.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"signalsciences.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn secret name to be used based on provided values.\n*/}}\n{{- define \"signalsciences.secretAccessKeySecretName\" -}}\n{{- $fullName := printf \"%s-secretaccesskey\" (include \"signalsciences.fullname\" .) -}}\n{{- default $fullName .Values.signalsciences.secretAccessKeyExistingSecret | quote -}}\n{{- end -}}\n\n{{/*\nReturn secret name to be used based on provided values.\n*/}}\n{{- define \"signalsciences.accessKeyIdSecretName\" -}}\n{{- $fullName := printf \"%s-accesskeyid\" (include \"signalsciences.fullname\" .) -}}\n{{- default $fullName .Values.signalsciences.accessKeyIdExistingSecret | quote -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"signalsciences.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# accesskeyid-secret.yaml\n{{- if not .Values.signalsciences.accessKeyIdExistingSecret }}\n\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"signalsciences.accessKeyIdSecretName\" . }}\n labels:\n app: \"{{ template \"signalsciences.fullname\" . }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ntype: Opaque\ndata:\n accessKeyId: {{ default \"MISSING\" .Values.signalsciences.accessKeyId | b64enc | quote }}\n\n{{- end }}\n",
"# daemonset.yaml\n{{- if .Values.daemonset.enabled }}\napiVersion: extensions/v1beta1\nkind: DaemonSet\nmetadata:\n name: {{ template \"signalsciences.fullname\" . }}\n labels:\n app: {{ template \"signalsciences.name\" . }}\n chart: {{ template \"signalsciences.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n template:\n metadata:\n labels:\n app: {{ template \"signalsciences.name\" . }}\n release: {{ .Release.Name }}\n {{- if .Values.daemonset.podAnnotations }}\n annotations:\n{{ toYaml .Values.daemonset.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n {{- if .Values.daemonset.tolerations }}\n tolerations:\n{{ toYaml .Values.daemonset.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.daemonset.affinity }}\n affinity:\n{{ toYaml .Values.daemonset.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.daemonset.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.daemonset.nodeSelector | indent 8 }}\n {{- end }}\n volumes:\n - name: sigsci-tmp\n hostPath:\n path: {{ .Values.signalsciences.agentTempVolume}}\n containers:\n - name: sigsci-agent\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n volumeMounts:\n - mountPath: {{ .Values.signalsciences.agentTempVolume }}\n name: sigsci-tmp\n readOnly: false\n env:\n {{- if .Values.signalsciences.socketAddress }}\n - name: SIGSCI_RPC_ADDRESS\n value: unix:{{ .Values.signalsciences.socketAddress }}\n {{- end }}\n - name: SIGSCI_HOSTNAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: SIGSCI_SECRETACCESSKEY\n valueFrom:\n secretKeyRef:\n name: {{ template \"signalsciences.secretAccessKeySecretName\" . }}\n key: secretAccessKey\n - name: SIGSCI_ACCESSKEYID\n valueFrom:\n secretKeyRef:\n name: {{ template \"signalsciences.accessKeyIdSecretName\" . }}\n key: accessKeyId\n securityContext:\n # The sigsci-agent container should run with its root filesystem read only\n readOnlyRootFilesystem: true\n resources:\n{{ toYaml .Values.signalsciences.resources | indent 12 }}\n updateStrategy:\n type: {{ default \"OnDelete\" .Values.daemonset.updateStrategy | quote }}\n{{- end }}\n",
"# secretaccesskey-secret.yaml\n{{- if not .Values.signalsciences.secretAccessKeyExistingSecret }}\n\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"signalsciences.secretAccessKeySecretName\" . }}\n labels:\n app: \"{{ template \"signalsciences.fullname\" . }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ntype: Opaque\ndata:\n secretAccessKey: {{ default \"MISSING\" .Values.signalsciences.secretAccessKey | b64enc | quote }}\n\n{{- end }}\n"
] | # Default values for signalsciences.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: signalsciences/sigsci-agent
tag: 4.6.0
pullPolicy: IfNotPresent
daemonset: {}
## Annotations to add to the DaemonSet's Pods
# podAnnotations:
# scheduler.alpha.kubernetes.io/tolerations: '[{"key": "example", "value": "foo"}]'
## Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6)
# tolerations: []
## Allow the DaemonSet to schedule on selected nodes
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
# nodeSelector: {}
## Allow the DaemonSet to schedule using affinity rules
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
# affinity: {}
## Allow the DaemonSet to perform a rolling update on helm update
## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
# updateStrategy: RollingUpdate
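## Example (illustrative) of a populated daemonset block; key names follow the
## comments above, and `enabled: true` is required for the DaemonSet to render:
# daemonset:
#   enabled: true
#   updateStrategy: RollingUpdate
#   tolerations:
#     - key: node-role.kubernetes.io/master
#       effect: NoSchedule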
signalsciences:
## You'll need to set this to your agent accessKeyId before the agent will run
## ref: https://docs.signalsciences.net/install-guides/#step-1-agent-installation
##
# accessKeyId:
## Use existing Secret which stores accessKeyId instead of creating a new one
# accessKeyIdExistingSecret:
## You'll need to set this to your agent secretAccessKey before the agent will run
## ref: https://docs.signalsciences.net/install-guides/#step-1-agent-installation
# secretAccessKey:
## Use existing Secret which stores the secretAccessKey instead of creating a new one
# secretAccessKeyExistingSecret:
## For added security, it is recommended that the sigsci-agent container be executed
## with the root filesystem mounted read only. The agent, however, still needs to write
## some temporary files such as the socket file for RPC communication and some periodically
## updated files such as GeoIP data
agentTempVolume: /sigsci/tmp
# If required (default is /sigsci/tmp/sigsci.sock for the container)
# socketAddress: /sigsci/tmp/sigsci.sock
resources:
requests:
cpu: 200m
memory: 256Mi
limits:
cpu: 200m
memory: 256Mi
|
metrics-server | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"metrics-server.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"metrics-server.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"metrics-server.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a service name that defaults to app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"metrics-server.service.fullname\" -}}\n{{- .Values.service.nameOverride | default .Chart.Name }}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"metrics-server.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"metrics-server.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# aggregated-metrics-reader-cluster-role.yaml\n{{- if .Values.rbac.create -}}\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: system:{{ template \"metrics-server.name\" . }}-aggregated-reader\n labels:\n app: {{ template \"metrics-server.name\" . }}\n chart: {{ template \"metrics-server.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n rbac.authorization.k8s.io/aggregate-to-view: \"true\"\n rbac.authorization.k8s.io/aggregate-to-edit: \"true\"\n rbac.authorization.k8s.io/aggregate-to-admin: \"true\"\nrules:\n - apiGroups: [\"metrics.k8s.io\"]\n resources: [\"pods\",\"nodes\"]\n verbs: [\"get\", \"list\", \"watch\"]\n{{- end -}}\n",
"# auth-delegator-crb.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"metrics-server.fullname\" . }}:system:auth-delegator\n labels:\n app: {{ template \"metrics-server.name\" . }}\n chart: {{ template \"metrics-server.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:auth-delegator\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"metrics-server.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# cluster-role.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: system:{{ template \"metrics-server.fullname\" . }}\n labels:\n app: {{ template \"metrics-server.name\" . }}\n chart: {{ template \"metrics-server.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n - nodes\n - nodes/stats\n - namespaces\n verbs:\n - get\n - list\n - watch\n {{- if .Values.rbac.pspEnabled }}\n - apiGroups:\n - extensions\n - policy\n resources:\n - podsecuritypolicies\n resourceNames:\n - privileged-{{ template \"metrics-server.fullname\" . }}\n verbs:\n - use\n {{- end -}}\n{{- end -}}\n",
"# metric-server-service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"metrics-server.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"metrics-server.name\" . }}\n chart: {{ template \"metrics-server.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- with .Values.service.labels -}}\n {{ toYaml . | nindent 4 }}\n {{- end }}\n annotations:\n {{- toYaml .Values.service.annotations | trim | nindent 4 }}\nspec:\n ports:\n - port: {{ .Values.service.port }}\n protocol: TCP\n targetPort: https\n selector:\n app: {{ template \"metrics-server.name\" . }}\n release: {{ .Release.Name }}\n type: {{ .Values.service.type }}\n\n",
"# metrics-api-service.yaml\n{{- if .Values.apiService.create -}}\napiVersion: apiregistration.k8s.io/v1beta1\nkind: APIService\nmetadata:\n name: v1beta1.metrics.k8s.io\n labels:\n app: {{ template \"metrics-server.name\" . }}\n chart: {{ template \"metrics-server.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n service:\n name: {{ template \"metrics-server.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n group: metrics.k8s.io\n version: v1beta1\n insecureSkipTLSVerify: true\n groupPriorityMinimum: 100\n versionPriority: 100\n{{- end -}}\n",
"# metrics-server-crb.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: system:{{ template \"metrics-server.fullname\" . }}\n labels:\n app: {{ template \"metrics-server.name\" . }}\n chart: {{ template \"metrics-server.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: system:{{ template \"metrics-server.fullname\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"metrics-server.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# metrics-server-deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"metrics-server.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"metrics-server.name\" . }}\n chart: {{ template \"metrics-server.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"metrics-server.name\" . }}\n release: {{ .Release.Name }}\n replicas: {{ .Values.replicas }}\n template:\n metadata:\n labels:\n app: {{ template \"metrics-server.name\" . }}\n release: {{ .Release.Name }}\n {{- if .Values.podLabels }}\n{{ toYaml .Values.podLabels | indent 8 }}\n {{- end }}\n {{- with .Values.podAnnotations }}\n annotations:\n {{- range $key, $value := . }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\n spec:\n {{- if .Values.priorityClassName }}\n priorityClassName: \"{{ .Values.priorityClassName }}\"\n {{- end }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n {{- range .Values.imagePullSecrets }}\n - name: {{ . }}\n {{- end }}\n {{- end }}\n serviceAccountName: {{ template \"metrics-server.serviceAccountName\" . }}\n{{- if .Values.hostNetwork.enabled }}\n hostNetwork: true\n{{- end }}\n containers:\n {{- if .Values.extraContainers }}\n {{- ( tpl (toYaml .Values.extraContainers) . ) | nindent 8 }}\n {{- end }}\n - name: metrics-server\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command:\n - /metrics-server\n - --cert-dir=/tmp\n - --logtostderr\n - --secure-port=8443\n {{- range .Values.args }}\n - {{ . }}\n {{- end }}\n ports:\n - containerPort: 8443\n name: https\n livenessProbe:\n {{- toYaml .Values.livenessProbe | trim | nindent 12 }}\n readinessProbe:\n {{- toYaml .Values.readinessProbe | trim | nindent 12 }}\n resources:\n {{- toYaml .Values.resources | trim | nindent 12 }}\n securityContext:\n {{- toYaml .Values.securityContext | trim | nindent 12 }}\n volumeMounts:\n - name: tmp\n mountPath: /tmp\n {{- with .Values.extraVolumeMounts }}\n {{- toYaml . | nindent 10 }}\n {{- end }}\n nodeSelector:\n {{- toYaml .Values.nodeSelector | trim | nindent 8 }}\n affinity:\n {{- toYaml .Values.affinity | trim | nindent 8 }}\n tolerations:\n {{- toYaml .Values.tolerations | trim | nindent 8 }}\n volumes:\n - name: tmp\n emptyDir: {}\n {{- with .Values.extraVolumes }}\n {{- toYaml . | nindent 6}}\n {{- end }}\n",
"# metrics-server-serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"metrics-server.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"metrics-server.name\" . }}\n chart: {{ template \"metrics-server.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- end -}}\n",
"# pdb.yaml\n{{- if .Values.podDisruptionBudget.enabled -}}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n labels:\n app: {{ template \"metrics-server.name\" . }}\n chart: {{ template \"metrics-server.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n name: {{ template \"metrics-server.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n\nspec:\n {{- if .Values.podDisruptionBudget.minAvailable }}\n minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}\n {{- end }}\n {{- if .Values.podDisruptionBudget.maxUnavailable }}\n maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}\n {{- end }}\n selector:\n matchLabels:\n app: {{ template \"metrics-server.name\" . }}\n{{- end -}}",
"# psp.yaml\n{{- if .Values.rbac.pspEnabled }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: privileged-{{ template \"metrics-server.fullname\" . }}\nspec:\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n hostPID: true\n hostIPC: true\n hostNetwork: true\n hostPorts:\n - min: 1\n max: 65536\n{{- end }}\n",
"# role-binding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: {{ template \"metrics-server.fullname\" . }}-auth-reader\n namespace: kube-system\n labels:\n app: {{ template \"metrics-server.name\" . }}\n chart: {{ template \"metrics-server.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: extension-apiserver-authentication-reader\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"metrics-server.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# test-version.yaml\napiVersion: v1\nkind: Pod\nmetadata:\n name: {{ template \"metrics-server.fullname\" . }}-test\n labels:\n app: {{ template \"metrics-server.name\" . }}\n chart: {{ template \"metrics-server.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n annotations:\n \"helm.sh/hook\": test-success\nspec:\n containers:\n - name: wget\n image: \"{{ .Values.testImage.repository }}:{{ .Values.testImage.tag }}\"\n imagePullPolicy: {{ .Values.testImage.pullPolicy }}\n command: ['/bin/sh']\n args:\n - -c\n - 'wget -qO- https://{{ include \"metrics-server.fullname\" . }}:{{ .Values.service.port }}/version | grep -F {{ .Values.image.tag }}'\n{{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n {{- range .Values.imagePullSecrets }}\n - name: {{ . }}\n {{- end }}\n{{- end }}\n restartPolicy: Never\n"
] | rbac:
# Specifies whether RBAC resources should be created
create: true
pspEnabled: false
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
apiService:
# Specifies if the v1beta1.metrics.k8s.io API service should be created.
#
# You typically want this enabled! If you disable API service creation you have to
# manage it outside of this chart, e.g. for horizontal pod autoscaling to
# work with this release.
create: true
hostNetwork:
# Specifies if metrics-server should be started in hostNetwork mode.
#
# You would need this enabled if you use alternate overlay networking for pods and
# the API server is unable to communicate with metrics-server. As an example, this is
# required if you use the Weave network on EKS.
enabled: false
image:
repository: k8s.gcr.io/metrics-server-amd64
tag: v0.3.6
pullPolicy: IfNotPresent
imagePullSecrets: []
# - registrySecretName
args: []
# enable this if you have self-signed certificates, see: https://github.com/kubernetes-incubator/metrics-server
# - --kubelet-insecure-tls
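# Example (illustrative) showing the flag above combined with kubelet address-type
# preferences; valid flags depend on the metrics-server version:
# args:
#   - --kubelet-insecure-tls
#   - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname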
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
replicas: 1
extraContainers: []
podLabels: {}
podAnnotations: {}
# The following annotations guarantee scheduling for critical add-on pods.
# See more at: https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
# scheduler.alpha.kubernetes.io/critical-pod: ''
## Set a pod priorityClassName
# priorityClassName: system-node-critical
extraVolumeMounts: []
# - name: secrets
# mountPath: /etc/kubernetes/secrets
# readOnly: true
extraVolumes: []
# - name: secrets
# secret:
# secretName: kube-apiserver
livenessProbe:
httpGet:
path: /healthz
port: https
scheme: HTTPS
initialDelaySeconds: 20
readinessProbe:
httpGet:
path: /healthz
port: https
scheme: HTTPS
initialDelaySeconds: 20
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop: ["all"]
readOnlyRootFilesystem: true
runAsGroup: 10001
runAsNonRoot: true
runAsUser: 10001
service:
annotations: {}
labels: {}
# Add these labels to have metrics-server show up in `kubectl cluster-info`
# kubernetes.io/cluster-service: "true"
# kubernetes.io/name: "Metrics-server"
port: 443
type: ClusterIP
podDisruptionBudget:
# https://kubernetes.io/docs/tasks/run-application/configure-pdb/
enabled: false
minAvailable:
maxUnavailable:
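## Example (illustrative): keep at least one replica available during voluntary disruptions
# enabled: true
# minAvailable: 1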
testImage:
repository: busybox
tag: latest
pullPolicy: IfNotPresent
|
hackmd | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"hackmd.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"hackmd.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"hackmd.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified postgresql name.\nWe truncate at 24 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"hackmd.postgresql.fullname\" -}}\n{{- $name := default \"postgresql\" .Values.postgresql.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nDetermine database host based on use of postgresql dependency.\n*/}}\n{{- define \"hackmd.database.host\" -}}\n{{- if .Values.postgresql.install -}}\n{{- template \"hackmd.postgresql.fullname\" . -}}\n{{- else -}}\n{{- .Values.postgresql.postgresHost -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"hackmd.fullname\" . }}\n labels:\n app: {{ template \"hackmd.name\" . }}\n chart: {{ template \"hackmd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"hackmd.name\" . }}\n release: {{ .Release.Name }}\n strategy:\n type: {{ .Values.deploymentStrategy }}\n {{- if ne .Values.deploymentStrategy \"RollingUpdate\" }}\n rollingUpdate: null\n {{- end }}\n template:\n metadata:\n labels:\n app: {{ template \"hackmd.name\" . }}\n release: {{ .Release.Name }}\n{{- with .Values.podAnnotations }}\n annotations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: http\n containerPort: 3000\n protocol: TCP\n livenessProbe:\n httpGet:\n path: /status\n port: 3000\n initialDelaySeconds: 120\n readinessProbe:\n httpGet:\n path: /status\n port: 3000\n initialDelaySeconds: 30\n env:\n - name: CMD_DB_PASSWORD\n {{- if .Values.postgresql.install }}\n valueFrom:\n secretKeyRef:\n name: {{ template \"hackmd.postgresql.fullname\" . }}\n key: postgresql-password\n {{- else }}\n value: {{ .Values.postgresql.postgresPassword }}\n {{- end }}\n - name: CMD_SESSION_SECRET\n valueFrom:\n secretKeyRef:\n name: {{ template \"hackmd.fullname\" . }}\n key: sessionSecret\n - name: CMD_DB_URL\n value: postgres://{{ .Values.postgresql.postgresqlUsername }}:$(CMD_DB_PASSWORD)@{{ template \"hackmd.database.host\" . }}:5432/{{ .Values.postgresql.postgresqlDatabase }}\n - name: HMD_DB_URL\n value: postgres://{{ .Values.postgresql.postgresqlUsername }}:$(CMD_DB_PASSWORD)@{{ template \"hackmd.database.host\" . }}:5432/{{ .Values.postgresql.postgresqlDatabase }}\n {{- if .Values.extraVars }}\n{{ toYaml .Values.extraVars | indent 12 }}\n {{- end }}\n volumeMounts:\n - name: data\n mountPath: \"/hackmd/public/uploads\"\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n volumes:\n - name: data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template \"hackmd.fullname\" . }}{{- end }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"hackmd.fullname\" . -}}\n{{- $servicePort := .Values.service.port -}}\n{{- $ingressPath := .Values.ingress.path -}}\n{{- if semverCompare \">=1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\napiVersion: networking.k8s.io/v1beta1\n{{- else -}}\napiVersion: extensions/v1beta1\n{{- end }}\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app: {{ template \"hackmd.name\" . }}\n chart: {{ template \"hackmd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: http\n {{- end }}\n{{- end }}\n",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"hackmd.fullname\" . }}\n labels:\n app: {{ template \"hackmd.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n annotations:\n {{- if .Values.persistence.annotations }}\n{{ toYaml .Values.persistence.annotations | indent 4 }}\n {{- end }}\nspec:\n accessModes:\n{{ toYaml .Values.persistence.accessModes | indent 4 }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# secret.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"hackmd.fullname\" . }}\n labels:\n app: {{ template \"hackmd.name\" . }}\n chart: {{ template \"hackmd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n {{- if .Values.sessionSecret }}\n sessionSecret: {{ .Values.sessionSecret | b64enc | quote }}\n {{- else }}\n sessionSecret: {{ randAlphaNum 10 | b64enc | quote }}\n {{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"hackmd.fullname\" . }}\n labels:\n app: {{ template \"hackmd.name\" . }}\n chart: {{ template \"hackmd.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: http\n protocol: TCP\n name: http\n selector:\n app: {{ template \"hackmd.name\" . }}\n release: {{ .Release.Name }}\n"
] | # Default values for hackmd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
deploymentStrategy: RollingUpdate
image:
repository: hackmdio/hackmd
tag: 1.3.0-alpine
pullPolicy: IfNotPresent
service:
name: hackmd
type: ClusterIP
port: 3000
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
persistence:
enabled: true
## hackmd data Persistent Volume access modes
## Must match those of existing PV or dynamic provisioner
## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
accessModes:
- ReadWriteOnce
annotations: {}
existingClaim: ""
size: 2Gi
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
## hackmd data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
podAnnotations: {}
extraVars: []
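## Example (illustrative) of extra environment variables for the container;
## variable names depend on the HackMD/CodiMD version:
# extraVars:
#   - name: CMD_DOMAIN
#     value: hackmd.example.com
#   - name: CMD_PROTOCOL_USESSL
#     value: "true"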
nodeSelector: {}
tolerations: []
affinity: {}
## Configuration values for the postgresql dependency.
## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md
##
postgresql:
install: true
image:
tag: "9.6"
postgresqlUsername: "hackmd"
postgresqlDatabase: "hackmd"
## Default: random 10 character string
# postgresqlPassword:
|
spartakus | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n labels:\n app: {{ template \"name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"fullname\" . }}\nspec:\n replicas: {{ .Values.replicaCount }}\n template:\n metadata:\n {{- if .Values.podAnnotations }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n labels:\n app: {{ template \"name\" . }}\n release: {{ .Release.Name }}\n spec:\n containers:\n - name: {{ template \"name\" . }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: \"{{ .Values.image.pullPolicy }}\"\n args:\n - volunteer\n - --cluster-id=\"{{ .Values.uuid | default uuidv4 }}\"\n {{- range $key, $value := .Values.extraArgs }}\n - --{{ $key }}={{ $value }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n"
] | extraArgs: {}
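## Additional flags for the volunteer container, rendered as `--key=value`.
## Example (illustrative; consult `spartakus volunteer --help` for valid flags):
# extraArgs:
#   database: https://spartakus.k8s.io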
## Container image
##
image:
repository: k8s.gcr.io/spartakus-amd64
tag: v1.0.0
pullPolicy: IfNotPresent
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Annotations to be added to pods
##
podAnnotations: {}
replicaCount: 1
## Resource requests and limits
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# limits:
# cpu: 2m
# memory: 8Mi
# requests:
# cpu: 2m
# memory: 8Mi
## A version 4 UUID to uniquely identify the cluster
## If not provided, Helm will generate one automatically at install time.
##
uuid: ""
|
orangehrm | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"orangehrm.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"orangehrm.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"orangehrm.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"orangehrm.mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the proper OrangeHRM image name\n*/}}\n{{- define \"orangehrm.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the metrics image)\n*/}}\n{{- define \"orangehrm.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"orangehrm.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets 
}}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"orangehrm.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.orangehrm.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.orangehrm.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.orangehrm.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.orangehrm.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.orangehrm.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.orangehrm.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"orangehrm.deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: {{ template \"orangehrm.deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"orangehrm.fullname\" . }}\n labels:\n app: {{ template \"orangehrm.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"orangehrm.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"orangehrm.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n{{- if or .Values.podAnnotations .Values.metrics.enabled }}\n annotations:\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n {{- if .Values.metrics.podAnnotations }}\n{{ toYaml .Values.metrics.podAnnotations | indent 8 }}\n {{- end }}\n{{- end }}\n spec:\n{{- include \"orangehrm.imagePullSecrets\" . | indent 6 }}\n hostAliases:\n - ip: \"127.0.0.1\"\n hostnames:\n - \"status.localhost\"\n containers:\n - name: {{ template \"orangehrm.fullname\" . }}\n image: {{ template \"orangehrm.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n - name: ALLOW_EMPTY_PASSWORD\n value: {{ .Values.allowEmptyPassword | quote }}\n {{- if .Values.mariadb.enabled }}\n - name: MARIADB_HOST\n value: {{ template \"orangehrm.mariadb.fullname\" . }}\n - name: MARIADB_PORT_NUMBER\n value: \"3306\"\n - name: ORANGEHRM_DATABASE_NAME\n value: {{ .Values.mariadb.db.name | quote }}\n - name: ORANGEHRM_DATABASE_USER\n value: {{ .Values.mariadb.db.user | quote }}\n - name: ORANGEHRM_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"orangehrm.mariadb.fullname\" . }}\n key: mariadb-password\n {{- else }}\n - name: MARIADB_HOST\n value: {{ .Values.externalDatabase.host | quote }}\n - name: MARIADB_PORT_NUMBER\n value: {{ .Values.externalDatabase.port | quote }}\n - name: ORANGEHRM_DATABASE_NAME\n value: {{ .Values.externalDatabase.database | quote }}\n - name: ORANGEHRM_DATABASE_USER\n value: {{ .Values.externalDatabase.user | quote }}\n - name: ORANGEHRM_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n key: db-password\n {{- end }}\n - name: ORANGEHRM_USERNAME\n value: {{ default \"\" .Values.orangehrmUsername | quote }}\n - name: ORANGEHRM_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"orangehrm.fullname\" . }}\n key: orangehrm-password\n - name: SMTP_HOST\n value: {{ default \"\" .Values.smtpHost | quote }}\n - name: SMTP_PORT\n value: {{ default \"\" .Values.smtpPort | quote }}\n - name: SMTP_USER\n value: {{ default \"\" .Values.smtpUser | quote }}\n - name: SMTP_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"orangehrm.fullname\" . }}\n key: smtp-password\n - name: SMTP_PROTOCOL\n value: {{ default \"none\" .Values.smtpProtocol | quote }}\n ports:\n - name: http\n containerPort: 80\n - name: https\n containerPort: 443\n livenessProbe:\n httpGet:\n path: /symfony/web/index.php\n port: http\n initialDelaySeconds: 120\n readinessProbe:\n httpGet:\n path: /symfony/web/index.php\n port: http\n initialDelaySeconds: 30\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n - name: orangehrm-data\n mountPath: /bitnami/orangehrm\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"orangehrm.metrics.image\" . 
}}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n command: [ '/bin/apache_exporter', '-scrape_uri', 'http://status.localhost:80/server-status/?auto']\n ports:\n - name: metrics\n containerPort: 9117\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 15\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 5\n timeoutSeconds: 1\n resources:\n {{ toYaml .Values.metrics.resources | indent 10 }}\n{{- end }}\n volumes:\n - name: orangehrm-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"orangehrm.fullname\" . }}-orangehrm\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# externaldb-secrets.yaml\n\n{{- if not .Values.mariadb.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n labels:\n app: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n db-password: {{ default \"\" .Values.externalDatabase.password | b64enc | quote }}\n{{- end }}",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"orangehrm.fullname\" . }}\n labels:\n app: \"{{ template \"orangehrm.fullname\" . }}\"\n chart: \"{{ template \"orangehrm.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n annotations:\n {{- if .Values.ingress.certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: {{ template \"orangehrm.fullname\" $ }}\n servicePort: http\n {{- end }}\n tls:\n {{- range .Values.ingress.hosts }}\n {{- if .tls }}\n - hosts:\n {{- if .tlsHosts }}\n {{- range $host := .tlsHosts }}\n - {{ $host }}\n {{- end }}\n {{- else }}\n - {{ .name }}\n {{- end }}\n secretName: {{ .tlsSecret }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# orangehrm-pvc.yaml\n{{- if .Values.persistence.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"orangehrm.fullname\" . }}-orangehrm\n labels:\n app: {{ template \"orangehrm.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.orangehrm.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.orangehrm.size | quote }}\n {{ include \"orangehrm.storageClass\" . }}\n{{- end -}}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"orangehrm.fullname\" . }}\n labels:\n app: {{ template \"orangehrm.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n {{ if .Values.orangehrmPassword }}\n orangehrm-password: {{ default \"\" .Values.orangehrmPassword | b64enc | quote }}\n {{ else }}\n orangehrm-password: {{ list (lower (randAlpha 3)) (randNumeric 2) (upper (randAlpha 3)) | join \"_\" | b64enc | quote }}\n {{ end }}\n smtp-password: {{ default \"\" .Values.smtpPassword | b64enc | quote }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"orangehrm.fullname\" . }}\n labels:\n app: {{ template \"orangehrm.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.service.type }}\n {{- if (or (eq .Values.service.type \"LoadBalancer\") (eq .Values.service.type \"NodePort\")) }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}\n {{- end }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n targetPort: http\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n - name: https\n port: {{ .Values.service.httpsPort }}\n targetPort: https\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.https)))}}\n nodePort: {{ .Values.service.nodePorts.https }}\n {{- end }} \n selector:\n app: {{ template \"orangehrm.fullname\" . }}\n"
] | ## Global Docker image parameters
## Please note that these will override the image parameters, including those of dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami OrangeHRM image version
## ref: https://hub.docker.com/r/bitnami/orangehrm/tags/
##
image:
registry: docker.io
repository: bitnami/orangehrm
tag: 4.3.4-0-debian-10-r26
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override orangehrm.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override orangehrm.fullname template
##
# fullnameOverride:
## User of the application
## ref: https://github.com/bitnami/bitnami-docker-orangehrm#configuration
##
orangehrmUsername: admin
## Application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/bitnami-docker-orangehrm#configuration
##
# orangehrmPassword:
## Set to `yes` to allow the container to be started with blank passwords
## ref: https://github.com/bitnami/bitnami-docker-orangehrm#environment-variables
allowEmptyPassword: "yes"
##
## External database configuration
##
externalDatabase:
## Database host
host:
## Database port
port: 3306
## Database user
user: bn_orangehrm
## Database password
password:
## Database name
database: bitnami_orangehrm
## SMTP mail delivery configuration
## ref: https://github.com/bitnami/bitnami-docker-orangehrm/#smtp-configuration
# smtpHost:
# smtpPort:
# smtpUser:
# smtpPassword:
# smtpProtocol:
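## Example (illustrative):
# smtpHost: smtp.example.com
# smtpPort: "587"
# smtpUser: my-user@example.com
# smtpProtocol: tls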
##
## MariaDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml
##
mariadb:
## Whether to deploy a MariaDB server to satisfy the application's database requirements. To use an external database, set this to false and configure the externalDatabase parameters
enabled: true
## Disable MariaDB replication
replication:
enabled: false
## Create a database and a database user
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run
##
db:
name: bitnami_orangehrm
user: bn_orangehrm
## If the password is not specified, MariaDB will generate a random password
##
# password:
## MariaDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run
##
# rootUser:
# password:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
master:
persistence:
enabled: true
## mariadb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Kubernetes configuration
## For minikube, set this to NodePort; elsewhere use LoadBalancer or ClusterIP
##
service:
type: LoadBalancer
# HTTP Port
port: 80
# HTTPS Port
httpsPort: 443
##
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
## https: <to set explicitly, choose port between 30000-32767>
nodePorts:
http: ""
https: ""
## Enable client source IP preservation
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
orangehrm:
## orangehrm data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 512Mi
cpu: 300m
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Configure the ingress resource that allows you to access the
## OrangeHRM installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- name: orangehrm.local
path: /
## Set this to true in order to enable TLS on the ingress record
tls: false
## Optionally specify the TLS hosts for the ingress record
## Useful when the Ingress controller supports www-redirection
## If not specified, the above host name will be used
# tlsHosts:
# - www.orangehrm.local
# - orangehrm.local
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: orangehrm.local-tls
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: orangehrm.local-tls
# key:
# certificate:
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
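## Example (illustrative) of a node-affinity rule restricting pods to Linux nodes:
# affinity:
#   nodeAffinity:
#     requiredDuringSchedulingIgnoredDuringExecution:
#       nodeSelectorTerms:
#         - matchExpressions:
#             - key: kubernetes.io/os
#               operator: In
#               values:
#                 - linux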
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.7.0-debian-10-r30
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter pod Annotation and Labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9117"
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
|
postgresql | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"postgresql.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"postgresql.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"postgresql.master.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- $fullname := default (printf \"%s-%s\" .Release.Name $name) .Values.fullnameOverride -}}\n{{- if .Values.replication.enabled -}}\n{{- printf \"%s-%s\" $fullname \"master\" | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s\" $fullname | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for networkpolicy.\n*/}}\n{{- define \"postgresql.networkPolicy.apiVersion\" -}}\n{{- if semverCompare \">=1.4-0, <1.7-0\" .Capabilities.KubeVersion.GitVersion -}}\n\"extensions/v1beta1\"\n{{- else if semverCompare \"^1.7-0\" .Capabilities.KubeVersion.GitVersion -}}\n\"networking.k8s.io/v1\"\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"postgresql.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the proper PostgreSQL image name\n*/}}\n{{- define \"postgresql.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn PostgreSQL postgres user password\n*/}}\n{{- define \"postgresql.postgres.password\" -}}\n{{- if .Values.global.postgresql.postgresqlPostgresPassword }}\n {{- .Values.global.postgresql.postgresqlPostgresPassword -}}\n{{- else if .Values.postgresqlPostgresPassword -}}\n {{- .Values.postgresqlPostgresPassword -}}\n{{- else -}}\n {{- randAlphaNum 10 -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn PostgreSQL password\n*/}}\n{{- define \"postgresql.password\" -}}\n{{- if .Values.global.postgresql.postgresqlPassword }}\n {{- .Values.global.postgresql.postgresqlPassword -}}\n{{- else if .Values.postgresqlPassword -}}\n {{- .Values.postgresqlPassword -}}\n{{- else -}}\n {{- randAlphaNum 10 
-}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn PostgreSQL replication password\n*/}}\n{{- define \"postgresql.replication.password\" -}}\n{{- if .Values.global.postgresql.replicationPassword }}\n {{- .Values.global.postgresql.replicationPassword -}}\n{{- else if .Values.replication.password -}}\n {{- .Values.replication.password -}}\n{{- else -}}\n {{- randAlphaNum 10 -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn PostgreSQL username\n*/}}\n{{- define \"postgresql.username\" -}}\n{{- if .Values.global.postgresql.postgresqlUsername }}\n {{- .Values.global.postgresql.postgresqlUsername -}}\n{{- else -}}\n {{- .Values.postgresqlUsername -}}\n{{- end -}}\n{{- end -}}\n\n\n{{/*\nReturn PostgreSQL replication username\n*/}}\n{{- define \"postgresql.replication.username\" -}}\n{{- if .Values.global.postgresql.replicationUser }}\n {{- .Values.global.postgresql.replicationUser -}}\n{{- else -}}\n {{- .Values.replication.user -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn PostgreSQL port\n*/}}\n{{- define \"postgresql.port\" -}}\n{{- if .Values.global.postgresql.servicePort }}\n {{- .Values.global.postgresql.servicePort -}}\n{{- else -}}\n {{- .Values.service.port -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn PostgreSQL created database\n*/}}\n{{- define \"postgresql.database\" -}}\n{{- if .Values.global.postgresql.postgresqlDatabase }}\n {{- .Values.global.postgresql.postgresqlDatabase -}}\n{{- else if .Values.postgresqlDatabase -}}\n {{- .Values.postgresqlDatabase -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name to change the volume permissions\n*/}}\n{{- define \"postgresql.volumePermissions.image\" -}}\n{{- $registryName := .Values.volumePermissions.image.registry -}}\n{{- $repositoryName := .Values.volumePermissions.image.repository -}}\n{{- $tag := .Values.volumePermissions.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper PostgreSQL metrics image name\n*/}}\n{{- define \"postgresql.metrics.image\" -}}\n{{- $registryName := default \"docker.io\" .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := default \"latest\" .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGet the password secret.\n*/}}\n{{- define \"postgresql.secretName\" -}}\n{{- if .Values.global.postgresql.existingSecret }}\n {{- printf \"%s\" 
.Values.global.postgresql.existingSecret -}}\n{{- else if .Values.existingSecret -}}\n {{- printf \"%s\" .Values.existingSecret -}}\n{{- else -}}\n {{- printf \"%s\" (include \"postgresql.fullname\" .) -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn true if a secret object should be created\n*/}}\n{{- define \"postgresql.createSecret\" -}}\n{{- if .Values.global.postgresql.existingSecret }}\n{{- else if .Values.existingSecret -}}\n{{- else -}}\n {{- true -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGet the configuration ConfigMap name.\n*/}}\n{{- define \"postgresql.configurationCM\" -}}\n{{- if .Values.configurationConfigMap -}}\n{{- printf \"%s\" (tpl .Values.configurationConfigMap $) -}}\n{{- else -}}\n{{- printf \"%s-configuration\" (include \"postgresql.fullname\" .) -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGet the extended configuration ConfigMap name.\n*/}}\n{{- define \"postgresql.extendedConfigurationCM\" -}}\n{{- if .Values.extendedConfConfigMap -}}\n{{- printf \"%s\" (tpl .Values.extendedConfConfigMap $) -}}\n{{- else -}}\n{{- printf \"%s-extended-configuration\" (include \"postgresql.fullname\" .) -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGet the initialization scripts ConfigMap name.\n*/}}\n{{- define \"postgresql.initdbScriptsCM\" -}}\n{{- if .Values.initdbScriptsConfigMap -}}\n{{- printf \"%s\" (tpl .Values.initdbScriptsConfigMap $) -}}\n{{- else -}}\n{{- printf \"%s-init-scripts\" (include \"postgresql.fullname\" .) -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGet the initialization scripts Secret name.\n*/}}\n{{- define \"postgresql.initdbScriptsSecret\" -}}\n{{- printf \"%s\" (tpl .Values.initdbScriptsSecret $) -}}\n{{- end -}}\n\n{{/*\nGet the metrics ConfigMap name.\n*/}}\n{{- define \"postgresql.metricsCM\" -}}\n{{- printf \"%s-metrics\" (include \"postgresql.fullname\" .) -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"postgresql.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.volumePermissions.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.volumePermissions.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGet the readiness probe command\n*/}}\n{{- define \"postgresql.readinessProbeCommand\" -}}\n- |\n{{- if (include \"postgresql.database\" .) }}\n exec pg_isready -U {{ include \"postgresql.username\" . | quote }} -d {{ (include \"postgresql.database\" .) | quote }} -h 127.0.0.1 -p {{ template \"postgresql.port\" . 
}}\n{{- else }}\n exec pg_isready -U {{ include \"postgresql.username\" . | quote }} -h 127.0.0.1 -p {{ template \"postgresql.port\" . }}\n{{- end }}\n{{- if contains \"bitnami/\" .Values.image.repository }}\n [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"postgresql.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nRenders a value that contains template.\nUsage:\n{{ include \"postgresql.tplValue\" ( dict \"value\" .Values.path.to.the.Value \"context\" $) }}\n*/}}\n{{- define \"postgresql.tplValue\" -}}\n {{- if typeIs \"string\" .value }}\n {{- tpl .value .context }}\n {{- else }}\n {{- tpl (.value | toYaml) .context }}\n {{- end }}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for statefulset.\n*/}}\n{{- define \"postgresql.statefulset.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"apps/v1beta2\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCompile all warnings into a single message, and call fail.\n*/}}\n{{- define \"postgresql.validateValues\" -}}\n{{- $messages := list -}}\n{{- $messages := append $messages (include \"postgresql.validateValues.ldapConfigurationMethod\" .) -}}\n{{- $messages := without $messages \"\" -}}\n{{- $message := join \"\\n\" $messages -}}\n\n{{- if $message -}}\n{{- printf \"\\nVALUES VALIDATION:\\n%s\" $message | fail -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nValidate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap\n*/}}\n{{- define \"postgresql.validateValues.ldapConfigurationMethod\" -}}\n{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }}\npostgresql: ldap.url, ldap.server\n You cannot set both `ldap.url` and `ldap.server` at the same time.\n Please provide a unique way to configure LDAP.\n More info at https://www.postgresql.org/docs/current/auth-ldap.html\n{{- end -}}\n{{- end -}}\n",
"# configmap.yaml\n{{ if and (or (.Files.Glob \"files/postgresql.conf\") (.Files.Glob \"files/pg_hba.conf\") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"postgresql.fullname\" . }}-configuration\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ndata:\n{{- if (.Files.Glob \"files/postgresql.conf\") }}\n{{ (.Files.Glob \"files/postgresql.conf\").AsConfig | indent 2 }}\n{{- else if .Values.postgresqlConfiguration }}\n postgresql.conf: |\n{{- range $key, $value := default dict .Values.postgresqlConfiguration }}\n {{ $key | snakecase }}={{ $value }}\n{{- end }}\n{{- end }}\n{{- if (.Files.Glob \"files/pg_hba.conf\") }}\n{{ (.Files.Glob \"files/pg_hba.conf\").AsConfig | indent 2 }}\n{{- else if .Values.pgHbaConfiguration }}\n pg_hba.conf: |\n{{ .Values.pgHbaConfiguration | indent 4 }}\n{{- end }}\n{{ end }}\n",
"# extended-config-configmap.yaml\n{{- if and (or (.Files.Glob \"files/conf.d/*.conf\") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"postgresql.fullname\" . }}-extended-configuration\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ndata:\n{{- with .Files.Glob \"files/conf.d/*.conf\" }}\n{{ .AsConfig | indent 2 }}\n{{- end }}\n{{ with .Values.postgresqlExtendedConf }}\n override.conf: |\n{{- range $key, $value := . }}\n {{ $key | snakecase }}={{ $value }}\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# initialization-configmap.yaml\n{{- if and (or (.Files.Glob \"files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}\") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"postgresql.fullname\" . }}-init-scripts\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- with .Files.Glob \"files/docker-entrypoint-initdb.d/*.sql.gz\" }}\nbinaryData:\n{{- range $path, $bytes := . }}\n {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }}\n{{- end }}\n{{- end }}\ndata:\n{{- with .Files.Glob \"files/docker-entrypoint-initdb.d/*.{sh,sql}\" }}\n{{ .AsConfig | indent 2 }}\n{{- end }}\n{{- with .Values.initdbScripts }}\n{{ toYaml . | indent 2 }}\n{{- end }}\n{{- end }}\n",
"# metrics-configmap.yaml\n{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"postgresql.metricsCM\" . }}\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ndata:\n custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }}\n{{- end }}\n",
"# metrics-svc.yaml\n{{- if .Values.metrics.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"postgresql.fullname\" . }}-metrics\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n annotations:\n{{ toYaml .Values.metrics.service.annotations | indent 4 }}\nspec:\n type: {{ .Values.metrics.service.type }}\n {{- if and (eq .Values.metrics.service.type \"LoadBalancer\") .Values.metrics.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }}\n {{- end }}\n ports:\n - name: http-metrics\n port: 9187\n targetPort: http-metrics\n selector:\n app: {{ template \"postgresql.name\" . }}\n release: {{ .Release.Name }}\n role: master\n{{- end }}\n",
"# networkpolicy.yaml\n{{- if .Values.networkPolicy.enabled }}\nkind: NetworkPolicy\napiVersion: {{ template \"postgresql.networkPolicy.apiVersion\" . }}\nmetadata:\n name: {{ template \"postgresql.fullname\" . }}\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n podSelector:\n matchLabels:\n app: {{ template \"postgresql.name\" . }}\n release: {{ .Release.Name | quote }}\n ingress:\n # Allow inbound connections\n - ports:\n - port: {{ template \"postgresql.port\" . }}\n {{- if not .Values.networkPolicy.allowExternal }}\n from:\n - podSelector:\n matchLabels:\n {{ template \"postgresql.fullname\" . }}-client: \"true\"\n {{- if .Values.networkPolicy.explicitNamespacesSelector }}\n namespaceSelector:\n{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }}\n {{- end }}\n - podSelector:\n matchLabels:\n app: {{ template \"postgresql.name\" . }}\n release: {{ .Release.Name | quote }}\n role: slave\n {{- end }}\n # Allow prometheus scrapes\n - ports:\n - port: 9187\n{{- end }}\n",
"# prometheusrule.yaml\n{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n name: {{ template \"postgresql.fullname\" . }}\n{{- with .Values.metrics.prometheusRule.namespace }}\n namespace: {{ . }}\n{{- end }}\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- with .Values.metrics.prometheusRule.additionalLabels }}\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- with .Values.metrics.prometheusRule.rules }}\n groups:\n - name: {{ template \"postgresql.name\" $ }}\n rules: {{ tpl (toYaml .) $ | nindent 8 }}\n{{- end }}\n{{- end }}\n",
"# secrets.yaml\n{{- if (include \"postgresql.createSecret\" .) }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"postgresql.fullname\" . }}\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ntype: Opaque\ndata:\n {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername \"postgres\")) }}\n postgresql-postgres-password: {{ include \"postgresql.postgres.password\" . | b64enc | quote }}\n {{- end }}\n postgresql-password: {{ include \"postgresql.password\" . | b64enc | quote }}\n {{- if .Values.replication.enabled }}\n postgresql-replication-password: {{ include \"postgresql.replication.password\" . | b64enc | quote }}\n {{- end }}\n {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}}\n postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }}\n {{- end }}\n{{- end -}}\n",
"# serviceaccount.yaml\n{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n name: {{ template \"postgresql.fullname\" . }}\n{{- end }}",
"# servicemonitor.yaml\n{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ include \"postgresql.fullname\" . }}\n {{- if .Values.metrics.serviceMonitor.namespace }}\n namespace: {{ .Values.metrics.serviceMonitor.namespace }}\n {{- end }}\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n {{- if .Values.metrics.serviceMonitor.additionalLabels }}\n{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }}\n {{- end }}\nspec:\n endpoints:\n - port: http-metrics\n {{- if .Values.metrics.serviceMonitor.interval }}\n interval: {{ .Values.metrics.serviceMonitor.interval }}\n {{- end }}\n {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}\n scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}\n {{- end }}\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n selector:\n matchLabels:\n app: {{ template \"postgresql.name\" . }}\n release: {{ .Release.Name }}\n{{- end }}\n",
"# statefulset-slaves.yaml\n{{- if .Values.replication.enabled }}\napiVersion: {{ template \"postgresql.statefulset.apiVersion\" . }}\nkind: StatefulSet\nmetadata:\n name: \"{{ template \"postgresql.fullname\" . }}-slave\"\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- with .Values.slave.labels }}\n{{ toYaml . | indent 4 }}\n{{- end }}\n{{- with .Values.slave.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n serviceName: {{ template \"postgresql.fullname\" . }}-headless\n replicas: {{ .Values.replication.slaveReplicas }}\n selector:\n matchLabels:\n app: {{ template \"postgresql.name\" . }}\n release: {{ .Release.Name | quote }}\n role: slave\n template:\n metadata:\n name: {{ template \"postgresql.fullname\" . }}\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n role: slave\n{{- with .Values.slave.podLabels }}\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.slave.podAnnotations }}\n annotations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n spec:\n {{- if .Values.schedulerName }}\n schedulerName: \"{{ .Values.schedulerName }}\"\n {{- end }}\n{{- include \"postgresql.imagePullSecrets\" . | indent 6 }}\n {{- if .Values.slave.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.slave.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.slave.affinity }}\n affinity:\n{{ toYaml .Values.slave.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.slave.tolerations }}\n tolerations:\n{{ toYaml .Values.slave.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.terminationGracePeriodSeconds }}\n terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}\n {{- end }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.securityContext.fsGroup }}\n {{- end }}\n {{- if .Values.serviceAccount.enabled }}\n serviceAccountName: {{ default (include \"postgresql.fullname\" . ) .Values.serviceAccount.name}}\n {{- end }}\n {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }}\n initContainers:\n {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }}\n - name: init-chmod-data\n image: {{ template \"postgresql.volumePermissions.image\" . 
}}\n imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}\n {{- if .Values.resources }}\n resources: {{- toYaml .Values.resources | nindent 12 }}\n {{- end }}\n command:\n - /bin/sh\n - -cx\n - |\n {{ if .Values.persistence.enabled }}\n mkdir -p {{ .Values.persistence.mountPath }}/data\n chmod 700 {{ .Values.persistence.mountPath }}/data\n find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name \".snapshot\" -not -name \"lost+found\" | \\\n {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) \"auto\" }}\n xargs chown -R `id -u`:`id -G | cut -d \" \" -f2`\n {{- else }}\n xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}\n {{- end }}\n {{- end }}\n {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }}\n chmod -R 777 /dev/shm\n {{- end }}\n {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) \"auto\" }}\n securityContext:\n {{- else }}\n securityContext:\n runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }}\n {{- end }}\n volumeMounts:\n {{ if .Values.persistence.enabled }}\n - name: data\n mountPath: {{ .Values.persistence.mountPath }}\n subPath: {{ .Values.persistence.subPath }}\n {{- end }}\n {{- if .Values.shmVolume.enabled }}\n - name: dshm\n mountPath: /dev/shm\n {{- end }}\n {{- end }}\n {{- if .Values.slave.extraInitContainers }}\n{{ tpl .Values.slave.extraInitContainers . | indent 8 }}\n {{- end }}\n {{- end }}\n {{- if .Values.slave.priorityClassName }}\n priorityClassName: {{ .Values.slave.priorityClassName }}\n {{- end }}\n containers:\n - name: {{ template \"postgresql.fullname\" . }}\n image: {{ template \"postgresql.image\" . }}\n imagePullPolicy: \"{{ .Values.image.pullPolicy }}\"\n {{- if .Values.resources }}\n resources: {{- toYaml .Values.resources | nindent 12 }}\n {{- end }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n env:\n - name: BITNAMI_DEBUG\n value: {{ ternary \"true\" \"false\" .Values.image.debug | quote }}\n - name: POSTGRESQL_VOLUME_DIR\n value: \"{{ .Values.persistence.mountPath }}\"\n - name: POSTGRESQL_PORT_NUMBER\n value: \"{{ template \"postgresql.port\" . }}\"\n {{- if .Values.persistence.mountPath }}\n - name: PGDATA\n value: {{ .Values.postgresqlDataDir | quote }}\n {{- end }}\n - name: POSTGRES_REPLICATION_MODE\n value: \"slave\"\n - name: POSTGRES_REPLICATION_USER\n value: {{ include \"postgresql.replication.username\" . | quote }}\n {{- if .Values.usePasswordFile }}\n - name: POSTGRES_REPLICATION_PASSWORD_FILE\n value: \"/opt/bitnami/postgresql/secrets/postgresql-replication-password\"\n {{- else }}\n - name: POSTGRES_REPLICATION_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"postgresql.secretName\" . }}\n key: postgresql-replication-password\n {{- end }}\n - name: POSTGRES_CLUSTER_APP_NAME\n value: {{ .Values.replication.applicationName }}\n - name: POSTGRES_MASTER_HOST\n value: {{ template \"postgresql.fullname\" . }}\n - name: POSTGRES_MASTER_PORT_NUMBER\n value: {{ include \"postgresql.port\" . | quote }}\n {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername \"postgres\")) }}\n {{- if .Values.usePasswordFile }}\n - name: POSTGRES_POSTGRES_PASSWORD_FILE\n value: \"/opt/bitnami/postgresql/secrets/postgresql-postgres-password\"\n {{- else }}\n - name: POSTGRES_POSTGRES_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"postgresql.secretName\" . 
}}\n key: postgresql-postgres-password\n {{- end }}\n {{- end }}\n {{- if .Values.usePasswordFile }}\n - name: POSTGRES_PASSWORD_FILE\n value: \"/opt/bitnami/postgresql/secrets/postgresql-password\"\n {{- else }}\n - name: POSTGRES_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"postgresql.secretName\" . }}\n key: postgresql-password\n {{- end }}\n ports:\n - name: tcp-postgresql\n containerPort: {{ template \"postgresql.port\" . }}\n {{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n exec:\n command:\n - /bin/sh\n - -c\n {{- if (include \"postgresql.database\" .) }}\n - exec pg_isready -U {{ include \"postgresql.username\" . | quote }} -d {{ (include \"postgresql.database\" .) | quote }} -h 127.0.0.1 -p {{ template \"postgresql.port\" . }}\n {{- else }}\n - exec pg_isready -U {{ include \"postgresql.username\" . | quote }} -h 127.0.0.1 -p {{ template \"postgresql.port\" . }}\n {{- end }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n exec:\n command:\n - /bin/sh\n - -c\n - -e\n {{- include \"postgresql.readinessProbeCommand\" . | nindent 16 }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n {{- end }}\n volumeMounts:\n {{- if .Values.usePasswordFile }}\n - name: postgresql-password\n mountPath: /opt/bitnami/postgresql/secrets/\n {{- end }}\n {{- if .Values.shmVolume.enabled }}\n - name: dshm\n mountPath: /dev/shm\n {{- end }}\n {{- if .Values.persistence.enabled }}\n - name: data\n mountPath: {{ .Values.persistence.mountPath }}\n subPath: {{ .Values.persistence.subPath }}\n {{ end }}\n {{- if or (.Files.Glob \"files/conf.d/*.conf\") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}\n - name: postgresql-extended-config\n mountPath: /bitnami/postgresql/conf/conf.d/\n {{- end }}\n {{- if or (.Files.Glob \"files/postgresql.conf\") (.Files.Glob \"files/pg_hba.conf\") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }}\n - name: postgresql-config\n mountPath: /bitnami/postgresql/conf\n {{- end }}\n {{- if .Values.slave.extraVolumeMounts }}\n {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }}\n {{- end }}\n{{- if .Values.slave.sidecars }}\n{{- include \"postgresql.tplValue\" ( dict \"value\" .Values.slave.sidecars \"context\" $ ) | nindent 8 }}\n{{- end }}\n volumes:\n {{- if .Values.usePasswordFile }}\n - name: postgresql-password\n secret:\n secretName: {{ template \"postgresql.secretName\" . }}\n {{- end }}\n {{- if or (.Files.Glob \"files/postgresql.conf\") (.Files.Glob \"files/pg_hba.conf\") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}}\n - name: postgresql-config\n configMap:\n name: {{ template \"postgresql.configurationCM\" . 
}}\n {{- end }}\n {{- if or (.Files.Glob \"files/conf.d/*.conf\") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}\n - name: postgresql-extended-config\n configMap:\n name: {{ template \"postgresql.extendedConfigurationCM\" . }}\n {{- end }}\n {{- if .Values.shmVolume.enabled }}\n - name: dshm\n emptyDir:\n medium: Memory\n sizeLimit: 1Gi\n {{- end }}\n {{- if not .Values.persistence.enabled }}\n - name: data\n emptyDir: {}\n {{- end }}\n {{- if .Values.slave.extraVolumes }}\n {{- toYaml .Values.slave.extraVolumes | nindent 8 }}\n {{- end }}\n updateStrategy:\n type: {{ .Values.updateStrategy.type }}\n {{- if (eq \"Recreate\" .Values.updateStrategy.type) }}\n rollingUpdate: null\n {{- end }}\n{{- if .Values.persistence.enabled }}\n volumeClaimTemplates:\n - metadata:\n name: data\n {{- with .Values.persistence.annotations }}\n annotations:\n {{- range $key, $value := . }}\n {{ $key }}: {{ $value }}\n {{- end }}\n {{- end }}\n spec:\n accessModes:\n {{- range .Values.persistence.accessModes }}\n - {{ . | quote }}\n {{- end }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{ include \"postgresql.storageClass\" . }}\n{{- end }}\n{{- end }}\n",
"# statefulset.yaml\napiVersion: {{ template \"postgresql.statefulset.apiVersion\" . }}\nkind: StatefulSet\nmetadata:\n name: {{ template \"postgresql.master.fullname\" . }}\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- with .Values.master.labels }}\n{{ toYaml . | indent 4 }}\n{{- end }}\n{{- with .Values.master.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n serviceName: {{ template \"postgresql.fullname\" . }}-headless\n replicas: 1\n updateStrategy:\n type: {{ .Values.updateStrategy.type }}\n {{- if (eq \"Recreate\" .Values.updateStrategy.type) }}\n rollingUpdate: null\n {{- end }}\n selector:\n matchLabels:\n app: {{ template \"postgresql.name\" . }}\n release: {{ .Release.Name | quote }}\n role: master\n template:\n metadata:\n name: {{ template \"postgresql.fullname\" . }}\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n role: master\n{{- with .Values.master.podLabels }}\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.master.podAnnotations }}\n annotations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n spec:\n {{- if .Values.schedulerName }}\n schedulerName: \"{{ .Values.schedulerName }}\"\n {{- end }}\n{{- include \"postgresql.imagePullSecrets\" . | indent 6 }}\n {{- if .Values.master.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.master.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.master.affinity }}\n affinity:\n{{ toYaml .Values.master.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.master.tolerations }}\n tolerations:\n{{ toYaml .Values.master.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.terminationGracePeriodSeconds }}\n terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}\n {{- end }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.securityContext.fsGroup }}\n {{- end }}\n {{- if .Values.serviceAccount.enabled }}\n serviceAccountName: {{ default (include \"postgresql.fullname\" . ) .Values.serviceAccount.name }}\n {{- end }}\n {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }}\n initContainers:\n {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }}\n - name: init-chmod-data\n image: {{ template \"postgresql.volumePermissions.image\" . 
}}\n imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}\n {{- if .Values.resources }}\n resources: {{- toYaml .Values.resources | nindent 12 }}\n {{- end }}\n command:\n - /bin/sh\n - -cx\n - |\n {{ if .Values.persistence.enabled }}\n mkdir -p {{ .Values.persistence.mountPath }}/data\n chmod 700 {{ .Values.persistence.mountPath }}/data\n find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name \".snapshot\" -not -name \"lost+found\" | \\\n {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) \"auto\" }}\n xargs chown -R `id -u`:`id -G | cut -d \" \" -f2`\n {{- else }}\n xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}\n {{- end }}\n {{- end }}\n {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }}\n chmod -R 777 /dev/shm\n {{- end }}\n {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) \"auto\" }}\n securityContext:\n {{- else }}\n securityContext:\n runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }}\n {{- end }}\n volumeMounts:\n {{ if .Values.persistence.enabled }}\n - name: data\n mountPath: {{ .Values.persistence.mountPath }}\n subPath: {{ .Values.persistence.subPath }}\n {{- end }}\n {{- if .Values.shmVolume.enabled }}\n - name: dshm\n mountPath: /dev/shm\n {{- end }}\n {{- end }}\n {{- if .Values.master.extraInitContainers }}\n{{ tpl .Values.master.extraInitContainers . | indent 8 }}\n {{- end }}\n {{- end }}\n {{- if .Values.master.priorityClassName }}\n priorityClassName: {{ .Values.master.priorityClassName }}\n {{- end }}\n containers:\n - name: {{ template \"postgresql.fullname\" . }}\n image: {{ template \"postgresql.image\" . }}\n imagePullPolicy: \"{{ .Values.image.pullPolicy }}\"\n {{- if .Values.resources }}\n resources: {{- toYaml .Values.resources | nindent 12 }}\n {{- end }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n env:\n - name: BITNAMI_DEBUG\n value: {{ ternary \"true\" \"false\" .Values.image.debug | quote }}\n - name: POSTGRESQL_PORT_NUMBER\n value: \"{{ template \"postgresql.port\" . }}\"\n - name: POSTGRESQL_VOLUME_DIR\n value: \"{{ .Values.persistence.mountPath }}\"\n {{- if .Values.postgresqlInitdbArgs }}\n - name: POSTGRES_INITDB_ARGS\n value: {{ .Values.postgresqlInitdbArgs | quote }}\n {{- end }}\n {{- if .Values.postgresqlInitdbWalDir }}\n - name: POSTGRES_INITDB_WALDIR\n value: {{ .Values.postgresqlInitdbWalDir | quote }}\n {{- end }}\n {{- if .Values.initdbUser }}\n - name: POSTGRESQL_INITSCRIPTS_USERNAME\n value: {{ .Values.initdbUser }}\n {{- end }}\n {{- if .Values.initdbPassword }}\n - name: POSTGRESQL_INITSCRIPTS_PASSWORD\n value: {{ .Values.initdbPassword | quote }}\n {{- end }}\n {{- if .Values.persistence.mountPath }}\n - name: PGDATA\n value: {{ .Values.postgresqlDataDir | quote }}\n {{- end }}\n {{- if .Values.replication.enabled }}\n - name: POSTGRES_REPLICATION_MODE\n value: \"master\"\n - name: POSTGRES_REPLICATION_USER\n value: {{ include \"postgresql.replication.username\" . | quote }}\n {{- if .Values.usePasswordFile }}\n - name: POSTGRES_REPLICATION_PASSWORD_FILE\n value: \"/opt/bitnami/postgresql/secrets/postgresql-replication-password\"\n {{- else }}\n - name: POSTGRES_REPLICATION_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"postgresql.secretName\" . 
}}\n key: postgresql-replication-password\n {{- end }}\n {{- if not (eq .Values.replication.synchronousCommit \"off\")}}\n - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE\n value: {{ .Values.replication.synchronousCommit | quote }}\n - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS\n value: {{ .Values.replication.numSynchronousReplicas | quote }}\n {{- end }}\n - name: POSTGRES_CLUSTER_APP_NAME\n value: {{ .Values.replication.applicationName }}\n {{- end }}\n {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername \"postgres\")) }}\n {{- if .Values.usePasswordFile }}\n - name: POSTGRES_POSTGRES_PASSWORD_FILE\n value: \"/opt/bitnami/postgresql/secrets/postgresql-postgres-password\"\n {{- else }}\n - name: POSTGRES_POSTGRES_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"postgresql.secretName\" . }}\n key: postgresql-postgres-password\n {{- end }}\n {{- end }}\n - name: POSTGRES_USER\n value: {{ include \"postgresql.username\" . | quote }}\n {{- if .Values.usePasswordFile }}\n - name: POSTGRES_PASSWORD_FILE\n value: \"/opt/bitnami/postgresql/secrets/postgresql-password\"\n {{- else }}\n - name: POSTGRES_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"postgresql.secretName\" . }}\n key: postgresql-password\n {{- end }}\n {{- if (include \"postgresql.database\" .) }}\n - name: POSTGRES_DB\n value: {{ (include \"postgresql.database\" .) | quote }}\n {{- end }}\n {{- if .Values.extraEnv }}\n {{- include \"postgresql.tplValue\" (dict \"value\" .Values.extraEnv \"context\" $) | nindent 12 }}\n {{- end }}\n - name: POSTGRESQL_ENABLE_LDAP\n value: {{ ternary \"yes\" \"no\" .Values.ldap.enabled | quote }}\n {{- if .Values.ldap.enabled }}\n - name: POSTGRESQL_LDAP_SERVER\n value: {{ .Values.ldap.server }}\n - name: POSTGRESQL_LDAP_PORT\n value: {{ .Values.ldap.port | quote }}\n - name: POSTGRESQL_LDAP_SCHEME\n value: {{ .Values.ldap.scheme }}\n {{- if .Values.ldap.tls }}\n - name: POSTGRESQL_LDAP_TLS\n value: \"1\"\n {{- end}}\n - name: POSTGRESQL_LDAP_PREFIX\n value: {{ .Values.ldap.prefix | quote }}\n - name: POSTGRESQL_LDAP_SUFFIX\n value: {{ .Values.ldap.suffix | quote}}\n - name: POSTGRESQL_LDAP_BASE_DN\n value: {{ .Values.ldap.baseDN }}\n - name: POSTGRESQL_LDAP_BIND_DN\n value: {{ .Values.ldap.bindDN }}\n {{- if (not (empty .Values.ldap.bind_password)) }}\n - name: POSTGRESQL_LDAP_BIND_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"postgresql.secretName\" . }}\n key: postgresql-ldap-password\n {{- end}}\n - name: POSTGRESQL_LDAP_SEARCH_ATTR\n value: {{ .Values.ldap.search_attr }}\n - name: POSTGRESQL_LDAP_SEARCH_FILTER\n value: {{ .Values.ldap.search_filter }}\n - name: POSTGRESQL_LDAP_URL\n value: {{ .Values.ldap.url }}\n {{- end}}\n {{- if .Values.extraEnvVarsCM }}\n envFrom:\n - configMapRef:\n name: {{ .Values.extraEnvVarsCM }}\n {{- end }}\n ports:\n - name: tcp-postgresql\n containerPort: {{ template \"postgresql.port\" . }}\n {{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n exec:\n command:\n - /bin/sh\n - -c\n {{- if (include \"postgresql.database\" .) }}\n - exec pg_isready -U {{ include \"postgresql.username\" . | quote }} -d {{ (include \"postgresql.database\" .) | quote }} -h 127.0.0.1 -p {{ template \"postgresql.port\" . }}\n {{- else }}\n - exec pg_isready -U {{ include \"postgresql.username\" . | quote }} -h 127.0.0.1 -p {{ template \"postgresql.port\" . 
}}\n {{- end }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n exec:\n command:\n - /bin/sh\n - -c\n - -e\n {{- include \"postgresql.readinessProbeCommand\" . | nindent 16 }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n {{- end }}\n volumeMounts:\n {{- if or (.Files.Glob \"files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}\") .Values.initdbScriptsConfigMap .Values.initdbScripts }}\n - name: custom-init-scripts\n mountPath: /docker-entrypoint-initdb.d/\n {{- end }}\n {{- if .Values.initdbScriptsSecret }}\n - name: custom-init-scripts-secret\n mountPath: /docker-entrypoint-initdb.d/secret\n {{- end }}\n {{- if or (.Files.Glob \"files/conf.d/*.conf\") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}\n - name: postgresql-extended-config\n mountPath: /bitnami/postgresql/conf/conf.d/\n {{- end }}\n {{- if .Values.usePasswordFile }}\n - name: postgresql-password\n mountPath: /opt/bitnami/postgresql/secrets/\n {{- end }}\n {{- if .Values.shmVolume.enabled }}\n - name: dshm\n mountPath: /dev/shm\n {{- end }}\n {{- if .Values.persistence.enabled }}\n - name: data\n mountPath: {{ .Values.persistence.mountPath }}\n subPath: {{ .Values.persistence.subPath }}\n {{- end }}\n {{- if or (.Files.Glob \"files/postgresql.conf\") (.Files.Glob \"files/pg_hba.conf\") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }}\n - name: postgresql-config\n mountPath: /bitnami/postgresql/conf\n {{- end }}\n {{- if .Values.master.extraVolumeMounts }}\n {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }}\n {{- end }}\n{{- if .Values.master.sidecars }}\n{{- include \"postgresql.tplValue\" ( dict \"value\" .Values.master.sidecars \"context\" $ ) | nindent 8 }}\n{{- end }}\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"postgresql.metrics.image\" . }}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n {{- if .Values.metrics.securityContext.enabled }}\n securityContext:\n runAsUser: {{ .Values.metrics.securityContext.runAsUser }}\n {{- end }}\n env:\n {{- $database := required \"In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)\" (include \"postgresql.database\" .) }}\n - name: DATA_SOURCE_URI\n value: {{ printf \"127.0.0.1:%d/%s?sslmode=disable\" (int (include \"postgresql.port\" .)) $database | quote }}\n {{- if .Values.usePasswordFile }}\n - name: DATA_SOURCE_PASS_FILE\n value: \"/opt/bitnami/postgresql/secrets/postgresql-password\"\n {{- else }}\n - name: DATA_SOURCE_PASS\n valueFrom:\n secretKeyRef:\n name: {{ template \"postgresql.secretName\" . }}\n key: postgresql-password\n {{- end }}\n - name: DATA_SOURCE_USER\n value: {{ template \"postgresql.username\" . 
}}\n {{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /\n port: http-metrics\n initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /\n port: http-metrics\n initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}\n {{- end }}\n volumeMounts:\n {{- if .Values.usePasswordFile }}\n - name: postgresql-password\n mountPath: /opt/bitnami/postgresql/secrets/\n {{- end }}\n {{- if .Values.metrics.customMetrics }}\n - name: custom-metrics\n mountPath: /conf\n readOnly: true\n args: [\"--extend.query-path\", \"/conf/custom-metrics.yaml\"]\n {{- end }}\n ports:\n - name: http-metrics\n containerPort: 9187\n {{- if .Values.metrics.resources }}\n resources: {{- toYaml .Values.metrics.resources | nindent 12 }}\n {{- end }}\n{{- end }}\n volumes:\n {{- if or (.Files.Glob \"files/postgresql.conf\") (.Files.Glob \"files/pg_hba.conf\") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}}\n - name: postgresql-config\n configMap:\n name: {{ template \"postgresql.configurationCM\" . }}\n {{- end }}\n {{- if or (.Files.Glob \"files/conf.d/*.conf\") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }}\n - name: postgresql-extended-config\n configMap:\n name: {{ template \"postgresql.extendedConfigurationCM\" . }}\n {{- end }}\n {{- if .Values.usePasswordFile }}\n - name: postgresql-password\n secret:\n secretName: {{ template \"postgresql.secretName\" . }}\n {{- end }}\n {{- if or (.Files.Glob \"files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}\") .Values.initdbScriptsConfigMap .Values.initdbScripts }}\n - name: custom-init-scripts\n configMap:\n name: {{ template \"postgresql.initdbScriptsCM\" . }}\n {{- end }}\n {{- if .Values.initdbScriptsSecret }}\n - name: custom-init-scripts-secret\n secret:\n secretName: {{ template \"postgresql.initdbScriptsSecret\" . }}\n {{- end }}\n {{- if .Values.master.extraVolumes }}\n {{- toYaml .Values.master.extraVolumes | nindent 8 }}\n {{- end }}\n {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }}\n - name: custom-metrics\n configMap:\n name: {{ template \"postgresql.metricsCM\" . }}\n {{- end }}\n {{- if .Values.shmVolume.enabled }}\n - name: dshm\n emptyDir:\n medium: Memory\n sizeLimit: 1Gi\n {{- end }}\n{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }}\n - name: data\n persistentVolumeClaim:\n{{- with .Values.persistence.existingClaim }}\n claimName: {{ tpl . $ }}\n{{- end }}\n{{- else if not .Values.persistence.enabled }}\n - name: data\n emptyDir: {}\n{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}\n volumeClaimTemplates:\n - metadata:\n name: data\n {{- with .Values.persistence.annotations }}\n annotations:\n {{- range $key, $value := . 
}}\n {{ $key }}: {{ $value }}\n {{- end }}\n {{- end }}\n spec:\n accessModes:\n {{- range .Values.persistence.accessModes }}\n - {{ . | quote }}\n {{- end }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{ include \"postgresql.storageClass\" . }}\n{{- end }}\n",
"# svc-headless.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"postgresql.fullname\" . }}-headless\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n type: ClusterIP\n clusterIP: None\n ports:\n - name: tcp-postgresql\n port: {{ template \"postgresql.port\" . }}\n targetPort: tcp-postgresql\n selector:\n app: {{ template \"postgresql.name\" . }}\n release: {{ .Release.Name | quote }}\n",
"# svc-read.yaml\n{{- if .Values.replication.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"postgresql.fullname\" . }}-read\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- with .Values.service.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n {{- if and .Values.service.loadBalancerIP (eq .Values.service.type \"LoadBalancer\") }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n {{- end }}\n ports:\n - name: tcp-postgresql\n port: {{ template \"postgresql.port\" . }}\n targetPort: tcp-postgresql\n {{- if .Values.service.nodePort }}\n nodePort: {{ .Values.service.nodePort }}\n {{- end }}\n selector:\n app: {{ template \"postgresql.name\" . }}\n release: {{ .Release.Name | quote }}\n role: slave\n{{- end }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"postgresql.fullname\" . }}\n labels:\n app: {{ template \"postgresql.name\" . }}\n chart: {{ template \"postgresql.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- with .Values.service.annotations }}\n annotations:\n{{ tpl (toYaml .) $ | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n {{- if and .Values.service.loadBalancerIP (eq .Values.service.type \"LoadBalancer\") }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n {{- end }}\n {{- if and (eq .Values.service.type \"LoadBalancer\") .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n {{ with .Values.service.loadBalancerSourceRanges }}\n{{ toYaml . | indent 4 }}\n{{- end }}\n {{- end }}\n {{- if and (eq .Values.service.type \"ClusterIP\") .Values.service.clusterIP }}\n clusterIP: {{ .Values.service.clusterIP }}\n {{- end }}\n ports:\n - name: tcp-postgresql\n port: {{ template \"postgresql.port\" . }}\n targetPort: tcp-postgresql\n {{- if .Values.service.nodePort }}\n nodePort: {{ .Values.service.nodePort }}\n {{- end }}\n selector:\n app: {{ template \"postgresql.name\" . }}\n release: {{ .Release.Name | quote }}\n role: master\n"
] | ## Global Docker image parameters
## Please note that this will override the image parameters, including those of dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
global:
postgresql: {}
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami PostgreSQL image version
## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
##
image:
registry: docker.io
repository: bitnami/postgresql
tag: 11.7.0-debian-10-r9
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Set to true if you would like to see extra information in the logs
## It turns on BASH and NAMI debugging in minideb
## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
debug: false
## String to partially override postgresql.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override postgresql.fullname template
##
# fullnameOverride:
##
## Init container parameters:
## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup
##
volumePermissions:
enabled: false
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Init container Security Context
## Note: the chown of the data folder is done to securityContext.runAsUser
## and not the below volumePermissions.securityContext.runAsUser
## When runAsUser is set to the special value "auto", the init container will try to chown the
## data folder to an auto-determined user and group, using the commands: `id -u`:`id -G | cut -d" " -f2`
## "auto" is especially useful for OpenShift, which uses SCCs with dynamic user IDs (where 0 is not allowed).
## You may want to use volumePermissions.securityContext.runAsUser="auto" in combination with
## pod securityContext.enabled=false and shmVolume.chmod.enabled=false
##
securityContext:
runAsUser: 0
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## Pod Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccount:
enabled: false
## Name of an already existing service account. Setting this value disables the automatic service account creation.
# name:
replication:
enabled: false
user: repl_user
password: repl_password
slaveReplicas: 1
## Set synchronous commit mode: on, off, remote_apply, remote_write and local
## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL
synchronousCommit: "off"
## Out of the `slaveReplicas` defined above, set the number that will use synchronous replication
## NOTE: It cannot be > slaveReplicas
numSynchronousReplicas: 0
## Replication Cluster application name. Useful for defining multiple replication policies
applicationName: my_application
## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`)
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!)
# postgresqlPostgresPassword:
## PostgreSQL user (has superuser privileges if username is `postgres`)
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
postgresqlUsername: postgres
## PostgreSQL password
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run
##
# postgresqlPassword:
## PostgreSQL password from an existing secret
# existingSecret: secret
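## NOTE: the referenced secret must contain the key `postgresql-password`
## (plus `postgresql-replication-password` when replication is enabled), since those are the
## keys the StatefulSets read from it.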
## Mount the PostgreSQL secret as a file instead of passing it as an environment variable
# usePasswordFile: false
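## When enabled, the chart mounts the secret at /opt/bitnami/postgresql/secrets/ and points the
## containers at it through the *_FILE variants of the password environment variables.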
## Create a database
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run
##
# postgresqlDatabase:
## PostgreSQL data dir
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
postgresqlDataDir: /bitnami/postgresql/data
## An array to add extra environment variables
## For example:
## extraEnv:
## - name: FOO
## value: "bar"
##
# extraEnv:
extraEnv: []
## Name of a ConfigMap containing extra env vars
##
# extraEnvVarsCM:
## Specify extra initdb args
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
# postgresqlInitdbArgs:
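## e.g. (illustrative value):
# postgresqlInitdbArgs: "--data-checksums"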
## Specify a custom location for the PostgreSQL transaction log
## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md
##
# postgresqlInitdbWalDir:
## PostgreSQL configuration
## Specify runtime configuration parameters as a dict, using camelCase, e.g.
## {"sharedBuffers": "500MB"}
## Alternatively, you can put your postgresql.conf under the files/ directory
## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
##
# postgresqlConfiguration:
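## For example (illustrative values; each camelCase key is rendered to snake_case
## in the generated postgresql.conf, e.g. maxConnections -> max_connections):
# postgresqlConfiguration:
#   maxConnections: "200"
#   sharedBuffers: "500MB"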
## PostgreSQL extended configuration
## As above, but _appended_ to the main configuration
## Alternatively, you can put your *.conf under the files/conf.d/ directory
## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf
##
# postgresqlExtendedConf:
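## For example (illustrative value; rendered into an override.conf that is loaded from conf.d/,
## so it is appended to the main configuration):
# postgresqlExtendedConf:
#   logConnections: "on"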
## PostgreSQL client authentication configuration
## Specify content for pg_hba.conf
## Default: do not create pg_hba.conf
## Alternatively, you can put your pg_hba.conf under the files/ directory
# pgHbaConfiguration: |-
# local all all trust
# host all all localhost trust
# host mydatabase mysuser 192.168.0.0/24 md5
## ConfigMap with PostgreSQL configuration
## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration
# configurationConfigMap:
## ConfigMap with PostgreSQL extended configuration
# extendedConfConfigMap:
## initdb scripts
## Specify dictionary of scripts to be run at first boot
## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
##
# initdbScripts:
# my_init_script.sh: |
# #!/bin/sh
# echo "Do something."
## ConfigMap with scripts to be run at first boot
## NOTE: This will override initdbScripts
# initdbScriptsConfigMap:
## Secret with scripts to be run at first boot (in case it contains sensitive information)
## NOTE: This can work alongside initdbScripts or initdbScriptsConfigMap
# initdbScriptsSecret:
## Specify the PostgreSQL username and password to execute the initdb scripts
# initdbUser:
# initdbPassword:
## Optional duration in seconds the pod needs to terminate gracefully.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
##
# terminationGracePeriodSeconds: 30
## LDAP configuration
##
ldap:
enabled: false
url: ""
server: ""
port: ""
prefix: ""
suffix: ""
baseDN: ""
bindDN: ""
bind_password:
search_attr: ""
search_filter: ""
scheme: ""
tls: false
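## An illustrative (commented-out) search+bind setup; hostnames, DNs and the password are placeholders.
## NOTE: set either `url` or the individual `server`/`port`/... settings, not both; the chart's
## value validation (postgresql.validateValues.ldapConfigurationMethod) fails rendering otherwise.
# ldap:
#   enabled: true
#   server: ldap.example.org
#   port: "389"
#   baseDN: ou=people,dc=example,dc=org
#   bindDN: cn=pgsql,ou=services,dc=example,dc=org
#   bind_password: bindpassword
#   search_attr: uid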
## PostgreSQL service configuration
service:
## PostgreSQL service type
type: ClusterIP
# clusterIP: None
port: 5432
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required.
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
annotations: {}
## Set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
# loadBalancerIP:
## Load Balancer sources
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
# loadBalancerSourceRanges:
# - 10.10.10.0/24
## Start master and slave(s) pod(s) without limitations on shm memory.
## By default docker and containerd (and possibly other container runtimes)
## limit `/dev/shm` to `64M` (see e.g. the
## [docker issue](https://github.com/docker-library/postgres/issues/416) and the
## [containerd issue](https://github.com/containerd/containerd/issues/3654)),
## which may not be enough if PostgreSQL uses parallel workers heavily.
##
shmVolume:
## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove
## this limitation.
##
enabled: true
## Set to `true` to `chmod 777 /dev/shm` in an initContainer.
## This option is ignored if `volumePermissions.enabled` is `false`
##
chmod:
enabled: true
## PostgreSQL data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner (gp2 on AWS, standard on
## GKE, AWS & OpenStack).
##
persistence:
enabled: true
## A manually managed Persistent Volume and Claim
## If defined, PVC must be created manually before volume will be bound
## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
##
# existingClaim:
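## e.g. (illustrative name; the value is rendered as a template, as noted above):
# existingClaim: "{{ .Release.Name }}-postgresql-data"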
## The path the volume will be mounted at, useful when using different
## PostgreSQL images.
##
mountPath: /bitnami/postgresql
## The subdirectory of the volume to mount to; useful in dev environments
## and when using one PV for multiple services.
##
subPath: ""
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 8Gi
annotations: {}
## updateStrategy for the PostgreSQL StatefulSet and its slave StatefulSets
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy:
type: RollingUpdate
##
## PostgreSQL Master parameters
##
master:
## Node, affinity, tolerations, and priorityclass settings for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
nodeSelector: {}
affinity: {}
tolerations: []
labels: {}
annotations: {}
podLabels: {}
podAnnotations: {}
priorityClassName: ""
extraInitContainers: |
# - name: do-something
# image: busybox
# command: ['do', 'something']
## Additional PostgreSQL Master Volume mounts
##
extraVolumeMounts: []
## Additional PostgreSQL Master Volumes
##
extraVolumes: []
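## An illustrative (commented-out) pairing of the two settings above; names and paths
## are placeholders:
# extraVolumeMounts:
#   - name: extras
#     mountPath: /usr/share/extras
#     readOnly: true
# extraVolumes:
#   - name: extras
#     emptyDir: {}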
## Add sidecars to the pod
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecars: []
##
## PostgreSQL Slave parameters
##
slave:
## Node, affinity, tolerations, and priorityclass settings for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
nodeSelector: {}
affinity: {}
tolerations: []
labels: {}
annotations: {}
podLabels: {}
podAnnotations: {}
priorityClassName: ""
extraInitContainers: |
# - name: do-something
# image: busybox
# command: ['do', 'something']
## Additional PostgreSQL Slave Volume mounts
##
extraVolumeMounts: []
## Additional PostgreSQL Slave Volumes
##
extraVolumes: []
## Add sidecars to the pod
##
## For example:
## sidecars:
## - name: your-image-name
## image: your-image
## imagePullPolicy: Always
## ports:
## - name: portname
## containerPort: 1234
sidecars: []
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 256Mi
cpu: 250m
networkPolicy:
## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
##
enabled: false
## The Policy model to apply. When set to false, only pods with the correct
## client label will have network access to the port PostgreSQL is listening
## on. When true, PostgreSQL will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the
## networkPolicy's namespace and that carry the required client label can reach the DB.
## To make the DB accessible to clients in other namespaces, use this LabelSelector to
## select those namespaces; note that the networkPolicy's namespace must also be explicitly included.
##
# explicitNamespacesSelector:
# matchLabels:
# role: frontend
# matchExpressions:
# - {key: role, operator: In, values: [frontend]}
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure metrics exporter
##
metrics:
enabled: false
# resources: {}
service:
type: ClusterIP
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9187"
loadBalancerIP:
serviceMonitor:
enabled: false
additionalLabels: {}
# namespace: monitoring
# interval: 30s
# scrapeTimeout: 10s
## Custom PrometheusRule to be defined
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
prometheusRule:
enabled: false
additionalLabels: {}
namespace: ""
rules: []
## These are just example rules; please adapt them to your needs.
## Make sure to constrain the rules to the current postgresql service.
# - alert: HugeReplicationLag
# expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1
# for: 1m
# labels:
# severity: critical
# annotations:
# description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
# summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
image:
registry: docker.io
repository: bitnami/postgres-exporter
tag: 0.8.0-debian-10-r28
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Define additional custom metrics
## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file
# customMetrics:
# pg_database:
# query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
# metrics:
# - name:
# usage: "LABEL"
# description: "Name of the database"
# - size_bytes:
# usage: "GAUGE"
# description: "Size of the database in bytes"
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: false
runAsUser: 1001
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
|
inbucket | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"inbucket.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"inbucket.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"inbucket.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name for the tls secret.\n*/}}\n{{- define \"inbucket.tlsSecret\" -}}\n {{- if .Values.ingress.tls.existingSecret -}}\n {{- .Values.ingress.tls.existingSecret -}}\n {{- else -}}\n {{- template \"inbucket.fullname\" . -}}-tls\n {{- end -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"inbucket.name\" . }}-configmap\n labels:\n app.kubernetes.io/name: {{ include \"inbucket.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"inbucket.chart\" . }}\ndata:\n{{ toYaml .Values.extraEnv | indent 2 }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app.kubernetes.io/name: {{ include \"inbucket.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"inbucket.chart\" . }}\n name: {{ include \"inbucket.fullname\" . }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"inbucket.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n {{- if .Values.podAnnotations }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n labels:\n app.kubernetes.io/name: {{ include \"inbucket.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"inbucket.chart\" . }}\n spec:\n containers:\n - name: {{ include \"inbucket.name\" . }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: \"{{ .Values.image.pullPolicy }}\"\n envFrom:\n - configMapRef:\n name: {{ include \"inbucket.name\" . }}-configmap\n ports:\n - name: http\n containerPort: 9000\n protocol: TCP\n - name: smtp\n containerPort: 2500\n protocol: TCP\n - name: pop3\n containerPort: 1100\n protocol: TCP\n livenessProbe:\n tcpSocket:\n port: smtp\n initialDelaySeconds: 10\n timeoutSeconds: 5\n readinessProbe:\n tcpSocket:\n port: smtp\n initialDelaySeconds: 10\n timeoutSeconds: 5\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n",
"# inbucket-config-test.yaml\n\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"inbucket.name\" . }}-tests\n labels:\n app.kubernetes.io/name: {{ include \"inbucket.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"inbucket.chart\" . }}\ndata:\n run.sh: |-\n @test \"Testing Inbucket is accessible\" {\n curl --retry 48 --retry-delay 10 {{ include \"inbucket.fullname\" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.port.http }}\n }\n",
"# inbucket-test.yaml\napiVersion: v1\nkind: Pod\nmetadata:\n name: \"{{ include \"inbucket.name\" . }}-test-{{ randAlphaNum 5 | lower }}\"\n labels:\n app.kubernetes.io/name: {{ include \"inbucket.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"inbucket.chart\" . }}\n annotations:\n \"helm.sh/hook\": test-success\nspec:\n initContainers:\n - name: \"test-framework\"\n image: \"dduportal/bats:0.4.0\"\n command:\n - \"bash\"\n - \"-c\"\n - |\n set -ex\n # copy bats to tools dir\n cp -R /usr/local/libexec/ /tools/bats/\n volumeMounts:\n - mountPath: /tools\n name: tools\n containers:\n - name: {{ .Release.Name }}-test\n image: \"dduportal/bats:0.4.0\"\n command: [\"/tools/bats/bats\", \"-t\", \"/tests/run.sh\"]\n volumeMounts:\n - mountPath: /tests\n name: tests\n readOnly: true\n - mountPath: /tools\n name: tools\n volumes:\n - name: tests\n configMap:\n name: {{ include \"inbucket.name\" . }}-tests\n - name: tools\n emptyDir: {}\n restartPolicy: Never",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\n{{- $fullName := include \"inbucket.fullname\" . -}}\n{{- $path := .Values.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n{{- if .Values.ingress.annotations }}\n annotations:\n{{ toYaml .Values.ingress.annotations | indent 4 }}\n{{- end }}\n labels:\n app.kubernetes.io/name: {{ include \"inbucket.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"inbucket.chart\" . }}\n name: {{ $fullName }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $path }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: http\n {{- end }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\n labels:\n app.kubernetes.io/name: {{ include \"inbucket.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"inbucket.chart\" . }}\n name: {{ include \"inbucket.fullname\" . }}\nspec:\n type: \"{{ .Values.service.type }}\"\n clusterIP: \"{{ .Values.service.clusterIP }}\"\n{{- if .Values.service.externalIPs }}\n externalIPs:\n{{ toYaml .Values.service.externalIPs | indent 4 }}\n{{- end }}\n{{- if .Values.service.loadBalancerIP }}\n loadBalancerIP: \"{{ .Values.service.loadBalancerIP }}\"\n{{- end }}\n{{- if .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}\n{{- end }}\n ports:\n - name: http\n port: {{ .Values.service.port.http }}\n protocol: TCP\n targetPort: http\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePort.http))) }}\n nodePort: {{ .Values.service.nodePort.http }}\n {{- end }}\n - name: smtp\n port: {{ .Values.service.port.smtp }}\n protocol: TCP\n targetPort: smtp\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePort.smtp))) }}\n nodePort: {{ .Values.service.nodePort.smtp }}\n {{- end }}\n - name: pop3\n port: {{ .Values.service.port.pop3 }}\n protocol: TCP\n targetPort: pop3\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePort.pop3))) }}\n nodePort: {{ .Values.service.nodePort.pop3 }}\n {{- end }}\n selector:\n app.kubernetes.io/name: {{ include \"inbucket.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n"
] | image:
repository: jhillyerd/inbucket
tag: release-2.0.0
pullPolicy: IfNotPresent
service:
annotations: {}
clusterIP: ""
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
type: ClusterIP
port:
http: 9000
smtp: 2500
pop3: 1100
nodePort:
http: ""
smtp: ""
pop3: ""
extraEnv:
INBUCKET_LOGLEVEL: "info"
INBUCKET_MAILBOXNAMING: "local"
INBUCKET_SMTP_ADDR: "0.0.0.0:2500"
INBUCKET_SMTP_DOMAIN: "inbucket"
INBUCKET_SMTP_MAXRECIPIENTS: "200"
INBUCKET_SMTP_MAXMESSAGEBYTES: "10240000"
INBUCKET_SMTP_DEFAULTACCEPT: "true"
INBUCKET_SMTP_REJECTDOMAINS: ""
INBUCKET_SMTP_DEFAULTSTORE: "true"
INBUCKET_SMTP_DISCARDDOMAINS: ""
INBUCKET_SMTP_TIMEOUT: "300s"
INBUCKET_POP3_ADDR: "0.0.0.0:1100"
INBUCKET_POP3_DOMAIN: "inbucket"
INBUCKET_POP3_TIMEOUT: "600s"
INBUCKET_WEB_ADDR: "0.0.0.0:9000"
INBUCKET_WEB_UIDIR: "ui"
INBUCKET_WEB_GREETINGFILE: "ui/greeting.html"
INBUCKET_WEB_TEMPLATECACHE: "true"
INBUCKET_WEB_MAILBOXPROMPT: "@inbucket"
INBUCKET_WEB_COOKIEAUTHKEY: ""
INBUCKET_WEB_MONITORVISIBLE: "true"
INBUCKET_WEB_MONITORHISTORY: "30"
INBUCKET_STORAGE_TYPE: "memory"
INBUCKET_STORAGE_PARAMS: ""
INBUCKET_STORAGE_RETENTIONPERIOD: "24h"
INBUCKET_STORAGE_RETENTIONSLEEP: "50ms"
INBUCKET_STORAGE_MAILBOXMSGCAP: "500"
ingress:
enabled: false
annotations: {}
path: /
hosts:
- inbucket.example.com
tls: []
# - hosts:
# - inbucket.example.com
# secretName: tls-inbucket
podAnnotations: {}
resources: {}
|
vsphere-cpi | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"cpi.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec)\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"cpi.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a fully qualified daemonset name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"cpi.daemonset.name\" -}}\n{{- $nameGlobalOverride := printf \"%s-daemonset\" (include \"cpi.fullname\" .) -}}\n{{- if .Values.daemonset.fullnameOverride -}}\n{{- printf \"%s\" .Values.daemonset.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s\" $nameGlobalOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n\n{{- define \"api.binding\" -}}\n{{- printf \":%.0f\" .Values.service.endpointPort | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nConfigure list of IP CIDRs allowed access to load balancer (if supported)\n*/}}\n{{- define \"loadBalancerSourceRanges\" -}}\n{{- if .service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n {{- range $cidr := .service.loadBalancerSourceRanges }}\n - {{ $cidr }}\n {{- end }}\n{{- end }}\n{{- end -}}\n",
"# common.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"cpi.fullname\" . }}\n labels:\n app: {{ template \"cpi.name\" . }}\n vsphere-cpi-infra: common-configmap\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n component: cloud-controller-manager\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n namespace: {{ .Release.Namespace }}\ndata:\n api.binding: \"{{ template \"api.binding\" . }}\"\n",
"# configmap.yaml\n{{- if .Values.config.enabled | default .Values.global.config.enabled -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: cloud-config\n labels:\n app: {{ template \"cpi.name\" . }}\n vsphere-cpi-infra: cloud-config\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n component: cloud-controller\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n namespace: {{ .Release.Namespace }}\ndata:\n vsphere.conf: |\n # Global properties in this section will be used for all specified vCenters unless overriden in VirtualCenter section.\n global:\n port: 443\n # set insecure-flag to true if the vCenter uses a self-signed cert\n insecureFlag: true\n # settings for using k8s secret\n secretName: vsphere-cpi\n secretNamespace: {{ .Release.Namespace }}\n\n # VirtualCenter section\n vcenter:\n {{ .Release.Name }}:\n server: {{ .Values.config.vcenter | default .Values.global.config.vcenter }}\n datacenters:\n - {{ .Values.config.datacenter | default .Values.global.config.datacenter }}\n{{- end -}}\n",
"# daemonset.yaml\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: {{ template \"cpi.name\" . }}\n labels:\n app: {{ template \"cpi.name\" . }}\n vsphere-cpi-infra: daemonset\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n component: cloud-controller-manager\n tier: control-plane\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n namespace: {{ .Release.Namespace }}\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: \"\"\n {{- if .Values.daemonset.annotations }}\n {{- toYaml .Values.daemonset.annotations | nindent 4 }}\n {{- end }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"cpi.name\" . }}\n updateStrategy:\n type: RollingUpdate\n template:\n metadata:\n {{- if .Values.daemonset.podAnnotations }}\n annotations:\n {{- toYaml .Values.daemonset.podAnnotations | nindent 8 }}\n {{- end }}\n labels:\n app: {{ template \"cpi.name\" . }}\n component: cloud-controller-manager\n tier: control-plane\n release: {{ .Release.Name }}\n vsphere-cpi-infra: daemonset\n {{- if .Values.daemonset.podLabels }}\n {{- toYaml .Values.daemonset.podLabels | nindent 8 }}\n {{- end }}\n spec:\n nodeSelector:\n node-role.kubernetes.io/master: \"\"\n {{- if .Values.daemonset.nodeSelector }}\n {{- toYaml .Values.daemonset.nodeSelector | nindent 8 }}\n {{- end }}\n tolerations:\n - key: node.cloudprovider.kubernetes.io/uninitialized\n value: \"true\"\n effect: NoSchedule\n - key: node-role.kubernetes.io/master\n effect: NoSchedule\n - key: node.kubernetes.io/not-ready\n effect: NoSchedule\n operator: Exists\n {{- if .Values.daemonset.tolerations }}\n {{- toYaml .Values.daemonset.tolerations | nindent 6 }}\n {{- end }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.securityContext.fsGroup }}\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n serviceAccountName: {{ .Values.serviceAccount.name }}\n hostNetwork: true\n dnsPolicy: {{ .Values.daemonset.dnsPolicy }}\n containers:\n - name: {{ template \"cpi.name\" . }}\n image: {{ .Values.daemonset.image }}:{{ .Values.daemonset.tag }}\n imagePullPolicy: {{ .Values.daemonset.pullPolicy }}\n args:\n - --cloud-provider=vsphere\n - --v={{ .Values.daemonset.cmdline.logging }}\n - --cloud-config={{ .Values.daemonset.cmdline.cloudConfig.dir }}/{{ .Values.daemonset.cmdline.cloudConfig.file }}\n {{- range $key, $value := .Values.daemonset.cmdline.additionalParams }}\n - --{{ $key }}{{ if $value }}={{ $value }}{{ end }}\n {{- end }}\n {{- if .Values.service.enabled }}\n env:\n - name: VSPHERE_API_DISABLE\n value: \"true\"\n - name: VSPHERE_API_BINDING\n valueFrom:\n configMapKeyRef:\n name: {{ template \"cpi.fullname\" . }}\n key: api.binding\n ports:\n - containerPort: {{ .Values.service.endpointPort }}\n protocol: TCP\n {{- end }}\n volumeMounts:\n - mountPath: {{ .Values.daemonset.cmdline.cloudConfig.dir }}\n name: vsphere-config-volume\n readOnly: true\n {{- if .Values.daemonset.resources }}\n resources:\n {{- toYaml .Values.daemonset.resources | nindent 10 }}\n {{- end }}\n volumes:\n - name: vsphere-config-volume\n configMap:\n name: cloud-config\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"cpi.name\" . }}\n labels:\n app: {{ template \"cpi.name\" . }}\n vsphere-cpi-infra: ingress\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n component: cloud-controller\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n namespace: {{ .Release.Namespace }}\n {{- if .Values.ingress.annotations }}\n annotations:\n {{- toYaml .Values.ingress.annotations | indent 4 }}\n {{- end }}\nspec:\n rules:\n {{- range $host := .Values.ingress.hosts }}\n - host: {{ $host }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ template \"cpi.name\" $ }}\n servicePort: {{ .Values.service.endpointPort }}\n {{- end -}}\n {{- if .Values.ingress.tls }}\n tls:\n {{- toYaml .Values.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n",
"# podsecuritypolicy.yaml\n{{- if .Values.podSecurityPolicy.enabled }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"cpi.name\" . }}\n labels:\n app: {{ template \"cpi.name\" . }}\n vsphere-cpi-infra: pod-security-policy\n component: cloud-controller-manager\n release: {{ .Release.Name }}\n {{- if .Values.podSecurityPolicy.annotations }}\n annotations:\n {{- toYaml .Values.podSecurityPolicy.annotations | indent 4 }}\n {{- end }}\nspec:\n allowPrivilegeEscalation: false\n privileged: false\n volumes:\n - 'configMap'\n - 'secret'\n - 'emptyDir'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n rule: 'MustRunAs'\n ranges:\n - min: {{ .Values.securityContext.runAsUser }}\n max: {{ .Values.securityContext.runAsUser }}\n seLinux:\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n - min: {{ .Values.securityContext.runAsUser }}\n max: {{ .Values.securityContext.runAsUser }}\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: true\n requiredDropCapabilities:\n - ALL\n{{- end }}",
"# role-binding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: v1\nkind: List\nmetadata: {}\nitems:\n- apiVersion: rbac.authorization.k8s.io/v1\n kind: RoleBinding\n metadata:\n name: servicecatalog.k8s.io:apiserver-authentication-reader\n labels:\n app: {{ template \"cpi.name\" . }}\n vsphere-cpi-infra: role-binding\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n component: cloud-controller-manager\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n namespace: {{ .Release.Namespace }}\n roleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: extension-apiserver-authentication-reader\n subjects:\n - apiGroup: \"\"\n kind: ServiceAccount\n name: {{ .Values.serviceAccount.name }}\n namespace: {{ .Release.Namespace }}\n - apiGroup: \"\"\n kind: User\n name: {{ .Values.serviceAccount.name }}\n- apiVersion: rbac.authorization.k8s.io/v1\n kind: ClusterRoleBinding\n metadata:\n name: {{ .Values.serviceAccount.name }}\n labels:\n app: {{ template \"cpi.name\" . }}\n vsphere-cpi-infra: cluster-role-binding\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n component: cloud-controller-manager\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n roleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ .Values.serviceAccount.name }}\n subjects:\n - kind: ServiceAccount\n name: {{ .Values.serviceAccount.name }}\n namespace: {{ .Release.Namespace }}\n - kind: User\n name: {{ .Values.serviceAccount.name }}\n{{- end -}}\n",
"# role.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ .Values.serviceAccount.name }}\n labels:\n app: {{ template \"cpi.name\" . }}\n vsphere-cpi-infra: role\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n component: cloud-controller-manager\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nrules:\n- apiGroups:\n - \"\"\n resources:\n - events\n verbs:\n - create\n - patch\n - update\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - '*'\n- apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n- apiGroups:\n - \"\"\n resources:\n - services\n verbs:\n - list\n - patch\n - update\n - watch\n- apiGroups:\n - \"\"\n resources:\n - services/status\n verbs:\n - patch\n- apiGroups:\n - \"\"\n resources:\n - serviceaccounts\n verbs:\n - create\n - get\n - list\n - watch\n - update\n- apiGroups:\n - \"\"\n resources:\n - persistentvolumes\n verbs:\n - get\n - list\n - update\n - watch\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n verbs:\n - create\n - get\n - list\n - watch\n - update\n- apiGroups:\n - \"\"\n resources:\n - secrets\n verbs:\n - get\n - list\n - watch\n{{- end -}}\n",
"# secret.yaml\n{{- if .Values.config.enabled | default .Values.global.config.enabled -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: vsphere-cpi\n labels:\n app: {{ template \"cpi.name\" . }}\n vsphere-cpi-infra: secret\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n component: cloud-controller-manager\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n namespace: {{ .Release.Namespace }}\nstringData:\n {{ .Values.config.vcenter | default .Values.global.config.vcenter }}.username: {{ .Values.config.username | default .Values.global.config.username }}\n {{ .Values.config.vcenter | default .Values.global.config.vcenter }}.password: {{ .Values.config.password | default .Values.global.config.password }}\n{{- end -}}\n",
"# service-account.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ .Values.serviceAccount.name }}\n labels:\n app: {{ template \"cpi.name\" . }}\n vsphere-cpi-infra: service-account\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n component: cloud-controller-manager\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# service.yaml\n{{- if .Values.service.enabled -}}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"cpi.name\" . }}\n labels:\n app: {{ template \"cpi.name\" . }}\n vsphere-cpi-infra: service\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n component: cloud-controller-manager\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n namespace: {{ .Release.Namespace }}\n {{- if .Values.service.annotations }}\n annotations:\n {{- toYaml .Values.service.annotations | indent 4 }}\n {{- end }}\nspec:\n ports:\n - name: cpi-api\n port: {{ .Values.service.endpointPort }}\n protocol: TCP\n targetPort: {{ .Values.service.targetPort }}\n selector:\n app: {{ template \"cpi.name\" . }}\n vsphere-cpi-infra: service\n component: cloud-controller-manager\n type: {{ .Values.service.type }}\n{{- template \"loadBalancerSourceRanges\" .Values }}\n{{- end -}}\n"
] | # Default values for vSphere CPI.
# This is a YAML-formatted file.
# vSphere CPI values are grouped by component
global:
config:
enabled: false
config:
enabled: false
vcenter: "vcenter.local"
username: "user"
password: "pass"
datacenter: "dc"
## Specify if a Pod Security Policy for vSphere CPI must be created
## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
annotations: {}
# Specify pod annotations
# Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
# Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
# Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
#
# seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
# seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
# apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
# Run containers with a security context. The default user in distroless images is 'nobody' (65534/65534)
securityContext:
enabled: true
runAsUser: 1001
fsGroup: 1001
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
name: cloud-controller-manager
daemonset:
annotations: {}
image: gcr.io/cloud-provider-vsphere/cpi/release/manager
tag: v1.2.1
pullPolicy: IfNotPresent
dnsPolicy: ClusterFirst
cmdline:
logging: 2
# Location of the cloud configmap to be mounted on the filesystem
cloudConfig:
dir: "/etc/cloud"
file: "vsphere.conf"
additionalParams: {}
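## For example (each key/value is passed verbatim to the manager as a --key=value
## flag; the value shown is illustrative):
# additionalParams:
#   cluster-name: "my-cluster"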
replicaCount: 1
resources: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 256m
# memory: 128Mi
podAnnotations: {}
## Additional pod labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
## Allows for the default selector to be replaced with user-defined ones
nodeSelector: {}
## Allows for the default tolerations to be replaced with user-defined ones
tolerations: []
service:
enabled: false
annotations: {}
type: ClusterIP
# List of IP ranges that are allowed to access the load balancer (if supported)
loadBalancerSourceRanges: []
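## For example, to restrict access to an internal range (illustrative CIDR):
# loadBalancerSourceRanges:
#   - 10.0.0.0/8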
# endpointPort: externally accessible port for UI and API
endpointPort: 43001
# targetPort: the internal port the UI and API are exposed on
targetPort: 43001
ingress:
enabled: false
annotations: {}
# Used to create an Ingress record.
# hosts:
# - chart-example.local
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# tls:
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
|
minecraft | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"minecraft.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"minecraft.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# datadir-pvc.yaml\n{{- if and .Values.persistence.dataDir.enabled (not .Values.persistence.dataDir.existingClaim ) -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"minecraft.fullname\" . }}-datadir\n labels:\n app: {{ template \"minecraft.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n annotations:\n {{- if .Values.persistence.storageClass }}\n volume.beta.kubernetes.io/storage-class: {{ .Values.persistence.storageClass | quote }}\n {{- else }}\n volume.alpha.kubernetes.io/storage-class: default\n {{- end }}\nspec:\n accessModes:\n - ReadWriteOnce\n resources:\n requests:\n storage: {{ .Values.persistence.dataDir.Size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# deployment.yaml\n{{- if ne (printf \"%s\" .Values.minecraftServer.eula) \"FALSE\" }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"minecraft.fullname\" . }}\n {{- if .Values.deploymentAnnotations }}\n annotations:\n {{- range $key, $value := .Values.deploymentAnnotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\n labels:\n app: {{ template \"minecraft.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n strategy:\n type: {{ .Values.strategyType }}\n selector:\n matchLabels:\n app: {{ template \"minecraft.fullname\" . }}\n template:\n metadata:\n labels:\n app: {{ template \"minecraft.fullname\" . }}\n {{- if .Values.podAnnotations }}\n annotations:\n {{- range $key, $value := .Values.podAnnotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\n spec:\n securityContext:\n runAsUser: {{ .Values.securityContext.runAsUser }}\n fsGroup: {{ .Values.securityContext.fsGroup }}\n containers:\n - name: {{ template \"minecraft.fullname\" . }}\n image: \"{{ .Values.image }}:{{ .Values.imageTag }}\"\n imagePullPolicy: Always\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n readinessProbe:\n exec:\n command: \n{{ toYaml .Values.readinessProbe.command | indent 14 }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n livenessProbe:\n exec:\n command:\n{{ toYaml .Values.livenessProbe.command | indent 14 }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n env:\n - name: EULA\n value: {{ .Values.minecraftServer.eula | quote }}\n - name: TYPE\n value: {{ default \"\" .Values.minecraftServer.type | quote }}\n {{- if eq .Values.minecraftServer.type \"FORGE\" }}\n {{- if .Values.minecraftServer.forgeInstallerUrl }}\n - name: FORGE_INSTALLER_URL\n value: {{ .Values.minecraftServer.forgeInstallerUrl | quote }}\n {{- else }}\n - name: FORGEVERSION\n value: {{ .Values.minecraftServer.forgeVersion | quote }}\n {{- end }}\n {{- else if eq .Values.minecraftServer.type \"SPIGOT\" }}\n - name: SPIGOT_DOWNLOAD_URL\n value: {{ .Values.minecraftServer.spigotDownloadUrl | quote }}\n {{- else if eq .Values.minecraftServer.type \"BUKKIT\" }}\n - name: BUKKIT_DOWNLOAD_URL\n value: {{ .Values.minecraftServer.bukkitDownloadUrl | quote }}\n {{- else if eq .Values.minecraftServer.type \"PAPER\" }}\n - name: PAPER_DOWNLOAD_URL\n value: {{ .Values.minecraftServer.paperDownloadUrl | quote }}\n {{- else if eq .Values.minecraftServer.type \"FTB\" }}\n - name: FTB_SERVER_MOD\n value: {{ .Values.minecraftServer.ftbServerMod | quote }}\n - name: FTB_LEGACYJAVAFIXER\n value: {{ default false .Values.minecraftServer.ftbLegacyJavaFixer | quote }}\n {{- end }}\n - name: VERSION\n value: {{ .Values.minecraftServer.version | quote }}\n - name: DIFFICULTY\n value: {{ .Values.minecraftServer.difficulty | quote }}\n - name: WHITELIST\n value: {{ default \"\" .Values.minecraftServer.whitelist | quote }}\n - name: OPS\n value: {{ 
default \"\" .Values.minecraftServer.ops | quote }}\n - name: ICON\n value: {{ default \"\" .Values.minecraftServer.icon | quote }}\n - name: MAX_PLAYERS\n value: {{ .Values.minecraftServer.maxPlayers | quote }}\n - name: MAX_WORLD_SIZE\n value: {{ .Values.minecraftServer.maxWorldSize | quote }}\n - name: ALLOW_NETHER\n value: {{ .Values.minecraftServer.allowNether | quote }}\n - name: ANNOUNCE_PLAYER_ACHIEVEMENTS\n value: {{ .Values.minecraftServer.announcePlayerAchievements | quote }}\n - name: ENABLE_COMMAND_BLOCK\n value: {{ .Values.minecraftServer.enableCommandBlock | quote }}\n - name: FORCE_gameMode\n value: {{ .Values.minecraftServer.forcegameMode | quote }}\n {{- if .Values.minecraftServer.forceReDownload }}\n - name: FORCE_REDOWNLOAD\n value: \"TRUE\"\n {{- end }}\n - name: GENERATE_STRUCTURES\n value: {{ .Values.minecraftServer.generateStructures | quote }}\n - name: HARDCORE\n value: {{ .Values.minecraftServer.hardcore | quote }}\n - name: MAX_BUILD_HEIGHT\n value: {{ .Values.minecraftServer.maxBuildHeight | quote }}\n - name: MAX_TICK_TIME\n value: {{ .Values.minecraftServer.maxTickTime | quote }}\n - name: SPAWN_ANIMALS\n value: {{ .Values.minecraftServer.spawnAnimals | quote }}\n - name: SPAWN_MONSTERS\n value: {{ .Values.minecraftServer.spawnMonsters | quote }}\n - name: SPAWN_NPCS\n value: {{ .Values.minecraftServer.spawnNPCs | quote }}\n - name: VIEW_DISTANCE\n value: {{ .Values.minecraftServer.viewDistance | quote }}\n - name: SEED\n value: {{ default \"\" .Values.minecraftServer.levelSeed | quote }}\n - name: MODE\n value: {{ .Values.minecraftServer.gameMode | quote }}\n - name: MOTD\n value: {{ .Values.minecraftServer.motd | quote }}\n - name: PVP\n value: {{ .Values.minecraftServer.pvp | quote }}\n - name: LEVEL_TYPE\n value: {{ .Values.minecraftServer.levelType | quote }}\n - name: GENERATOR_SETTINGS\n value: {{ default \"\" .Values.minecraftServer.generatorSettings | quote }}\n - name: LEVEL\n value: {{ .Values.minecraftServer.worldSaveName | quote }}\n {{- if .Values.minecraftServer.downloadWorldUrl }}\n - name: WORLD\n value: {{ .Values.minecraftServer.downloadWorldUrl | quote }}\n {{- end }}\n {{- if .Values.minecraftServer.downloadModpackUrl }}\n - name: MODPACK\n value: {{ .Values.minecraftServer.downloadModpackUrl | quote }}\n {{- if .Values.minecraftServer.removeOldMods }}\n - name: REMOVE_OLD_MODS\n value: \"TRUE\"\n {{- end }}\n {{- end }}\n - name: ONLINE_MODE\n value: {{ .Values.minecraftServer.onlineMode | quote }}\n - name: MEMORY\n value: {{ .Values.minecraftServer.memory | quote }}\n - name: JVM_OPTS\n value: {{ .Values.minecraftServer.jvmOpts | quote }}\n - name: JVM_XX_OPTS\n value: {{ .Values.minecraftServer.jvmXXOpts | quote }}\n\n {{- if .Values.minecraftServer.rcon.enabled }}\n - name: ENABLE_RCON\n value: \"true\"\n - name: RCON_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"minecraft.fullname\" . 
}}\n key: rcon-password\n {{- end }}\n\n {{- if .Values.minecraftServer.query.enabled }}\n - name: ENABLE_QUERY\n value: \"true\"\n - name: QUERY_PORT\n value: {{ .Values.minecraftServer.query.port }}\n {{- end }}\n\n {{- range $key, $value := .Values.extraEnv }}\n - name: {{ $key }}\n value: {{ $value }}\n {{- end }}\n\n ports:\n - name: minecraft\n containerPort: 25565\n protocol: TCP\n {{- if .Values.minecraftServer.rcon.enabled }}\n - name: rcon\n containerPort: {{ .Values.minecraftServer.rcon.port }}\n protocol: TCP\n {{- end }}\n volumeMounts:\n - name: datadir\n mountPath: /data\n volumes:\n - name: datadir\n {{- if .Values.persistence.dataDir.enabled }}\n persistentVolumeClaim:\n {{- if .Values.persistence.dataDir.existingClaim }}\n claimName: {{ .Values.persistence.dataDir.existingClaim }}\n {{- else }}\n claimName: {{ template \"minecraft.fullname\" . }}-datadir\n {{- end }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n{{ end }}\n",
"# minecraft-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"minecraft.fullname\" . }}\n labels:\n app: {{ template \"minecraft.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n{{- if (or (eq .Values.minecraftServer.serviceType \"ClusterIP\") (empty .Values.minecraftServer.serviceType)) }}\n type: ClusterIP\n{{- else if eq .Values.minecraftServer.serviceType \"LoadBalancer\" }}\n type: {{ .Values.minecraftServer.serviceType }}\n {{- if .Values.minecraftServer.loadBalancerIP }}\n loadBalancerIP: {{ .Values.minecraftServer.loadBalancerIP }}\n {{- end }}\n {{- if .Values.minecraftServer.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n{{ toYaml .Values.minecraftServer.loadBalancerSourceRanges | indent 4 }}\n {{- end -}}\n{{- else }}\n type: {{ .Values.minecraftServer.serviceType }}\n{{- end }}\n {{- if .Values.minecraftServer.externalTrafficPolicy }}\n externalTrafficPolicy: {{ .Values.minecraftServer.externalTrafficPolicy }}\n {{- end }}\n ports:\n - name: minecraft\n port: 25565\n targetPort: minecraft\n protocol: TCP\n selector:\n app: {{ template \"minecraft.fullname\" . }}\n",
"# rcon-svc.yaml\n{{- if default \"\" .Values.minecraftServer.rcon.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: \"{{ template \"minecraft.fullname\" . }}-rcon\"\n labels:\n app: {{ template \"minecraft.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n{{- if (or (eq .Values.minecraftServer.rcon.serviceType \"ClusterIP\") (empty .Values.minecraftServer.rcon.serviceType)) }}\n type: ClusterIP\n{{- else if eq .Values.minecraftServer.rcon.serviceType \"LoadBalancer\" }}\n type: {{ .Values.minecraftServer.rcon.serviceType }}\n {{- if .Values.minecraftServer.rcon.loadBalancerIP }}\n loadBalancerIP: {{ .Values.minecraftServer.rcon.loadBalancerIP }}\n {{- end }}\n {{- if .Values.minecraftServer.rcon.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n{{ toYaml .Values.minecraftServer.rcon.loadBalancerSourceRanges | indent 4 }}\n {{- end -}}\n{{- else }}\n type: {{ .Values.minecraftServer.rcon.serviceType }}\n{{- end }}\n {{- if .Values.minecraftServer.rcon.externalTrafficPolicy }}\n externalTrafficPolicy: {{ .Values.minecraftServer.rcon.externalTrafficPolicy }}\n {{- end }}\n ports:\n - name: rcon\n port: {{ .Values.minecraftServer.rcon.port }}\n targetPort: rcon\n protocol: TCP\n selector:\n app: {{ template \"minecraft.fullname\" . }}\n{{- end }}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"minecraft.fullname\" . }}\n labels:\n app: {{ template \"minecraft.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n rcon-password: {{ default \"\" .Values.minecraftServer.rcon.password | b64enc | quote }}\n"
] | # ref: https://hub.docker.com/r/itzg/minecraft-server/
image: itzg/minecraft-server
imageTag: latest
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 512Mi
cpu: 500m
# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate
nodeSelector: {}
tolerations: []
affinity: {}
securityContext:
# Security context settings
runAsUser: 1000
fsGroup: 2000
# Most of these map to environment variables. See the itzg/minecraft-server image docs for details:
# https://hub.docker.com/r/itzg/minecraft-server/
livenessProbe:
command:
- mc-monitor
- status
- localhost:25565
initialDelaySeconds: 30
periodSeconds: 5
failureThreshold: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
command:
- mc-monitor
- status
- localhost:25565
initialDelaySeconds: 30
periodSeconds: 5
failureThreshold: 10
successThreshold: 1
timeoutSeconds: 1
minecraftServer:
# This must be overridden, since we can't accept the EULA on the user's behalf.
eula: "FALSE"
# One of: LATEST, SNAPSHOT, or a specific version (e.g. "1.7.9").
version: "1.14.4"
# This can be one of "VANILLA", "FORGE", "SPIGOT", "BUKKIT", "PAPER", "FTB", "SPONGEVANILLA"
type: "VANILLA"
# If type is set to FORGE, this sets the version; this is ignored if forgeInstallerUrl is set
forgeVersion:
# If type is set to SPONGEVANILLA, this sets the version
spongeVersion:
# If type is set to FORGE, this sets the URL to download the Forge installer
forgeInstallerUrl:
# If type is set to BUKKIT, this sets the URL to download the Bukkit package
bukkitDownloadUrl:
# If type is set to SPIGOT, this sets the URL to download the Spigot package
spigotDownloadUrl:
# If type is set to PAPER, this sets the URL to download the PaperSpigot package
paperDownloadUrl:
# If type is set to FTB, this sets the server mod to run. You can also provide the URL to download the FTB package
ftbServerMod:
# Set to true if running Feed The Beast and you get an error like "unable to launch forgemodloader"
ftbLegacyJavaFixer: false
# One of: peaceful, easy, normal, and hard
difficulty: easy
# A comma-separated list of player names to whitelist.
whitelist:
# A comma-separated list of player names who should be admins.
ops:
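## For example (hypothetical player names):
# whitelist: "alice,bob"
# ops: "alice"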
# A server icon URL for server listings. Auto-scaled and transcoded.
icon:
# Max connected players.
maxPlayers: 20
# This sets the maximum possible size in blocks, expressed as a radius, that the world border can obtain.
maxWorldSize: 10000
# Allows players to travel to the Nether.
allowNether: true
# Allows server to announce when a player gets an achievement.
announcePlayerAchievements: true
# Enables command blocks.
enableCommandBlock: true
# If true, players will always join in the default gameMode even if they were previously set to something else.
forcegameMode: false
# Defines whether structures (such as villages) will be generated.
generateStructures: true
# If set to true, players will be set to spectator mode if they die.
hardcore: false
# The maximum height in which building is allowed.
maxBuildHeight: 256
# The maximum number of milliseconds a single tick may take before the server watchdog stops the server. -1 disables the watchdog entirely.
maxTickTime: 60000
# Determines if animals will be able to spawn.
spawnAnimals: true
# Determines if monsters will be spawned.
spawnMonsters: true
# Determines if villagers will be spawned.
spawnNPCs: true
# Max view distance (in chunks).
viewDistance: 10
# Define this if you want a specific map generation seed.
levelSeed:
# One of: creative, survival, adventure, spectator
gameMode: survival
# Message of the Day
motd: "Welcome to Minecraft on Kubernetes!"
# If true, enable player-vs-player damage.
pvp: false
# One of: DEFAULT, FLAT, LARGEBIOMES, AMPLIFIED, CUSTOMIZED
levelType: DEFAULT
# When levelType == FLAT or CUSTOMIZED, this can be used to further customize map generation.
# ref: https://hub.docker.com/r/itzg/minecraft-server/
generatorSettings:
worldSaveName: world
# If set, this URL will be downloaded at startup and used as a starting point
downloadWorldUrl:
# Force re-download of the server file
forceReDownload: false
# If set, the modpack at this URL will be downloaded at startup
downloadModpackUrl:
# If true, old versions of downloaded mods will be replaced with new ones from downloadModpackUrl
removeOldMods: false
# Check accounts against Minecraft account service.
onlineMode: true
# If you adjust this, you may need to adjust resources.requests above to match.
memory: 512M
# General JVM options to be passed to the Minecraft server invocation
jvmOpts: ""
# Options like -XX that need to precede the general JVM options
jvmXXOpts: ""
serviceType: LoadBalancer
loadBalancerIP:
# loadBalancerSourceRanges: []
## Set the externalTrafficPolicy in the Service to either Cluster or Local
# externalTrafficPolicy: Cluster
rcon:
# If you enable this, make SURE to change your password below.
enabled: false
port: 25575
password: "CHANGEME!"
serviceType: LoadBalancer
loadBalancerIP:
# loadBalancerSourceRanges: []
## Set the externalTrafficPolicy in the Service to either Cluster or Local
# externalTrafficPolicy: Cluster
query:
# If you enable this, your server will be "published" to Gamespy
enabled: false
port: 25565
## Additional minecraft container environment variables
##
extraEnv: {}
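## For example (each key/value becomes a container environment variable; the
## variable shown is illustrative):
# extraEnv:
#   TZ: "Europe/London"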
persistence:
## minecraft data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
dataDir:
# Set this to false if you don't care to persist state between restarts.
enabled: true
# existingClaim: nil
Size: 1Gi
podAnnotations: {}
deploymentAnnotations: {}
|
percona | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"percona.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"percona.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"percona.fullname\" . }}\n labels:\n app: {{ template \"percona.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"percona.fullname\" . }}\n template:\n metadata:\n labels:\n app: {{ template \"percona.fullname\" . }}\n spec:\n {{- if .Values.schedulerName }}\n schedulerName: \"{{ .Values.schedulerName }}\"\n {{- end }}\n initContainers:\n - name: \"remove-lost-found\"\n image: \"{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}\"\n imagePullPolicy: {{ .Values.initImage.pullPolicy | quote }}\n resources:\n{{ toYaml .Values.initResources | indent 10 }}\n command:\n - \"rm\"\n - \"-fr\"\n - \"/var/lib/mysql/lost+found\"\n volumeMounts:\n - name: data\n mountPath: /var/lib/mysql\n containers:\n - name: {{ template \"percona.fullname\" . }}\n image: \"{{ .Values.image }}:{{ .Values.imageTag }}\"\n imagePullPolicy: {{ .Values.imagePullPolicy | quote }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n env:\n {{- if .Values.mysqlAllowEmptyPassword }}\n - name: MYSQL_ALLOW_EMPTY_PASSWORD\n value: \"true\"\n {{- else }}\n - name: MYSQL_ROOT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"percona.fullname\" . }}\n key: mysql-root-password\n - name: MYSQL_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"percona.fullname\" . }}\n key: mysql-password\n {{- end }}\n - name: MYSQL_USER\n value: {{ default \"\" .Values.mysqlUser | quote }}\n - name: MYSQL_DATABASE\n value: {{ default \"\" .Values.mysqlDatabase | quote }}\n ports:\n - name: mysql\n containerPort: 3306\n livenessProbe:\n exec:\n command:\n - mysqladmin\n - ping\n initialDelaySeconds: 30\n timeoutSeconds: 5\n readinessProbe:\n exec:\n command:\n - mysqladmin\n - ping\n initialDelaySeconds: 5\n timeoutSeconds: 1\n volumeMounts:\n - name: data\n mountPath: /var/lib/mysql\n volumes:\n - name: data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"percona.fullname\" . }}\n {{- else }}\n emptyDir: {}\n {{- end -}}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end -}}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end -}}\n",
"# pvc.yaml\n{{- if .Values.persistence.enabled }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"percona.fullname\" . }}\n labels:\n app: {{ template \"percona.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"percona.fullname\" . }}\n labels:\n app: {{ template \"percona.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n {{ if .Values.mysqlRootPassword }}\n mysql-root-password: {{ .Values.mysqlRootPassword | b64enc | quote }}\n {{ else }}\n mysql-root-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n {{ if .Values.mysqlPassword }}\n mysql-password: {{ .Values.mysqlPassword | b64enc | quote }}\n {{ else }}\n mysql-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"percona.fullname\" . }}\n labels:\n app: {{ template \"percona.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n ports:\n - name: mysql\n port: 3306\n targetPort: mysql\n selector:\n app: {{ template \"percona.fullname\" . }}\n"
] | ## percona image
## ref: https://hub.docker.com/_/percona/
image: "percona"
## percona image version
## ref: https://hub.docker.com/r/library/percona/tags/
##
imageTag: "5.7.26"
## Specify password for root user
##
## Default: random 10 character string
# mysqlRootPassword: testing
## Create a database user
##
# mysqlUser:
# mysqlPassword:
## Allow unauthenticated access, uncomment to enable
##
# mysqlAllowEmptyPassword: true
## Create a database
##
# mysqlDatabase:
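## For example (hypothetical credentials for a dedicated app user and database):
# mysqlUser: app
# mysqlPassword: s3cret
# mysqlDatabase: appdb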
## Specify an imagePullPolicy (Required)
## It's recommended to change this to 'Always' if the image tag is 'latest'
## ref: http://kubernetes.io/docs/user-guide/images/#updating-images
##
imagePullPolicy: IfNotPresent
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName: "default-scheduler"
## Persist data to a persistent volume
persistence:
enabled: false
## percona data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 256Mi
cpu: 100m
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment
## Allow scheduling on tainted nodes (requires Kubernetes >= 1.6)
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity labels for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Configure resources for the init container
##
initResources: {}
# limits:
# cpu: 25m
# memory: 128Mi
# requests:
# cpu: 25m
#    memory: 128Mi
## Override image used by init container
##
initImage:
repository: "busybox"
tag: "1.25.0"
pullPolicy: "IfNotPresent"
|
zetcd | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"zetcd.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"zetcd.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"zetcd.fullname\" . }}\n labels:\n app: {{ template \"zetcd.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n template:\n metadata:\n labels:\n app: {{ template \"zetcd.name\" . }}\n release: {{ .Release.Name }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command:\n - \"/usr/local/bin/zetcd\"\n - \"-zkaddr\"\n - \"0.0.0.0:{{ .Values.service.internalPort }}\"\n{{- if .Values.etcd.operatorEnabled }}\n - \"-endpoints\"\n - \"{{ index .Values \"etcd-operator\" \"cluster\" \"name\" }}-client:2379\"\n{{- else }}\n - \"-endpoints\" \n - \"{{ .Values.etcd.endpoints }}\"\n{{- end }}\n{{- if .Values.etcd.tls }}\n - \"-certfile\"\n - \"/etc/zetcd/secrets/{{ .Values.etcd.tls.cert }}\"\n - \"-cafile\"\n - \"/etc/zetcd/secrets/{{ .Values.etcd.tls.ca }}\"\n - \"-keyfile\"\n - \"/etc/zetcd/secrets/{{ .Values.etcd.tls.key }}\"\n volumeMounts:\n - name: tls\n mountPath: /etc/zetcd/secrets\n readOnly: true\n{{- end }}\n ports:\n - containerPort: {{ .Values.service.internalPort }}\n livenessProbe:\n tcpSocket:\n port: {{ .Values.service.internalPort }}\n readinessProbe:\n tcpSocket:\n port: {{ .Values.service.internalPort }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n{{- if .Values.etcd.tls }}\n volumes:\n - name: tls\n secret:\n secretName: {{ .Values.etcd.tls.existingSecret }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"zetcd.fullname\" . }}\n labels:\n app: {{ template \"zetcd.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.externalPort }}\n targetPort: {{ .Values.service.internalPort }}\n protocol: TCP\n name: {{ .Values.service.name }}\n selector:\n app: {{ template \"zetcd.name\" . }}\n release: {{ .Release.Name }}\n"
] | # Default values for zetcd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: quay.io/coreos/zetcd
tag: v0.0.3
pullPolicy: IfNotPresent
service:
name: zetcd
type: ClusterIP
externalPort: 2181
internalPort: 2181
resources: {}
# We usually recommend not specifying default resources and leaving this as a conscious
# choice for the user. This also increases the chance that the chart runs in environments
# with few resources, such as Minikube. If you do want to specify resources, uncomment the
# following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
etcd:
operatorEnabled: true
endpoints: localhost:2379
# Communication with etcd can be encrypted and authenticated with a certificate.
# To enable it, add a 'tls' section referencing an existing secret that
# contains the CA certificate, client certificate, and client key.
# tls:
# existingSecret: etcd-tls-secret
# cert: client.crt
# key: client.key
# ca: ca.crt
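# Such a secret could be created ahead of time with, e.g. (illustrative local
# file names, matching the keys above):
#   kubectl create secret generic etcd-tls-secret \
#     --from-file=ca.crt --from-file=client.crt --from-file=client.key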
etcd-operator:
cluster:
enabled: true
|
kapacitor | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"kapacitor.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"kapacitor.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# deployment.yaml\n{{- $bl := empty .Values.influxURL }}\n{{- if not $bl }}\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"kapacitor.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n app: {{ template \"kapacitor.fullname\" . }}\nspec:\n replicas: 1\n template:\n metadata:\n labels:\n app: {{ template \"kapacitor.fullname\" . }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n - name: KAPACITOR_HOSTNAME\n value: {{ template \"kapacitor.fullname\" . }}.{{ .Release.Namespace }}\n - name: KAPACITOR_INFLUXDB_0_URLS_0\n value: {{ .Values.influxURL }}\n {{- range $key, $val := .Values.envVars }}\n - name: {{ $key }}\n value: {{ $val | quote }}\n {{- end }}\n {{- if .Values.existingSecret }}\n - name: KAPACITOR_INFLUXDB_0_USERNAME\n valueFrom:\n secretKeyRef:\n key: influxdb-user\n name: {{ .Values.existingSecret }}\n - name: KAPACITOR_INFLUXDB_0_PASSWORD\n valueFrom:\n secretKeyRef:\n key: influxdb-password\n name: {{ .Values.existingSecret }}\n {{- end }}\n ports:\n - containerPort: 9092\n volumeMounts:\n - name: data\n mountPath: /var/lib/kapacitor\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumes:\n - name: data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.existingClaim | default (include \"kapacitor.fullname\" .) }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n{{- end }}\n",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}\n{{- if .Values.influxURL }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"kapacitor.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n app: {{ template \"kapacitor.fullname\" . }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"kapacitor.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n app: {{ template \"kapacitor.fullname\" . }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: 9092\n targetPort: 9092\n name: api\n selector:\n app: {{ template \"kapacitor.fullname\" . }}\n"
] | ## kapacitor image version
## ref: https://hub.docker.com/r/library/kapacitor/tags/
##
image:
repository: "kapacitor"
tag: "1.5.2-alpine"
pullPolicy: "IfNotPresent"
## Specify a service type, defaults to ClusterIP
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
type: ClusterIP
## Persist data to a persistent volume
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## kapacitor data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
# existingClaim: ""
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 256Mi
cpu: 0.1
limits:
memory: 2Gi
cpu: 2
## Set the environment variables for kapacitor (or anything else you want to use)
## ref: https://hub.docker.com/_/kapacitor/
## ref: https://docs.influxdata.com/kapacitor/latest/administration/configuration/
##
# Examples below
#
# envVars:
# KAPACITOR_SLACK_ENABLED: true
# KAPACITOR_SLACK_URL: "http://slack.com/xxxxx/xxxxx/xxxx/xxxxxxx"
# KAPACITOR_HTTP_LOG_ENABLED: true
# KAPACITOR_LOGGING_LEVEL: "INFO"
#
# or, at your terminal, with
#
# helm install --name kapacitor-rls --set influxURL=http://influxurl.com,envVars.KAPACITOR_SLACK_ENABLED=true,envVars.KAPACITOR_SLACK_URL="http://slack.com/xxxxx/xxxxx/xxxx/xxxxxxx" stable/kapacitor
## Set the URL of the InfluxDB instance to create a subscription on
## ref: https://docs.influxdata.com/kapacitor/v1.1/introduction/getting_started/
##
# influxURL: http://influxdb-influxdb.tick:8086
## Name of an existing Secret used to set the environment variables for the
## InfluxDB user and password. The expected keys in the secret are
## `influxdb-user` and `influxdb-password`.
##
# existingSecret: influxdb-auth
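## Such a secret could be created with, e.g. (illustrative credentials):
##   kubectl create secret generic influxdb-auth \
##     --from-literal=influxdb-user=admin \
##     --from-literal=influxdb-password=changeme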
|
unbound | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\n\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"unbound.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a fully qualified app name\n*/}}\n{{- define \"unbound.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"unbound.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"unbound.fullname\" . }}\n labels:\n app: {{ template \"unbound.name\" . }}\n chart: {{ template \"unbound.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n unbound.conf: |-\n server:\n chroot: \"\"\n num-threads: {{ .Values.unbound.numThreads }}\n directory: \"/etc/unbound\"\n port: {{ .Values.unbound.serverPort }}\n so-reuseport: yes\n do-daemonize: no\n logfile: \"\"\n use-syslog: no\n auto-trust-anchor-file: \"/var/lib/unbound/root.key\"\n verbosity: {{ .Values.unbound.verbosity }}\n statistics-interval: {{ .Values.unbound.statsInterval }}\n statistics-cumulative: {{ .Values.unbound.statsCumulative }}\n\n interface: 127.0.0.1\n interface: 0.0.0.0\n\n access-control: 127.0.0.1/32 allow\n\n {{- range .Values.allowedIpRanges }}\n access-control: {{ . }} allow\n {{- end }}\n\n {{- range .Values.localRecords }}\n local-data: \"{{ .name }} A {{ .ip }}\"\n local-data-ptr: \"{{ .ip }} {{ .name }}\"\n {{- end }}\n\n local-data: \"health.check.unbound A 127.0.0.1\"\n local-data-ptr: \"127.0.0.1 health.check.unbound\"\n\n {{- range .Values.localZones }}\n local-zone: \"{{ .name }}\" {{ .localType }}\n {{- end }}\n\n {{- range .Values.forwardZones }}\n\n forward-zone:\n name: {{ .name }}\n {{- range .forwardHosts }}\n forward-host: {{ . }}\n {{- end }}\n {{- range .forwardIps }}\n forward-addr: {{ . }}\n {{- end }}\n {{- end }}\n\n {{- range .Values.stubZones }}\n\n stub-zone:\n name: {{ .name }}\n {{- range .stubHosts }}\n stub-host: {{ . }}\n {{- end }}\n {{- range .stubIps }}\n stub-addr: {{ . }}\n {{- end }}\n {{- end }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"unbound.fullname\" . }}\n labels:\n app: {{ template \"unbound.name\" . }}\n chart: {{ template \"unbound.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"unbound.name\" . }}\n release: {{ .Release.Name }}\n strategy:\n type: RollingUpdate\n rollingUpdate:\n maxSurge: 1\n maxUnavailable: 1\n template:\n metadata:\n labels:\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n app: {{ template \"unbound.name\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n spec:\n containers:\n - name: \"unbound\"\n image: {{ .Values.unbound.image.repository }}:{{ .Values.unbound.image.tag }}\n imagePullPolicy: {{ .Values.unbound.image.pullPolicy | quote }}\n{{- with .Values.resources }}\n resources:\n{{ toYaml . | indent 10 }}\n{{- end }}\n ports:\n - name: \"dns-udp\"\n containerPort: {{ .Values.unbound.serverPort }}\n protocol: \"UDP\"\n - name: \"dns-tcp\"\n containerPort: {{ .Values.unbound.serverPort }}\n protocol: \"TCP\"\n volumeMounts:\n - name: \"unbound-conf\"\n mountPath: \"/etc/unbound/\"\n readOnly: true\n livenessProbe:\n httpGet:\n path: \"/healthz\"\n port: 8080\n initialDelaySeconds: 5\n timeoutSeconds: 2\n readinessProbe:\n httpGet:\n path: \"/healthz\"\n port: 8080\n initialDelaySeconds: 5\n timeoutSeconds: 2\n - name: \"healthz\"\n image: {{ .Values.healthz.image.repository }}:{{ .Values.healthz.image.tag }}\n imagePullPolicy: {{ .Values.healthz.image.pullPolicy | quote }}\n args:\n - \"-cmd=nslookup health.check.unbound 127.0.0.1:{{ .Values.unbound.serverPort }} > /dev/null\"\n ports:\n - name: healthz\n containerPort: 8080\n protocol: TCP\n volumes:\n - name: \"unbound-conf\"\n configMap:\n name: {{ template \"unbound.fullname\" . }}\n{{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n\n",
"# disruption_budget.yaml\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"unbound.fullname\" . }}\n labels:\n app: {{ template \"unbound.name\" . }}\n chart: {{ template \"unbound.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n maxUnavailable: 1\n selector:\n matchLabels:\n app: {{ template \"unbound.name\" . }}\n release: {{ .Release.Name }}\n",
"# service.yaml\nkind: Service\napiVersion: v1\nmetadata:\n name: {{ template \"unbound.fullname\" . }}\n labels:\n app: {{ template \"unbound.name\" . }}\n chart: {{ template \"unbound.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n {{- if .Values.clusterIP }}\n clusterIP: {{ .Values.clusterIP }}\n {{- end }}\n {{- if .Values.externalIP }}\n externalIPs:\n - {{ .Values.externalIP }}\n {{- end }}\n selector:\n app: {{ template \"unbound.name\" . }}\n release: {{ .Release.Name }}\n ports:\n - name: dns-udp\n protocol: UDP\n port: {{ .Values.unbound.serverPort }}\n targetPort: dns-udp\n - name: dns-tcp\n protocol: TCP\n port: {{ .Values.unbound.serverPort }}\n targetPort: dns-tcp\n"
] | replicaCount: 1
# Values that pertain to the unbound container. For more information on
# unbound configuration, see http://unbound.net/documentation/unbound.conf.html
unbound:
image:
repository: markbnj/unbound-docker
tag: "0.1.0"
pullPolicy: IfNotPresent
verbosity: 1
numThreads: 1
statsInterval: 0
statsCumulative: "no"
serverPort: 53
# Values that pertain to the exechealthz container. For more information, see
# https://github.com/kubernetes/contrib/tree/master/exec-healthz
healthz:
image:
repository: gcr.io/google-containers/exechealthz
tag: "1.2"
pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
# clusterIP:
# Controls which IP address ranges unbound will allow queries from.
# If you want to use unbound as an upstream for kube-dns, or allow other pods
# to query the resolver directly, you'll at least need to allow the
# clusterIpV4Cidr range.
# allowedIpRanges:
# - "10.10.10.10/20"
# You can set as many forward zones as needed by specifying the zone name
# and forward hosts. Forward hosts can be set by hostname or ip.
# forwardZones:
# - name: "fake.net"
# forwardHosts:
# - "fake1.host.net"
# forwardIps:
# - "10.10.10.10"
# Unbound can store DNS records in a "local zone." This facility can be used to
# assign context-specific names to a given IP address, and could also be used
# for private DNS if you don't have, or don't want to use, an external resolver.
# localRecords:
# - name: "fake3.host.net"
# ip: "10.12.10.10"
|
spring-cloud-data-flow | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"scdf.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default short app name to use for resource naming.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"scdf.fullname\" -}}\n{{- $name := default \"data-flow\" .Values.appNameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate an uppercase app name to use for environment variables.\n*/}}\n{{- define \"scdf.envname\" -}}\n{{- $name := default \"data-flow\" .Values.appNameOverride -}}\n{{- printf \"%s_%s\" .Release.Name $name | upper | replace \"-\" \"_\" | trimSuffix \"_\" -}}\n{{- end -}}\n\n{{/*\nCreate an uppercase release prefix to use for environment variables.\n*/}}\n{{- define \"scdf.envrelease\" -}}\n{{- printf \"%s\" .Release.Name | upper | replace \"-\" \"_\" | trimSuffix \"_\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"scdf.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"scdf.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{- define \"scdf.database.driver\" -}}\n {{- if .Values.mysql.enabled -}}\n {{- printf \"org.mariadb.jdbc.Driver\" -}}\n {{- else -}}\n {{- .Values.database.driver -}}\n {{- end -}}\n{{- end -}}\n\n{{- define \"scdf.database.scheme\" -}}\n {{- if .Values.mysql.enabled -}}\n {{- printf \"mysql\" -}}\n {{- else -}}\n {{- .Values.database.scheme -}}\n {{- end -}}\n{{- end -}}\n\n{{- define \"scdf.database.host\" -}}\n {{- if .Values.mysql.enabled -}}\n {{- printf \"${%s_MYSQL_SERVICE_HOST}\" (include \"scdf.envrelease\" . ) -}}\n {{- else -}}\n {{- .Values.database.host -}}\n {{- end -}}\n{{- end -}}\n\n{{- define \"scdf.database.port\" -}}\n {{- if .Values.mysql.enabled -}}\n {{- printf \"${%s_MYSQL_SERVICE_PORT}\" (include \"scdf.envrelease\" . ) -}}\n {{- else -}}\n {{- .Values.database.port -}}\n {{- end -}}\n{{- end -}}\n\n{{- define \"scdf.database.user\" -}}\n {{- if .Values.mysql.enabled -}}\n {{- printf \"root\" -}}\n {{- else -}}\n {{- .Values.database.user -}}\n {{- end -}}\n{{- end -}}\n\n{{- define \"scdf.database.password\" -}}\n {{- if .Values.mysql.enabled -}}\n {{- printf \"${mysql-root-password}\" -}}\n {{- else -}}\n {{- printf \"${database-password}\" -}}\n {{- end -}}\n{{- end -}}\n\n{{- define \"scdf.database.dataflow\" -}}\n {{- if .Values.mysql.enabled -}}\n {{- .Values.mysql.mysqlDatabase -}}\n {{- else -}}\n {{- .Values.database.dataflow -}}\n {{- end -}}\n{{- end -}}\n\n{{- define \"scdf.database.skipper\" -}}\n {{- if .Values.mysql.enabled -}}\n {{- printf \"skipper\" -}}\n {{- else -}}\n {{- .Values.database.skipper -}}\n {{- end -}}\n{{- end -}}\n\n{{- define \"scdf.broker.rabbitmq.host\" -}}\n {{- if index .Values \"rabbitmq-ha\" \"enabled\" -}}\n {{- printf \"%s-rabbitmq-ha\" .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n {{- else if .Values.rabbitmq.enabled -}}\n {{- printf \"${%s_RABBITMQ_SERVICE_HOST}\" (include \"scdf.envrelease\" . 
) -}}\n {{- end -}}\n{{- end -}}\n\n{{- define \"scdf.broker.rabbitmq.port\" -}}\n {{- if index .Values \"rabbitmq-ha\" \"enabled\" -}}\n {{- index .Values \"rabbitmq-ha\" \"rabbitmqNodePort\" -}}\n {{- else if .Values.rabbitmq.enabled -}}\n {{- printf \"${%s_RABBITMQ_SERVICE_PORT_AMQP}\" (include \"scdf.envrelease\" . ) -}}\n {{- end -}}\n{{- end -}}\n\n{{- define \"scdf.broker.rabbitmq.user\" -}}\n {{- if index .Values \"rabbitmq-ha\" \"enabled\" -}}\n {{ index .Values \"rabbitmq-ha\" \"rabbitmqUsername\" }}\n {{- else if .Values.rabbitmq.enabled -}}\n {{ .Values.rabbitmq.rabbitmq.username }}\n {{- end -}}\n{{- end -}}\n",
"# database-secret.yaml\n{{- if not .Values.mysql.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ printf \"%s-%s\" .Release.Name \"database\" | trunc 63 | trimSuffix \"-\" }}\n labels:\n app: {{ template \"scdf.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n database-password: {{ .Values.database.password | b64enc | quote }}\n{{- end }}\n",
"# grafana-configmap.yaml\n{{- if .Values.features.monitoring.enabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: scdf-grafana-ds-cm\n labels:\n app: scdf-grafana-ds-cm\ndata:\n datasources.yaml: |\n apiVersion: 1\n datasources:\n - name: ScdfPrometheus\n type: prometheus\n access: proxy\n org_id: 1\n url: http://{{- printf \"${%s_PROMETHEUS_SERVER_SERVICE_HOST}\" (include \"scdf.envrelease\" . ) -}}:{{- printf \"${%s_PROMETHEUS_SERVER_SERVICE_PORT}\" (include \"scdf.envrelease\" . ) }}\n is_default: true\n version: 5\n editable: true\n read_only: false\n{{- end }}\n",
"# grafana-secret.yaml\n{{- if .Values.features.monitoring.enabled }}\napiVersion: v1\nkind: Secret\ntype: Opaque\nmetadata:\n name: scdf-grafana-secret\n labels:\n app: scdf-grafana-secret\ndata:\n admin-user: {{ .Values.grafana.admin.defaultUsername }}\n admin-password: {{ .Values.grafana.admin.defaultPassword }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"scdf.fullname\" . }} \nspec:\n rules:\n - host: {{ .Values.ingress.server.host }}\n {{ .Values.ingress.protocol }}:\n paths:\n - backend:\n serviceName: {{ template \"scdf.fullname\" . }}-server\n servicePort: {{ .Values.server.service.externalPort }}\n path: /\n {{- if .Values.features.monitoring.enabled }}\n - host: {{ .Values.ingress.grafana.host }}\n {{ .Values.ingress.protocol }}:\n paths:\n - backend:\n serviceName: {{ .Release.Name }}-grafana\n servicePort: {{ .Values.grafana.service.port }}\n path: /\n {{- end }}\n{{- end }}",
"# prometheus-proxy-clusterrolebinding.yaml\n{{- if .Values.features.monitoring.enabled }}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ .Release.Name }}-prometheus-proxy\n labels:\n app: {{ .Release.Name }}-prometheus-proxy\nsubjects:\n - kind: ServiceAccount\n name: {{ .Release.Name }}-prometheus-proxy\n namespace: {{ .Release.Namespace }}\nroleRef:\n kind: ClusterRole\n name: cluster-admin\n apiGroup: rbac.authorization.k8s.io\n{{- end }}\n",
"# prometheus-proxy-deployment.yaml\n{{- if .Values.features.monitoring.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ .Release.Name }}-prometheus-proxy\n labels:\n app: prometheus-proxy\nspec:\n selector:\n matchLabels:\n app: prometheus-proxy\n template:\n metadata:\n labels:\n app: prometheus-proxy\n spec:\n serviceAccountName: {{ .Release.Name }}-prometheus-proxy\n containers:\n - name: prometheus-proxy\n image: micrometermetrics/prometheus-rsocket-proxy:0.9.0\n imagePullPolicy: Always\n ports:\n - name: scrape\n containerPort: 8080\n - name: rsocket\n containerPort: 7001\n resources:\n limits:\n cpu: 1.0\n memory: 2048Mi\n requests:\n cpu: 0.5\n memory: 1024Mi\n securityContext:\n fsGroup: 2000\n runAsNonRoot: true\n runAsUser: 1000\n{{- end }}\n",
"# prometheus-proxy-service.yaml\n{{- if .Values.features.monitoring.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ .Release.Name }}-prometheus-proxy\n labels:\n app: {{ .Release.Name }}-prometheus-proxy\nspec:\n selector:\n app: prometheus-proxy\n ports:\n - name: scrape\n port: 8080\n targetPort: 8080\n - name: rsocket\n port: 7001\n targetPort: 7001\n type: {{ .Values.prometheus.proxy.service.type }}\n{{- end }}\n",
"# prometheus-proxy-serviceaccount.yaml\n{{- if .Values.features.monitoring.enabled }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ .Release.Name }}-prometheus-proxy\n labels:\n app: {{ .Release.Name }}-prometheus-proxy\n namespace: {{ .Release.Namespace }}\n{{- end }}\n\n",
"# server-config.yaml\n{{- if not .Values.server.configMap }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"scdf.fullname\" . }}-server\n labels:\n app: {{ template \"scdf.name\" . }}\n component: server\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ndata:\n application.yaml: |-\n spring:\n {{- if .Values.features.batch.enabled }}\n cloud:\n dataflow:\n task:\n platform:\n kubernetes:\n accounts:\n {{ .Values.server.platformName }}:\n limits:\n memory: {{ .Values.deployer.resourceLimits.memory }}\n cpu: {{ .Values.deployer.resourceLimits.cpu }}\n{{- if .Values.server.containerConfiguration }}\n{{ toYaml .Values.server.containerConfiguration | indent 10 }}\n{{- end }}\n {{- end }}\n {{- if .Values.features.monitoring.enabled }}\n applicationProperties:\n {{- if .Values.features.streaming.enabled }}\n stream:\n management:\n metrics:\n export:\n prometheus:\n enabled: true\n rsocket:\n enabled: true\n host: {{ printf \"${%s_PROMETHEUS_PROXY_SERVICE_HOST}\" (include \"scdf.envrelease\" . ) }}\n port: {{ printf \"${%s_PROMETHEUS_PROXY_SERVICE_PORT_RSOCKET}\" (include \"scdf.envrelease\" . ) }}\n {{- end }}\n {{- if .Values.features.batch.enabled }}\n task:\n management:\n metrics:\n export:\n prometheus:\n enabled: true\n rsocket:\n enabled: true\n host: {{ printf \"${%s_PROMETHEUS_PROXY_SERVICE_HOST}\" (include \"scdf.envrelease\" . ) }}\n port: {{ printf \"${%s_PROMETHEUS_PROXY_SERVICE_PORT_RSOCKET}\" (include \"scdf.envrelease\" . ) }}\n {{- end }}\n {{- end }}\n {{- if .Values.features.monitoring.enabled }}\n grafana-info:\n {{- if .Values.ingress.enabled }}\n url: {{ .Values.ingress.protocol }}://{{ .Values.ingress.grafana.host }}\n {{- else }}\n url: http://{{- printf \"${%s_GRAFANA_SERVICE_HOST}\" (include \"scdf.envrelease\" . ) -}}:{{- printf \"${%s_GRAFANA_SERVICE_PORT}\" (include \"scdf.envrelease\" . ) }}\n {{- end }}\n {{- end }}\n datasource:\n url: 'jdbc:{{ template \"scdf.database.scheme\" . }}://{{ template \"scdf.database.host\" . }}:{{ template \"scdf.database.port\" . }}/{{ template \"scdf.database.dataflow\" . }}'\n driverClassName: {{ template \"scdf.database.driver\" . }}\n username: {{ template \"scdf.database.user\" . }}\n password: {{ template \"scdf.database.password\" . }}\n testOnBorrow: true\n validationQuery: \"SELECT 1\"\n{{- end }}\n",
"# server-deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"scdf.fullname\" . }}-server\n labels:\n app: {{ template \"scdf.name\" . }}\n component: server\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"scdf.name\" . }}\n component: server\n release: \"{{ .Release.Name }}\"\n replicas: 1\n template:\n metadata:\n labels:\n app: {{ template \"scdf.name\" . }}\n component: server\n release: \"{{ .Release.Name }}\"\n spec:\n containers:\n - name: {{ template \"scdf.fullname\" . }}-server\n image: {{ .Values.server.image }}:{{ .Values.server.version }}\n imagePullPolicy: {{ .Values.server.imagePullPolicy }}\n volumeMounts:\n - name: database\n mountPath: /etc/secrets/database\n readOnly: true\n ports:\n - containerPort: 8080\n name: http\n resources:\n{{ toYaml .Values.server.resources | indent 10 }}\n livenessProbe:\n httpGet:\n path: /management/health\n port: http\n initialDelaySeconds: 45\n readinessProbe:\n httpGet:\n path: /management/info\n port: http\n initialDelaySeconds: 45\n env:\n - name: KUBERNETES_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: \"metadata.namespace\"\n - name: SERVER_PORT\n value: '8080'\n - name: SPRING_CLOUD_CONFIG_ENABLED\n value: 'false'\n - name: SPRING_CLOUD_KUBERNETES_SECRETS_ENABLE_API\n value: 'true'\n - name: SPRING_CLOUD_KUBERNETES_SECRETS_PATHS\n value: /etc/secrets\n - name: SPRING_CLOUD_KUBERNETES_CONFIG_NAME\n {{- if .Values.server.configMap }}\n value: {{ .Values.server.configMap | quote }}\n {{- else }}\n value: {{ template \"scdf.fullname\" . }}-server\n {{- end }}\n {{- if .Values.features.streaming.enabled }}\n - name: SPRING_CLOUD_SKIPPER_CLIENT_SERVER_URI\n value: 'http://${{ printf \"{\" }}{{ template \"scdf.envname\" . }}_SKIPPER_SERVICE_HOST}/api'\n {{- end }}\n - name: SPRING_CLOUD_DATAFLOW_SERVER_URI\n value: 'http://${{ printf \"{\" }}{{ template \"scdf.envname\" . }}_SERVER_SERVICE_HOST}:${{ printf \"{\" }}{{ template \"scdf.envname\" . 
}}_SERVER_SERVICE_PORT}'\n - name: SPRING_APPLICATION_JSON\n value: \"{ \\\"maven\\\": { \\\"local-repository\\\": null, \\\"remote-repositories\\\": { \\\"repo1\\\": { \\\"url\\\": \\\"https://repo.spring.io/libs-snapshot\\\"} } } }\"\n - name: KUBERNETES_TRUST_CERTIFICATES\n value: {{ .Values.server.trustCerts | quote }}\n - name: SPRING_CLOUD_DATAFLOW_FEATURES_STREAMS_ENABLED\n value: {{ .Values.features.streaming.enabled | quote }}\n - name: SPRING_CLOUD_DATAFLOW_FEATURES_TASKS_ENABLED\n value: {{ .Values.features.batch.enabled | quote }}\n - name: SPRING_CLOUD_DATAFLOW_FEATURES_SCHEDULES_ENABLED\n value: {{ .Values.features.batch.enabled | quote }}\n - name: SPRING_CLOUD_DATAFLOW_TASK_COMPOSED_TASK_RUNNER_URI\n value: 'docker://springcloud/spring-cloud-dataflow-composed-task-runner:2.6.0'\n {{- range $key, $value := .Values.server.extraEnv }}\n - name: {{ $key }}\n value: \"{{ $value }}\"\n {{- end }}\n volumes:\n - name: database\n secret:\n {{- if .Values.mysql.enabled }}\n secretName: {{ printf \"%s-%s\" .Release.Name \"mysql\" | trunc 63 | trimSuffix \"-\" }}\n {{- else }}\n secretName: {{ printf \"%s-%s\" .Release.Name \"database\" | trunc 63 | trimSuffix \"-\" }}\n {{- end }}\n initContainers:\n - name: init-db-wait\n image: {{ .Values.initContainers.dbWait.image }}:{{ .Values.initContainers.dbWait.tag }}\n imagePullPolicy: {{ .Values.initContainers.dbWait.imagePullPolicy }}\n command: ['sh', '-c', 'until nc -w3 -z {{ template \"scdf.database.host\" . }} {{ template \"scdf.database.port\" . }}; do echo waiting for database; sleep 3; done;']\n serviceAccountName: {{ template \"scdf.serviceAccountName\" . }}\n",
"# server-rbac.yaml\n{{- if .Values.rbac.create -}}\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"scdf.fullname\" . }}\n labels:\n app: {{ template \"scdf.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nrules:\n - apiGroups: [\"\"]\n resources: [\"services\", \"pods\", \"replicationcontrollers\", \"persistentvolumeclaims\"]\n verbs: [\"get\", \"list\", \"watch\", \"create\", \"delete\", \"update\"]\n - apiGroups: [\"\"]\n resources: [\"configmaps\", \"secrets\", \"pods/log\", \"pods/status\"]\n verbs: [\"get\", \"list\", \"watch\"]\n - apiGroups: [\"apps\"]\n resources: [\"statefulsets\", \"deployments\", \"replicasets\"]\n verbs: [\"get\", \"list\", \"watch\", \"create\", \"delete\", \"update\", \"patch\"]\n - apiGroups: [\"extensions\"]\n resources: [\"deployments\", \"replicasets\"]\n verbs: [\"get\", \"list\", \"watch\", \"create\", \"delete\", \"update\", \"patch\"]\n - apiGroups: [\"batch\"]\n resources: [\"cronjobs\", \"jobs\"]\n verbs: [\"create\", \"delete\", \"get\", \"list\", \"watch\", \"update\", \"patch\"]\n---\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"scdf.fullname\" . }}\n labels:\n app: {{ template \"scdf.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nroleRef:\n kind: Role\n name: {{ template \"scdf.fullname\" . }}\n apiGroup: rbac.authorization.k8s.io\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"scdf.serviceAccountName\" . }}\n{{- end -}}\n",
"# server-service.yaml\nkind: Service\napiVersion: v1\nmetadata:\n name: {{ template \"scdf.fullname\" . }}-server\n labels:\n app: {{ template \"scdf.name\" . }}\n component: server\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- if .Values.server.service.labels }}\n{{ toYaml .Values.server.service.labels | indent 4 }}\n{{- end }}\n{{- if .Values.server.service.annotations }}\n annotations:\n{{ toYaml .Values.server.service.annotations | indent 4 }}\n{{- end }}\nspec:\n # If you are running k8s on a local dev box, you can use type NodePort instead\n type: {{ .Values.server.service.type }}\n{{- if .Values.server.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n{{ toYaml .Values.server.service.loadBalancerSourceRanges | indent 4 }}\n{{- end }}\n ports:\n - port: {{ .Values.server.service.externalPort }}\n targetPort: http\n name: http\n selector:\n app: {{ template \"scdf.name\" . }}\n component: server\n release: {{ .Release.Name }}\n",
"# service-account.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"scdf.serviceAccountName\" . }}\n labels:\n app: {{ template \"scdf.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- end -}}\n",
"# skipper-config.yaml\n{{- if .Values.features.streaming.enabled }}\n{{- if not .Values.skipper.configMap }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"scdf.fullname\" . }}-skipper\n labels:\n app: {{ template \"scdf.name\" . }}\n component: skipper\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ndata:\n application.yaml: |-\n spring:\n cloud:\n skipper:\n server:\n platform:\n kubernetes:\n accounts:\n {{ .Values.skipper.platformName }}:\n {{- if or .Values.rabbitmq.enabled (index .Values \"rabbitmq-ha\" \"enabled\") }}\n environmentVariables: 'SPRING_RABBITMQ_HOST={{ template \"scdf.broker.rabbitmq.host\" . }},SPRING_RABBITMQ_PORT={{ template \"scdf.broker.rabbitmq.port\" . }},SPRING_RABBITMQ_USERNAME={{ template \"scdf.broker.rabbitmq.user\" . }},SPRING_RABBITMQ_PASSWORD=${RABBITMQ_PASSWORD}'\n {{- else if .Values.kafka.enabled }}\n environmentVariables: 'SPRING_CLOUD_STREAM_KAFKA_BINDER_BROKERS=${{ printf \"{\" }}{{ template \"scdf.envrelease\" . }}_KAFKA_SERVICE_HOST}:${{ printf \"{\" }}{{ template \"scdf.envrelease\" . }}_KAFKA_SERVICE_PORT},SPRING_CLOUD_STREAM_KAFKA_BINDER_ZK_NODES=${{ printf \"{\" }}{{ template \"scdf.envrelease\" . }}_ZOOKEEPER_SERVICE_HOST}:${{ printf \"{\" }}{{ template \"scdf.envrelease\" . }}_ZOOKEEPER_SERVICE_PORT}'\n {{- end }}\n limits:\n memory: {{ .Values.deployer.resourceLimits.memory }}\n cpu: {{ .Values.deployer.resourceLimits.cpu }}\n readinessProbeDelay: {{ .Values.deployer.readinessProbe.initialDelaySeconds }}\n livenessProbeDelay: {{ .Values.deployer.livenessProbe.initialDelaySeconds }}\n datasource:\n url: 'jdbc:{{ template \"scdf.database.scheme\" . }}://{{ template \"scdf.database.host\" . }}:{{ template \"scdf.database.port\" . }}/{{ template \"scdf.database.skipper\" . }}'\n driverClassName: {{ template \"scdf.database.driver\" . }}\n username: {{ template \"scdf.database.user\" . }}\n password: {{ template \"scdf.database.password\" . }}\n testOnBorrow: true\n validationQuery: \"SELECT 1\"\n{{- end }}\n{{- end }}\n",
"# skipper-deployment.yaml\n{{- if .Values.features.streaming.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"scdf.fullname\" . }}-skipper\n labels:\n app: {{ template \"scdf.name\" . }}\n component: skipper\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"scdf.name\" . }}\n component: skipper\n release: \"{{ .Release.Name }}\"\n replicas: 1\n template:\n metadata:\n labels:\n app: {{ template \"scdf.name\" . }}\n component: skipper\n release: \"{{ .Release.Name }}\"\n spec:\n containers:\n - name: {{ template \"scdf.fullname\" . }}-skipper\n image: {{ .Values.skipper.image }}:{{ .Values.skipper.version }}\n imagePullPolicy: {{ .Values.skipper.imagePullPolicy }}\n volumeMounts:\n {{- if or .Values.rabbitmq.enabled (index .Values \"rabbitmq-ha\" \"enabled\") }}\n - name: rabbitmq\n mountPath: /etc/secrets/rabbitmq\n readOnly: true\n {{- end}}\n - name: database\n mountPath: /etc/secrets/database\n readOnly: true\n ports:\n - containerPort: 7577\n name: http\n resources:\n{{ toYaml .Values.skipper.resources | indent 10 }}\n livenessProbe:\n httpGet:\n path: /actuator/health\n port: http\n initialDelaySeconds: 45\n readinessProbe:\n httpGet:\n path: /actuator/info\n port: http\n initialDelaySeconds: 45\n env:\n - name: KUBERNETES_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: \"metadata.namespace\"\n - name: SERVER_PORT\n value: '7577'\n - name: SPRING_CLOUD_KUBERNETES_SECRETS_ENABLE_API\n value: 'true'\n - name: SPRING_CLOUD_KUBERNETES_SECRETS_PATHS\n value: /etc/secrets\n - name: SPRING_CLOUD_KUBERNETES_CONFIG_NAME\n {{- if .Values.skipper.configMap }}\n value: {{ .Values.skipper.configMap | quote }}\n {{- else }}\n value: {{ template \"scdf.fullname\" . }}-skipper\n {{- end }}\n {{- if or .Values.rabbitmq.enabled (index .Values \"rabbitmq-ha\" \"enabled\") }}\n - name: RABBITMQ_PASSWORD\n valueFrom:\n secretKeyRef:\n {{- if index .Values \"rabbitmq-ha\" \"enabled\" }}\n name: {{ printf \"%s-%s\" .Release.Name \"rabbitmq-ha\" | trunc 63 | trimSuffix \"-\" }}\n {{- else }}\n name: {{ printf \"%s-%s\" .Release.Name \"rabbitmq\" | trunc 63 | trimSuffix \"-\" }}\n {{- end }}\n key: rabbitmq-password\n {{- end }}\n - name: KUBERNETES_TRUST_CERTIFICATES\n value: {{ .Values.skipper.trustCerts | quote }}\n {{- range $key, $value := .Values.skipper.extraEnv }}\n - name: {{ $key }}\n value: \"{{ $value }}\"\n {{- end }}\n volumes:\n {{- if or .Values.rabbitmq.enabled (index .Values \"rabbitmq-ha\" \"enabled\") }}\n - name: rabbitmq\n secret:\n {{- if index .Values \"rabbitmq-ha\" \"enabled\" }}\n secretName: {{ printf \"%s-%s\" .Release.Name \"rabbitmq-ha\" | trunc 63 | trimSuffix \"-\" }}\n {{- else }}\n secretName: {{ printf \"%s-%s\" .Release.Name \"rabbitmq\" | trunc 63 | trimSuffix \"-\" }}\n {{- end }}\n {{- end }}\n - name: database\n secret:\n {{- if .Values.mysql.enabled }}\n secretName: {{ printf \"%s-%s\" .Release.Name \"mysql\" | trunc 63 | trimSuffix \"-\" }}\n {{- else }}\n secretName: {{ printf \"%s-%s\" .Release.Name \"database\" | trunc 63 | trimSuffix \"-\" }}\n {{- end }}\n initContainers:\n - name: init-db-wait\n image: {{ .Values.initContainers.dbWait.image }}:{{ .Values.initContainers.dbWait.tag }}\n imagePullPolicy: {{ .Values.initContainers.dbWait.imagePullPolicy }}\n command: ['sh', '-c', 'until nc -w3 -z {{ template \"scdf.database.host\" . }} {{ template \"scdf.database.port\" . 
}}; do echo waiting for database; sleep 3; done;']\n {{- if .Values.mysql.enabled }}\n - name: init-mysql-database\n image: \"{{ .Values.mysql.image }}:{{ .Values.mysql.imageTag }}\"\n imagePullPolicy: {{ .Values.mysql.imagePullPolicy }}\n env:\n - name: MYSQL_PWD\n valueFrom:\n secretKeyRef:\n name: {{ printf \"%s-%s\" .Release.Name \"mysql\" | trunc 63 | trimSuffix \"-\" }}\n key: mysql-root-password\n command:\n - 'sh'\n - '-c'\n - 'mysql -h {{ template \"scdf.database.host\" . }} -P {{ template \"scdf.database.port\" . }} -u root -e \"CREATE DATABASE IF NOT EXISTS {{ template \"scdf.database.skipper\" . }};\"'\n {{- end }}\n serviceAccountName: {{ template \"scdf.serviceAccountName\" . }}\n{{- end }}\n",
"# skipper-service.yaml\n{{- if .Values.features.streaming.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"scdf.fullname\" . }}-skipper\n labels:\n app: {{ template \"scdf.name\" . }}\n component: skipper\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- if .Values.skipper.service.labels }}\n{{ toYaml .Values.skipper.service.labels | indent 4 }}\n{{- end }}\n{{- if .Values.skipper.service.annotations }}\n annotations:\n{{ toYaml .Values.skipper.service.annotations | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.skipper.service.type }}\n ports:\n - port: 80\n targetPort: http\n name: http\n selector:\n app: {{ template \"scdf.name\" . }}\n component: skipper\n release: {{ .Release.Name }}\n{{- end }}\n"
] | # Default values for spring-cloud-data-flow.
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the serviceAccountName template
name:
initContainers:
dbWait:
image: "busybox"
tag: "1.30.1"
imagePullPolicy: IfNotPresent
server:
image: springcloud/spring-cloud-dataflow-server
version: 2.6.0
imagePullPolicy: IfNotPresent
platformName: default
trustCerts: false
service:
type: LoadBalancer
externalPort: 80
annotations: {}
labels: {}
loadBalancerSourceRanges: []
configMap:
resources: {}
# limits:
# cpu: 1.0
# memory: 2048Mi
# requests:
# cpu: 0.5
# memory: 640Mi
extraEnv: {}
containerConfiguration: {}
# container:
# registry-configurations:
# default:
# registry-host: registry-1.docker.io
# authorization-type: dockeroauth2
skipper:
enabled: true
image: springcloud/spring-cloud-skipper-server
version: 2.5.0
imagePullPolicy: IfNotPresent
platformName: default
trustCerts: false
service:
type: ClusterIP
annotations: {}
labels: {}
configMap:
resources: {}
# limits:
# cpu: 1.0
# memory: 1024Mi
# requests:
# cpu: 0.5
# memory: 640Mi
extraEnv: {}
deployer:
resourceLimits:
cpu: 500m
memory: 1024Mi
readinessProbe:
initialDelaySeconds: 120
livenessProbe:
initialDelaySeconds: 90
rabbitmq:
enabled: true
rabbitmq:
username: user
# this value will be encoded into a secret
password: changeme
rabbitmq-ha:
enabled: false
rabbitmqUsername: user
kafka:
enabled: false
replicas: 1
configurationOverrides:
"offsets.topic.replication.factor": 1
"confluent.support.metrics.enable": false
zookeeper:
replicaCount: 1
mysql:
enabled: true
mysqlDatabase: dataflow
## If you are using an external database,
## you must specify the following database details
database:
driver:
scheme:
host:
port:
user: scdf
password:
dataflow: dataflow
skipper: skipper
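## For example (illustrative values; assumes the server image bundles the
## matching JDBC driver), an external PostgreSQL instance would look like:
# database:
#   driver: org.postgresql.Driver
#   scheme: postgresql
#   host: db.example.com
#   port: 5432
#   user: scdf
#   password: changeme
#   dataflow: dataflow
#   skipper: skipper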
features:
streaming:
enabled: true
batch:
enabled: true
monitoring:
enabled: false
## If you are using an ingress controller, you can override the following
## default values to create an Ingress resource
ingress:
enabled: false
server:
host: data-flow.local
grafana:
host: grafana.local
protocol: https
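## Example (illustrative hostnames) enabling the Ingress resource:
# ingress:
#   enabled: true
#   server:
#     host: dataflow.example.com
#   grafana:
#     host: grafana.example.com
#   protocol: http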
grafana:
service:
type: LoadBalancer
admin:
existingSecret: scdf-grafana-secret
userKey: admin-user
passwordKey: admin-password
defaultUsername: YWRtaW4=
defaultPassword: cGFzc3dvcmQ=
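## Note: the two values above are base64-encoded ("admin" / "password") because
## the scdf-grafana-secret template writes them directly into the Secret's data.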
extraConfigmapMounts:
- name: scdf-grafana-ds-cm
mountPath: /etc/grafana/provisioning/datasources/datasources.yaml
subPath: datasources.yaml
configMap: scdf-grafana-ds-cm
readOnly: true
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: default
orgId: 1
folder:
type: file
disableDeletion: true
editable: false
options:
path: /var/lib/grafana/dashboards/default
dashboards:
default:
scdf-applications:
url: https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/src/grafana/prometheus/docker/grafana/dashboards/scdf-applications.json
scdf-streams:
url: https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/src/grafana/prometheus/docker/grafana/dashboards/scdf-streams.json
scdf-task-batch:
url: https://raw.githubusercontent.com/spring-cloud/spring-cloud-dataflow/master/src/grafana/prometheus/docker/grafana/dashboards/scdf-task-batch.json
prometheus:
podSecurityPolicy:
enabled: true
alertmanager:
enabled: false
kubeStateMetrics:
enabled: false
nodeExporter:
enabled: false
pushgateway:
enabled: false
server:
global:
scrape_interval: 10s
scrape_timeout: 9s
evaluation_interval: 10s
extraScrapeConfigs: |
- job_name: 'proxied-applications'
metrics_path: '/metrics/connected'
kubernetes_sd_configs:
- role: pod
namespaces:
names:
- {{ .Release.Namespace }}
relabel_configs:
- source_labels: [__meta_kubernetes_pod_label_app]
action: keep
regex: prometheus-proxy
- source_labels: [__meta_kubernetes_pod_container_port_number]
action: keep
regex: 8080
- job_name: 'proxies'
metrics_path: '/metrics/proxy'
kubernetes_sd_configs:
- role: pod
namespaces:
names:
- {{ .Release.Namespace }}
relabel_configs:
- source_labels: [__meta_kubernetes_pod_label_app]
action: keep
regex: prometheus-proxy
- source_labels: [__meta_kubernetes_pod_container_port_number]
action: keep
regex: 8080
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
proxy:
service:
type: LoadBalancer
|
kibana | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"kibana.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"kibana.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- printf .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"kibana.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n{{ default (include \"kibana.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n{{- if .Values.serviceAccountName -}}\n{{- .Values.serviceAccountName }}\n{{- else -}}\n{{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n",
"# configmap-dashboardimport.yaml\n{{- if .Values.dashboardImport.enabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"kibana.fullname\" . }}-importscript\n labels:\n app: {{ template \"kibana.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n dashboardImport.sh: |\n #!/usr/bin/env bash\n #\n # kibana dashboard import script\n #\n\n cd /kibanadashboards\n\n echo \"Starting Kibana...\"\n\n /usr/local/bin/kibana-docker $@ &\n\n echo \"Waiting up to {{ .Values.dashboardImport.timeout }} seconds for Kibana to get in green overall state...\"\n\n for i in {1..{{ .Values.dashboardImport.timeout }}}; do\n curl -s localhost:5601{{ .Values.dashboardImport.basePath }}/api/status | python -c 'import sys, json; print json.load(sys.stdin)[\"status\"][\"overall\"][\"state\"]' 2> /dev/null | grep green > /dev/null && break || sleep 1\n done\n\n for DASHBOARD_FILE in *; do\n echo -e \"Importing ${DASHBOARD_FILE} dashboard...\"\n\n if ! python -c 'import sys, json; print json.load(sys.stdin)' < \"${DASHBOARD_FILE}\" &> /dev/null ; then\n echo \"${DASHBOARD_FILE} is not valid JSON, assuming it's an URL...\"\n TMP_FILE=\"$(mktemp)\"\n curl -s $(cat ${DASHBOARD_FILE}) > ${TMP_FILE}\n curl -v {{ if .Values.dashboardImport.xpackauth.enabled }}--user {{ .Values.dashboardImport.xpackauth.username }}:{{ .Values.dashboardImport.xpackauth.password }}{{ end }} -s --connect-timeout 60 --max-time 60 -XPOST localhost:5601{{ .Values.dashboardImport.basePath }}/api/kibana/dashboards/import?force=true -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d @${TMP_FILE}\n rm ${TMP_FILE}\n else\n echo \"Valid JSON found in ${DASHBOARD_FILE}, importing...\"\n curl -v {{ if .Values.dashboardImport.xpackauth.enabled }}--user {{ .Values.dashboardImport.xpackauth.username }}:{{ .Values.dashboardImport.xpackauth.password }}{{ end }} -s --connect-timeout 60 --max-time 60 -XPOST localhost:5601{{ .Values.dashboardImport.basePath }}/api/kibana/dashboards/import?force=true -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d @./${DASHBOARD_FILE}\n fi\n\n if [ \"$?\" != \"0\" ]; then\n echo -e \"\\nImport of ${DASHBOARD_FILE} dashboard failed... Exiting...\"\n exit 1\n else\n echo -e \"\\nImport of ${DASHBOARD_FILE} dashboard finished :-)\"\n fi\n\n done\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"kibana.fullname\" . }}-dashboards\n labels:\n app: {{ template \"kibana.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n{{- range $key, $value := .Values.dashboardImport.dashboards }}\n {{ $key }}: |-\n{{ $value | indent 4 }}\n{{- end -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"kibana.fullname\" . }}\n labels:\n app: {{ template \"kibana.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n{{- range $key, $value := .Values.files }}\n {{ $key }}: |\n{{ toYaml $value | default \"{}\" | indent 4 }}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app: {{ template \"kibana.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kibana.fullname\" . }}\n{{- if .Values.deployment.annotations }}\n annotations:\n{{ toYaml .Values.deployment.annotations | indent 4 }}\n{{- end }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"kibana.name\" . }}\n release: {{ .Release.Name }}\n revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}\n template:\n metadata:\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n{{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n{{- end }}\n labels:\n app: {{ template \"kibana.name\" . }}\n release: \"{{ .Release.Name }}\"\n{{- if .Values.podLabels }}\n{{ toYaml .Values.podLabels | indent 8 }}\n{{- end }}\n spec:\n serviceAccountName: {{ template \"kibana.serviceAccountName\" . }}\n {{- if .Values.priorityClassName }}\n priorityClassName: \"{{ .Values.priorityClassName }}\"\n {{- end }}\n{{- if or (.Values.initContainers) (.Values.dashboardImport.enabled) (.Values.plugins.enabled) }}\n initContainers:\n{{- if .Values.initContainers }}\n{{- range $key, $value := .Values.initContainers }}\n - name: {{ $key | quote }}\n{{ toYaml $value | indent 8 }}\n{{- end }}\n{{- end }}\n{{- if .Values.dashboardImport.enabled }}\n - name: {{ .Chart.Name }}-dashboardimport\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command: [\"/bin/bash\"]\n args:\n - \"-c\"\n - \"/tmp/dashboardImport.sh\"\n{{- if .Values.commandline.args }}\n{{ toYaml .Values.commandline.args | indent 10 }}\n{{- end }}\n env:\n {{- range $key, $value := .Values.env }}\n - name: {{ $key | quote }}\n value: {{ tpl $value $ | quote }}\n {{- end }}\n volumeMounts:\n - name: {{ template \"kibana.fullname\" . }}-dashboards\n mountPath: \"/kibanadashboards\"\n - name: {{ template \"kibana.fullname\" . }}-importscript\n mountPath: \"/tmp/dashboardImport.sh\"\n subPath: dashboardImport.sh\n {{- range $configFile := (keys .Values.files) }}\n - name: {{ template \"kibana.name\" $ }}\n mountPath: \"/usr/share/kibana/config/{{ $configFile }}\"\n subPath: {{ $configFile }}\n {{- end }}\n{{- end }}\n{{- if .Values.plugins.enabled}}\n - name: {{ .Chart.Name }}-plugins-install\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command:\n - /bin/bash\n - \"-c\"\n - |\n set -e\n rm -rf plugins/lost+found\n plugins=(\n {{- range .Values.plugins.values }}\n {{ . 
}}\n {{- end }}\n )\n if {{ .Values.plugins.reset }}\n then\n for p in $(./bin/kibana-plugin list | cut -d \"@\" -f1)\n do\n ./bin/kibana-plugin remove ${p}\n done\n fi\n for i in \"${plugins[@]}\"\n do\n IFS=',' read -ra PLUGIN <<< \"$i\"\n pluginInstalledCheck=$(./bin/kibana-plugin list | grep \"${PLUGIN[0]}\" | cut -d '@' -f1 || true)\n pluginVersionCheck=$(./bin/kibana-plugin list | grep \"${PLUGIN[0]}\" | cut -d '@' -f2 || true)\n if [ \"${pluginInstalledCheck}\" = \"${PLUGIN[0]}\" ]\n then\n if [ \"${pluginVersionCheck}\" != \"${PLUGIN[1]}\" ]\n then\n ./bin/kibana-plugin remove \"${PLUGIN[0]}\"\n ./bin/kibana-plugin install \"${PLUGIN[2]}\"\n fi\n else\n ./bin/kibana-plugin install \"${PLUGIN[2]}\"\n fi\n done\n env:\n {{- range $key, $value := .Values.env }}\n - name: {{ $key | quote }}\n value: {{ tpl $value $ | quote }}\n {{- end }}\n volumeMounts:\n - name: plugins\n mountPath: /usr/share/kibana/plugins\n {{- range $configFile := (keys .Values.files) }}\n - name: {{ template \"kibana.name\" $ }}\n mountPath: \"/usr/share/kibana/config/{{ $configFile }}\"\n subPath: {{ $configFile }}\n {{- end }}\n{{- if .Values.securityContext.enabled }}\n securityContext:\n allowPrivilegeEscalation: {{ .Values.securityContext.allowPrivilegeEscalation }}\n{{- end }}\n{{- end }}\n{{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n {{- if .Values.commandline.args }}\n args:\n - \"/bin/bash\"\n - \"/usr/local/bin/kibana-docker\"\n{{ toYaml .Values.commandline.args | indent 10 }}\n {{- end }}\n env:\n {{- range $key, $value := .Values.env }}\n - name: {{ $key | quote }}\n value: {{ tpl $value $ | quote }}\n {{- end }}\n{{- if .Values.envFromSecrets }}\n {{- range $key,$value := .Values.envFromSecrets }}\n - name: {{ $key | upper | quote}}\n valueFrom:\n secretKeyRef:\n name: {{ $value.from.secret | quote}}\n key: {{ $value.from.key | quote}}\n {{- end }}\n{{- end }}\n{{- if (not .Values.authProxyEnabled) }}\n ports:\n - containerPort: {{ .Values.service.internalPort }}\n name: {{ template \"kibana.name\" . }}\n protocol: TCP\n{{- end }}\n{{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: {{ .Values.livenessProbe.path }}\n port: {{ .Values.service.internalPort }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n{{- end }}\n{{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: {{ .Values.readinessProbe.path }}\n port: {{ .Values.service.internalPort }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n{{- end }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n {{- range $configFile := (keys .Values.files) }}\n - name: {{ template \"kibana.name\" $ }}\n mountPath: \"/usr/share/kibana/config/{{ $configFile }}\"\n subPath: {{ $configFile }}\n {{- end }}\n{{- if .Values.extraVolumeMounts }}\n{{ toYaml .Values.extraVolumeMounts | indent 8 }}\n{{- end }}\n{{- if .Values.plugins.enabled}}\n - name: plugins\n mountPath: /usr/share/kibana/plugins\n{{- end }}\n{{- with .Values.extraContainers }}\n{{ tpl . 
$ | indent 6 }}\n{{- end }}\n{{- range .Values.extraConfigMapMounts }}\n - name: {{ .name }}\n mountPath: {{ .mountPath }}\n subPath: {{ .subPath }}\n{{- end }}\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.image.pullSecrets | indent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n{{- if .Values.securityContext.enabled }}\n securityContext:\n runAsUser: {{ .Values.securityContext.runAsUser }}\n fsGroup: {{ .Values.securityContext.fsGroup }}\n{{- end }}\n volumes:\n - name: {{ template \"kibana.name\" . }}\n configMap:\n name: {{ template \"kibana.fullname\" . }}\n{{- if .Values.plugins.enabled}}\n - name: plugins\n {{- if .Values.persistentVolumeClaim.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"kibana.fullname\" . }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n{{- end }}\n{{- if .Values.dashboardImport.enabled }}\n - name: {{ template \"kibana.fullname\" . }}-dashboards\n configMap:\n name: {{ template \"kibana.fullname\" . }}-dashboards\n - name: {{ template \"kibana.fullname\" . }}-importscript\n configMap:\n name: {{ template \"kibana.fullname\" . }}-importscript\n defaultMode: 0777\n{{- end }}\n{{- range .Values.extraConfigMapMounts }}\n - name: {{ .name }}\n configMap:\n name: {{ .configMap }}\n{{- end }}\n{{- if .Values.extraVolumes }}\n{{ toYaml .Values.extraVolumes | indent 8 }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $serviceName := include \"kibana.fullname\" . -}}\n{{- $servicePort := .Values.service.externalPort -}}\n{{- if semverCompare \">=1.14\" .Capabilities.KubeVersion.GitVersion }}\napiVersion: networking.k8s.io/v1beta1\n{{ else }}\napiVersion: extensions/v1beta1\n{{- end }}\nkind: Ingress\nmetadata:\n labels:\n app: {{ template \"kibana.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kibana.fullname\" . }}\n annotations:\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range .Values.ingress.hosts }}\n {{- $url := splitList \"/\" . }}\n - host: {{ first $url }}\n http:\n paths:\n - path: /{{ rest $url | join \"/\" }}\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n {{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: {{ template \"kibana.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- range $key, $value := .Values.service.labels }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n name: {{ template \"kibana.fullname\" . }}\n {{- with .Values.service.annotations }}\n annotations:\n {{- range $key, $value := . }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\nspec:\n {{- if .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n {{- range $cidr := .Values.service.loadBalancerSourceRanges }}\n - {{ $cidr }}\n {{- end }}\n {{- end }}\n type: {{ .Values.service.type }}\n {{- if and (eq .Values.service.type \"ClusterIP\") .Values.service.clusterIP }}\n clusterIP: {{ .Values.service.clusterIP }}\n {{- end }}\n ports:\n - port: {{ .Values.service.externalPort }}\n{{- if not .Values.authProxyEnabled }}\n targetPort: {{ .Values.service.internalPort }}\n{{- else }}\n targetPort: {{ .Values.service.authProxyPort }}\n{{- end }}\n protocol: TCP\n{{ if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePort))) }}\n nodePort: {{ .Values.service.nodePort }}\n{{ end }}\n{{- if .Values.service.portName }}\n name: {{ .Values.service.portName }}\n{{- end }}\n{{- if .Values.service.externalIPs }}\n externalIPs:\n{{ toYaml .Values.service.externalIPs | indent 4 }}\n{{- end }}\n selector:\n app: {{ template \"kibana.name\" . }}\n release: {{ .Release.Name }}\n{{- range $key, $value := .Values.service.selector }}\n {{ $key }}: {{ $value | quote }}\n{{- end }}\n{{- if .Values.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n{{- end }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"kibana.serviceAccountName\" . }}\n labels:\n app: {{ template \"kibana.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n{{- end -}}\n",
"# test-configmap.yaml\n{{- if .Values.testFramework.enabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"kibana.fullname\" . }}-test\n labels:\n app: {{ template \"kibana.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\ndata:\n run.sh: |-\n @test \"Test Status\" {\n {{- if .Values.service.selector }}\n skip \"Can't guarentee pod names with selector\"\n {{- else }}\n {{- $port := .Values.service.externalPort }}\n url=\"http://{{ template \"kibana.fullname\" . }}{{ if $port }}:{{ $port }}{{ end }}/api{{ .Values.livenessProbe.path }}\"\n\n # retry for 1 minute\n run curl -s -o /dev/null -I -w \"%{http_code}\" --retry 30 --retry-delay 2 $url\n\n code=$(curl -s -o /dev/null -I -w \"%{http_code}\" $url)\n body=$(curl $url)\n if [ \"$code\" == \"503\" ]\n then\n skip \"Kibana Unavailable (503), can't get status - see pod logs: $body\"\n fi\n\n result=$(echo $body | jq -cr '.status.statuses[]')\n [ \"$result\" != \"\" ]\n\n result=$(echo $body | jq -cr '.status.statuses[] | select(.state != \"green\")')\n [ \"$result\" == \"\" ]\n {{- end }}\n }\n{{- end }}\n",
"# test.yaml\n{{- if .Values.testFramework.enabled }}\napiVersion: v1\nkind: Pod\nmetadata:\n name: {{ template \"kibana.fullname\" . }}-test\n labels:\n app: {{ template \"kibana.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\n annotations:\n \"helm.sh/hook\": test-success\nspec:\n initContainers:\n - name: test-framework\n image: \"{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}\"\n command:\n - \"bash\"\n - \"-c\"\n - |\n set -ex\n # copy bats to tools dir\n cp -R /usr/local/libexec/ /tools/bats/\n volumeMounts:\n - mountPath: /tools\n name: tools\n containers:\n - name: {{ .Release.Name }}-test\n image: \"dwdraju/alpine-curl-jq\"\n command: [\"/tools/bats/bats\", \"-t\", \"/tests/run.sh\"]\n volumeMounts:\n - mountPath: /tests\n name: tests\n readOnly: true\n - mountPath: /tools\n name: tools\n volumes:\n - name: tests\n configMap:\n name: {{ template \"kibana.fullname\" . }}-test\n - name: tools\n emptyDir: {}\n restartPolicy: Never\n{{- end }}\n",
"# volume-claim.yaml\n{{- if and .Values.plugins.enabled .Values.persistentVolumeClaim.enabled -}}\n{{- if not .Values.persistentVolumeClaim.existingClaim -}}\napiVersion: \"v1\"\nkind: \"PersistentVolumeClaim\"\nmetadata:\n{{- if .Values.persistentVolumeClaim.annotations }}\n annotations:\n{{ toYaml .Values.persistentVolumeClaim.annotations | indent 4 }}\n{{- end }}\n labels:\n app: {{ template \"kibana.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n component: \"{{ .Values.persistentVolumeClaim.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kibana.fullname\" . }}\nspec:\n accessModes:\n{{ toYaml .Values.persistentVolumeClaim.accessModes | indent 4 }}\n{{- if .Values.persistentVolumeClaim.storageClass }}\n{{- if (eq \"-\" .Values.persistentVolumeClaim.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistentVolumeClaim.storageClass }}\"\n{{- end }}\n{{- end }}\n resources:\n requests:\n storage: \"{{ .Values.persistentVolumeClaim.size }}\"\n{{- end -}}\n{{- end -}}\n"
] | image:
repository: "docker.elastic.co/kibana/kibana-oss"
tag: "6.7.0"
pullPolicy: "IfNotPresent"
testFramework:
enabled: "true"
image: "dduportal/bats"
tag: "0.4.0"
commandline:
args: []
env: {}
## All Kibana configuration options are adjustable via env vars.
## To map a config option to an env var, uppercase it and replace `.` with `_`
## Ref: https://www.elastic.co/guide/en/kibana/current/settings.html
## For kibana < 6.6, use ELASTICSEARCH_URL instead
# ELASTICSEARCH_HOSTS: http://elasticsearch-client:9200
# SERVER_PORT: 5601
# LOGGING_VERBOSE: "true"
# SERVER_DEFAULTROUTE: "/app/kibana"
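## e.g. the rule above maps the Kibana setting `server.basePath` to SERVER_BASEPATH
## (the value below is hypothetical):
# SERVER_BASEPATH: "/kibana"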
envFromSecrets: {}
## Create a secret manually. Reference it here to inject environment variables
# ELASTICSEARCH_USERNAME:
# from:
# secret: secret-name-here
# key: ELASTICSEARCH_USERNAME
# ELASTICSEARCH_PASSWORD:
# from:
# secret: secret-name-here
# key: ELASTICSEARCH_PASSWORD
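## A minimal sketch of creating such a secret up front (secret name and values are hypothetical):
## kubectl create secret generic secret-name-here \
##   --from-literal=ELASTICSEARCH_USERNAME=elastic \
##   --from-literal=ELASTICSEARCH_PASSWORD=changeme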
files:
kibana.yml:
## Default Kibana configuration from kibana-docker.
server.name: kibana
server.host: "0"
## For kibana < 6.6, use elasticsearch.url instead
elasticsearch.hosts: http://elasticsearch:9200
## Custom config properties below
## Ref: https://www.elastic.co/guide/en/kibana/current/settings.html
# server.port: 5601
# logging.verbose: "true"
# server.defaultRoute: "/app/kibana"
deployment:
annotations: {}
service:
type: ClusterIP
# clusterIP: None
# portName: kibana-svc
externalPort: 443
internalPort: 5601
# authProxyPort: 5602 (to be used with authProxyEnabled and a proxy defined in extraContainers)
## External IP addresses of service
## Default: nil
##
# externalIPs:
# - 192.168.0.1
#
## LoadBalancer IP if service.type is LoadBalancer
## Default: nil
##
# loadBalancerIP: 10.2.2.2
annotations: {}
# Annotation example: setup ssl with aws cert when service.type is LoadBalancer
# service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:EXAMPLE_CERT
labels: {}
## Label example: show service URL in `kubectl cluster-info`
# kubernetes.io/cluster-service: "true"
## Limit load balancer source ips to list of CIDRs (where available)
# loadBalancerSourceRanges: []
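## e.g. (hypothetical CIDRs):
# loadBalancerSourceRanges:
#   - 10.0.0.0/8
#   - 172.16.0.0/12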
selector: {}
ingress:
enabled: false
# hosts:
# - kibana.localhost.localdomain
# - localhost.localdomain/kibana
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# tls:
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
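## Each hosts entry is split on "/" by the ingress template: the part before the
## first slash becomes the host, the remainder becomes the path; e.g. a hypothetical
## entry "kibana.example.com/logging" renders host "kibana.example.com" with path "/logging".
# hosts:
#   - kibana.example.com/logging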
serviceAccount:
# Specifies whether a service account should be created
create: false
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# If set and create is false, the service account must already exist
name:
livenessProbe:
enabled: false
path: /status
initialDelaySeconds: 30
timeoutSeconds: 10
readinessProbe:
enabled: false
path: /status
initialDelaySeconds: 30
timeoutSeconds: 10
periodSeconds: 10
successThreshold: 5
# Enable an authproxy. Specify container in extraContainers
authProxyEnabled: false
extraContainers: |
# - name: proxy
# image: quay.io/gambol99/keycloak-proxy:latest
# args:
# - --resource=uri=/*
# - --discovery-url=https://discovery-url
# - --client-id=client
# - --client-secret=secret
# - --listen=0.0.0.0:5602
# - --upstream-url=http://127.0.0.1:5601
# ports:
# - name: web
# containerPort: 9090
extraVolumeMounts: []
extraVolumes: []
resources: {}
# limits:
# cpu: 100m
# memory: 300Mi
# requests:
# cpu: 100m
# memory: 300Mi
priorityClassName: ""
# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
# affinity: {}
# Tolerations for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
podAnnotations: {}
replicaCount: 1
revisionHistoryLimit: 3
# Custom labels for pod assignment
podLabels: {}
# To export a dashboard from a running Kibana 6.3.x use:
# curl --user <username>:<password> -XGET https://kibana.yourdomain.com:5601/api/kibana/dashboards/export?dashboard=<some-dashboard-uuid> > my-dashboard.json
# A dashboard is defined by a name and a string with the json payload or the download url
dashboardImport:
enabled: false
timeout: 60
basePath: /
xpackauth:
enabled: false
username: myuser
password: mypass
dashboards: {}
# k8s: https://raw.githubusercontent.com/monotek/kibana-dashboards/master/k8s-fluentd-elasticsearch.json
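## or inline as a JSON string (hypothetical minimal payload):
# my-dashboard: '{"version": "6.3.2", "objects": []}'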
# List of plugins to install using an initContainer
# NOTE: plugin installation is unlikely to work well when the chart is given low resource constraints; increase resources before enabling plugins.
plugins:
# set to true to enable plugin installation
enabled: false
# set to true to remove all kibana plugins before installation
reset: false
# Use <plugin_name,version,url> to add/upgrade plugin
values:
# - elastalert-kibana-plugin,1.0.1,https://github.com/bitsensor/elastalert-kibana-plugin/releases/download/1.0.1/elastalert-kibana-plugin-1.0.1-6.4.2.zip
# - logtrail,0.1.31,https://github.com/sivasamyk/logtrail/releases/download/v0.1.31/logtrail-6.6.0-0.1.31.zip
# - other_plugin,<version>,<url>
persistentVolumeClaim:
# set to true to use pvc
enabled: false
# set to true to use your own pvc
existingClaim: false
annotations: {}
accessModes:
- ReadWriteOnce
size: "5Gi"
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
# default security context
securityContext:
enabled: false
allowPrivilegeEscalation: false
runAsUser: 1000
fsGroup: 2000
extraConfigMapMounts: []
# - name: logtrail-configs
# configMap: kibana-logtrail
# mountPath: /usr/share/kibana/plugins/logtrail/logtrail.json
# subPath: logtrail.json
# Add your own init container or uncomment and modify the given example.
initContainers: {}
## Don't start kibana until Elasticsearch is reachable.
## Ensure that it is available at http://elasticsearch:9200
##
# es-check: # <- will be used as container name
# image: "appropriate/curl:latest"
# imagePullPolicy: "IfNotPresent"
# command:
# - "/bin/sh"
# - "-c"
# - |
# is_down=true
# while "$is_down"; do
# if curl -sSf --fail-early --connect-timeout 5 http://elasticsearch:9200; then
# is_down=false
# else
# sleep 5
# fi
# done
|
bitcoind | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"bitcoind.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"bitcoind.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"bitcoind.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"bitcoind.fullname\" . }}\n labels:\n app: {{ template \"bitcoind.name\" . }}\n chart: {{ template \"bitcoind.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n{{ toYaml .Values.configurationFile | indent 2 }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"bitcoind.fullname\" . }}\n labels:\n app: {{ template \"bitcoind.name\" . }}\n chart: {{ template \"bitcoind.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n strategy:\n type: Recreate\n selector:\n matchLabels:\n app: {{ template \"bitcoind.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"bitcoind.name\" . }}\n release: {{ .Release.Name }}\n spec:\n terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}\n {{- if .Values.configurationFile }}\n initContainers:\n - name: copy-bitcoind-config\n image: busybox\n command: ['sh', '-c', 'cp /configmap/bitcoin.conf /bitcoin/.bitcoin/bitcoin.conf']\n volumeMounts:\n - name: configmap\n mountPath: /configmap\n - name: config\n mountPath: /bitcoin/.bitcoin/\n {{- end }}\n containers:\n - name: {{ template \"bitcoind.fullname\" . }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n ports:\n - name: rpc\n containerPort: {{ .Values.service.rpcPort }}\n - name: p2p\n containerPort: {{ .Values.service.p2pPort }}\n - name: testnet\n containerPort: {{ .Values.service.testnetPort }}\n - name: testnetp2pport\n containerPort: {{ .Values.service.testnetP2pPort }}\n volumeMounts:\n - name: data\n mountPath: /bitcoin\n {{- if .Values.configurationFile }}\n - name: config\n mountPath: /bitcoin/.bitcoin/bitcoin.conf\n subPath: bitcoin.conf\n {{- end }}\n volumes:\n {{- if .Values.configurationFile }}\n - name: config\n emptyDir: {}\n - name: configmap\n configMap:\n name: {{ template \"bitcoind.fullname\" . }}\n {{- end }}\n - name: data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.existingClaim | default (include \"bitcoind.fullname\" .) }}\n {{- else }}\n emptyDir: {}\n {{- end -}}",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"bitcoind.fullname\" . }}\n annotations:\n \"helm.sh/resource-policy\": keep\n labels:\n app: {{ template \"bitcoind.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"bitcoind.fullname\" . }}\n labels:\n app: {{ template \"bitcoind.name\" . }}\n chart: {{ template \"bitcoind.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n ports:\n - name: rpc\n port: {{ .Values.service.rpcPort }}\n targetPort: rpc\n - name: p2p\n port: {{ .Values.service.p2pPort }}\n targetPort: p2p\n - name: testnet\n port: {{ .Values.service.testnetPort }}\n - name: testnet-p2p\n port: {{ .Values.service.testnetP2pPort }}\n selector:\n app: {{ template \"bitcoind.name\" . }}\n release: {{ .Release.Name }}\n"
] | # Default values for bitcoind.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
terminationGracePeriodSeconds: 30
image:
repository: arilot/docker-bitcoind
tag: 0.17.1
pullPolicy: IfNotPresent
service:
rpcPort: 8332
p2pPort: 8333
testnetPort: 18332
testnetP2pPort: 18333
persistence:
enabled: true
## database data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 300Gi
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# requests:
# memory: 512Mi
# cpu: 300m
# Custom bitcoind configuration file used to override default bitcoind settings
configurationFile:
bitcoin.conf: |-
server=1
printtoconsole=1
rpcuser=rpcuser
rpcpassword=rpcpassword
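## Additional bitcoind options can be appended inside the literal block above,
## one key=value per line; e.g. (hypothetical values):
# txindex=1
# rpcallowip=10.0.0.0/8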
|
spark-history-server | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"spark-history-server.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"spark-history-server.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"spark-history-server.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"spark-history-server.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"spark-history-server.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# cleanup-job.yaml\n# Delete the parent chart before the sub-chart\n{{- if .Values.nfs.enableExampleNFS }}\napiVersion: batch/v1\nkind: Job\nmetadata:\n name: {{ include \"spark-history-server.fullname\" . }}\n annotations:\n \"helm.sh/hook\": pre-delete\n \"helm.sh/hook-delete-policy\": hook-succeeded\n labels:\n app.kubernetes.io/name: {{ include \"spark-history-server.name\" . }}\n helm.sh/chart: {{ include \"spark-history-server.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n template:\n spec:\n serviceAccountName: {{ include \"spark-history-server.serviceAccountName\" . }}\n restartPolicy: OnFailure\n containers:\n - name: main\n image: \"lightbend/curl:7.47.0\"\n imagePullPolicy: IfNotPresent\n command:\n - \"/bin/sh\"\n - \"-c\"\n - \"curl -ik \\\n -X DELETE \\\n -H 'Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)' \\\n -H 'Accept: application/json' \\\n -H 'Content-Type: application/json' \\\n https://kubernetes.default.svc/api/v1/deployments/{{ include \"spark-history-server.fullname\" . }}\"\n{{- end }}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"spark-history-server.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"spark-history-server.name\" . }}\n helm.sh/chart: {{ include \"spark-history-server.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ndata:\n enablePVC: {{ .Values.pvc.enablePVC | quote }}\n enableGCS: {{ .Values.gcs.enableGCS | quote }}\n enableS3: {{ .Values.s3.enableS3 | quote }}\n enableWASBS: {{ .Values.wasbs.enableWASBS | quote }}\n {{- range $key, $val := .Values.environment }}\n {{ $key }}: {{ $val | quote }}\n {{- end }}\n {{- if .Values.pvc.enablePVC }}\n {{- range $key, $val := omit .Values.pvc \"enablePVC\" }}\n {{ $key }}: {{ $val | quote }}\n {{- end }}\n {{- else if .Values.gcs.enableGCS }}\n {{- range $key, $val := omit .Values.gcs \"enableGCS\" }}\n {{ $key }}: {{ $val | quote }}\n {{- end }}\n {{- else if .Values.s3.enableS3 }}\n {{- range $key, $val := omit .Values.s3 \"enableS3\" }}\n {{ $key }}: {{ $val | quote }}\n {{- end }}\n {{- else if .Values.wasbs.enableWASBS }}\n {{- range $key, $val := omit .Values.wasbs \"enableWASBS\" }}\n {{ $key }}: {{ $val | quote }}\n {{- end }}\n {{- else }}\n {{- range $key, $val := .Values.hdfs }}\n {{ $key }}: {{ $val | quote }}\n {{- end }}\n {{- end }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"spark-history-server.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"spark-history-server.name\" . }}\n helm.sh/chart: {{ include \"spark-history-server.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"spark-history-server.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"spark-history-server.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n{{- if .Values.podAnnotations }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8 }}\n{{- end }}\n spec:\n serviceAccountName: {{ include \"spark-history-server.serviceAccountName\" . }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n - name: HADOOP_CONF_DIR\n value: {{ .Values.hdfs.HADOOP_CONF_DIR }}\n - name: SPARK_NO_DAEMONIZE\n value: \"true\"\n ports:\n - name: historyport\n containerPort: 18080\n protocol: TCP\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n command:\n - \"/bin/sh\"\n - \"-c\"\n - >\n if [ \"$enablePVC\" == \"true\" ]; then\n export SPARK_HISTORY_OPTS=\"$SPARK_HISTORY_OPTS \\\n -Dspark.history.fs.logDirectory=file:/data/$eventsDir\";\n elif [ \"$enableGCS\" == \"true\" ]; then\n export SPARK_HISTORY_OPTS=\"$SPARK_HISTORY_OPTS \\\n -Dspark.history.fs.logDirectory=$logDirectory\";\n if [ \"$enableIAM\" == \"false\" ]; then\n export SPARK_HISTORY_OPTS=\"$SPARK_HISTORY_OPTS \\\n -Dspark.hadoop.google.cloud.auth.service.account.json.keyfile=/etc/secrets/$key\";\n fi;\n elif [ \"$enableS3\" == \"true\" ]; then\n export SPARK_HISTORY_OPTS=\"$SPARK_HISTORY_OPTS \\\n -Dspark.history.fs.logDirectory=$logDirectory \\\n -Dspark.hadoop.fs.s3a.impl=org.apache.hadoop.fs.s3a.S3AFileSystem\";\n if [ -n \"$endpoint\" ] && [ \"$endpoint\" != \"default\" ]; then\n export SPARK_HISTORY_OPTS=\"$SPARK_HISTORY_OPTS \\\n -Dspark.hadoop.fs.s3a.endpoint=$endpoint\";\n fi;\n if [ \"$enableIAM\" == \"false\" ]; then\n export SPARK_HISTORY_OPTS=\"$SPARK_HISTORY_OPTS \\\n -Dspark.hadoop.fs.s3a.access.key=$(cat /etc/secrets/${accessKeyName}) \\\n -Dspark.hadoop.fs.s3a.secret.key=$(cat /etc/secrets/${secretKeyName})\";\n fi;\n elif [ \"$enableWASBS\" == \"true\" ]; then\n container=$(cat /etc/secrets/${containerKeyName})\n storageAccount=$(cat /etc/secrets/${storageAccountNameKeyName})\n\n export SPARK_HISTORY_OPTS=\"$SPARK_HISTORY_OPTS \\\n -Dspark.history.fs.logDirectory=$logDirectory \\\n -Dspark.hadoop.fs.defaultFS=wasbs://$container@$storageAccount.blob.core.windows.net \\\n -Dspark.hadoop.fs.wasbs.impl=org.apache.hadoop.fs.azure.NativeAzureFileSystem \\\n -Dspark.hadoop.fs.AbstractFileSystem.wasbs.impl=org.apache.hadoop.fs.azure.Wasbs\";\n if [ \"$sasKeyMode\" == \"true\" ]; then\n export SPARK_HISTORY_OPTS=\"$SPARK_HISTORY_OPTS \\\n -Dspark.hadoop.fs.azure.local.sas.key.mode=true \\\n -Dspark.hadoop.fs.azure.sas.$container.$storageAccount.blob.core.windows.net=$(cat /etc/secrets/${sasKeyName})\";\n else\n export SPARK_HISTORY_OPTS=\"$SPARK_HISTORY_OPTS \\\n -Dspark.hadoop.fs.azure.account.key.$storageAccount.blob.core.windows.net=$(cat /etc/secrets/${storageAccountKeyName})\";\n fi;\n else\n export 
SPARK_HISTORY_OPTS=\"$SPARK_HISTORY_OPTS \\\n -Dspark.history.fs.logDirectory=$logDirectory\";\n fi;\n /opt/spark/bin/spark-class org.apache.spark.deploy.history.HistoryServer;\n envFrom:\n - configMapRef:\n name: {{ include \"spark-history-server.fullname\" . }}\n livenessProbe:\n httpGet:\n path: /\n port: historyport\n readinessProbe:\n httpGet:\n path: /\n port: historyport\n {{- if .Values.pvc.enablePVC }}\n volumeMounts:\n - name: data\n mountPath: /data\n {{- else if .Values.gcs.enableGCS }}\n {{- if (not .Values.gcs.enableIAM) }}\n volumeMounts:\n - name: secrets-volume\n mountPath: /etc/secrets\n {{- end }}\n {{- else if .Values.s3.enableS3 }}\n {{- if (not .Values.s3.enableIAM) }}\n volumeMounts:\n - name: secrets-volume\n mountPath: /etc/secrets\n {{- end }}\n {{- else if .Values.wasbs.enableWASBS }}\n volumeMounts:\n - name: secrets-volume\n mountPath: /etc/secrets\n {{- else }}\n volumeMounts:\n - name: core-site\n mountPath: /etc/hadoop/core-site.xml\n subPath: core-site.xml\n - name: hdfs-site\n mountPath: /etc/hadoop/hdfs-site.xml\n subPath: hdfs-site.xml\n {{- end }}\n {{- if .Values.pvc.enablePVC }}\n volumes:\n - name: data\n persistentVolumeClaim:\n claimName: {{ .Values.pvc.existingClaimName }}\n {{- else if .Values.gcs.enableGCS }}\n {{- if (not .Values.gcs.enableIAM) }}\n volumes:\n - name: secrets-volume\n secret:\n secretName: {{ .Values.gcs.secret }}\n {{- end }}\n {{- else if .Values.s3.enableS3 }}\n {{- if (not .Values.s3.enableIAM) }}\n volumes:\n - name: secrets-volume\n secret:\n secretName: {{ .Values.s3.secret }}\n {{- end }}\n {{- else if .Values.wasbs.enableWASBS }}\n volumes:\n - name: secrets-volume\n secret:\n secretName: {{ .Values.wasbs.secret }}\n {{- else }}\n volumes:\n - name: hdfs-site\n configMap:\n name: {{ .Values.hdfs.hdfsSiteConfigMap }}\n - name: core-site\n configMap:\n name: {{ .Values.hdfs.coreSiteConfigMap }}\n {{- end }}\n {{- with .Values.imagePullSecrets }}\n imagePullSecrets:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullname := include \"spark-history-server.fullname\" . -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullname }}\n labels:\n app.kubernetes.io/name: {{ include \"spark-history-server.name\" . }}\n helm.sh/chart: {{ include \"spark-history-server.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n annotations:\n{{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n{{- end }}\nspec:\n rules:\n {{- range $host := .Values.ingress.hosts }}\n - host: {{ $host }}\n http:\n paths:\n - path: {{ $.Values.ingress.path }}\n backend:\n serviceName: {{ $fullname }}\n servicePort: {{ .Values.service.port.number }}\n {{- end -}}\n {{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . | quote }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end -}}\n{{- end -}}\n",
"# nfs-pv.yaml\n{{- if .Values.enableExampleNFS }}\napiVersion: v1\nkind: PersistentVolume\nmetadata:\n name: {{ .Values.pvName }}\n labels:\n app.kubernetes.io/name: {{ include \"nfs.name\" . }}\n helm.sh/chart: {{ include \"nfs.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n capacity:\n storage: {{ .Values.pvStorage }}\n accessModes:\n - ReadWriteMany\n nfs:\n server: {{ include \"nfs.fullname\" . }}.{{ .Release.Namespace }}.svc.cluster.local\n path: \"/\"\n{{- end }}\n",
"# nfs-pvc.yaml\n{{- if .Values.enableExampleNFS }}\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: {{ .Values.pvcName }}\n labels:\n app.kubernetes.io/name: {{ include \"nfs.name\" . }}\n helm.sh/chart: {{ include \"nfs.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n accessModes:\n - ReadWriteMany\n storageClassName: \"\"\n resources:\n requests:\n storage: {{ .Values.pvcStorage }}\n{{- end }}\n",
"# nfs-server-deployment.yaml\n{{- if .Values.enableExampleNFS }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"nfs.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"nfs.name\" . }}\n helm.sh/chart: {{ include \"nfs.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n replicas: 1\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"nfs.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"nfs.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n spec:\n containers:\n - name: {{ include \"nfs.fullname\" . }}\n image: k8s.gcr.io/volume-nfs:0.8\n ports:\n - name: nfs\n containerPort: 2049\n - name: mountd\n containerPort: 20048\n - name: rpcbind\n containerPort: 111\n securityContext:\n privileged: true\n volumeMounts:\n - mountPath: /exports\n name: mypvc\n volumes:\n - name: mypvc\n persistentVolumeClaim:\n claimName: {{ include \"nfs.fullname\" . }}\n{{- end }}\n",
"# nfs-server-gce-pv.yaml\n{{- if .Values.enableExampleNFS }}\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: {{ include \"nfs.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"nfs.name\" . }}\n helm.sh/chart: {{ include \"nfs.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n accessModes: [ \"ReadWriteOnce\" ]\n resources:\n requests:\n storage: {{ .Values.gceStorage }}\n{{- end }}\n",
"# nfs-server-service.yaml\n{{- if .Values.enableExampleNFS }}\nkind: Service\napiVersion: v1\nmetadata:\n name: {{ include \"nfs.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"nfs.name\" . }}\n helm.sh/chart: {{ include \"nfs.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n ports:\n - name: nfs\n port: 2049\n - name: mountd\n port: 20048\n - name: rpcbind\n port: 111\n selector:\n app.kubernetes.io/name: {{ include \"nfs.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n{{- end }}\n",
"# rbac.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ include \"spark-history-server.fullname\" . }}-cr\n labels:\n app.kubernetes.io/name: {{ include \"spark-history-server.name\" . }}\n helm.sh/chart: {{ include \"spark-history-server.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nrules:\n- apiGroups: [\"\"]\n resources: [\"deployments\", \"pods\"]\n verbs: [\"*\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ include \"spark-history-server.fullname\" . }}-crb\nsubjects:\n- kind: ServiceAccount\n name: {{ include \"spark-history-server.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\nroleRef:\n kind: ClusterRole\n name: {{ include \"spark-history-server.fullname\" . }}-cr\n apiGroup: rbac.authorization.k8s.io\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"spark-history-server.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"spark-history-server.name\" . }}\n helm.sh/chart: {{ include \"spark-history-server.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n {{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | trim | indent 4 }}\n {{- end }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port.number }}\n targetPort: historyport\n protocol: TCP\n name: {{ .Values.service.port.name }}\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePort))) }}\n nodePort: {{ .Values.service.nodePort }}\n {{- end }}\n selector:\n app.kubernetes.io/name: {{ include \"spark-history-server.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ include \"spark-history-server.serviceAccountName\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"spark-history-server.name\" . }}\n helm.sh/chart: {{ include \"spark-history-server.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end }}\n"
] | gceStorage: 5Gi
pvcStorage: 1Mi
pvStorage: 1Mi
pvName: nfs-pv
pvcName: nfs-pvc
enableExampleNFS: true
|
phabricator | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"phabricator.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"phabricator.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"phabricator.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"phabricator.mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nGet the user defined LoadBalancerIP for this release.\nNote, returns 127.0.0.1 if using ClusterIP.\n*/}}\n{{- define \"phabricator.serviceIP\" -}}\n{{- if eq .Values.service.type \"ClusterIP\" -}}\n127.0.0.1\n{{- else -}}\n{{- .Values.service.loadBalancerIP | default \"\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGets the host to be used for this application.\nIf not using ClusterIP, or if a host or LoadBalancerIP is not defined, the value will be empty.\n*/}}\n{{- define \"phabricator.host\" -}}\n{{- $host := index .Values (printf \"%sHost\" .Chart.Name) | default \"\" -}}\n{{- default (include \"phabricator.serviceIP\" .) 
$host -}}\n{{- end -}}\n\n{{/*\nReturn the proper Phabricator image name\n*/}}\n{{- define \"phabricator.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the metrics image)\n*/}}\n{{- define \"phabricator.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"phabricator.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . 
}}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"phabricator.storageClass\" -}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"phabricator.deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\n{{- if include \"phabricator.host\" . -}}\napiVersion: {{ template \"phabricator.deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"phabricator.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"phabricator.name\" . }}\n helm.sh/chart: {{ include \"phabricator.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"phabricator.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"phabricator.name\" . }}\n helm.sh/chart: {{ include \"phabricator.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n{{- if or .Values.podAnnotations .Values.metrics.enabled }}\n annotations:\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n {{- if .Values.metrics.podAnnotations }}\n{{ toYaml .Values.metrics.podAnnotations | indent 8 }}\n {{- end }}\n{{- end }}\n spec:\n{{- include \"phabricator.imagePullSecrets\" . | indent 6 }}\n hostAliases:\n - ip: \"127.0.0.1\"\n hostnames:\n - \"status.localhost\"\n containers:\n - name: {{ template \"phabricator.fullname\" . }}\n image: {{ template \"phabricator.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n - name: MARIADB_HOST\n value: {{ template \"phabricator.mariadb.fullname\" . }}\n - name: MARIADB_PORT_NUMBER\n value: \"3306\"\n - name: MARIADB_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"phabricator.mariadb.fullname\" . }}\n key: mariadb-root-password\n{{- $port:=.Values.service.port | toString }}\n - name: PHABRICATOR_HOST\n value: \"{{ include \"phabricator.host\" . }}{{- if ne $port \"80\" }}:{{ .Values.service.port }}{{ end }}\"\n {{- if .Values.phabricatorAlternateFileDomain }}\n - name: PHABRICATOR_ALTERNATE_FILE_DOMAIN\n value: {{ .Values.phabricatorAlternateFileDomain | quote }}\n {{- end }}\n - name: PHABRICATOR_USERNAME\n value: {{ default \"\" .Values.phabricatorUsername | quote }}\n - name: PHABRICATOR_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"phabricator.fullname\" . }}\n key: phabricator-password\n {{- if .Values.phabricatorEmail }}\n - name: PHABRICATOR_EMAIL\n value: {{ .Values.phabricatorEmail | quote }}\n {{- end }}\n {{- if .Values.phabricatorFirstName }}\n - name: PHABRICATOR_FIRSTNAME\n value: {{ .Values.phabricatorFirstName | quote }}\n {{- end }}\n {{- if .Values.phabricatorLastName }}\n - name: PHABRICATOR_LASTNAME\n value: {{ .Values.phabricatorLastName | quote }}\n {{- end }}\n {{- if .Values.smtpHost }}\n - name: SMTP_HOST\n value: {{ .Values.smtpHost | quote }}\n {{- end }}\n {{- if .Values.smtpPort }}\n - name: SMTP_PORT\n value: {{ .Values.smtpPort | quote }}\n {{- end }}\n {{- if .Values.smtpUser }}\n - name: SMTP_USER\n value: {{ .Values.smtpUser | quote }}\n {{- end }}\n {{- if .Values.smtpPassword }}\n - name: SMTP_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"phabricator.fullname\" . }}\n key: smtp-password\n {{- end }}\n {{- if .Values.smtpProtocol }}\n - name: SMTP_PROTOCOL\n value: {{ .Values.smtpProtocol | quote }}\n {{- end }}\n ports:\n - name: http\n containerPort: 80\n - name: https\n containerPort: 443\n livenessProbe:\n httpGet:\n path: /auth/\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"phabricator.host\" . 
| quote }}\n initialDelaySeconds: 180\n timeoutSeconds: 5\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /auth/\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"phabricator.host\" . | quote }}\n initialDelaySeconds: 30\n timeoutSeconds: 3\n periodSeconds: 5\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n - name: phabricator-data\n mountPath: /bitnami/phabricator\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"phabricator.metrics.image\" . }}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n command: [ '/bin/apache_exporter', '-scrape_uri', 'http://status.localhost:80/server-status/?auto']\n ports:\n - name: metrics\n containerPort: 9117\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 15\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 5\n timeoutSeconds: 1\n resources:\n {{ toYaml .Values.metrics.resources | indent 10 }}\n{{- end }}\n volumes:\n - name: phabricator-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"phabricator.fullname\" . }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n{{- end -}}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"phabricator.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"phabricator.name\" . }}\n helm.sh/chart: {{ include \"phabricator.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n annotations:\n {{- if .Values.ingress.certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: {{ template \"phabricator.fullname\" $ }}\n servicePort: http\n {{- end }}\n tls:\n {{- range .Values.ingress.hosts }}\n {{- if .tls }}\n - hosts:\n {{- if .tlsHosts }}\n {{- range $host := .tlsHosts }}\n - {{ $host }}\n {{- end }}\n {{- else }}\n - {{ .name }}\n {{- end }}\n secretName: {{ .tlsSecret }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# pvc.yaml\n{{- if .Values.persistence.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"phabricator.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"phabricator.name\" . }}\n helm.sh/chart: {{ include \"phabricator.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{ include \"phabricator.storageClass\" . }}\n{{- end -}}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"phabricator.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"phabricator.name\" . }}\n helm.sh/chart: {{ include \"phabricator.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ntype: Opaque\ndata:\n {{ if .Values.phabricatorPassword }}\n phabricator-password: {{ default \"\" .Values.phabricatorPassword | b64enc | quote }}\n {{ else }}\n phabricator-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n smtp-password: {{ default \"\" .Values.smtpPassword | b64enc | quote }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"phabricator.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"phabricator.name\" . }}\n helm.sh/chart: {{ include \"phabricator.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n {{- if eq .Values.service.type \"LoadBalancer\" }}\n loadBalancerIP: {{ default \"\" .Values.service.loadBalancerIP }}\n {{- end }}\n {{- if or (eq .Values.service.type \"LoadBalancer\") (eq .Values.service.type \"NodePort\") }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}\n {{- end }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n targetPort: http\n {{- if and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.http)) }}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n - name: https\n port: {{ .Values.service.httpsPort }}\n targetPort: https\n {{- if and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.https)) }}\n nodePort: {{ .Values.service.nodePorts.https }}\n {{- end }}\n selector:\n app.kubernetes.io/name: {{ include \"phabricator.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n"
] | ## Global Docker image parameters
## Please note that this will override the image parameters, including those of dependencies, that are configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami Phabricator image version
## ref: https://hub.docker.com/r/bitnami/phabricator/tags/
##
image:
registry: docker.io
repository: bitnami/phabricator
tag: 2020.7.0-debian-10-r10
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override phabricator.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override phabricator.fullname template
##
# fullnameOverride:
## Phabricator host to create application URLs
## ref: https://github.com/bitnami/bitnami-docker-phabricator#configuration
##
# phabricatorHost:
## Phabricator alternate domain to upload files
## ref: https://github.com/bitnami/bitnami-docker-phabricator#configuration
##
# phabricatorAlternateFileDomain:
## User of the application
## ref: https://github.com/bitnami/bitnami-docker-phabricator#configuration
##
phabricatorUsername: user
## Application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/bitnami-docker-phabricator#configuration
##
# phabricatorPassword:
## Admin email
## ref: https://github.com/bitnami/bitnami-docker-phabricator#configuration
##
phabricatorEmail: [email protected]
## First name
## ref: https://github.com/bitnami/bitnami-docker-phabricator#environment-variables
##
phabricatorFirstName: First Name
## Last name
## ref: https://github.com/bitnami/bitnami-docker-phabricator#environment-variables
##
phabricatorLastName: Last Name
## SMTP mail delivery configuration
## ref: https://github.com/bitnami/bitnami-docker-phabricator/#smtp-configuration
##
# smtpHost:
# smtpPort:
# smtpUser:
# smtpPassword:
# smtpProtocol:
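## e.g. a minimal sketch (host and credentials are hypothetical):
# smtpHost: smtp.example.com
# smtpPort: 587
# smtpUser: [email protected]
# smtpPassword: my-smtp-password
# smtpProtocol: tls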
##
## MariaDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml
##
mariadb:
## Whether to deploy a MariaDB server to satisfy the application's database requirements. To use an external database, set this to false and configure the externalDatabase parameters
enabled: true
## Disable MariaDB replication
replication:
enabled: false
## MariaDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run
##
# rootUser:
# password:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
master:
## Disable local_infile for MariaDB: https://secure.phabricator.com/T13238
extraFlags: "--local-infile=0"
persistence:
enabled: true
## mariadb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Kubernetes configuration
## For minikube, set this to NodePort; elsewhere use LoadBalancer
##
service:
type: LoadBalancer
# HTTP Port
port: 80
# HTTPS Port
httpsPort: 443
## loadBalancerIP:
##
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
## https: <to set explicitly, choose port between 30000-32767>
nodePorts:
http: ""
https: ""
## Enable client source IP preservation
## ref: http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## Phabricator data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Configure the ingress resource that allows you to access the
## Phabricator installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
##
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## Ingress annotations done as key:value pairs
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
annotations:
# kubernetes.io/ingress.class: nginx
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
##
hosts:
- name: phabricator.local
path: /
## Set this to true in order to enable TLS on the ingress record
tls: false
## Optionally specify the TLS hosts for the ingress record
## Useful when the Ingress controller supports www-redirection
## If not specified, the above host name will be used
# tlsHosts:
# - www.phabricator.local
# - phabricator.local
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: phabricator.local-tls
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: phabricator.local-tls
# key:
# certificate:
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 512Mi
cpu: 300m
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.7.0-debian-10-r37
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter pod Annotation and Labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9117"
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
# nodeSelector: {}
## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
# affinity: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
# tolerations: []
|
gcloud-endpoints | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 24 -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 24 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 24 -}}\n{{- end -}}\n\n{{- define \"toYaml\" -}}\n {{- range $key, $value := . -}}\n {{- $map := kindIs \"map\" $value -}}\n {{- if $map }}\n{{ $key }}:\n {{- include \"toYaml\" $value | indent 2 }}\n {{- else }}\n{{ $key }}: {{ $value }}\n {{- end }}\n {{- end -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"fullname\" . }}\n labels:\n app: {{ template \"fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n replicas: 1\n template:\n metadata:\n labels:\n app: {{ template \"fullname\" . }}\n spec:\n containers:\n - name: endpoints\n image: \"{{ .Values.image }}\"\n imagePullPolicy: {{ default \"\" .Values.imagePullPolicy | quote }}\n args: [\n{{- if (.Values.httpPort) }}\n \"--http_port\", {{ default \"\" .Values.httpPort | quote }},\n{{- end }}\n{{- if (.Values.http2Port) }}\n \"--http2_port\", {{ default \"\" .Values.http2Port | quote }},\n{{- end }}\n{{- if (.Values.sslPort) }}\n \"--ssl_port\", {{ default \"\" .Values.sslPort | quote }},\n{{- end }}\n{{- if (.Values.backend) }}\n \"--backend\", {{ default \"\" .Values.backend | quote }},\n{{- end }}\n{{- if (.Values.service) }}\n \"--service\", {{ default \"\" .Values.service | quote }},\n{{- end }}\n{{- if (.Values.version) }}\n \"--version\", {{ default \"\" .Values.version | quote }},\n{{- end }}\n{{- if (.Values.serviceAccountKey) }}\n \"--service_account_key\", {{ default \"\" .Values.serviceAccountKey | quote }},\n{{- end }}\n{{- if (.Values.nginxConfig) }}\n \"--nginx_config\", {{ default \"\" .Values.nginxConfig | quote }},\n{{- end }}\n{{- if (.Values.statusPort) }}\n \"--status_port\", {{ default \"\" .Values.statusPort | quote }},\n{{- end }}\n ]\n ports:\n{{- if (.Values.httpPort) }}\n - containerPort: {{ default \"0\" .Values.httpPort }}\n name: http\n{{- else if and (not .Values.http2Port) (not .Values.sslPort) }}\n - containerPort: 8080\n name: http\n{{- end }}\n{{- if (.Values.http2Port) }}\n - containerPort: {{ default \"0\" .Values.http2Port }}\n name: http2\n{{- end }}\n{{- if (.Values.sslPort) }}\n - containerPort: {{ default \"0\" .Values.sslPort }}\n name: https\n{{- end }}\n{{- if (.Values.statusPort) }}\n - containerPort: {{ default \"0\" .Values.statusPort }}\n name: status\n livenessProbe:\n httpGet:\n path: /endpoints_status\n port: {{ default \"0\" .Values.statusPort }}\n initialDelaySeconds: 5\n timeoutSeconds: 1\n readinessProbe:\n httpGet:\n path: /endpoints_status\n port: {{ default \"0\" .Values.statusPort }}\n initialDelaySeconds: 5\n timeoutSeconds: 1\n{{- end }}\n resources:\n {{ include \"toYaml\" .Values.resources | indent 12 }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"fullname\" . }}\nspec:\n ports:\n{{- if (.Values.httpPort) }}\n - port: 80\n targetPort: http\n protocol: TCP\n name: http\n{{- else if and (not .Values.http2Port) (not .Values.sslPort) }}\n - port: 80\n targetPort: http\n protocol: TCP\n name: http\n{{- end }}\n{{- if (.Values.http2Port) }}\n - port: 81\n targetPort: http2\n protocol: TCP\n name: http2\n{{- end }}\n{{- if (.Values.sslPort) }}\n - port: 443\n targetPort: https\n protocol: TCP\n name: https\n{{- end }}\n selector:\n app: {{ template \"fullname\" . }}\n type: {{ .Values.serviceType }}\n\n"
] | ## Google Cloud Endpoints Runtime image
## ref: https://cloud.google.com/endpoints/docs/quickstart-container-engine#deploying_the_sample_api_to_the_cluster
image: b.gcr.io/endpoints/endpoints-runtime:1
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
# imagePullPolicy:
## Set the application server address to which ESP proxies requests. For
## gRPC backends, use the grpc:// prefix, e.g. grpc://localhost:8081.
## (default: localhost:8081)
##
# backend:
## Set the name of the Endpoints service. If omitted and serviceConfigURL not
## specified, ESP contacts the metadata service to fetch the service name.
## (default: None)
##
# service:
## Specify the URL to fetch the service configuration. (default: None)
##
# serviceConfigURL:
## Expose a port to accept HTTP/1.x connections. Note that if none of
## httpPort, http2Port, and sslPort is specified, the default httpPort 8080
## is used. (default: None)
##
# httpPort: 8080
## Expose a port to accept HTTP/2 connections. Note that this cannot be the
## same port as the HTTP/1.x port. (default: None)
##
# http2Port:
## Expose a port for HTTPS requests. Accepts both HTTP/1.x and HTTP/2
## connections. (default: None)
##
# sslPort:
## Set the ESP status port. Status information is available at
## /endpoints_status location over HTTP/1.x. (default: 8090)
##
statusPort: 8090
## Set the config version of the Endpoints service. If omitted and
## serviceConfigURL not specified, ESP contacts the metadata service to fetch
## the service version. (default: None)
##
# version:
## Set the service account key JSON file. Used to access the service control
## and the service management. If the option is omitted, ESP contacts the
## metadata service to fetch an access token. (default: None)
##
# serviceAccountKey:
## Set a custom nginx config file. (default: None)
##
# nginxConfig:
## Kubernetes configuration
## For minikube, set this to NodePort, elsewhere use LoadBalancer
##
serviceType: ClusterIP
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 128Mi
cpu: 100m
|
mssql-linux | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"mssql.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"mssql.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- printf .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name for the SA password secret.\n*/}}\n{{- define \"mssql.secret\" -}}\n{{- if .Values.existingSecret -}}\n {{- .Values.existingSecret -}}\n{{- else -}}\n {{- include \"mssql.fullname\" . -}}-secret\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name for the SA password secret key.\n*/}}\n{{- define \"mssql.passwordKey\" -}}\n{{- if .Values.existingSecret -}}\n {{- .Values.existingSecretKey -}}\n{{- else -}}\n sapassword\n{{- end -}}\n{{- end -}}\n\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"deployment.apiVersion\" -}}\n{{- if semverCompare \">=1.9-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"apps/v1\" -}}\n{{- else -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"mssql.fullname\" . }}\n labels:\n app: {{ template \"mssql.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.deployment.labels }}\n{{ toYaml .Values.deployment.labels | indent 4 }}\n{{- end }}\n{{- if .Values.deployment.annotations }}\n annotations:\n{{ toYaml .Values.deployment.annotations | indent 4 }}\n{{- end }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"mssql.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"mssql.name\" . }}\n release: {{ .Release.Name }}\n{{- if .Values.pod.labels }}\n{{ toYaml .Values.pod.labels | indent 8 }}\n{{- end }}\n{{- if .Values.pod.annotations }}\n annotations:\n{{ toYaml .Values.pod.annotations | indent 8 }}\n{{- end }}\n spec:\n {{- if .Values.schedulerName }}\n schedulerName: \"{{ .Values.schedulerName }}\"\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n - name: ACCEPT_EULA\n value: \"{{ .Values.acceptEula.value | upper }}\"\n - name: MSSQL_PID\n value: \"{{ .Values.edition.value }}\"\n - name: SA_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"mssql.secret\" . }}\n key: {{ template \"mssql.passwordKey\" . }}\n - name: MSSQL_DATA_DIR\n value: /mssql-data/data\n - name: MSSQL_LOG_DIR\n value: /mssql-translog/translog\n - name: MSSQL_TCP_PORT\n value: \"{{ .Values.service.port }}\"\n - name: MSSQL_BACKUP_DIR\n value: \"/mssql-backup/backup\"\n - name: MSSQL_COLLATION\n value: \"{{ .Values.collation }}\"\n - name: MSSQL_LCID\n value: \"{{ .Values.lcid }}\"\n - name: MSSQL_MASTER_DATA_FILE\n value: /mssql-data/master/master.mdf\n - name: MSSQL_MASTER_LOG_FILE\n value: /mssql-data/master/mastlog.ldf\n - name: MSSQL_ENABLE_HADR\n value: \"{{ .Values.hadr }}\"\n - name: MSSQL_AGENT_ENABLED\n value: {{ .Values.agent.enabled | quote }}\n {{ if .Values.resources.limits.memory }}\n - name: MSSQL_MEMORY_LIMIT_MB\n valueFrom:\n resourceFieldRef:\n resource: limits.memory\n divisor: 1Mi\n {{ end }}\n ports:\n - name: mssql\n containerPort: {{ .Values.service.port }}\n volumeMounts:\n - name: data\n mountPath: /mssql-data/data\n - name: transactionlog\n mountPath: /mssql-translog/translog\n - name: backup\n mountPath: /mssql-backup/backup\n - name: master\n mountPath: /mssql-data/master\n livenessProbe:\n tcpSocket:\n port: mssql\n initialDelaySeconds: {{ .Values.livenessprobe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessprobe.periodSeconds }}\n readinessProbe:\n tcpSocket:\n port: mssql\n initialDelaySeconds: {{ .Values.readinessprobe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessprobe.periodSeconds }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.image.pullSecrets | indent 8 }}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.securityContext }}\n securityContext:\n{{ toYaml 
.Values.securityContext | indent 8 }}\n {{- end }}\n volumes:\n - name: data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n {{- if .Values.persistence.existingDataClaim }}\n claimName: {{ .Values.persistence.existingDataClaim }}\n {{- else }}\n claimName: {{ template \"mssql.fullname\" . }}-data\n {{- end -}}\n {{- else }}\n emptyDir: {}\n {{- end }}\n - name: transactionlog\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n {{- if .Values.persistence.existingTransactionLogClaim }}\n claimName: {{ .Values.persistence.existingTransactionLogClaim }}\n {{- else }}\n claimName: {{ template \"mssql.fullname\" . }}-translog\n {{- end }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n - name: backup\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n {{- if .Values.persistence.existingBackupClaim }}\n claimName: {{ .Values.persistence.existingBackupClaim }}\n {{- else }}\n claimName: {{ template \"mssql.fullname\" . }}-backup\n {{- end }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n - name: master\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n {{- if .Values.persistence.existingMasterClaim }}\n claimName: {{ .Values.persistence.existingMasterClaim }}\n {{- else }}\n claimName: {{ template \"mssql.fullname\" . }}-master\n {{- end }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n",
"# pvc-backup.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingBackupClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"mssql.fullname\" . }}-backup\n labels:\n app: {{ template \"mssql.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- if .Values.persistence.annotations }}\n annotations:\n{{ toYaml .Values.persistence.annotations | indent 4 }}\n{{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.backupAccessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.backupSize | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}",
"# pvc-data.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingDataClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"mssql.fullname\" . }}-data\n labels:\n app: {{ template \"mssql.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- if .Values.persistence.annotations }}\n annotations:\n{{ toYaml .Values.persistence.annotations | indent 4 }}\n{{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.dataAccessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.dataSize | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n",
"# pvc-master.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingMasterClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"mssql.fullname\" . }}-master\n labels:\n app: {{ template \"mssql.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- if .Values.persistence.annotations }}\n annotations:\n{{ toYaml .Values.persistence.annotations | indent 4 }}\n{{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.masterAccessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.masterSize | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}",
"# pvc-tranlog.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingTransactionLogClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"mssql.fullname\" . }}-translog\n labels:\n app: {{ template \"mssql.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- if .Values.persistence.annotations }}\n annotations:\n{{ toYaml .Values.persistence.annotations | indent 4 }}\n{{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.transactionLogAccessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.transactionLogSize | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}",
"# secret.yaml\n{{- if not .Values.existingSecret -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"mssql.fullname\" . }}-secret\n labels:\n app: {{ template \"mssql.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n {{ if .Values.sapassword }}\n sapassword: {{ .Values.sapassword | b64enc | quote }}\n {{ else }}\n sapassword: {{ randAlphaNum 20 | b64enc | quote }}\n {{ end }}\n{{- end -}}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"mssql.fullname\" . }}\n labels:\n app: {{ template \"mssql.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.service.labels }}\n{{ toYaml .Values.service.labels | indent 4 }}\n{{- end }}\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\nspec:\n {{- if .Values.service.headless }}\n type: ClusterIP\n clusterIP: None\n {{- else }}\n type: {{ .Values.service.type }}\n {{- end }}\n {{- if (and (eq .Values.service.type \"LoadBalancer\") (not (empty .Values.service.loadBalancerIP))) }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n {{- end }}\n ports:\n - name: mssql\n port: {{ .Values.service.port }}\n targetPort: mssql\n {{- if and (eq \"NodePort\" .Values.service.type) .Values.service.nodePort }}\n nodePort: {{ .Values.service.nodePort }}\n {{- end }}\n protocol: TCP\n selector:\n app: {{ template \"mssql.name\" . }}\n release: {{ .Release.Name }}\n"
] | acceptEula:
value: "n"
edition:
value: Express
collation: SQL_Latin1_General_CP1_CI_AS
lcid: 1033
hadr: 0
agent:
enabled: false
# Override sapassword in templates/secret.yaml
# sapassword: "MyStrongPassword1234"
existingSecret: ""
existingSecretKey: sapassword
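## An existing secret can be created out of band and referenced above, e.g.
## (illustrative secret name and password):
# kubectl create secret generic mssql-sa-secret --from-literal=sapassword='MyStrongPassword1234'
# existingSecret: mssql-sa-secret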
image:
repository: microsoft/mssql-server-linux
tag: 2017-CU5
pullPolicy: IfNotPresent
## It is possible to specify docker registry credentials
## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# pullSecrets:
# - name: regsecret
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName: "default-scheduler"
service:
  # If headless is set to true, the service type is ignored
headless: false
type: ClusterIP
port: 1433
nodePort:
annotations: {}
labels: {}
loadBalancerIP:
deployment:
annotations: {}
labels: {}
pod:
annotations: {}
labels: {}
persistence:
enabled: true
# existingDataClaim:
# existingTransactionLogClaim:
# existingBackupClaim:
# existingMasterClaim:
storageClass: ""
dataAccessMode: ReadWriteOnce
dataSize: 1Gi
transactionLogAccessMode: ReadWriteOnce
transactionLogSize: 1Gi
backupAccessMode: ReadWriteOnce
backupSize: 1Gi
masterAccessMode: ReadWriteOnce
masterSize: 1Gi
livenessprobe:
initialDelaySeconds: 15
periodSeconds: 20
readinessprobe:
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
# cpu: 100m
memory: 2Gi
# requests:
# cpu: 100m
# memory: 2Gi
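## Note: when resources.limits.memory is set, the deployment also uses it to
## populate the MSSQL_MEMORY_LIMIT_MB environment variable.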
nodeSelector: {}
# kubernetes.io/hostname: minikube
tolerations: []
affinity: {}
securityContext: {}
# runAsUser: 1000
|
zeppelin | [
"# _helpers.yaml\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"zeppelin.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 24 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 24 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"zeppelin.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 24 | trimSuffix \"-\" -}}\n{{- end -}}",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ .Release.Name }}-zeppelin\n labels:\n app: {{ template \"zeppelin.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"zeppelin.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"zeppelin.name\" . }}\n release: {{ .Release.Name }}\n spec:\n terminationGracePeriodSeconds: 0\n containers:\n - name: zeppelin\n image: {{ .Values.zeppelin.image }}\n ports:\n - containerPort: 8080\n name: web\n env:\n - name: ZEPPELIN_PORT\n value: \"8080\"\n - name: ZEPPELIN_JAVA_OPTS\n value: >-\n -Dspark.driver.memory={{ .Values.spark.driverMemory }}\n -Dspark.executor.memory={{ .Values.spark.executorMemory }}\n{{- if .Values.hadoop.useConfigMap }}\n - name: MASTER\n value: \"yarn\"\n - name: SPARK_SUBMIT_OPTIONS\n value: >-\n --deploy-mode client\n --num-executors {{ .Values.spark.numExecutors }}\n{{- end }}\n volumeMounts:\n{{- if .Values.hadoop.useConfigMap }}\n - mountPath: {{ .Values.hadoop.configPath }}\n name: hadoop-config\n{{- end }}\n resources:\n{{ toYaml .Values.zeppelin.resources | indent 12 }} \n readinessProbe:\n httpGet:\n path: /\n port: 8080\n initialDelaySeconds: 20\n timeoutSeconds: 1\n{{- if .Values.hadoop.useConfigMap }}\n volumes:\n - name: hadoop-config\n configMap:\n name: {{ .Values.hadoop.configMapName }}\n{{- end }}\n{{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\n{{- $ingressPath := .Values.ingress.path -}}\n{{- $fullName := printf \"%s-zeppelin\" .Release.Name -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app: {{ template \"zeppelin.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . | quote }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: 8080\n {{- end }}\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . | quote }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n{{- end }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ .Release.Name }}-zeppelin\n labels:\n app: {{ template \"zeppelin.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: ClusterIP\n ports:\n - port: 8080\n name: web\n selector:\n app: {{ template \"zeppelin.name\" . }}\n release: {{ .Release.Name }}"
] | zeppelin:
image: dylanmei/zeppelin:0.7.2
resources:
limits:
memory: "4096Mi"
cpu: "2000m"
hadoop:
useConfigMap: false
configMapName: hadoop-hadoop
configPath: /usr/hadoop-2.7.3/etc/hadoop
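  ## To run Zeppelin against YARN, enable the ConfigMap mount, e.g.
  ## (illustrative release and ConfigMap names):
  # helm install stable/zeppelin --set hadoop.useConfigMap=true,hadoop.configMapName=hadoop-hadoop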
spark:
driverMemory: 1g
executorMemory: 1g
numExecutors: 2
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# nginx.ingress.kubernetes.io/auth-secret: zeppelin-tls-secret
path: /
hosts:
- zeppelin.local
  tls: []
  # - secretName: zeppelin-tls-secret
  #   hosts:
  #     - zeppelin.local
nodeSelector: {}
|
prometheus-snmp-exporter | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-snmp-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"prometheus-snmp-exporter.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"prometheus-snmp-exporter.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"prometheus-snmp-exporter.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"prometheus-snmp-exporter.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# configmap.yaml\n{{- if .Values.config }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"prometheus-snmp-exporter.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-snmp-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-snmp-exporter.chart\" . }}\ndata:\n snmp.yaml: |\n{{ .Values.config | indent 4 }}\n{{- end }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"prometheus-snmp-exporter.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-snmp-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-snmp-exporter.chart\" . }}\nspec:\n replicas: {{ .Values.replicas }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"prometheus-snmp-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n strategy:\n rollingUpdate:\n maxSurge: 1\n maxUnavailable: 0\n type: RollingUpdate\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-snmp-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-snmp-exporter.chart\" . }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8 }}\n spec:\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 6 }}\n {{- end }}\n\n restartPolicy: {{ .Values.restartPolicy }}\n serviceAccountName: {{ template \"prometheus-snmp-exporter.serviceAccountName\" . }}\n containers:\n - name: snmp-exporter\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n securityContext:\n runAsNonRoot: true\n runAsUser: 1000\n args:\n{{- if .Values.config }}\n - \"--config.file=/config/snmp.yaml\"\n{{- end }}\n {{- if .Values.extraArgs }}\n{{ toYaml .Values.extraArgs | indent 12 }}\n {{- end }}\n securityContext:\n readOnlyRootFilesystem: true\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n ports:\n - containerPort: {{ .Values.service.port }}\n name: http\n livenessProbe:\n httpGet:\n path: /health\n port: http\n readinessProbe:\n httpGet:\n path: /health\n port: http\n {{- if .Values.config }}\n volumeMounts:\n - mountPath: /config\n name: config\n - name: configmap-reload\n image: \"{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}\"\n imagePullPolicy: \"{{ .Values.configmapReload.image.pullPolicy }}\"\n args:\n - --volume-dir=/etc/config\n - --webhook-url=http://localhost:{{ .Values.service.port }}/-/reload\n resources:\n{{ toYaml .Values.configmapReload.resources | indent 12 }}\n volumeMounts:\n - mountPath: /etc/config\n name: config\n readOnly: true\n volumes:\n - name: config\n configMap:\n name: {{ template \"prometheus-snmp-exporter.fullname\" . }}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $serviceName := include \"prometheus-snmp-exporter.fullname\" . -}}\n{{- $servicePort := .Values.service.port -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"prometheus-snmp-exporter.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-snmp-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-snmp-exporter.chart\" . }}\n annotations:\n{{ toYaml .Values.ingress.annotations | indent 4 }}\nspec:\n rules:\n {{- range $host := .Values.ingress.hosts }}\n - host: {{ $host }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n {{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n",
"# role.yaml\n{{- if .Values.rbac.create }}\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"prometheus-snmp-exporter.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-snmp-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-snmp-exporter.chart\" . }}\nrules:\n- apiGroups: [\"\"]\n resources: [\"endpoints\"]\n verbs: [\"get\"]\n{{- end }}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create }}\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"prometheus-snmp-exporter.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-snmp-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-snmp-exporter.chart\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"prometheus-snmp-exporter.serviceAccountName\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"prometheus-snmp-exporter.fullname\" . }}\n{{- end }}\n",
"# service.yaml\nkind: Service\napiVersion: v1\nmetadata:\n name: {{ template \"prometheus-snmp-exporter.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n {{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-snmp-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-snmp-exporter.chart\" . }}\n{{- if .Values.service.labels }}\n{{ toYaml .Values.service.labels | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n protocol: TCP\n{{- if .Values.service.externalIPs }}\n externalIPs:\n{{ toYaml .Values.service.externalIPs | indent 4 }}\n{{- end }}\n selector:\n app.kubernetes.io/name: {{ include \"prometheus-snmp-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n",
"# serviceaccount.yaml\n{{ if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app.kubernetes.io/name: {{ include \"prometheus-snmp-exporter.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n helm.sh/chart: {{ include \"prometheus-snmp-exporter.chart\" . }}\n name: {{ template \"prometheus-snmp-exporter.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# servicemonitor.yaml\n{{- if .Values.serviceMonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ template \"prometheus-snmp-exporter.name\" . }}\n {{- if .Values.serviceMonitor.namespace }}\n namespace: {{ .Values.serviceMonitor.namespace }}\n {{- end }}\n labels:\n {{- range $key, $value := .Values.serviceMonitor.selector }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n endpoints:\n - port: http\n {{- if .Values.serviceMonitor.interval }}\n interval: {{ .Values.serviceMonitor.interval }}\n {{- end }}\n honorLabels: {{ .Values.serviceMonitor.honorLabels }}\n path: {{ .Values.serviceMonitor.path }}\n {{- if .Values.serviceMonitor.scrapeTimeout }}\n scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}\n {{- end }}\n {{- if .Values.serviceMonitor.params.enabled }}\n params:\n{{ toYaml .Values.serviceMonitor.params.conf | indent 6 }}\n {{- end }}\n selector:\n matchLabels:\n app: {{ template \"prometheus-snmp-exporter.name\" . }}\n{{- end -}}\n"
] | restartPolicy: Always
image:
repository: prom/snmp-exporter
tag: v0.14.0
pullPolicy: IfNotPresent
nodeSelector: {}
tolerations: []
affinity: {}
# config:
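## Illustrative inline snmp.yaml module (real configs are normally produced
## by the snmp_exporter generator; the OIDs below are only examples):
# config: |
#   if_mib:
#     walk:
#       - 1.3.6.1.2.1.2
#       - 1.3.6.1.2.1.31.1.1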
## For RBAC support:
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
resources: {}
# limits:
# memory: 300Mi
# requests:
# memory: 50Mi
service:
annotations: {}
type: ClusterIP
port: 9116
## An Ingress resource can provide name-based virtual hosting and TLS
## termination, among other things, for snmp-exporter deployments which are accessed
## from outside the Kubernetes cluster.
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
hosts: []
# - chart-example.local
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
tls: []
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
podAnnotations: {}
extraArgs: []
# --history.limit=1000
replicas: 1
## Monitors ConfigMap changes and POSTs to a URL
## Ref: https://github.com/jimmidyson/configmap-reload
##
configmapReload:
## configmap-reload container name
##
name: configmap-reload
## configmap-reload container image
##
image:
repository: jimmidyson/configmap-reload
tag: v0.2.2
pullPolicy: IfNotPresent
## configmap-reload resource requests and limits
## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# Enable this if you're using https://github.com/coreos/prometheus-operator
serviceMonitor:
enabled: false
namespace: monitoring
# fallback to the prometheus default unless specified
# interval: 10s
## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr)
## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1)
## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters)
selector:
prometheus: kube-prometheus
# Retain the job and instance labels of the metrics pushed to the snmp-exporter
# [Scraping SNMP-exporter](https://github.com/prometheus/snmp_exporter#configure-the-snmp_exporter-as-a-target-to-scrape)
honorLabels: true
params:
enabled: false
conf:
module:
- if_mib
target:
- 127.0.0.1
path: /snmp
scrapeTimeout: 10s
|
express-gateway | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n\n{{- define \"eg.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{- define \"eg.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{- define \"eg.redis.fullname\" -}}\n{{- $name := default \"redis\" .Values.redis.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: \"{{ template \"eg.fullname\" . }}\"\n labels:\n app: \"{{ template \"eg.name\" . }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ndata:\n system.config.yml: |-\n db:\n redis:\n emulate: {{ .Values.storage.emulate }}\n sentinels:\n - host: {{ template \"eg.redis.fullname\" . }}-master\n port: 6379\n - host: {{ template \"eg.redis.fullname\" . }}-slave\n port: 6379\n name: {{ template \"eg.redis.fullname\" . }}-master\n password: express-gateway\n namespace: {{ .Values.storage.namespace }}\n\n cli:\n url: http://localhost:{{ .Values.admin.containerPort }}\n\n crypto:\n{{ toYaml .Values.crypto | indent 6 }}\n session:\n{{ toYaml .Values.session | indent 6 }}\n accessTokens:\n{{ toYaml .Values.accessTokens | indent 6 }}\n refreshTokens:\n{{ toYaml .Values.refreshTokens | indent 6 }}\n authorizationCodes:\n{{ toYaml .Values.authorizationCodes | indent 6 }}\n\n gateway.config.yml: |-\n admin:\n port: {{ .Values.admin.containerPort }}\n hostname: {{ .Values.admin.hostname }}\n {{- if .Values.proxy.https }}\n https:\n port: {{ .Values.proxy.containerPort }}\n tls:\n{{ toYaml .Values.proxy.tls | indent 8 }}\n {{- else }}\n http:\n port: {{ .Values.proxy.containerPort }}\n {{- end }}\n apiEndpoints:\n serviceEndpoints:\n policies:\n - proxy\n - cors\n - expression\n - jwt\n - terminate\n pipelines:\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: \"{{ template \"eg.fullname\" . }}\"\n labels:\n app: \"{{ template \"eg.name\" . }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"eg.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n {{- if .Values.podAnnotations }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n labels:\n app: {{ template \"eg.name\" . }}\n release: {{ .Release.Name }}\n spec:\n volumes:\n - configMap:\n name: \"{{ template \"eg.fullname\" . }}\"\n name: config\n containers:\n - name: {{ template \"eg.name\" . }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n volumeMounts:\n - mountPath: /var/lib/eg/system.config.yml\n name: config\n subPath: system.config.yml\n - mountPath: /var/lib/eg/gateway.config.yml\n name: config\n subPath: gateway.config.yml\n ports:\n - name: admin\n containerPort: {{ .Values.admin.containerPort }}\n protocol: TCP\n - name: proxy\n containerPort: {{ .Values.proxy.containerPort }}\n protocol: TCP\n readinessProbe:\n{{ toYaml .Values.readinessProbe | indent 10 }}\n livenessProbe:\n{{ toYaml .Values.livenessProbe | indent 10 }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n",
"# service-eg-admin.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"eg.fullname\" . }}-admin\n annotations:\n {{- range $key, $value := .Values.admin.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n labels:\n app: {{ template \"eg.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.admin.type }}\n {{- if and (eq .Values.admin.type \"LoadBalancer\") .Values.admin.loadBalancerIP }}\n loadBalancerIP: {{ .Values.admin.loadBalancerIP }}\n {{- end }}\n ports:\n - name: eg-admin\n port: {{ .Values.admin.servicePort }}\n targetPort: {{ .Values.admin.containerPort }}\n {{- if (and (eq .Values.admin.type \"NodePort\") (not (empty .Values.admin.nodePort))) }}\n nodePort: {{ .Values.admin.nodePort }}\n {{- end }}\n protocol: TCP\n selector:\n app: {{ template \"eg.name\" . }}\n release: {{ .Release.Name }}\n",
"# service-eg-proxy.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"eg.fullname\" . }}-proxy\n annotations:\n {{- range $key, $value := .Values.proxy.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n labels:\n app: {{ template \"eg.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.proxy.type }}\n {{- if and (eq .Values.proxy.type \"LoadBalancer\") .Values.proxy.loadBalancerIP }}\n loadBalancerIP: {{ .Values.proxy.loadBalancerIP }}\n {{- end }}\n ports:\n - name: eg-proxy\n port: {{ .Values.proxy.servicePort }}\n targetPort: {{ .Values.proxy.containerPort }}\n {{- if (and (eq .Values.proxy.type \"NodePort\") (not (empty .Values.proxy.nodePort))) }}\n nodePort: {{ .Values.proxy.nodePort }}\n {{- end }}\n protocol: TCP\n selector:\n app: {{ template \"eg.name\" . }}\n release: {{ .Release.Name }}\n"
] | # Default values for Express Gateway.
# Declare variables to be passed into your templates.
image:
repository: expressgateway/express-gateway
tag: v1.16.9
pullPolicy: IfNotPresent
# Specify Express Gateway Admin API
admin:
# HTTPS traffic on the admin port
https: true
hostname: 0.0.0.0
servicePort: 9876
containerPort: 9876
# Admin Service type
type: NodePort
# Specify Express Gateway main listening service
proxy:
# HTTPS traffic on the proxy port
https: true
tls: {}
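  ## Illustrative SNI certificate map, following Express Gateway's
  ## gateway.config.yml tls format (the paths are assumptions and must
  ## exist inside the container):
  # tls:
  #   "default":
  #     key: /var/lib/eg/keys/example.key.pem
  #     cert: /var/lib/eg/keys/example.cert.pem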
servicePort: 8080
containerPort: 8080
type: NodePort
# readinessProbe for EG pods
readinessProbe:
httpGet:
path: "/users"
port: admin
scheme: HTTP
initialDelaySeconds: 5
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 2
# livenessProbe for EG pods
livenessProbe:
httpGet:
path: "/users"
port: admin
scheme: HTTP
initialDelaySeconds: 10
timeoutSeconds: 5
periodSeconds: 30
successThreshold: 1
failureThreshold: 2
# Tolerations for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# Annotation to be added to pods
podAnnotations: {}
# pod count
replicaCount: 1
# Express Gateway can either run with transactional data in memory or use
# Redis as its backend database. In-memory emulation is enabled by default
# (see storage.emulate below).
redis:
password: express-gateway
storage:
emulate: true
namespace: EG
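## Set emulate: false to store data in the bundled Redis instead; the chart's
## configmap already points Express Gateway at the release's redis master and
## slave sentinel hosts.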
crypto:
cipherKey: sensitiveKey
algorithm: aes256
saltRounds: 10
session:
secret: keyboard cat
resave: false
saveUninitialized: false
accessTokens:
timeToExpiry: 7200000
refreshTokens:
timeToExpiry: 7200000
authorizationCodes:
timeToExpiry: 300000
|
sonarqube | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"sonarqube.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"sonarqube.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\n Create a default fully qualified mysql/postgresql name.\n We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"postgresql.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"postgresql\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- define \"mysql.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mysql\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end}}\n\n{{/*\n Determine the hostname to use for PostgreSQL/mySQL.\n*/}}\n{{- define \"postgresql.hostname\" -}}\n{{- if eq .Values.database.type \"postgresql\" -}}\n{{- if .Values.postgresql.enabled -}}\n{{- printf \"%s-%s\" .Release.Name \"postgresql\" | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s\" .Values.postgresql.postgresqlServer -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- define \"mysql.hostname\" -}}\n{{- if eq .Values.database.type \"mysql\" -}}\n{{- if .Values.mysql.enabled -}}\n{{- printf \"%s-%s\" .Release.Name \"mysql\" | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s\" .Values.mysql.mysqlServer -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}",
"# config.yaml\n\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"sonarqube.fullname\" . }}-config\n labels:\n app: {{ template \"sonarqube.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n {{- if and .Values.sonarSecretKey (not .Values.sonarProperties) (not .Values.elasticsearch.bootstrapChecks) }}\n sonar.properties: sonar.secretKeyPath={{ .Values.sonarqubeFolder }}/secret/sonar-secret.txt\n {{- end }}\n {{- if or .Values.sonarProperties (not .Values.elasticsearch.bootstrapChecks) }}\n sonar.properties:\n {{ range $key, $val := .Values.sonarProperties }}\n {{ $key }}={{ $val }}\n {{ end }}\n {{- if not .Values.elasticsearch.bootstrapChecks }}\n sonar.es.bootstrap.checks.disable=true\n {{- end }}\n {{- end }}\n {{- if and .Values.sonarSecretKey .Values.sonarProperties }}\n sonar.secretKeyPath={{ .Values.sonarqubeFolder }}/secret/sonar-secret.txt\n {{- end }}\n",
"# copy-plugins.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"sonarqube.fullname\" . }}-copy-plugins\n labels:\n app: {{ template \"sonarqube.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n copy_plugins.sh: |-\n #!/bin/bash\n {{- if .Values.plugins.deleteDefaultPlugins }}\n rm -f {{ .Values.sonarqubeFolder }}/extensions/plugins/*.jar\n {{- end }}\n for f in {{ .Values.sonarqubeFolder }}/extensions/plugins/tmp/*.jar\n do\n file=${f##*/} && file=${file%-[0-9]*}\n for original in {{ .Values.sonarqubeFolder }}/extensions/plugins/*.jar\n do\n originalfile=${original##*/} && originalfile=${originalfile%-[0-9]*}\n if [ \"$originalfile\" = \"$file\" ]; then\n rm -f \"$original\"\n fi\n done\n done\n cp {{ .Values.sonarqubeFolder }}/extensions/plugins/tmp/*.jar {{ .Values.sonarqubeFolder }}/extensions/plugins/\n {{- if .Values.plugins.lib }}\n {{- range $index, $val := .Values.plugins.lib }}\n cp -f {{ $.Values.sonarqubeFolder }}/extensions/plugins/{{ $val }} {{ $.Values.sonarqubeFolder }}/lib/common/\n {{- end }}\n {{- end }}\n {{ .Values.sonarqubeFolder }}/bin/run.sh\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"sonarqube.fullname\" . }}\n labels:\n app: {{ template \"sonarqube.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"sonarqube.name\" . }}\n release: {{ .Release.Name }}\n{{- if .Values.deploymentStrategy }}\n strategy:\n{{ toYaml .Values.deploymentStrategy | indent 4 }}\n{{- end }}\n template:\n metadata:\n labels:\n app: {{ template \"sonarqube.name\" . }}\n release: {{ .Release.Name }}\n{{- with .Values.podLabels }}\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{ if .Values.annotations}}\n annotations:\n {{- range $key, $value := .Values.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n{{- end }}\n spec:\n securityContext:\n{{ toYaml .Values.securityContext | indent 8 }}\n {{- if .Values.image.pullSecret }}\n imagePullSecrets:\n - name: {{ .Values.image.pullSecret }}\n {{- end }}\n initContainers:\n - name: chmod-volume-mounts\n image: busybox:1.31\n command:\n - \"sh\"\n - \"-c\"\n - 'mkdir -p $(printf \"/opt/sonarqube/${1-%s\\n}\" temp logs data extensions/downloads extensions/plugins/tmp extensions/plugins certs) &&\n chown 999:999 -R $(printf \"/opt/sonarqube/${1-%s\\n}\" temp logs data extensions/downloads extensions/plugins/tmp extensions/plugins certs)'\n volumeMounts:\n - mountPath: /opt/sonarqube/temp\n name: sonarqube\n subPath: temp\n - mountPath: /opt/sonarqube/logs\n name: sonarqube\n subPath: logs\n - mountPath: /opt/sonarqube/data\n name: sonarqube\n subPath: data\n - mountPath: /opt/sonarqube/extensions/plugins/tmp\n name: sonarqube\n subPath: tmp\n - mountPath: /opt/sonarqube/extensions/downloads\n name: sonarqube\n subPath: downloads\n - mountPath: /opt/sonarqube/extensions/plugins\n name: sonarqube\n subPath: plugins\n {{- with .Values.env }}\n env:\n {{- . | toYaml | trim | nindent 12 }}\n {{- end }}\n {{- if .Values.caCerts }}\n - name: ca-certs\n image: adoptopenjdk/openjdk11:alpine\n command: [\"sh\"]\n args: [\"-c\", \"cp -f \\\"${JAVA_HOME}/lib/security/cacerts\\\" /tmp/certs/cacerts; for f in /tmp/secrets/ca-certs/*; do keytool -importcert -file \\\"${f}\\\" -alias \\\"$(basename \\\"${f}\\\")\\\" -keystore /tmp/certs/cacerts -storepass changeit -trustcacerts -noprompt; done;\"]\n volumeMounts:\n - mountPath: /tmp/certs\n name: sonarqube\n subPath: certs\n - mountPath: /tmp/secrets/ca-certs\n name: ca-certs\n {{- with .Values.env }}\n env:\n {{- . | toYaml | trim | nindent 12 }}\n {{- end }}\n {{- end }}\n {{- if .Values.elasticsearch.configureNode }}\n - name: init-sysctl\n image: {{ default \"busybox:1.31\" .Values.plugins.initSysctlContainerImage }}\n securityContext:\n privileged: true\n command:\n - sysctl\n - -w\n - vm.max_map_count=262144\n {{- with .Values.env }}\n env:\n {{- . 
| toYaml | trim | nindent 12 }}\n {{- end }}\n {{- end }}\n {{- if .Values.plugins.install }}\n - name: install-plugins\n image: {{ default \"alpine:3.10.3\" .Values.plugins.initContainerImage }}\n command: [\"sh\",\n \"-c\",\n \"mkdir -p {{ .Values.sonarqubeFolder }}/extensions/plugins/tmp &&\n rm -f {{ .Values.sonarqubeFolder }}/extensions/plugins/tmp/* &&\n cp /tmp/scripts/install_plugins.sh {{ .Values.sonarqubeFolder }}/extensions/plugins/tmp/install_plugins.sh &&\n chmod 0775 {{ .Values.sonarqubeFolder }}/extensions/plugins/tmp/install_plugins.sh &&\n {{ .Values.sonarqubeFolder }}/extensions/plugins/tmp/install_plugins.sh && pwd && ls -lah\"]\n volumeMounts:\n - mountPath: {{ .Values.sonarqubeFolder }}/extensions/plugins/tmp\n name: sonarqube\n subPath: tmp\n - name: install-plugins\n mountPath: /tmp/scripts/\n {{- with .Values.env }}\n env:\n {{- . | toYaml | trim | nindent 12 }}\n {{- end }}\n resources:\n{{ toYaml .Values.plugins.resources | indent 12 }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.hostAliases }}\n hostAliases:\n{{ toYaml .Values.hostAliases | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- end }}\n {{- if and .Values.sonarProperties .Values.sonarSecretProperties }}\n - name: concat-properties\n image: {{ default \"alpine:3.10.3\" .Values.plugins.initContainerImage }}\n command: [\"sh\",\n \"-c\",\n \"awk 1 /tmp/props/sonar.properties /tmp/props/secret.properties > /tmp/result/sonar.properties\"]\n volumeMounts:\n - mountPath: /tmp/props/sonar.properties\n name: config\n subPath: sonar.properties\n - mountPath: /tmp/props/secret.properties\n name: secret-config\n subPath: secret.properties\n - mountPath: /tmp/result\n name: concat-dir\n {{- with .Values.env }}\n env:\n {{- . | toYaml | trim | nindent 12 }}\n {{- end }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: http\n containerPort: {{ .Values.service.internalPort }}\n protocol: TCP\n {{- if .Values.plugins.install }}\n command:\n - /tmp/scripts/copy_plugins.sh\n {{- end }}\n env:\n {{- with .Values.env }}\n {{- . | toYaml | trim | nindent 12 }}\n {{- end }}\n - name: SONARQUBE_WEB_JVM_OPTS\n {{- if .Values.caCerts }}\n value: {{ printf \"-Djavax.net.ssl.trustStore=%s/certs/cacerts %s\" .Values.sonarqubeFolder .Values.jvmOpts | trim | quote }}\n {{- else }}\n value: \"{{ .Values.jvmOpts }}\"\n {{- end }}\n - name: SONARQUBE_JDBC_USERNAME\n {{- if eq .Values.database.type \"postgresql\" }}\n value: {{ .Values.postgresql.postgresqlUsername | quote }}\n {{- else if eq .Values.database.type \"mysql\" }}\n value: {{ .Values.mysql.mysqlUser | quote }}\n {{- end }}\n - name: SONARQUBE_JDBC_PASSWORD\n valueFrom:\n secretKeyRef:\n {{- if eq .Values.database.type \"postgresql\" }}\n name: {{- if .Values.postgresql.enabled }} {{ template \"postgresql.fullname\" .}} {{- else if .Values.postgresql.postgresqlPasswordSecret }} {{ .Values.postgresql.postgresqlPasswordSecret }} {{- else }} {{ template \"sonarqube.fullname\" . 
}} {{- end }}\n key: postgresql-password\n {{- else if eq .Values.database.type \"mysql\" }}\n name: {{- if .Values.mysql.enabled }} {{ template \"mysql.fullname\" .}} {{- else if .Values.mysql.mysqlPasswordSecret }} {{ .Values.mysql.mysqlPasswordSecret }} {{- else }} {{ template \"sonarqube.fullname\" . }} {{- end }}\n key: mysql-password\n {{- end }}\n - name: SONARQUBE_JDBC_URL\n {{- if eq .Values.database.type \"postgresql\" }}\n value: \"jdbc:postgresql://{{ template \"postgresql.hostname\" . }}:{{- .Values.postgresql.service.port -}}/{{- .Values.postgresql.postgresqlDatabase -}}\"\n {{- else if eq .Values.database.type \"mysql\" }}\n value: \"jdbc:mysql://{{ template \"mysql.hostname\" . }}:{{ .Values.mysql.service.port }}/{{ .Values.mysql.mysqlDatabase }}?useUnicode=true&characterEncoding=utf8&rewriteBatchedStatements=true{{- range $key, $value := .Values.mysql.mysqlParams }}&{{ $key }}={{ $value }}{{- end }}\"\n {{- end }}\n livenessProbe:\n httpGet:\n path: {{ .Values.livenessProbe.sonarWebContext }}sessions/new\n port: http\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n readinessProbe:\n httpGet:\n path: {{ .Values.readinessProbe.sonarWebContext }}sessions/new\n port: http\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n volumeMounts:\n{{- if .Values.persistence.mounts }}\n{{ toYaml .Values.persistence.mounts | indent 12 }}\n{{- end }}\n {{- if and .Values.sonarProperties .Values.sonarSecretProperties }}\n - mountPath: {{ .Values.sonarqubeFolder }}/conf/\n name: concat-dir\n {{- else if or .Values.sonarProperties (not .Values.elasticsearch.bootstrapChecks) }}\n - mountPath: {{ .Values.sonarqubeFolder }}/conf/\n name: config\n {{- end }}\n {{- if .Values.sonarSecretKey }}\n - mountPath: {{ .Values.sonarqubeFolder }}/secret/\n name: secret\n {{- end }}\n {{- if .Values.caCerts }}\n - mountPath: {{ .Values.sonarqubeFolder }}/certs\n name: sonarqube\n subPath: certs\n {{- end }}\n - mountPath: {{ .Values.sonarqubeFolder }}/data\n name: sonarqube\n subPath: data\n - mountPath: {{ .Values.sonarqubeFolder }}/extensions/plugins/tmp\n name: sonarqube\n subPath: tmp\n - mountPath: {{ .Values.sonarqubeFolder }}/extensions/downloads\n name: sonarqube\n subPath: downloads\n - mountPath: {{ .Values.sonarqubeFolder }}/extensions/plugins\n name: sonarqube\n subPath: plugins\n - mountPath: {{ .Values.sonarqubeFolder }}/temp\n name: sonarqube\n subPath: temp\n - mountPath: {{ .Values.sonarqubeFolder }}/logs\n name: sonarqube\n subPath: logs\n - mountPath: /tmp\n name: tmp-dir\n - name: copy-plugins\n mountPath: /tmp/scripts\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n volumes:\n{{- if .Values.persistence.volumes }}\n{{ tpl (toYaml .Values.persistence.volumes | indent 6) . }}\n{{- end }}\n {{- if or .Values.sonarProperties (not .Values.elasticsearch.bootstrapChecks) }}\n - name: config\n configMap:\n name: {{ template \"sonarqube.fullname\" . 
}}-config\n items:\n - key: sonar.properties\n path: sonar.properties\n {{- end }}\n {{- if .Values.sonarSecretProperties }}\n - name: secret-config\n secret:\n secretName: {{ .Values.sonarSecretProperties }}\n items:\n - key: secret.properties\n path: secret.properties\n {{- end }}\n {{- if .Values.sonarSecretKey }}\n - name: secret\n secret:\n secretName: {{ .Values.sonarSecretKey }}\n items:\n - key: sonar-secret.txt\n path: sonar-secret.txt\n {{- end }}\n {{- if .Values.caCerts }}\n - name: ca-certs\n secret:\n secretName: {{ .Values.caCerts.secret }}\n {{- end }}\n - name: install-plugins\n configMap:\n name: {{ template \"sonarqube.fullname\" . }}-install-plugins\n items:\n - key: install_plugins.sh\n path: install_plugins.sh\n - name: copy-plugins\n configMap:\n name: {{ template \"sonarqube.fullname\" . }}-copy-plugins\n defaultMode: 0755\n items:\n - key: copy_plugins.sh\n path: copy_plugins.sh\n - name: sonarqube\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template \"sonarqube.fullname\" . }}{{- end }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n - name : tmp-dir\n emptyDir: {}\n {{- if .Values.sonarSecretProperties }}\n - name : concat-dir\n emptyDir: {}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $serviceName := include \"sonarqube.fullname\" . -}}\n{{- $servicePort := .Values.service.externalPort -}}\n{{- if .Capabilities.APIVersions.Has \"networking.k8s.io/v1beta1\" }}\napiVersion: networking.k8s.io/v1beta1\n{{ else }}\napiVersion: extensions/v1beta1\n{{ end -}}\nkind: Ingress\nmetadata:\n name: {{ template \"sonarqube.fullname\" . }}\n labels:\n app: {{ template \"sonarqube.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.ingress.labels }}\n{{ .Values.ingress.labels | toYaml | trimSuffix \"\\n\"| indent 4 -}}\n{{- end}}\n{{- if .Values.ingress.annotations}}\n annotations:\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n{{- end }}\nspec:\n rules:\n {{- range .Values.ingress.hosts }}\n {{- $path := default \"/\" .path }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ $path }}\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n - path: {{ printf \"%s/*\" (trimSuffix \"/\" $path) }}\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n {{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n",
"# install-plugins.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"sonarqube.fullname\" . }}-install-plugins\n labels:\n app: {{ template \"sonarqube.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n install_plugins.sh: |-\n cd {{ .Values.sonarqubeFolder }}/extensions/plugins/tmp\n {{- if .Values.plugins.install }}\n {{ range $index, $val := .Values.plugins.install }}echo {{ $val | quote }} >> plugins.txt\n {{ end }}\n cat plugins.txt | xargs -n 1 -P 8 wget --no-check-certificate\n rm plugins.txt\n {{- end }}\n",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"sonarqube.fullname\" . }}\n labels:\n app: {{ template \"sonarqube.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{ if .Values.persistence.annotations}}\n annotations:\n {{- range $key, $value := .Values.persistence.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n{{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# secret.yaml\n{{- if eq .Values.database.type \"postgresql\" -}}\n{{- if eq .Values.postgresql.enabled false -}}\n{{- if not .Values.postgresql.postgresqlPasswordSecret -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"sonarqube.fullname\" . }}\n labels:\n app: {{ template \"sonarqube.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n postgresql-password: {{ .Values.postgresql.postgresqlPassword | b64enc | quote }}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- if eq .Values.database.type \"mysql\" -}}\n{{- if eq .Values.mysql.enabled false -}}\n{{- if not .Values.mysql.mysqlPasswordSecret -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"sonarqube.fullname\" . }}\n labels:\n app: {{ template \"sonarqube.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n mysql-password: {{ .Values.mysql.mysqlPassword | b64enc | quote }}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"sonarqube.fullname\" . }}\n labels:\n app: {{ template \"sonarqube.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- range $key, $value := .Values.service.labels }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n{{ if .Values.service.annotations}}\n annotations:\n {{- range $key, $value := .Values.service.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.externalPort }}\n targetPort: http\n protocol: TCP\n name: http\n selector:\n app: {{ template \"sonarqube.name\" . }}\n release: {{ .Release.Name }}\n {{- if eq .Values.service.type \"LoadBalancer\"}}\n {{- if .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n {{- range .Values.service.loadBalancerSourceRanges }}\n - {{ . }}\n {{- end }}\n {{- end -}}\n {{- if .Values.service.loadBalancerIP}}\n loadBalancerIP: {{.Values.service.loadBalancerIP}}\n {{- end }}\n {{- end }}\n",
"# sonarqube-test.yaml\napiVersion: v1\nkind: Pod\nmetadata:\n name: \"{{ .Release.Name }}-ui-test\"\n annotations:\n \"helm.sh/hook\": test-success\n labels:\n app: {{ template \"sonarqube.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n initContainers:\n - name: \"test-framework\"\n image: \"dduportal/bats:0.4.0\"\n command:\n - \"bash\"\n - \"-c\"\n - |\n set -ex\n # copy bats to tools dir\n cp -R /usr/local/libexec/ /tools/bats/\n volumeMounts:\n - mountPath: /tools\n name: tools\n containers:\n - name: {{ .Release.Name }}-ui-test\n image: {{ .Values.image.repository }}:{{ .Values.image.tag }}\n command: [\"/tools/bats/bats\", \"-t\", \"/tests/run.sh\"]\n volumeMounts:\n - mountPath: /tests\n name: tests\n readOnly: true\n - mountPath: /tools\n name: tools\n volumes:\n - name: tests\n configMap:\n name: {{ template \"sonarqube.fullname\" . }}-tests\n - name: tools\n emptyDir: {}\n restartPolicy: Never\n",
"# test-config.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"sonarqube.fullname\" . }}-tests\n labels:\n app: {{ template \"sonarqube.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n run.sh: |-\n @test \"Testing Sonarqube UI is accessible\" {\n curl --connect-timeout 5 --retry 12 --retry-delay 1 --retry-max-time 60 {{ template \"sonarqube.fullname\" . }}:{{ .Values.service.internalPort }}/sessions/new\n }\n"
] | # Default values for sonarqube.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
# This will use the default deployment strategy unless it is overridden
deploymentStrategy: {}
image:
repository: sonarqube
tag: 7.9.2-community
# If using a private repository, the name of the imagePullSecret to use
# pullSecret: my-repo-secret
# Set security context for sonarqube pod
securityContext:
fsGroup: 999
# Settings to configure elasticsearch host requirements
elasticsearch:
configureNode: true
bootstrapChecks: true
service:
type: ClusterIP
externalPort: 9000
internalPort: 9000
labels:
annotations: {}
# May be used, for example, for internal load balancing in GCP:
# cloud.google.com/load-balancer-type: Internal
# loadBalancerSourceRanges:
# - 0.0.0.0/0
# loadBalancerIP: 1.2.3.4
ingress:
enabled: false
# Used to create an Ingress record.
hosts:
- name: sonar.organization.com
# default paths for "/" and "/*" will be added
path: /
# If a different path is defined, that path and {path}/* will be added to the ingress resource
# path: /sonarqube
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# This property allows for reports up to a certain size to be uploaded to SonarQube
# nginx.ingress.kubernetes.io/proxy-body-size: "8m"
# Additional labels for Ingress manifest file
# labels:
# traffic-type: external
# traffic-type: internal
tls: []
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# Tolerations for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# hostAliases allows the modification of the hosts file inside a container
hostAliases: []
# - ip: "192.168.1.10"
# hostnames:
# - "example.com"
# - "www.example.com"
readinessProbe:
initialDelaySeconds: 60
periodSeconds: 30
failureThreshold: 6
# If an ingress *path* other than the root (/) is defined, it should be reflected here
# A trailing "/" must be included
sonarWebContext: /
# sonarWebContext: /sonarqube/
livenessProbe:
initialDelaySeconds: 60
periodSeconds: 30
# If an ingress *path* other than the root (/) is defined, it should be reflected here
# A trailing "/" must be included
sonarWebContext: /
# sonarWebContext: /sonarqube/
# If an ingress *path* is defined, the sonar.web.context property should be set to match, e.g.:
# sonar.web.context: /sonarqube
## Provide a secret containing one or more certificate files in the keys that will be added to cacerts
## The cacerts file will be set via SONARQUBE_WEB_JVM_OPTS
##
# caCerts:
# secret: my-secret
## Values to add to SONARQUBE_WEB_JVM_OPTS
##
# jvmOpts: "-Djava.net.preferIPv4Stack=true"
jvmOpts: ""
## Environment variables to attach to the pods
##
# env:
# - name: VARIABLE
# value: my-value
# Set annotations for pods
annotations: {}
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
persistence:
enabled: false
## Set annotations on pvc
annotations: {}
## Specify an existing volume claim instead of creating a new one.
## When using this option, all following options like storageClass, accessMode and size are ignored.
# existingClaim:
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass:
accessMode: ReadWriteOnce
size: 10Gi
## Specify extra volumes. Refer to the ".spec.volumes" specification: https://kubernetes.io/fr/docs/concepts/storage/volumes/
volumes: []
## Specify extra mounts. Refer to the ".spec.containers.volumeMounts" specification: https://kubernetes.io/fr/docs/concepts/storage/volumes/
mounts: []
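# For example, a minimal sketch (all names hypothetical) pairing an extra
# ConfigMap-backed volume with its mount:
# volumes:
#   - name: extra-init
#     configMap:
#       name: my-extra-init-scripts
# mounts:
#   - name: extra-init
#     mountPath: /opt/sonarqube/extensions/init
#     readOnly: true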
# List of plugins to install.
# For example:
# plugins:
# install:
# - "https://github.com/AmadeusITGroup/sonar-stash/releases/download/1.3.0/sonar-stash-plugin-1.3.0.jar"
# - "https://github.com/SonarSource/sonar-ldap/releases/download/2.2-RC3/sonar-ldap-plugin-2.2.0.601.jar"
plugins:
install: []
lib: []
# initContainerImage: alpine:3.10.3
# deleteDefaultPlugins: true
resources: {}
# We allow the plugins init container to have a separate resources declaration because
# the init container does not require as many resources.
# A custom sonar.properties file can be provided via dictionary.
# For example:
# sonarProperties:
# sonar.forceAuthentication: true
# sonar.security.realm: LDAP
# ldap.url: ldaps://organization.com
# Additional sonar properties to load from a secret with a key "secret.properties" (must be a string)
# sonarSecretProperties:
# Kubernetes secret that contains the encryption key for the sonarqube instance.
# The secret must contain the key 'sonar-secret.txt'.
# The 'sonar.secretKeyPath' property will be set automatically.
# sonarSecretKey: "settings-encryption-secret"
## Configuration value to select database type
## Supported values are "postgresql" and "mysql"; by default "postgresql" is chosen
## Set the "enabled" field to true for the database type you select (if you want to use the internal database) and to false for the one you don't
database:
type: "postgresql"
## Configuration values for postgresql dependency
## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md
postgresql:
# Enable to deploy the PostgreSQL chart
enabled: true
# To use an external PostgreSQL instance, set enabled to false and uncomment
# the line below:
# postgresqlServer: ""
# To use an external secret for the password of an external PostgreSQL
# instance, set enabled to false and provide the name of the secret on the
# line below:
# postgresqlPasswordSecret: ""
postgresqlUsername: "sonarUser"
postgresqlPassword: "sonarPass"
postgresqlDatabase: "sonarDB"
# Specify the TCP port that PostgreSQL should use
service:
port: 5432
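# For example, a minimal sketch of pointing the chart at an external
# PostgreSQL instance (the server and secret names are hypothetical):
# postgresql:
#   enabled: false
#   postgresqlServer: "pg.example.internal"
#   postgresqlPasswordSecret: "sonarqube-db-credentials"
#   postgresqlUsername: "sonarUser"
#   postgresqlDatabase: "sonarDB"
#   service:
#     port: 5432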
## Configuration values for the mysql dependency
## ref: https://github.com/kubernetes/charts/blob/master/stable/mysql/README.md
##
mysql:
# Enable to deploy the MySQL chart
enabled: false
# To use an external MySQL instance, set enabled to false and uncomment
# the line below:
# mysqlServer: ""
# To use an external secret for the password of an external MySQL instance,
# set enabled to false and provide the name of the secret on the line below:
# mysqlPasswordSecret: ""
mysqlUser: "sonarUser"
mysqlPassword: "sonarPass"
mysqlDatabase: "sonarDB"
# mysqlParams:
# useSSL: "true"
# Specify the TCP port that MySQL should use
service:
port: 3306
#
# Additional labels to add to the pods:
# podLabels:
# key: value
podLabels: {}
# For compatibility with 8.0 replace by "/opt/sq"
sonarqubeFolder: /opt/sonarqube
|
luigi | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"luigi.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"luigi.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"luigi.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"luigi.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n client.cfg: |\n{{ printf .Values.service.config | indent 4 }}\n\n logging.cfg: |\n [loggers]\n keys=root,tornado,client,scheduler,server\n\n [logger_root]\n level=DEBUG\n handlers=console\n\n [logger_client]\n level=DEBUG\n handlers=console\n qualname=luigi-interface\n propagate=0\n\n [logger_server]\n level=DEBUG\n handlers=console\n qualname=luigi.server\n propagate=0\n\n [logger_scheduler]\n level=DEBUG\n handlers=console\n qualname=luigi.scheduler\n propagate=0\n\n [logger_tornado]\n level=DEBUG\n handlers=warnconsole\n qualname=tornado\n propagate=0\n\n [formatters]\n keys=detail\n\n [formatter_detail]\n class=logging.Formatter\n format=%(asctime)s %(name)-15s %(levelname)-8s %(message)s\n\n [handlers]\n keys=console,warnconsole\n\n [handler_console]\n level=INFO\n class=StreamHandler\n args=(sys.stdout,)\n formatter=detail\n\n [handler_warnconsole]\n level=WARNING\n class=StreamHandler\n args=(sys.stdout,)\n formatter=detail\n\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"luigi.fullname\" . }}\n labels:\n app: {{ template \"luigi.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"luigi.name\" . }}\n release: {{ .Release.Name }}\n replicas: {{ .Values.replicaCount }}\n template:\n metadata:\n labels:\n app: {{ template \"luigi.name\" . }}\n release: {{ .Release.Name }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n command: [\"/bin/sh\", \"-c\", \"luigid\"]\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - containerPort: 8082\n livenessProbe:\n httpGet:\n path: /static/visualiser/index.html\n port: 8082\n readinessProbe:\n httpGet:\n path: /static/visualiser/index.html\n port: 8082\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumeMounts:\n - name: {{ template \"luigi.name\" . }}-luigi-conf\n mountPath: /etc/luigi/client.cfg\n subPath: client.cfg\n - name: {{ template \"luigi.name\" . }}-logging-conf\n mountPath: /etc/luigi/logging.cfg\n subPath: logging.cfg\n - name: luigi-state\n mountPath: /luigi/state/\n\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n\n volumes:\n - name: {{ template \"luigi.name\" . }}-luigi-conf\n configMap:\n name: {{ template \"luigi.fullname\" . }}\n items:\n - key: client.cfg\n path: client.cfg\n\n - name: {{ template \"luigi.name\" . }}-logging-conf\n configMap:\n name: {{ template \"luigi.fullname\" . }}\n items:\n - key: logging.cfg\n path: logging.cfg\n\n - name: luigi-state\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"luigi.fullname\" . }}\n {{- else }}\n emptyDir: {}\n {{ end }}\n",
"# ingress_api.yaml\n{{- if .Values.ingressAPI.enabled -}}\n{{- $serviceName := include \"luigi.fullname\" . -}}\n{{- $servicePort := .Values.service.externalPort -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"luigi.fullname\" . }}-api\n labels:\n app: {{ template \"luigi.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n annotations:\n {{- range $key, $value := .Values.ingressAPI.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range $host := .Values.ingressAPI.hosts }}\n - host: {{ $host }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n {{- if .Values.ingressAPI.tls }}\n tls:\n{{ toYaml .Values.ingressAPI.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n\n",
"# ingress_ui.yaml\n{{- if .Values.ingressUI.enabled -}}\n{{- $serviceName := include \"luigi.fullname\" . -}}\n{{- $servicePort := .Values.service.externalPort -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"luigi.fullname\" . }}-ui\n labels:\n app: {{ template \"luigi.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n annotations:\n {{- range $key, $value := .Values.ingressUI.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range $host := .Values.ingressUI.hosts }}\n - host: {{ $host }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n {{- if .Values.ingressUI.tls }}\n tls:\n{{ toYaml .Values.ingressUI.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n",
"# pvc.yaml\n{{- if .Values.persistence.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"luigi.fullname\" . }}\n labels:\n app: {{ template \"luigi.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"luigi.fullname\" . }}\n labels:\n app: {{ template \"luigi.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.externalPort }}\n targetPort: 8082\n protocol: TCP\n name: http\n selector:\n app: {{ template \"luigi.name\" . }}\n release: {{ .Release.Name }}\n"
] | # Default values for luigi.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: axiom/docker-luigi
tag: 2.7.2-alpine
pullPolicy: IfNotPresent
service:
name: luigi
type: LoadBalancer
externalPort: 80
# Luigi config: these values should match the luigi documentation
# https://luigi.readthedocs.io/en/stable/configuration.html
config: |
[core]
logging_conf_file=/etc/luigi/logging.cfg
[scheduler]
record_task_history=true
state-path=/luigi/state/luigi-state.pickle
[task_history]
db_connection=mysql://luigi-mysql/luigidb
# Creates a persistent volume claim for the
# luigi state pickle
persistence:
enabled: false
size: 1G
accessMode: ReadWriteOnce
# Ingress for UI access, for use with authentication like oauth-proxy.
# Depending on the authentication you use, you may only need one ingress.
ingressUI:
enabled: false
path: /
# Used to create an Ingress record.
# hosts:
# - chart-example.local
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# tls:
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
# Ingress for API access via https and whatever authentication you use
ingressAPI:
enabled: false
path: /
# Used to create an Ingress record.
# hosts:
# - chart-example.local
# annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# tls:
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
mysql:
mysqlDatabase: luigidb
mysqlAllowEmptyPassword: true
persistence:
enabled: false
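# To run the scheduler without task history (dropping the MySQL dependency),
# a minimal sketch of the config override:
# config: |
#   [core]
#   logging_conf_file=/etc/luigi/logging.cfg
#   [scheduler]
#   record_task_history=false
#   state-path=/luigi/state/luigi-state.pickle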
|
metricbeat | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"metricbeat.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"metricbeat.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"metricbeat.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"metricbeat.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"metricbeat.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"metricbeat.fullname\" . }}\n labels:\n app: {{ template \"metricbeat.name\" . }}\n chart: {{ template \"metricbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nrules:\n{{- if .Values.rbac.pspEnabled }}\n- apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames: [{{ template \"metricbeat.fullname\" . }}]\n{{- end }}\n- apiGroups: [\"\"]\n resources:\n - nodes\n - namespaces\n - events\n - pods\n verbs: [\"get\", \"list\", \"watch\"]\n- apiGroups: [\"extensions\"]\n resources:\n - replicasets\n verbs: [\"get\", \"list\", \"watch\"]\n- apiGroups: [\"apps\"]\n resources:\n - statefulsets\n - deployments\n - replicasets\n verbs: [\"get\", \"list\", \"watch\"]\n- apiGroups: [\"\"]\n resources:\n - nodes/stats\n - nodes/metrics\n verbs: [\"get\"]\n- nonResourceURLs: [\"/metrics\"]\n verbs: [\"get\"]\n{{- end -}}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"metricbeat.fullname\" . }}\n labels:\n app: {{ template \"metricbeat.name\" . }}\n chart: {{ template \"metricbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"metricbeat.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"metricbeat.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# daemonset.yaml\n{{- if .Values.daemonset.enabled }}\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: {{ template \"metricbeat.fullname\" . }}\n labels:\n app: {{ template \"metricbeat.name\" . }}\n chart: {{ template \"metricbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"metricbeat.name\" . }}\n release: {{ .Release.Name }}\n minReadySeconds: 10\n updateStrategy:\n type: RollingUpdate\n rollingUpdate:\n maxUnavailable: 1\n template:\n metadata:\n labels:\n app: {{ template \"metricbeat.name\" . }}\n release: {{ .Release.Name }}\n annotations:\n checksum/config: {{ toYaml (default .Values.daemonset.config .Values.daemonset.overrideConfig) | sha256sum }}\n checksum/modules: {{ toYaml (default .Values.daemonset.modules .Values.daemonset.overrideModules) | sha256sum }}\n{{- if .Values.daemonset.podAnnotations }}\n {{- range $key, $value := .Values.daemonset.podAnnotations }}\n {{ $key }}: {{ $value }}\n {{- end }}\n{{- end }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n args:\n{{- if .Values.daemonset.args }}\n{{ toYaml .Values.daemonset.args | indent 8 }}\n{{- else }}\n - \"-e\"\n{{- if .Values.plugins }}\n - \"--plugin\"\n - {{ .Values.plugins | join \",\" | quote }}\n{{- end }}\n - \"-system.hostfs=/hostfs\"\n{{- if .Values.daemonset.debug }}\n - \"-d\"\n - \"*\"\n{{- end }}\n{{- end }}\n env:\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n{{- if .Values.extraEnv }}\n{{ toYaml .Values.extraEnv | indent 8 }}\n{{- end }}\n securityContext:\n runAsUser: 0\n{{- if .Values.daemonset.capabilities }}\n capabilities:\n add:\n{{ toYaml .Values.daemonset.capabilities | indent 12 }}\n{{- end }}\n resources:\n{{- if .Values.daemonset.resources }}\n{{ toYaml .Values.daemonset.resources | indent 10 }}\n{{- else if .Values.resources }}\n{{ toYaml .Values.resources | indent 10 }}\n{{- end }}\n volumeMounts:\n - name: config\n mountPath: /usr/share/metricbeat/metricbeat.yml\n readOnly: true\n subPath: metricbeat.yml\n - name: modules\n mountPath: /usr/share/metricbeat/modules.d\n readOnly: true\n - name: data\n mountPath: /usr/share/metricbeat/data\n - name: proc\n mountPath: /hostfs/proc\n readOnly: true\n - name: cgroup\n mountPath: /hostfs/sys/fs/cgroup\n readOnly: true\n - name: dockersock\n mountPath: /var/run/docker.sock\n{{- if .Values.extraVolumeMounts }}\n{{ toYaml .Values.extraVolumeMounts | indent 8 }}\n{{- end }}\n volumes:\n - name: config\n secret:\n secretName: {{ template \"metricbeat.fullname\" . }}-daemonset-config\n - name: modules\n secret:\n secretName: {{ template \"metricbeat.fullname\" . }}-daemonset-modules\n - name: data\n hostPath:\n path: /var/lib/metricbeat\n type: DirectoryOrCreate\n - name: proc\n hostPath:\n path: /proc\n - name: cgroup\n hostPath:\n path: /sys/fs/cgroup\n - name: dockersock\n hostPath:\n path: /var/run/docker.sock\n{{- if .Values.extraVolumes }}\n{{ toYaml .Values.extraVolumes | indent 6 }}\n{{- end }}\n terminationGracePeriodSeconds: 60\n{{- if .Values.daemonset.priorityClassName }}\n priorityClassName: {{ .Values.daemonset.priorityClassName }}\n{{- end }}\n serviceAccountName: {{ template \"metricbeat.serviceAccountName\" . 
}}\n hostNetwork: {{ .Values.daemonset.hostNetwork }}\n dnsPolicy: {{ .Values.daemonset.dnsPolicy }}\n{{- if .Values.daemonset.tolerations }}\n tolerations:\n{{ toYaml .Values.daemonset.tolerations | indent 6 }}\n{{- end }}\n{{- if .Values.daemonset.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.daemonset.nodeSelector | indent 8 }}\n{{- end }}\n{{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n{{- end }}\n{{- end }}\n",
"# deployment.yaml\n# Deploy singleton instance in the whole cluster for some unique data sources, like kube-state-metrics\n{{- if .Values.deployment.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"metricbeat.fullname\" . }}\n labels:\n app: {{ template \"metricbeat.name\" . }}\n chart: {{ template \"metricbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"metricbeat.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"metricbeat.name\" . }}\n release: {{ .Release.Name }}\n annotations:\n checksum/config: {{ toYaml (default .Values.deployment.config .Values.deployment.overrideConfig) | sha256sum }}\n checksum/modules: {{ toYaml (default .Values.deployment.modules .Values.deployment.overrideModules) | sha256sum }}\n{{- if .Values.deployment.podAnnotations }}\n {{- range $key, $value := .Values.deployment.podAnnotations }}\n {{ $key }}: {{ $value }}\n {{- end }}\n{{- end }}\n spec:\n{{- if .Values.deployment.priorityClassName }}\n priorityClassName: {{ .Values.deployment.priorityClassName }}\n{{- end }}\n serviceAccountName: {{ template \"metricbeat.serviceAccountName\" . }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n args:\n{{- if .Values.deployment.args }}\n{{ toYaml .Values.deployment.args | indent 8 }}\n{{- else }}\n - \"-e\"\n{{- if .Values.plugins }}\n - \"--plugin\"\n - {{ .Values.plugins | join \",\" | quote }}\n{{- end }}\n{{- if .Values.deployment.debug }}\n - \"-d\"\n - \"*\"\n{{- end }}\n{{- end }}\n env:\n - name: POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n{{- if .Values.extraEnv }}\n{{ toYaml .Values.extraEnv | indent 8 }}\n{{- end }}\n securityContext:\n runAsUser: 0\n resources:\n{{- if .Values.deployment.resources }}\n{{ toYaml .Values.deployment.resources | indent 10 }}\n{{- else if .Values.resources }}\n{{ toYaml .Values.resources | indent 10 }}\n{{- end }}\n volumeMounts:\n - name: metricbeat-config\n mountPath: /usr/share/metricbeat/metricbeat.yml\n readOnly: true\n subPath: metricbeat.yml\n - name: modules\n mountPath: /usr/share/metricbeat/modules.d\n readOnly: true\n{{- if .Values.extraVolumeMounts }}\n{{ toYaml .Values.extraVolumeMounts | indent 8 }}\n{{- end }}\n{{- if .Values.deployment.tolerations }}\n tolerations:\n{{ toYaml .Values.deployment.tolerations | indent 6 }}\n{{- end }}\n{{- if .Values.deployment.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.deployment.nodeSelector | indent 8 }}\n{{- end }}\n volumes:\n - name: metricbeat-config\n secret:\n secretName: {{ template \"metricbeat.fullname\" . }}-deployment-config\n - name: modules\n secret:\n secretName: {{ template \"metricbeat.fullname\" . }}-deployment-modules\n{{- if .Values.extraVolumes }}\n{{ toYaml .Values.extraVolumes | indent 6 }}\n{{- end }}\n{{- end }}\n",
"# podsecuritypolicy.yaml\n{{- if .Values.rbac.pspEnabled }}\napiVersion: extensions/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"metricbeat.fullname\" . }}\n labels:\n app: {{ template \"metricbeat.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n privileged: false\n defaultAddCapabilities:\n - CHOWN\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'secret'\n allowPrivilegeEscalation: false\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n rule: 'RunAsAny'\n seLinux:\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'RunAsAny'\n fsGroup:\n rule: 'RunAsAny'\n{{- end }}\n",
"# secret.yaml\n{{- if .Values.daemonset.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"metricbeat.fullname\" . }}-daemonset-config\n labels:\n app: {{ template \"metricbeat.name\" . }}\n chart: {{ template \"metricbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n metricbeat.yml: {{ toYaml (default .Values.daemonset.config .Values.daemonset.overrideConfig) | indent 4 | b64enc }}\n{{- end }}\n---\n{{- if .Values.deployment.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"metricbeat.fullname\" . }}-deployment-config\n labels:\n app: {{ template \"metricbeat.name\" . }}\n chart: {{ template \"metricbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n metricbeat.yml: {{ toYaml (default .Values.deployment.config .Values.deployment.overrideConfig) | indent 4 | b64enc }}\n{{- end }}\n---\n{{- if .Values.daemonset.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"metricbeat.fullname\" . }}-daemonset-modules\n labels:\n app: {{ template \"metricbeat.name\" . }}\n chart: {{ template \"metricbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n {{- range $key, $value := (default .Values.daemonset.modules .Values.daemonset.overrideModules) }}\n {{- if eq $value.enabled true }}\n {{ $key }}.yml: {{ toYaml $value.config | b64enc }}\n {{- end }}\n {{- end }}\n{{- end }}\n---\n{{- if .Values.deployment.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"metricbeat.fullname\" . }}-deployment-modules\n labels:\n app: {{ template \"metricbeat.name\" . }}\n chart: {{ template \"metricbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n {{- range $key, $value := (default .Values.deployment.modules .Values.deployment.overrideModules) }}\n {{- if eq $value.enabled true }}\n {{ $key }}.yml: {{ toYaml $value.config | b64enc }}\n {{- end }}\n {{- end }}\n{{- end }}\n{{- range $secret := .Values.extraSecrets }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ $secret.name }}\n labels:\n app: {{ template \"metricbeat.name\" $ }}\n chart: {{ template \"metricbeat.chart\" $ }}\n release: {{ $.Release.Name }}\n heritage: {{ $.Release.Service }}\ntype: Opaque\ndata:\n {{- range $key, $value := $secret.data }}\n {{ $key }}: {{ $value | b64enc }}\n {{- end }}\n{{- end}}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"metricbeat.serviceAccountName\" . }}\n labels:\n app: {{ template \"metricbeat.name\" . }}\n chart: {{ template \"metricbeat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- end -}}\n"
] | image:
repository: docker.elastic.co/beats/metricbeat
tag: 6.7.0
pullPolicy: IfNotPresent
# The instances created by the daemonset retrieve most metrics from the host
daemonset:
enabled: true
podAnnotations: []
priorityClassName: ""
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
nodeSelector: {}
resources: {}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
config:
metricbeat.config:
modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
processors:
- add_cloud_metadata:
output.file:
path: "/usr/share/metricbeat/data"
filename: metricbeat
rotate_every_kb: 10000
number_of_files: 5
# If overrideConfig is not empty, metricbeat chart's default config won't be used at all.
overrideConfig: {}
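# For example, a minimal sketch that replaces the file output with an assumed
# in-cluster Elasticsearch service (the host name is hypothetical):
# overrideConfig:
#   metricbeat.config:
#     modules:
#       path: ${path.config}/modules.d/*.yml
#       reload.enabled: false
#   processors:
#     - add_cloud_metadata:
#   output.elasticsearch:
#     hosts: ["elasticsearch-client:9200"]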
modules:
system:
enabled: true
config:
- module: system
period: 10s
metricsets:
- cpu
- load
- memory
- network
- process
- process_summary
# - core
# - diskio
# - socket
processes: ['.*']
process.include_top_n:
by_cpu: 5 # include top 5 processes by CPU
by_memory: 5 # include top 5 processes by memory
- module: system
period: 1m
metricsets:
- filesystem
- fsstat
processors:
- drop_event.when.regexp:
system.filesystem.mount_point: '^/(sys|cgroup|proc|dev|etc|host|lib)($|/)'
kubernetes:
enabled: true
config:
- module: kubernetes
metricsets:
- node
- system
- pod
- container
- volume
period: 10s
host: ${NODE_NAME}
hosts: ["localhost:10255"]
# If using Red Hat OpenShift remove the previous hosts entry and
# uncomment these settings:
# hosts: ["https://${HOSTNAME}:10250"]
# bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
# ssl.certificate_authorities:
# - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
# If overrideModules is not empty, metricbeat chart's default modules won't be used at all.
overrideModules: {}
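# For example, a minimal sketch that keeps only the system module with a
# reduced set of metricsets:
# overrideModules:
#   system:
#     enabled: true
#     config:
#       - module: system
#         period: 30s
#         metricsets:
#           - cpu
#           - memory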
# The instance created by the deployment retrieves metrics that are unique to the whole cluster, like Kubernetes events or kube-state-metrics
deployment:
enabled: true
podAnnotations: []
priorityClassName: ""
tolerations: []
nodeSelector: {}
resources: {}
config:
metricbeat.config:
modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
processors:
- add_cloud_metadata:
output.file:
path: "/usr/share/metricbeat/data"
filename: metricbeat
rotate_every_kb: 10000
number_of_files: 5
# If overrideConfig is not empty, metricbeat chart's default config won't be used at all.
overrideConfig: {}
modules:
kubernetes:
enabled: true
config:
- module: kubernetes
metricsets:
- state_node
- state_deployment
- state_replicaset
- state_pod
- state_container
# Uncomment this to get k8s events:
# - event
period: 10s
hosts: ["kube-state-metrics:8080"]
# If overrideModules is not empty, metricbeat chart's default modules won't be used at all.
overrideModules: {}
# List of beat plugins
plugins: []
# - kinesis.so
# Additional environment variables
# extraEnv:
# - name: test1
# value: "test1"
# - name: test2
# value: "test2"
# Add additional volumes and mounts, for example to read other log files on the host
extraVolumes: []
# - hostPath:
# path: /var/log
# name: varlog
extraVolumeMounts: []
# - name: varlog
# mountPath: /host/var/log
# readOnly: true
extraSecrets: []
# - name: ca-cert
# data:
# ca.pem: |-
# -----BEGIN CERTIFICATE-----
# ...
# -----END CERTIFICATE-----
# - name: userdata
# data:
# id: userid
# pw: userpassword
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 200Mi
# requests:
# cpu: 100m
# memory: 100Mi
rbac:
# Specifies whether RBAC resources should be created
create: true
pspEnabled: false
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
|
traefik | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"traefik.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"traefik.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"traefik.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the block for the ProxyProtocol's Trusted IPs.\n*/}}\n{{- define \"traefik.trustedips\" -}}\n trustedIPs = [\n\t {{- range $idx, $ips := .Values.proxyProtocol.trustedIPs }}\n\t {{- if $idx }}, {{ end }}\n\t {{- $ips | quote }}\n\t {{- end -}}\n ]\n{{- end -}}\n\n{{/*\nCreate the block for the forwardedHeaders's Trusted IPs.\n*/}}\n{{- define \"traefik.forwardedHeadersTrustedIPs\" -}}\n trustedIPs = [\n\t {{- range $idx, $ips := .Values.forwardedHeaders.trustedIPs }}\n\t {{- if $idx }}, {{ end }}\n\t {{- $ips | quote }}\n\t {{- end -}}\n ]\n{{- end -}}\n\n{{/*\nCreate the block for whiteListSourceRange.\n*/}}\n{{- define \"traefik.whiteListSourceRange\" -}}\n whiteListSourceRange = [\n\t {{- range $idx, $ips := .Values.whiteListSourceRange }}\n\t {{- if $idx }}, {{ end }}\n\t {{- $ips | quote }}\n\t {{- end -}}\n ]\n{{- end -}}\n\n{{/*\nCreate the block for acme.domains.\n*/}}\n{{- define \"traefik.acme.domains\" -}}\n{{- range $idx, $value := .Values.acme.domains.domainsList }}\n {{- if $value.main }}\n [[acme.domains]]\n main = {{- range $mainIdx, $mainValue := $value }} {{ $mainValue | quote }}{{- end -}}\n {{- end -}}\n{{- if $value.sans }}\n sans = [\n {{- range $sansIdx, $domains := $value.sans }}\n\t\t\t {{- if $sansIdx }}, {{ end }}\n\t {{- $domains | quote }}\n {{- end -}}\n\t ]\n\t{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the block for acme.resolvers.\n*/}}\n{{- define \"traefik.acme.dnsResolvers\" -}}\n resolvers = [\n\t {{- range $idx, $ips := .Values.acme.resolvers }}\n\t {{- if $idx }},{{ end }}\n\t {{- $ips | quote }}\n\t {{- end -}}\n ]\n{{- end -}}\n\n{{/*\nCreate custom cipherSuites block\n*/}}\n{{- define \"traefik.ssl.cipherSuites\" -}}\n cipherSuites = [\n {{- range $idx, $cipher := .Values.ssl.cipherSuites }}\n {{- if $idx }},{{ end }}\n {{ $cipher | quote }}\n {{- end }}\n ]\n{{- end -}}\n\n{{/*\nCreate the block for RootCAs.\n*/}}\n{{- define \"traefik.rootCAs\" -}}\n rootCAs = [\n\t {{- range $idx, $ca := .Values.rootCAs }}\n\t {{- if $idx }}, {{ end }}\n\t {{- $ca | quote }}\n\t {{- end -}}\n ]\n{{- end -}}\n\n{{/*\nCreate the block for mTLS ClientCAs.\n*/}}\n{{- define \"traefik.ssl.mtls.clientCAs\" -}}\n files = [\n\t {{- range $idx, $_ := .Values.ssl.mtls.clientCaCerts }}\n\t {{- if $idx }}, {{ end }}\n\t {{- printf \"/mtls/clientCaCert-%d.crt\" $idx | quote }}\n\t {{- end -}}\n ]\n{{- end -}}\n\n{{/*\nHelper for containerPort 
(http)\n*/}}\n{{- define \"traefik.containerPort.http\" -}}\n\t{{- if .Values.useNonPriviledgedPorts -}}\n\t6080\n\t{{- else -}}\n\t80\n\t{{- end -}}\n{{- end -}}\n\n{{/*\nHelper for RBAC Scope\nIf Kubernetes namespace selection is defined and the (one) selected\nnamespace is the release namespace Cluster scope is unnecessary.\n*/}}\n{{- define \"traefik.rbac.scope\" -}}\n\t{{- if .Values.kubernetes -}}\n\t\t{{- if not (eq (.Values.kubernetes.namespaces | default (list) | toString) (list .Release.Namespace | toString)) -}}\n\t\tCluster\n\t\t{{- end -}}\n\t{{- else -}}\n\tCluster\n\t{{- end -}}\n{{- end -}}\n\n{{/*\nHelper for containerPort (https)\n*/}}\n{{- define \"traefik.containerPort.https\" -}}\n\t{{- if .Values.useNonPriviledgedPorts -}}\n\t6443\n\t{{- else -}}\n\t443\n\t{{- end -}}\n{{- end -}}\n",
"# acme-pvc.yaml\n{{- if and .Values.acme.enabled .Values.acme.persistence.enabled (not .Values.acme.persistence.existingClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n{{- if .Values.acme.persistence.annotations }}\n annotations:\n{{ toYaml .Values.acme.persistence.annotations | indent 4 }}\n{{- end }}\n name: {{ template \"traefik.fullname\" . }}-acme\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.acme.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.acme.persistence.size | quote }}\n{{- if .Values.acme.persistence.storageClass }}\n{{- if (eq \"-\" .Values.acme.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.acme.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# client-ca-configmap.yaml\n{{- if and .Values.ssl.enabled .Values.ssl.mtls }}\n{{- if .Values.ssl.mtls.enabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"traefik.fullname\" . }}-client-ca-certs\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ntype: Opaque\ndata:\n{{- range $idx, $caCert := .Values.ssl.mtls.clientCaCerts }}\n clientCaCert-{{ $idx }}.crt: {{ $caCert | quote }}\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# config-files.yaml\n{{- if .Values.configFiles }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"traefik.fullname\" . }}-configs\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ndata:\n{{- range $filename, $fileContents := .Values.configFiles }}\n {{ $filename }}: |-\n{{ $fileContents | indent 4 }}\n{{- end }}\n{{- end }}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"traefik.fullname\" . }}\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ndata:\n traefik.toml: |\n # traefik.toml\n {{- if .Values.debug.enabled }}\n debug = true\n {{- else }}\n logLevel = {{ .Values.logLevel | default \"info\" | quote }}\n {{- end }}\n {{- if .Values.maxIdleConnsPerHost }}\n maxIdleConnsPerHost = {{ .Values.maxIdleConnsPerHost }}\n {{- end }}\n {{- if .Values.sendAnonymousUsage }}\n sendAnonymousUsage = true\n {{- end }}\n {{- if .Values.rootCAs }}\n {{ template \"traefik.rootCAs\" . }}\n {{- end }}\n {{- if .Values.ssl.insecureSkipVerify }}\n InsecureSkipVerify = true\n {{- end }}\n {{- if .Values.ssl.enabled }}\n defaultEntryPoints = [\"http\",\"https\"]\n {{- else }}\n defaultEntryPoints = [\"http\", \"httpn\"]\n {{- end }}\n [entryPoints]\n [entryPoints.http]\n address = \":{{ template \"traefik.containerPort.http\" . }}\"\n compress = {{ .Values.gzip.enabled }}\n {{- if .Values.forwardAuth }}\n {{- if has \"http\" .Values.forwardAuth.entryPoints }}\n [entryPoints.http.auth.forward]\n address = {{ .Values.forwardAuth.address | quote }}\n trustForwardHeader = {{ .Values.forwardAuth.trustForwardHeader }}\n {{- end }}\n {{- end }}\n {{- if .Values.whiteListSourceRange }}\n {{ template \"traefik.whiteListSourceRange\" . }}\n {{- end }}\n {{- if .Values.proxyProtocol.enabled }}\n [entryPoints.http.proxyProtocol]\n {{ template \"traefik.trustedips\" . }}\n {{- end }}\n {{- if .Values.forwardedHeaders.enabled }}\n [entryPoints.http.forwardedHeaders]\n {{ template \"traefik.forwardedHeadersTrustedIPs\" . }}\n {{- end }}\n {{- if .Values.ssl.enforced }}\n [entryPoints.http.redirect]\n regex = \"^http://(.*)\"\n replacement = \"https://$1\"\n {{- if .Values.ssl.permanentRedirect }}\n permanent = true\n {{- end }}\n {{- end }}\n {{- if .Values.ssl.enabled }}\n [entryPoints.https]\n {{- if .Values.whiteListSourceRange }}\n {{ template \"traefik.whiteListSourceRange\" . }}\n {{- end }}\n address = \":{{ template \"traefik.containerPort.https\" . }}\"\n compress = {{ .Values.gzip.enabled }}\n {{- if .Values.forwardAuth }}\n {{- if has \"https\" .Values.forwardAuth.entryPoints }}\n [entryPoints.https.auth.forward]\n address = {{ .Values.forwardAuth.address | quote }}\n trustForwardHeader = {{ .Values.forwardAuth.trustForwardHeader }}\n {{- end }}\n {{- end }}\n {{- if .Values.proxyProtocol.enabled }}\n [entryPoints.https.proxyProtocol]\n {{ template \"traefik.trustedips\" . }}\n {{- end }}\n {{- if .Values.forwardedHeaders.enabled }}\n [entryPoints.https.forwardedHeaders]\n {{ template \"traefik.forwardedHeadersTrustedIPs\" . }}\n {{- end }}\n {{- if not .Values.ssl.upstream }}\n [entryPoints.https.tls]\n {{- if .Values.ssl.tlsMinVersion }}\n minVersion = {{ .Values.ssl.tlsMinVersion | quote }}\n {{- end }}\n {{- if .Values.ssl.cipherSuites }}\n {{ template \"traefik.ssl.cipherSuites\" . }}\n {{- end }}\n {{- if .Values.ssl.sniStrict }}\n sniStrict = true\n {{- end }}\n {{- if .Values.ssl.mtls }}\n {{- if .Values.ssl.mtls.enabled }}\n [entryPoints.https.tls.ClientCA]\n {{ template \"traefik.ssl.mtls.clientCAs\" . 
}}\n optional = {{ .Values.ssl.mtls.optional }}\n {{- end }}\n {{- end }}\n [[entryPoints.https.tls.certificates]]\n CertFile = \"/ssl/tls.crt\"\n KeyFile = \"/ssl/tls.key\"\n {{- if .Values.ssl.extraCerts }}\n {{- range $i, $cert := .Values.ssl.extraCerts }}\n [[entryPoints.https.tls.certificates]]\n CertFile = \"{{ $cert.certFile }}\"\n KeyFile = \"{{ $cert.keyFile }}\"\n {{- end }}\n {{- end }}\n {{- end }}\n {{- if .Values.ssl.auth }}\n {{- if .Values.ssl.auth.basic }}\n [entryPoints.https.auth]\n [entryPoints.https.auth.basic]\n users = [{{ range $key, $value := .Values.ssl.auth.basic }}\"{{ $key }}:{{ $value }}\",{{ end }}]\n {{- end }}\n {{- end }}\n {{- else }}\n [entryPoints.httpn]\n {{- if .Values.whiteListSourceRange }}\n {{ template \"traefik.whiteListSourceRange\" . }}\n {{- end }}\n address = \":8880\"\n compress = {{ .Values.gzip.enabled }}\n {{- if .Values.proxyProtocol.enabled }}\n [entryPoints.httpn.proxyProtocol]\n {{ template \"traefik.trustedips\" . }}\n {{- if .Values.forwardAuth }}\n {{- if has \"httpn\" .Values.forwardAuth.entryPoints }}\n [entryPoints.httpn.auth.forward]\n address = {{ .Values.forwardAuth.address | quote }}\n trustForwardHeader = {{ .Values.forwardAuth.trustForwardHeader }}\n {{- end }}\n {{- end }}\n {{- end }}\n {{- if .Values.forwardedHeaders.enabled }}\n [entryPoints.httpn.forwardedHeaders]\n {{ template \"traefik.forwardedHeadersTrustedIPs\" . }}\n {{- end }}\n {{- end }}\n {{- if .Values.dashboard.enabled }}\n [entryPoints.traefik]\n address = \":8080\"\n {{- if .Values.dashboard.auth }}\n {{- if .Values.dashboard.auth.basic }}\n [entryPoints.traefik.auth]\n [entryPoints.traefik.auth.basic]\n users = [{{ range $key, $value := .Values.dashboard.auth.basic }}\"{{ $key }}:{{ $value }}\",{{ end }}]\n {{- end }}\n {{- end }}\n {{- end }}\n {{- if .Values.metrics.prometheus.enabled }}\n [entryPoints.prometheus]\n address = \":9100\"\n {{- end }}\n [ping]\n entryPoint = \"{{ .Values.pingEntryPoint | default \"http\" }}\"\n [kubernetes]\n {{- if .Values.kubernetes}}\n {{- if .Values.kubernetes.endpoint }}\n endpoint = {{ .Values.kubernetes.endpoint | quote }}\n {{- end}}\n {{- if .Values.kubernetes.namespaces }}\n namespaces = [\n {{- range $idx, $element := .Values.kubernetes.namespaces }}\n {{- if $idx }}, {{ end }}\n {{- $element | quote }}\n {{- end -}}\n ]\n {{- end}}\n {{- if .Values.kubernetes.labelSelector }}\n labelselector = {{ .Values.kubernetes.labelSelector | quote }}\n {{- end}}\n {{- if .Values.kubernetes.ingressClass }}\n ingressClass = {{ .Values.kubernetes.ingressClass | quote }}\n {{- end}}\n {{- if .Values.kubernetes.ingressEndpoint }}\n [kubernetes.ingressEndpoint]\n {{- if .Values.kubernetes.ingressEndpoint.hostname }}\n hostname = {{ .Values.kubernetes.ingressEndpoint.hostname | quote }}\n {{- end}}\n {{- if .Values.kubernetes.ingressEndpoint.ip }}\n ip = {{ .Values.kubernetes.ingressEndpoint.ip | quote }}\n {{- end}}\n {{- if .Values.kubernetes.ingressEndpoint.publishedService }}\n publishedService = {{ .Values.kubernetes.ingressEndpoint.publishedService | quote }}\n {{- else if .Values.kubernetes.ingressEndpoint.useDefaultPublishedService }}\n publishedService = \"{{ .Release.Namespace }}/{{ template \"traefik.fullname\" . 
}}\"\n {{- end}}\n {{- end}}\n {{- end}}\n {{- if .Values.fileBackend }}\n [file]\n {{- .Values.fileBackend | nindent 4 }}\n {{- end }}\n {{- if .Values.traefikLogFormat }}\n [traefikLog]\n format = {{ .Values.traefikLogFormat | quote }}\n {{- end }}\n {{- if .Values.accessLogs.enabled }}\n [accessLog]\n {{- if .Values.accessLogs.filePath }}\n filePath = {{ .Values.accessLogs.filePath | quote }}\n {{- end}}\n format = {{ .Values.accessLogs.format | quote }}\n [accessLog.fields]\n defaultMode = {{ .Values.accessLogs.fields.defaultMode | quote }}\n {{- if .Values.accessLogs.filters }}\n [accessLog.filters]\n {{- if .Values.accessLogs.filters.statusCodes }}\n statusCodes = {{ toJson .Values.accessLogs.filters.statusCodes }}\n {{- end }}\n {{- if .Values.accessLogs.filters.retryAttempts }}\n retryAttempts = {{ .Values.accessLogs.filters.retryAttempts }}\n {{- end }}\n {{- if .Values.accessLogs.filters.minDuration }}\n minDuration = {{ .Values.accessLogs.filters.minDuration | quote }}\n {{- end }}\n {{- end }}\n [accessLog.fields.names]\n {{- range $field, $action := .Values.accessLogs.fields.names }}\n {{ printf \"\\\"%s\\\" = \\\"%s\\\"\" $field $action }}\n {{- end}}\n [accessLog.fields.headers]\n defaultMode = {{ .Values.accessLogs.fields.headers.defaultMode | quote }}\n [accessLog.fields.headers.names]\n {{- range $header, $action := .Values.accessLogs.fields.headers.names }}\n {{ printf \"\\\"%s\\\" = \\\"%s\\\"\" $header $action }}\n {{- end}}\n {{- end}}\n {{- if .Values.kvprovider.etcd }}\n [etcd]\n endpoint = {{ .Values.kvprovider.etcd.endpoint | quote }}\n watch = {{ .Values.kvprovider.etcd.watch }}\n prefix = {{ .Values.kvprovider.etcd.prefix | quote }}\n useAPIV3 = {{ .Values.kvprovider.etcd.useAPIV3 }}\n {{- if .Values.kvprovider.etcd.username }}username = {{ .Values.kvprovider.etcd.username }}{{- end }}\n {{- if .Values.kvprovider.etcd.password }}password = {{ .Values.kvprovider.etcd.password }}{{- end }}\n {{- if .Values.kvprovider.etcd.tls }}\n [etcd.tls]\n {{ range $key, $value := .Values.kvprovider.etcd.tls }}\n {{ $key }} = {{ $value | quote }}\n {{ end }}\n {{- end }}\n {{- end }}\n {{- if .Values.kvprovider.consul }}\n [consul]\n endpoint = {{ .Values.kvprovider.consul.endpoint | quote }}\n watch = {{ .Values.kvprovider.consul.watch }}\n prefix = {{ .Values.kvprovider.consul.prefix | quote }}\n {{- if .Values.kvprovider.consul.username }}username = {{ .Values.kvprovider.consul.username | quote }}{{- end }}\n {{- if .Values.kvprovider.consul.password }}password = {{ .Values.kvprovider.consul.password | quote }}{{- end }}\n {{- if .Values.kvprovider.consul.tls }}\n [consul.tls]\n {{ range $key, $value := .Values.kvprovider.consul.tls }}\n {{ $key }} = {{ $value | quote }}\n {{ end }}\n {{- end }}\n {{- end }}\n {{- if .Values.kvprovider.boltdb }}\n [boltdb]\n endpoint = {{ .Values.kvprovider.boltdb.endpoint | quote }}\n watch = {{ .Values.kvprovider.boltdb.watch }}\n prefix = {{ .Values.kvprovider.boltdb.prefix | quote }}\n {{- if .Values.kvprovider.boltdb.username }}username = {{ .Values.kvprovider.boltdb.username }}{{- end }}\n {{- if .Values.kvprovider.boltdb.password }}password = {{ .Values.kvprovider.boltdb.password }}{{- end }}\n {{- if .Values.kvprovider.boltdb.tls }}\n [boltdb.tls]\n {{ range $key, $value := .Values.kvprovider.boltdb.tls }}\n {{ $key }} = {{ $value }}\n {{ end }}\n {{- end }}\n {{- end }}\n {{- if .Values.kvprovider.zookeeper }}\n [zookeeper]\n endpoint = {{ .Values.kvprovider.zookeeper.endpoint | quote }}\n watch = {{ 
.Values.kvprovider.zookeeper.watch }}\n prefix = {{ .Values.kvprovider.zookeeper.prefix | quote }}\n {{- if .Values.kvprovider.zookeeper.username }}username = {{ .Values.kvprovider.zookeeper.username }}{{- end }}\n {{- if .Values.kvprovider.zookeeper.password }}password = {{ .Values.kvprovider.zookeeper.password }}{{- end }}\n {{- if .Values.kvprovider.zookeeper.tls }}\n [zookeeper.tls]\n {{ range $key, $value := .Values.kvprovider.zookeeper.tls }}\n {{ $key }} = {{ $value }}\n {{ end }}\n {{- end }}\n {{- end }}\n {{- if .Values.acme.enabled }}\n [acme]\n KeyType = {{ .Values.acme.keyType | quote }}\n email = {{ .Values.acme.email | quote }}\n {{- if .Values.kvprovider.storeAcme }}\n storage = \"{{ .Values.kvprovider.acmeStorageLocation }}\"\n {{- if .Values.kvprovider.importAcme }}\n storageFile = \"/acme/acme.json\"\n {{- end }}\n {{- if or (.Values.kvprovider.importAcme) (eq .Release.Revision 1) }}\n OverrideCertificates = true\n {{- end }}\n {{- else }}\n storage = \"/acme/acme.json\"\n {{- end }}\n entryPoint = \"https\"\n onHostRule = {{ .Values.acme.onHostRule }}\n {{- if .Values.acme.caServer }}\n caServer = {{ .Values.acme.caServer | quote }}\n {{- else }}\n {{- if eq (toString (.Values.acme.staging)) \"true\" }}\n caServer = \"https://acme-staging-v02.api.letsencrypt.org/directory\"\n {{- end }}\n {{- end -}}\n {{- if .Values.acme.logging }}\n acmeLogging = true\n {{- end }}\n {{- if eq .Values.acme.challengeType \"dns-01\" }}\n [acme.dnsChallenge]\n provider = {{ .Values.acme.dnsProvider.name | quote }}\n {{- if .Values.acme.resolvers }}\n {{ template \"traefik.acme.dnsResolvers\" . }}\n {{- end }}\n {{- if .Values.acme.delayBeforeCheck }}\n delayBeforeCheck = {{ .Values.acme.delayBeforeCheck }}\n {{- end }}\n {{- else if eq .Values.acme.challengeType \"http-01\" }}\n [acme.httpChallenge]\n entryPoint = {{ .Values.acme.httpChallenge.entrypoint | quote }}\n {{- else if eq .Values.acme.challengeType \"tls-alpn-01\" }}\n [acme.tlsChallenge]\n {{- end }}\n {{- if .Values.acme.domains.enabled }}\n {{- if .Values.acme.domains.domainsList }}{{ template \"traefik.acme.domains\" . 
}}{{- end }}\n {{- end }}\n {{- end }}\n {{- if .Values.dashboard.enabled }}\n [api]\n entryPoint = \"traefik\"\n dashboard = true\n {{- if .Values.dashboard.statistics }}\n {{- if .Values.dashboard.statistics.recentErrors }}\n [api.statistics]\n recentErrors = {{ .Values.dashboard.statistics.recentErrors }}\n {{- end }}\n {{- end }}\n {{- end }}\n {{- if or .Values.metrics.prometheus.enabled .Values.metrics.statsd.enabled .Values.metrics.datadog.enabled }}\n [metrics]\n {{- end }}\n {{- if .Values.metrics.prometheus.enabled }}\n [metrics.prometheus]\n entryPoint = \"prometheus\"\n {{- if .Values.metrics.prometheus.buckets }}\n buckets = [\n {{- range $idx, $element := .Values.metrics.prometheus.buckets }}\n {{- if $idx }},{{ end }}\n {{- printf \"%f\" $element }}\n {{- end -}}\n ]\n {{- end -}}\n {{- end }}\n {{- if .Values.metrics.datadog.enabled }}\n [metrics.datadog]\n {{- if .Values.metrics.datadog.address }}\n address = {{ .Values.metrics.datadog.address | quote }}\n {{- end}}\n {{- if .Values.metrics.datadog.pushinterval }}\n pushinterval = {{ .Values.metrics.datadog.pushinterval | quote }}\n {{- end}}\n {{- end }}\n {{- if .Values.metrics.statsd.enabled }}\n [metrics.statsd]\n {{- if .Values.metrics.statsd.address }}\n address = {{ .Values.metrics.statsd.address | quote }}\n {{- end}}\n {{- if .Values.metrics.statsd.pushinterval }}\n pushinterval = {{ .Values.metrics.statsd.pushinterval | quote }}\n {{- end}}\n {{- end }}\n {{- if .Values.tracing.enabled }}\n [tracing]\n backend = {{ .Values.tracing.backend | quote }}\n serviceName = {{ .Values.tracing.serviceName | quote}}\n {{- if eq .Values.tracing.backend \"jaeger\" }}\n [tracing.jaeger]\n {{- if .Values.tracing.jaeger.localAgentHostPort }}\n localAgentHostPort = {{ .Values.tracing.jaeger.localAgentHostPort | quote }}\n {{- end }}\n {{- if .Values.tracing.jaeger.samplingServerUrl }}\n samplingServerUrl = {{ .Values.tracing.jaeger.samplingServerUrl | quote }}\n {{- end }}\n {{- if .Values.tracing.jaeger.samplingType }}\n samplingType = {{ .Values.tracing.jaeger.samplingType | quote }}\n {{- end }}\n {{- if ne (.Values.tracing.jaeger.samplingParam | quote) \"\" }}\n samplingParam = {{ .Values.tracing.jaeger.samplingParam }}\n {{- end }}\n {{- end }}\n {{- if eq .Values.tracing.backend \"zipkin\" }}\n [tracing.zipkin]\n {{- if .Values.tracing.zipkin.httpEndpoint }}\n httpEndpoint = {{ .Values.tracing.zipkin.httpEndpoint | quote }}\n {{- end }}\n {{- if ne (.Values.tracing.zipkin.debug | quote) \"\" }}\n debug = {{ .Values.tracing.zipkin.debug }}\n {{- end }}\n {{- if ne (.Values.tracing.zipkin.sameSpan | quote) \"\" }}\n sameSpan = {{ .Values.tracing.zipkin.sameSpan }}\n {{- end }}\n {{- if ne (.Values.tracing.zipkin.id128bit | quote) \"\" }}\n id128bit = {{ .Values.tracing.zipkin.id128bit }}\n {{- end }}\n {{- end }}\n {{- if eq .Values.tracing.backend \"datadog\" }}\n [tracing.datadog]\n {{- if .Values.tracing.datadog.localAgentHostPort }}\n localAgentHostPort = {{ .Values.tracing.datadog.localAgentHostPort | quote }}\n {{- end }}\n {{- if ne (.Values.tracing.datadog.debug | quote) \"\" }}\n debug = {{ .Values.tracing.datadog.debug }}\n {{- end }}\n {{- if ne (.Values.tracing.datadog.globalTag | quote) \"\" }}\n globalTag = {{ .Values.tracing.datadog.globalTag | quote }}\n {{- end }}\n {{- end }}\n {{- end }}\n {{- if .Values.timeouts }}\n {{- if .Values.timeouts.responding }}\n [respondingTimeouts]\n {{- if .Values.timeouts.responding.readTimeout }}\n readTimeout = {{ .Values.timeouts.responding.readTimeout | quote }}\n {{- end 
}}\n {{- if .Values.timeouts.responding.writeTimeout }}\n writeTimeout = {{ .Values.timeouts.responding.writeTimeout | quote }}\n {{- end }}\n {{- if .Values.timeouts.responding.idleTimeout }}\n idleTimeout = {{ .Values.timeouts.responding.idleTimeout | quote }}\n {{- end }}\n {{- end }}\n {{- if .Values.timeouts.forwarding }}\n [forwardingTimeouts]\n {{- if .Values.timeouts.forwarding.dialTimeout }}\n dialTimeout = {{ .Values.timeouts.forwarding.dialTimeout | quote }}\n {{- end }}\n {{- if .Values.timeouts.forwarding.responseHeaderTimeout }}\n responseHeaderTimeout = {{ .Values.timeouts.forwarding.responseHeaderTimeout | quote }}\n {{- end }}\n {{- end }}\n {{- end }}\n",
"# dashboard-ingress.yaml\n{{- if .Values.dashboard.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"traefik.fullname\" . }}-dashboard\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n {{- if .Values.dashboard.ingress }}\n {{- range $key, $value := .Values.dashboard.ingress.labels }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\n annotations:\n {{- if .Values.dashboard.ingress }}\n {{- range $key, $value := .Values.dashboard.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\nspec:\n rules:\n - host: {{ .Values.dashboard.domain }}\n http:\n paths:\n - backend:\n serviceName: {{ template \"traefik.fullname\" . }}-dashboard\n servicePort: dashboard-http\n {{- if .Values.dashboard.ingress.tls }}\n tls:\n{{ toYaml .Values.dashboard.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end }}\n",
"# dashboard-service.yaml\n{{- if .Values.dashboard.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"traefik.fullname\" . }}-dashboard\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n annotations:\n {{- if .Values.dashboard.service }}\n {{- range $key, $value := .Values.dashboard.service.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\nspec:\n type: {{ .Values.dashboard.serviceType | default (\"ClusterIP\") }}\n selector:\n app: {{ template \"traefik.name\" . }}\n release: {{ .Release.Name }}\n ports:\n - name: dashboard-http\n port: 80\n targetPort: 8080\n{{- end }}\n",
"# default-cert-secret.yaml\n{{- if .Values.ssl.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"traefik.fullname\" . }}-default-cert\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ntype: Opaque\ndata:\n{{- if .Values.ssl.generateTLS }}\n {{- $ca := genCA \"default-ca\" 365 }}\n {{- $cn := default \"example.com\" .Values.ssl.defaultCN }}\n {{- $server := genSignedCert $cn ( default nil .Values.ssl.defaultIPList ) ( default nil .Values.ssl.defaultSANList ) 365 $ca }}\n tls.crt: {{ $server.Cert | b64enc }}\n tls.key: {{ $server.Key | b64enc }}\n{{- else }}\n tls.crt: {{ .Values.ssl.defaultCert }}\n tls.key: {{ .Values.ssl.defaultKey }}\n{{- end }}\n{{- end }}\n",
"# deployment.yaml\n{{- if semverCompare \"^1.9-0\" .Capabilities.KubeVersion.GitVersion }}\napiVersion: apps/v1\n{{- else }}\napiVersion: apps/v1beta1\n{{- end }}\nkind: Deployment\nmetadata:\n name: {{ template \"traefik.fullname\" . }}\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n {{- if .Values.deployment.labels }}\n {{- toYaml .Values.deployment.labels | nindent 4 }}\n {{- end }}\n {{- with .Values.deployment.annotations }}\n annotations:\n {{- toYaml . | nindent 4 }}\n {{- end }}\nspec:\n{{- if not .Values.autoscaling }}\n replicas: {{ default 1 .Values.replicas }}\n{{- end }}\n selector:\n matchLabels:\n app: {{ template \"traefik.name\" . }}\n release: {{ .Release.Name }}\n {{- if .Values.deploymentStrategy }}\n strategy:\n{{ toYaml .Values.deploymentStrategy | indent 4 }}\n {{- end }}\n template:\n metadata:\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n {{- if and (.Values.tolerations) (semverCompare \"<1.6-0\" .Capabilities.KubeVersion.GitVersion) }}\n scheduler.alpha.kubernetes.io/tolerations: '{{ toJson .Values.tolerations }}'\n {{- end }}\n {{- range $key, $value := .Values.deployment.podAnnotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n {{- if .Values.deployment.podLabels }}\n{{ toYaml .Values.deployment.podLabels | indent 8 }}\n {{- end }}\n spec:\n {{- if .Values.podSecurityContext }}\n securityContext:\n{{ toYaml .Values.podSecurityContext | indent 8 }}\n {{- end }}\n {{- if .Values.rbac.enabled }}\n serviceAccountName: {{ template \"traefik.fullname\" . }}\n {{- else }}\n serviceAccountName: default\n {{- end }}\n terminationGracePeriodSeconds: 60\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.priorityClassName }}\n priorityClassName: {{ .Values.priorityClassName | quote }}\n {{- end }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n {{- range .Values.imagePullSecrets }}\n - name: {{ . }}\n {{- end }}\n {{- end }}\n containers:\n - image: {{ .Values.image }}:{{ .Values.imageTag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n name: {{ template \"traefik.fullname\" . 
}}\n resources:\n {{- if or .Values.cpuRequest .Values.memoryRequest .Values.cpuLimit .Values.memoryLimit }}\n requests:\n cpu: {{ .Values.cpuRequest | quote }}\n memory: {{ .Values.memoryRequest | quote }}\n limits:\n cpu: {{ .Values.cpuLimit | quote }}\n memory: {{ .Values.memoryLimit | quote }}\n {{- else }}\n{{ toYaml .Values.resources | indent 10 }}\n {{- end }}\n readinessProbe:\n httpGet:\n path: /ping\n port: \"{{ .Values.pingEntryPoint | default \"http\" }}\"\n failureThreshold: 1\n initialDelaySeconds: 10\n periodSeconds: 10\n successThreshold: 1\n timeoutSeconds: 2\n livenessProbe:\n httpGet:\n path: /ping\n port: \"{{ .Values.pingEntryPoint | default \"http\" }}\"\n failureThreshold: 3\n initialDelaySeconds: 10\n periodSeconds: 10\n successThreshold: 1\n timeoutSeconds: 2\n {{- if or (and .Values.acme.enabled (eq .Values.acme.challengeType \"dns-01\") .Values.acme.dnsProvider.name) .Values.env }}\n env:\n {{- range $k, $v := (index .Values.acme.dnsProvider .Values.acme.dnsProvider.name) }}\n {{- if or $v $.Values.acme.dnsProvider.existingSecretName }}\n - name: {{ $k }}\n valueFrom:\n secretKeyRef:\n {{- if $.Values.acme.dnsProvider.existingSecretName }}\n name: {{ $.Values.acme.dnsProvider.existingSecretName }}\n {{- else }}\n name: {{ template \"traefik.fullname\" $ }}-dnsprovider-config\n {{- end }}\n key: {{ $k }}\n {{- end }}\n {{- end }}\n {{- if .Values.env }}\n{{ toYaml .Values.env | indent 10 }}\n {{- end }}\n {{- end }}\n volumeMounts:\n {{- if .Values.extraVolumeMounts }}{{ toYaml .Values.extraVolumeMounts | trim | nindent 8 }}{{ end }}\n - mountPath: /config\n name: config\n {{- if .Values.ssl.enabled }}\n {{- if not .Values.ssl.upstream }}\n - mountPath: /ssl\n name: ssl\n {{- end }}\n {{- if .Values.ssl.mtls }}\n {{- if .Values.ssl.mtls.enabled }}\n - mountPath: /mtls\n name: mtls\n {{- end }}\n {{- end }}\n {{- end }}\n {{- if and (.Values.acme.enabled) (not .Values.kvprovider.storeAcme) }}\n - mountPath: /acme\n name: acme\n {{- end }}\n {{- if .Values.configFiles }}\n - mountPath: /configs\n name: {{ template \"traefik.fullname\" $ }}-configs\n {{ end }}\n {{- if .Values.secretFiles }}\n - mountPath: /secrets\n name: {{ template \"traefik.fullname\" $ }}-secrets\n {{- end }}\n ports:\n - name: http\n containerPort: {{ template \"traefik.containerPort.http\" . }}\n {{- if .Values.deployment.hostPort.httpEnabled }}\n hostPort: {{ default 80 .Values.deployment.hostPort.httpPort }}\n {{- end }}\n protocol: TCP\n - name: httpn\n containerPort: 8880\n protocol: TCP\n - name: https\n containerPort: {{ template \"traefik.containerPort.https\" . }}\n {{- if .Values.deployment.hostPort.httpsEnabled }}\n hostPort: {{ default 443 .Values.deployment.hostPort.httpsPort }}\n {{- end }}\n protocol: TCP\n {{- if or .Values.dashboard.enabled .Values.metrics.prometheus.enabled }}\n - name: dash\n containerPort: 8080\n {{- if .Values.deployment.hostPort.dashboardEnabled }}\n hostPort: {{ default 8080 .Values.deployment.hostPort.dashboardPort }}\n {{- end }}\n protocol: TCP\n {{- end }}\n {{- if .Values.metrics.prometheus.enabled }}\n - name: metrics\n containerPort: 9100\n protocol: TCP\n {{- end }}\n args:\n - --configfile=/config/traefik.toml\n {{- range .Values.startupArguments }}\n - {{ . 
}}\n {{- end }}\n {{- if .Values.containerSecurityContext }}\n securityContext:\n{{ toYaml .Values.containerSecurityContext | indent 10 }}\n {{- end }}\n\n volumes:\n {{- if .Values.extraVolumes }}{{ toYaml .Values.extraVolumes | trim | nindent 6 }}{{ end }}\n - name: config\n configMap:\n name: {{ template \"traefik.fullname\" . }}\n {{- if .Values.ssl.enabled }}\n {{- if not .Values.ssl.upstream }}\n - name: ssl\n secret:\n secretName: {{ template \"traefik.fullname\" . }}-default-cert\n {{- end }}\n {{- if .Values.ssl.mtls }}\n {{- if .Values.ssl.mtls.enabled }}\n - name: mtls\n configMap:\n name: {{ template \"traefik.fullname\" . }}-client-ca-certs\n {{- end }}\n {{- end }}\n {{- end }}\n {{- if and (.Values.acme.enabled) (not .Values.kvprovider.storeAcme) }}\n - name: acme\n {{- if .Values.acme.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.acme.persistence.existingClaim | default (printf \"%s-acme\" (include \"traefik.fullname\" .)) }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- end }}\n {{- if .Values.configFiles }}\n - name: {{ template \"traefik.fullname\" $ }}-configs\n configMap:\n name: {{ template \"traefik.fullname\" $ }}-configs\n {{ end }}\n {{- if .Values.secretFiles }}\n - name: {{ template \"traefik.fullname\" $ }}-secrets\n secret:\n secretName: {{ template \"traefik.fullname\" $ }}-secrets\n {{- end }}\n {{- if and (.Values.tolerations) (semverCompare \"^1.6-0\" .Capabilities.KubeVersion.GitVersion) }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 6 }}\n {{- end }}\n",
"# dns-provider-secret.yaml\n{{- if and .Values.acme.enabled (eq .Values.acme.challengeType \"dns-01\") .Values.acme.dnsProvider.name (not .Values.acme.dnsProvider.existingSecretName) }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"traefik.fullname\" . }}-dnsprovider-config\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ntype: Opaque\ndata:\n{{- range $k, $v := (index .Values.acme.dnsProvider .Values.acme.dnsProvider.name) }}\n {{- if $v }}\n {{ $k }}: {{ $v | b64enc | quote }}\n {{- end }}\n{{- end }}\n{{- end }}\n",
"# hpa.yaml\n{{- if .Values.autoscaling }}\napiVersion: autoscaling/v2beta1\nkind: HorizontalPodAutoscaler\nmetadata:\n name: {{ template \"traefik.fullname\" . }}\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n scaleTargetRef:\n apiVersion: apps/v1\n kind: Deployment\n name: {{ template \"traefik.fullname\" . }}\n minReplicas: {{ .Values.autoscaling.minReplicas }}\n maxReplicas: {{ .Values.autoscaling.maxReplicas }}\n metrics:\n{{ toYaml .Values.autoscaling.metrics | indent 4 }}\n{{- end }}\n",
"# poddisruptionbudget.yaml\n{{- if .Values.podDisruptionBudget -}}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"traefik.fullname\" . }}\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"traefik.name\" . }}\n release: {{ .Release.Name }}\n{{ toYaml .Values.podDisruptionBudget | indent 2 }}\n{{- end -}}\n",
"# prometheus-service.yaml\n{{- if .Values.metrics.prometheus.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n {{- if .Values.metrics.prometheus.service.name }}\n name: {{ .Values.metrics.prometheus.service.name }}\n {{- else }}\n name: {{ template \"traefik.fullname\" . }}-prometheus\n {{- end }}\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n {{- with .Values.metrics.prometheus.service.annotations }}\n annotations:\n {{- toYaml . | nindent 4 }}\n {{- end }}\nspec:\n type: {{ .Values.metrics.prometheus.service.type }}\n {{- if .Values.metrics.prometheus.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.metrics.prometheus.service.loadBalancerIP }}\n {{- end }}\n {{- if .Values.metrics.prometheus.service.externalIP }}\n externalIPs:\n - {{ .Values.metrics.prometheus.service.externalIP }}\n {{- end }}\n {{- if .Values.metrics.prometheus.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n {{- range $cidr := .Values.metrics.prometheus.service.loadBalancerSourceRanges }}\n - {{ $cidr }}\n {{- end }}\n {{- end }}\n {{- if .Values.metrics.prometheus.service.externalTrafficPolicy }}\n externalTrafficPolicy: {{ .Values.metrics.prometheus.service.externalTrafficPolicy }}\n {{- end }}\n selector:\n app: {{ template \"traefik.name\" . }}\n release: {{ .Release.Name }}\n ports:\n - port: {{ .Values.metrics.prometheus.service.port }}\n name: metrics\n targetPort: metrics\n {{- if (and (eq .Values.metrics.prometheus.service.type \"NodePort\") (not (empty .Values.metrics.prometheus.service.nodePorts )))}}\n nodePort: {{ .Values.metrics.prometheus.service.nodePort }}\n {{- end }}\n{{- end }}\n",
"# rbac.yaml\n{{- if .Values.rbac.enabled }}\nkind: ServiceAccount\napiVersion: v1\nmetadata:\n name: {{ template \"traefik.fullname\" . }}\n---\nkind: {{ include \"traefik.rbac.scope\" . | printf \"%sRole\" }}\n{{- if semverCompare \"^1.8-0\" .Capabilities.KubeVersion.GitVersion }}\napiVersion: rbac.authorization.k8s.io/v1\n{{- else }}\napiVersion: rbac.authorization.k8s.io/v1beta1\n{{- end }}\nmetadata:\n name: {{ template \"traefik.fullname\" . }}\nrules:\n - apiGroups:\n - \"\"\n resources:\n - pods\n - services\n - endpoints\n - secrets\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - extensions\n resources:\n - ingresses\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - extensions\n resources:\n - ingresses/status\n verbs:\n - update\n---\nkind: {{ include \"traefik.rbac.scope\" . | printf \"%sRoleBinding\" }}\n{{- if semverCompare \"^1.8-0\" .Capabilities.KubeVersion.GitVersion }}\napiVersion: rbac.authorization.k8s.io/v1\n{{- else }}\napiVersion: rbac.authorization.k8s.io/v1beta1\n{{- end }}\nmetadata:\n name: {{ template \"traefik.fullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: {{ include \"traefik.rbac.scope\" . | printf \"%sRole\" }}\n name: {{ template \"traefik.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"traefik.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}\n",
"# secret-files.yaml\n{{- if .Values.secretFiles }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"traefik.fullname\" . }}-secrets\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n{{- range $filename, $fileContents := .Values.secretFiles }}\n {{ $filename }}: {{ $fileContents | b64enc | quote }}\n{{- end }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"traefik.fullname\" . }}\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n {{- if .Values.service }}\n {{- range $key, $value := .Values.service.labels }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\n annotations:\n {{- if .Values.service }}\n {{- range $key, $value := .Values.service.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\nspec:\n type: {{ .Values.serviceType }}\n {{- if .Values.loadBalancerIP }}\n loadBalancerIP: {{ .Values.loadBalancerIP }}\n {{- end }}\n {{- if .Values.externalIP }}\n externalIPs:\n - {{ .Values.externalIP }}\n {{- end }}\n {{- if .Values.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n {{- range $cidr := .Values.loadBalancerSourceRanges }}\n - {{ $cidr }}\n {{- end }}\n {{- end }}\n {{- if .Values.externalTrafficPolicy }}\n externalTrafficPolicy: {{ .Values.externalTrafficPolicy }}\n {{- end }}\n selector:\n app: {{ template \"traefik.name\" . }}\n release: {{ .Release.Name }}\n ports:\n - port: 443\n name: https\n {{- if (and (eq .Values.serviceType \"NodePort\") (not (empty .Values.service.nodePorts.https)))}}\n nodePort: {{ .Values.service.nodePorts.https }}\n {{- end }}\n {{- if not .Values.ssl.enabled }}\n targetPort: httpn\n {{- else }}\n targetPort: https\n {{- end }}\n - port: 80\n name: http\n {{- if (and (eq .Values.serviceType \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n targetPort: http\n",
"# servicemonitor.yaml\n{{- if ( .Values.metrics.serviceMonitor ) }}\n{{- if and ( .Values.metrics.serviceMonitor.enabled ) ( .Values.metrics.prometheus.enabled ) }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- if .Values.metrics.serviceMonitor.labels }}\n{{ toYaml .Values.metrics.serviceMonitor.labels | indent 4}}\n{{- end }}\n name: {{ template \"traefik.fullname\" . }}-prometheus-exporter\n{{- if .Values.metrics.serviceMonitor.namespace }}\n namespace: {{ .Values.metrics.serviceMonitor.namespace }}\n{{- end }}\nspec:\n endpoints:\n - port: metrics\n path: /metrics\n{{- if .Values.metrics.serviceMonitor.interval }}\n interval: {{ .Values.metrics.serviceMonitor.interval }}\n{{- end }}\n jobLabel: {{ template \"traefik.fullname\" . }}-prometheus-exporter\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n selector:\n matchLabels:\n app: {{ template \"traefik.name\" . }}\n release: {{ .Release.Name | quote }}\n{{- end }}\n{{- end }}\n",
"# storeconfig-job.yaml\n{{- if .Values.kvprovider.storeAcme }}\napiVersion: batch/v1\nkind: Job\nmetadata:\n name: \"storeconfig-job-{{ .Release.Revision }}\"\n annotations:\n \"helm.sh/hook\": post-install,post-upgrade\n \"helm.sh/hook-delete-policy\": \"hook-succeeded,before-hook-creation\"\n labels:\n chart: {{ template \"traefik.chart\" . }}\n app: {{ template \"traefik.name\" . }}\nspec:\n backoffLimit: 5\n template:\n metadata:\n name: \"storeconfig-job-{{ .Release.Revision }}\"\n labels:\n app: {{ template \"traefik.name\" . }}\n chart: {{ template \"traefik.chart\" . }}\n spec:\n restartPolicy: Never\n containers:\n - name: storeconfig-job\n image: \"{{ .Values.image }}:{{ .Values.imageTag }}\"\n args:\n - storeconfig\n - --configfile=/config/traefik.toml\n {{- range .Values.startupArguments }}\n - {{ . }}\n {{- end }}\n volumeMounts:\n - mountPath: /config\n name: config\n - mountPath: /acme\n name: acme\n {{- if .Values.env }}\n env:\n{{ toYaml .Values.env | indent 12 }}\n {{- end }}\n {{- if .Values.containerSecurityContext }}\n securityContext:\n{{ toYaml .Values.containerSecurityContext | indent 10 }}\n {{- end }}\n volumes:\n - name: config\n configMap:\n name: {{ template \"traefik.fullname\" . }}\n - name: acme\n {{- if .Values.acme.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.acme.persistence.existingClaim | default (printf \"%s-acme\" (include \"traefik.fullname\" .)) }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n{{- end }}\n",
"# test-configmap.yaml\n{{- if .Values.testFramework.enabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"traefik.fullname\" . }}-test\n labels:\n app: {{ template \"traefik.fullname\" . }}\n chart: {{ template \"traefik.chart\" . }}\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\ndata:\n run.sh: |-\n @test \"Test Access\" {\n curl -D - http://{{ template \"traefik.fullname\" . }}/\n }\n{{- end }}\n",
"# test.yaml\n{{- if .Values.testFramework.enabled }}\napiVersion: v1\nkind: Pod\nmetadata:\n name: {{ template \"traefik.fullname\" . }}-test\n labels:\n app: {{ template \"traefik.fullname\" . }}\n chart: {{ template \"traefik.chart\" . }}\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\n annotations:\n \"helm.sh/hook\": test-success\nspec:\n initContainers:\n - name: test-framework\n image: \"{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}\"\n command:\n - \"bash\"\n - \"-c\"\n - |\n set -ex\n # copy bats to tools dir\n cp -R /usr/local/libexec/ /tools/bats/\n volumeMounts:\n - mountPath: /tools\n name: tools\n containers:\n - name: {{ .Release.Name }}-test\n image: \"{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}\"\n command: [\"/tools/bats/bats\", \"-t\", \"/tests/run.sh\"]\n volumeMounts:\n - mountPath: /tests\n name: tests\n readOnly: true\n - mountPath: /tools\n name: tools\n volumes:\n - name: tests\n configMap:\n name: {{ template \"traefik.fullname\" . }}-test\n - name: tools\n emptyDir: {}\n restartPolicy: Never\n{{- end }}\n"
] | ## Default values for Traefik
image: traefik
imageTag: 1.7.26
imagePullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
# imagePullSecrets:
# - "regsecret"
testFramework:
enabled: false
image: "dduportal/bats"
tag: "0.4.0"
## can switch the service type to NodePort if required
serviceType: LoadBalancer
# loadBalancerIP: ""
# loadBalancerSourceRanges: []
whiteListSourceRange: []
externalTrafficPolicy: Cluster
replicas: 1
# startupArguments:
# - "--ping"
# - "--ping.entrypoint=http"
# /ping health-check entry point.
# pingEntryPoint: http
podDisruptionBudget: {}
# maxUnavailable: 1
# minAvailable: 2
# priorityClassName: ""
# rootCAs: []
resources: {}
debug:
enabled: false
# logLevel: error
# maxIdleConnsPerHost: 200
deploymentStrategy: {}
# rollingUpdate:
# maxSurge: 1
# maxUnavailable: 0
# type: RollingUpdate
securityContext: {}
useNonPriviledgedPorts: false
env: {}
nodeSelector: {}
# key: value
affinity: {}
# key: value
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
## Kubernetes ingress filters
# kubernetes:
# endpoint:
# namespaces:
# - default
# labelSelector:
# ingressClass:
# ingressEndpoint:
# hostname: "localhost"
# ip: "127.0.0.1"
# publishedService: "namespace/servicename"
# useDefaultPublishedService: false
fileBackend: ""
# uses the same syntax as traefik.toml, e.g.
#
# [backends]
# [backends.backend1]
# # ...
# [backends.backend2]
# # ...
# [frontends]
# [frontends.frontend1]
# # ...
# [frontends.frontend2]
#
# or a separate file shipped via configFiles
# filename = "/configs/rules.toml"
proxyProtocol:
enabled: false
# trustedIPs is required when enabled
trustedIPs: []
# - 10.0.0.0/8
forwardedHeaders:
enabled: false
# trustedIPs is required when enabled
trustedIPs: []
# - 10.0.0.0/8
## Add arbitrary ConfigMaps to deployment
## Will be mounted under /configs/, e.g. myconfig.json would
## be mounted to /configs/myconfig.json.
configFiles: {}
# myconfig.json: |
# filecontents...
## Add arbitrary Secrets to deployment
## Will be mounted under /secrets/, e.g. mysecret.txt would
## be mounted to /secrets/mysecret.txt.
## The contents will be base64 encoded when added
secretFiles: {}
# mysecret.txt: |
# filecontents...
ssl:
enabled: false
enforced: false
permanentRedirect: false
upstream: false
insecureSkipVerify: false
generateTLS: false
# defaultCN: "example.com"
# or *.example.com
defaultSANList: []
# - example.com
# - test1.example.com
defaultIPList: []
# - 1.2.3.4
# cipherSuites: []
# https://docs.traefik.io/configuration/entrypoints/#specify-minimum-tls-version
# tlsMinVersion: VersionTLS12
# https://docs.traefik.io/configuration/entrypoints/#strict-sni-checking
# sniStrict: false
defaultCert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVtekNDQTRPZ0F3SUJBZ0lKQUpBR1FsTW1DMGt5TUEwR0NTcUdTSWIzRFFFQkJRVUFNSUdQTVFzd0NRWUQKVlFRR0V3SlZVekVSTUE4R0ExVUVDQk1JUTI5c2IzSmhaRzh4RURBT0JnTlZCQWNUQjBKdmRXeGtaWEl4RkRBUwpCZ05WQkFvVEMwVjRZVzF3YkdWRGIzSndNUXN3Q1FZRFZRUUxFd0pKVkRFV01CUUdBMVVFQXhRTktpNWxlR0Z0CmNHeGxMbU52YlRFZ01CNEdDU3FHU0liM0RRRUpBUllSWVdSdGFXNUFaWGhoYlhCc1pTNWpiMjB3SGhjTk1UWXgKTURJME1qRXdPVFV5V2hjTk1UY3hNREkwTWpFd09UVXlXakNCanpFTE1Ba0dBMVVFQmhNQ1ZWTXhFVEFQQmdOVgpCQWdUQ0VOdmJHOXlZV1J2TVJBd0RnWURWUVFIRXdkQ2IzVnNaR1Z5TVJRd0VnWURWUVFLRXd0RmVHRnRjR3hsClEyOXljREVMTUFrR0ExVUVDeE1DU1ZReEZqQVVCZ05WQkFNVURTb3VaWGhoYlhCc1pTNWpiMjB4SURBZUJna3EKaGtpRzl3MEJDUUVXRVdGa2JXbHVRR1Y0WVcxd2JHVXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQwpBUThBTUlJQkNnS0NBUUVBdHVKOW13dzlCYXA2SDROdUhYTFB6d1NVZFppNGJyYTFkN1ZiRUJaWWZDSStZNjRDCjJ1dThwdTNhVTVzYXVNYkQ5N2pRYW95VzZHOThPUHJlV284b3lmbmRJY3RFcmxueGpxelUyVVRWN3FEVHk0bkEKNU9aZW9SZUxmZXFSeGxsSjE0VmlhNVFkZ3l3R0xoRTlqZy9jN2U0WUp6bmg5S1dZMnFjVnhEdUdEM2llaHNEbgphTnpWNFdGOWNJZm1zOHp3UHZPTk5MZnNBbXc3dUhUKzNiSzEzSUloeDI3ZmV2cXVWcENzNDFQNnBzdStWTG4yCjVIRHk0MXRoQkN3T0wrTithbGJ0ZktTcXM3TEFzM25RTjFsdHpITHZ5MGE1RGhkakpUd2tQclQrVXhwb0tCOUgKNFpZazErRUR0N09QbGh5bzM3NDFRaE4vSkNZK2RKbkFMQnNValFJREFRQUJvNEgzTUlIME1CMEdBMVVkRGdRVwpCQlJwZVc1dFhMdHh3TXJvQXM5d2RNbTUzVVVJTERDQnhBWURWUjBqQklHOE1JRzVnQlJwZVc1dFhMdHh3TXJvCkFzOXdkTW01M1VVSUxLR0JsYVNCa2pDQmp6RUxNQWtHQTFVRUJoTUNWVk14RVRBUEJnTlZCQWdUQ0VOdmJHOXkKWVdSdk1SQXdEZ1lEVlFRSEV3ZENiM1ZzWkdWeU1SUXdFZ1lEVlFRS0V3dEZlR0Z0Y0d4bFEyOXljREVMTUFrRwpBMVVFQ3hNQ1NWUXhGakFVQmdOVkJBTVVEU291WlhoaGJYQnNaUzVqYjIweElEQWVCZ2txaGtpRzl3MEJDUUVXCkVXRmtiV2x1UUdWNFlXMXdiR1V1WTI5dGdna0FrQVpDVXlZTFNUSXdEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3EKaGtpRzl3MEJBUVVGQUFPQ0FRRUFjR1hNZms4TlpzQit0OUtCemwxRmw2eUlqRWtqSE8wUFZVbEVjU0QyQjRiNwpQeG5NT2pkbWdQcmF1SGI5dW5YRWFMN3p5QXFhRDZ0YlhXVTZSeENBbWdMYWpWSk5aSE93NDVOMGhyRGtXZ0I4CkV2WnRRNTZhbW13QzFxSWhBaUE2MzkwRDNDc2V4N2dMNm5KbzdrYnIxWVdVRzN6SXZveGR6OFlEclpOZVdLTEQKcFJ2V2VuMGxNYnBqSVJQNFhac25DNDVDOWdWWGRoM0xSZTErd3lRcTZoOVFQaWxveG1ENk5wRTlpbVRPbjJBNQovYkozVktJekFNdWRlVTZrcHlZbEpCemRHMXVhSFRqUU9Xb3NHaXdlQ0tWVVhGNlV0aXNWZGRyeFF0aDZFTnlXCnZJRnFhWng4NCtEbFNDYzkzeWZrL0dsQnQrU0tHNDZ6RUhNQjlocVBiQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
defaultKey: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdHVKOW13dzlCYXA2SDROdUhYTFB6d1NVZFppNGJyYTFkN1ZiRUJaWWZDSStZNjRDCjJ1dThwdTNhVTVzYXVNYkQ5N2pRYW95VzZHOThPUHJlV284b3lmbmRJY3RFcmxueGpxelUyVVRWN3FEVHk0bkEKNU9aZW9SZUxmZXFSeGxsSjE0VmlhNVFkZ3l3R0xoRTlqZy9jN2U0WUp6bmg5S1dZMnFjVnhEdUdEM2llaHNEbgphTnpWNFdGOWNJZm1zOHp3UHZPTk5MZnNBbXc3dUhUKzNiSzEzSUloeDI3ZmV2cXVWcENzNDFQNnBzdStWTG4yCjVIRHk0MXRoQkN3T0wrTithbGJ0ZktTcXM3TEFzM25RTjFsdHpITHZ5MGE1RGhkakpUd2tQclQrVXhwb0tCOUgKNFpZazErRUR0N09QbGh5bzM3NDFRaE4vSkNZK2RKbkFMQnNValFJREFRQUJBb0lCQUhrTHhka0dxNmtCWWQxVAp6MkU4YWFENnhneGpyY2JSdGFCcTc3L2hHbVhuQUdaWGVWcE81MG1SYW8wbHZ2VUgwaE0zUnZNTzVKOHBrdzNmCnRhWTQxT1dDTk1PMlYxb1MvQmZUK3Zsblh6V1hTemVQa0pXd2lIZVZMdVdEaVVMQVBHaWl4emF2RFMyUnlQRmEKeGVRdVNhdE5pTDBGeWJGMG5Zd3pST3ZoL2VSa2NKVnJRZlZudU1melFkOGgyMzZlb1UxU3B6UnhSNklubCs5UApNc1R2Wm5OQmY5d0FWcFo5c1NMMnB1V1g3SGNSMlVnem5oMDNZWUZJdGtDZndtbitEbEdva09YWHBVM282aWY5ClRIenBleHdubVJWSmFnRG85bTlQd2t4QXowOW80cXExdHJoU1g1U2p1K0xyNFJvOHg5bytXdUF1VnVwb0lHd0wKMWVseERFRUNnWUVBNzVaWGp1enNJR09PMkY5TStyYVFQcXMrRHZ2REpzQ3gyZnRudk1WWVJKcVliaGt6YnpsVQowSHBCVnk3NmE3WmF6Umxhd3RGZ3ljMlpyQThpM0F3K3J6d1pQclNJeWNieC9nUVduRzZlbFF1Y0FFVWdXODRNCkdSbXhKUGlmOGRQNUxsZXdRalFjUFJwZVoxMzlYODJreGRSSEdma1pscHlXQnFLajBTWExRSEVDZ1lFQXcybkEKbUVXdWQzZFJvam5zbnFOYjBlYXdFUFQrbzBjZ2RyaENQOTZQK1pEekNhcURUblZKV21PeWVxRlk1eVdSSEZOLwpzbEhXU2lTRUFjRXRYZys5aGlMc0RXdHVPdzhUZzYyN2VrOEh1UUtMb2tWWEFUWG1NZG9xOWRyQW9INU5hV2lECmRSY3dEU2EvamhIN3RZV1hKZDA4VkpUNlJJdU8vMVZpbDBtbEk5MENnWUVBb2lsNkhnMFNUV0hWWDNJeG9raEwKSFgrK1ExbjRYcFJ5VEg0eldydWY0TjlhYUxxNTY0QThmZGNodnFiWGJHeEN6U3RxR1E2cW1peUU1TVpoNjlxRgoyd21zZEpxeE14RnEzV2xhL0lxSzM0cTZEaHk3cUNld1hKVGRKNDc0Z3kvY0twZkRmeXZTS1RGZDBFejNvQTZLCmhqUUY0L2lNYnpxUStQREFQR0YrVHFFQ2dZQmQ1YnZncjJMMURzV1FJU3M4MHh3MDBSZDdIbTRaQVAxdGJuNk8KK0IvUWVNRC92UXBaTWV4c1hZbU9lV2Noc3FCMnJ2eW1MOEs3WDY1NnRWdGFYay9nVzNsM3ZVNTdYSFF4Q3RNUwpJMVYvcGVSNHRiN24yd0ZncFFlTm1XNkQ4QXk4Z0xiaUZhRkdRSDg5QWhFa0dTd1d5cWJKc2NoTUZZOUJ5OEtUCkZaVWZsUUtCZ0V3VzJkVUpOZEJMeXNycDhOTE1VbGt1ZnJxbllpUTNTQUhoNFZzWkg1TXU0MW55Yi95NUUyMW4KMk55d3ltWGRlb3VJcFZjcUlVTXl0L3FKRmhIcFJNeVEyWktPR0QyWG5YaENNVlRlL0FQNDJod294Nm02QkZpQgpvemZFa2wwak5uZmREcjZrL1p2MlQ1TnFzaWxaRXJBQlZGOTBKazdtUFBIa0Q2R1ZMUUJ4Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
# Basic auth to protect all the routes. Can use htpasswd to generate passwords
# > htpasswd -n -b testuser testpass
# > testuser:$apr1$JXRA7j2s$LpVns9vsme8FHN0r.aSt11
auth: {}
# basic:
# testuser: $apr1$JXRA7j2s$LpVns9vsme8FHN0r.aSt11
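# As a sketch, the hash generated above can also be supplied at install time
# (user name and chart reference are illustrative; single quotes keep the
# shell from expanding the $apr1$ hash):
# > helm install stable/traefik --set 'ssl.auth.basic.testuser=$apr1$JXRA7j2s$LpVns9vsme8FHN0r.aSt11'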
# A list of any extra certificate/key file pairs you want included in the Traefik instance. This must be used in conjunction with
# the "secretFiles" parameter to include the certs on each Traefik pod. The expected format is:
# extraCerts:
# - certFile: /secrets/cert1.crt
# keyFile: /secrets/key1.key
# - certFile: /secrets/cert2.crt
# keyFile: /secrets/key2.key
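# Putting extraCerts and secretFiles together, a hedged sketch (certificate
# contents are placeholders):
# secretFiles:
#   cert1.crt: |
#     -----BEGIN CERTIFICATE-----
#     ...
#   key1.key: |
#     -----BEGIN RSA PRIVATE KEY-----
#     ...
# ssl:
#   extraCerts:
#     - certFile: /secrets/cert1.crt
#       keyFile: /secrets/key1.key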
# mtls:
# enabled: true
# optional: false
# clientCaCerts: []
# # When mTLS is enabled, the set of CA certificates used to validate client TLS certificates.
# # https://docs.traefik.io/configuration/entrypoints/#tls-mutual-authentication
# # CA certificates should be in PEM format.
kvprovider:
## If you want to run Traefik in HA mode, you will need to set up a KV provider. You can choose one of:
## * etcd
## * consul
## * boltdb
## * zookeeper
##
## ref: https://docs.traefik.io/user-guide/cluster/
## storeAcme has to be enabled to support HA for ACME, and at least one kvprovider is needed (see the sketch after the provider examples below)
storeAcme: false
acmeStorageLocation: traefik/acme/account
importAcme: false
# etcd:
# endpoint: etcd-service:2379
# useAPIV3: false
# watch: true
# prefix: traefik
## Override default configuration template.
## For advanced users :)
##
## Optional
# filename: etcd.tmpl
# username: foo
# password: bar
# tls:
# ca: "/etc/ssl/ca.crt"
# cert: "/etc/ssl/consul.crt"
# key: "/etc/ssl/consul.key"
# insecureSkipVerify: true
#
# consul:
# endpoint: consul-service:8500
# watch: true
# prefix: traefik
## Override default configuration template.
## For advanced users :)
##
## Optional
# filename: consul.tmpl
# username: foo
# password: bar
# tls:
# ca: "/etc/ssl/ca.crt"
# cert: "/etc/ssl/consul.crt"
# key: "/etc/ssl/consul.key"
# insecureSkipVerify: true
## (useAPIV3 above is only relevant for etcd)
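## A hedged end-to-end sketch of HA ACME storage (the Consul endpoint is a
## placeholder). Enabling storeAcme together with one provider also makes the
## chart run a post-install/post-upgrade "storeconfig" Job that writes the
## configuration and ACME data into the KV store:
# storeAcme: true
# acmeStorageLocation: traefik/acme/account
# consul:
#   endpoint: consul-service:8500
#   watch: true
#   prefix: traefik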
acme:
keyType: RSA4096
enabled: false
email: [email protected]
onHostRule: true
staging: true
## Specify a custom ACME server endpoint
## Optional
# caServer: https://acme-staging-v02.api.letsencrypt.org/directory
logging: false
# Configure a Let's Encrypt certificate to be managed by default.
# This is the only way to request wildcard certificates (works only with dns challenge).
domains:
enabled: false
# List of sets of main and (optional) SANs to generate;
# for wildcard certificates see https://docs.traefik.io/configuration/acme/#wildcard-domains
domainsList:
# - main: "*.example.com"
# - sans:
# - "example.com"
# - main: "*.example2.com"
# - sans:
# - "test1.example2.com"
# - "test2.example2.com"
## ACME challenge type: "tls-sni-01", "tls-alpn-01", "http-01" or "dns-01"
## Note that "tls-sni-01" has been DEPRECATED.
challengeType: tls-alpn-01
## Configure dnsProvider to perform domain verification using dns challenge
## Applicable only if using the dns-01 challenge type
delayBeforeCheck: 0
resolvers: []
# - 1.1.1.1:53
# - 8.8.8.8:53
## Configure the endpoint used for the HTTP challenge
## Applicable only if using the http-01 challenge type
httpChallenge:
entrypoint: http
dnsProvider:
name: nil
existingSecretName: ""
auroradns:
AURORA_USER_ID: ""
AURORA_KEY: ""
AURORA_ENDPOINT: ""
azure:
AZURE_CLIENT_ID: ""
AZURE_CLIENT_SECRET: ""
AZURE_SUBSCRIPTION_ID: ""
AZURE_TENANT_ID: ""
AZURE_RESOURCE_GROUP: ""
cloudflare:
CF_API_EMAIL: ""
CF_API_KEY: ""
digitalocean:
DO_AUTH_TOKEN: ""
dnsimple:
DNSIMPLE_OAUTH_TOKEN: ""
DNSIMPLE_BASE_URL: ""
dnsmadeeasy:
DNSMADEEASY_API_KEY: ""
DNSMADEEASY_API_SECRET: ""
DNSMADEEASY_SANDBOX: ""
dnspod:
DNSPOD_API_KEY: ""
dreamhost:
DREAMHOST_API_KEY: ""
dyn:
DYN_CUSTOMER_NAME: ""
DYN_USER_NAME: ""
DYN_PASSWORD: ""
exoscale:
EXOSCALE_API_KEY: ""
EXOSCALE_API_SECRET: ""
EXOSCALE_ENDPOINT: ""
gandi:
GANDI_API_KEY: ""
godaddy:
GODADDY_API_KEY: ""
GODADDY_API_SECRET: ""
gcloud:
GCE_PROJECT: ""
GCE_SERVICE_ACCOUNT_FILE: ""
linode:
LINODE_API_KEY: ""
namecheap:
NAMECHEAP_API_USER: ""
NAMECHEAP_API_KEY: ""
ns1:
NS1_API_KEY: ""
otc:
OTC_DOMAIN_NAME: ""
OTC_USER_NAME: ""
OTC_PASSWORD: ""
OTC_PROJECT_NAME: ""
OTC_IDENTITY_ENDPOINT: ""
ovh:
OVH_ENDPOINT: ""
OVH_APPLICATION_KEY: ""
OVH_APPLICATION_SECRET: ""
OVH_CONSUMER_KEY: ""
pdns:
PDNS_API_URL: ""
rackspace:
RACKSPACE_USER: ""
RACKSPACE_API_KEY: ""
rfc2136:
RFC2136_NAMESERVER: ""
RFC2136_TSIG_ALGORITHM: ""
RFC2136_TSIG_KEY: ""
RFC2136_TSIG_SECRET: ""
RFC2136_TIMEOUT: ""
route53:
AWS_REGION: ""
AWS_ACCESS_KEY_ID: ""
AWS_SECRET_ACCESS_KEY: ""
vultr:
VULTR_API_KEY: ""
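## A hedged sketch of the dns-01 challenge with Route 53 (the credential
## values are placeholders; alternatively set existingSecretName to a
## pre-created Secret carrying the same keys):
# challengeType: dns-01
# dnsProvider:
#   name: route53
#   route53:
#     AWS_REGION: us-east-1
#     AWS_ACCESS_KEY_ID: "<access-key-id>"
#     AWS_SECRET_ACCESS_KEY: "<secret-access-key>"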
## Save ACME certs to a persistent volume.
## WARNING: If you do not do this and you have not configured
## a kvprovider, you will re-request certs every time a pod (re-)starts
## and you WILL be rate limited!
persistence:
enabled: true
annotations: {}
## acme data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 1Gi
## A manually managed Persistent Volume Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
##
# existingClaim:
dashboard:
enabled: false
domain: traefik.example.com
# serviceType: ClusterIP
service: {}
# annotations:
# key: value
ingress: {}
# annotations:
# key: value
# labels:
# key: value
# tls:
# - hosts:
# - traefik.example.com
# secretName: traefik-default-cert
auth: {}
# basic:
# username: password
statistics: {}
## Number of recent errors to show in the ‘Health’ tab
# recentErrors:
service:
# annotations:
# key: value
# labels:
# key: value
## Further config for service of type NodePort
## Default config with empty string "" will assign a dynamic
## nodePort to http and https ports
nodePorts:
http: ""
https: ""
## If a static nodePort configuration is required, it can be enabled as below
## Configure ports in allowable range (eg. 30000 - 32767 on minikube)
# nodePorts:
# http: 30080
# https: 30443
gzip:
enabled: true
traefikLogFormat: json
accessLogs:
enabled: false
filters: {}
# statusCodes:
# - "200"
# - "300-302"
# retryAttempts: true
# minDuration: "10ms"
## Path to the access logs file. If not provided, Traefik defaults it to stdout.
# filePath: ""
format: common # choices are: common, json
## For JSON logging, this gives finer-grained control over what is logged. Fields can be
## retained or dropped, and request headers can be retained, dropped or redacted
fields:
# choices are keep, drop
defaultMode: keep
names: {}
# ClientUsername: drop
headers:
# choices are keep, drop, redact
defaultMode: keep
names: {}
# Authorization: redact
rbac:
enabled: false
## Enable the /metrics endpoint, for now only supports prometheus
## set to true to enable metric collection by prometheus
metrics:
prometheus:
enabled: false
# buckets: [0.1,0.3,1.2,5]
service:
# Set a custom service name
name:
annotations:
prometheus.io/scrape: "true"
port: 9100
type: ClusterIP
# loadBalancerIP: ""
# loadBalancerSourceRanges: []
# externalIP: ""
# externalTrafficPolicy: Cluster
# nodePort: 9100
# serviceMonitor:
# When set to true and the Prometheus Operator is installed, a ServiceMonitor is used to configure scraping
# enabled: false
# Set the namespace the ServiceMonitor should be deployed in
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the ServiceMonitor; use this to define your scrape label for the Prometheus Operator
# labels:
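## A hedged sketch enabling the Prometheus endpoint plus a ServiceMonitor
## (the label value is a placeholder and must match your Prometheus
## Operator's serviceMonitorSelector):
# prometheus:
#   enabled: true
# serviceMonitor:
#   enabled: true
#   namespace: monitoring
#   interval: 30s
#   labels:
#     release: prometheus-operator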
datadog:
enabled: false
# address: localhost:8125
# pushinterval: 10s
statsd:
enabled: false
# address: localhost:8125
# pushinterval: 10s
deployment:
# labels to add to the deployment
# labels:
# key: value
# annotations:
# key: value
# labels to add to the pod container metadata
# podLabels:
# key: value
# podAnnotations:
# key: value
hostPort:
httpEnabled: false
httpsEnabled: false
dashboardEnabled: false
# httpPort: 80
# httpsPort: 443
# dashboardPort: 8080
sendAnonymousUsage: false
tracing:
enabled: false
serviceName: traefik
# backend: choices are jaeger, zipkin, datadog
# jaeger:
# localAgentHostPort: "127.0.0.1:6831"
# samplingServerUrl: http://localhost:5778/sampling
# samplingType: const
# samplingParam: 1.0
# zipkin:
# httpEndpoint: http://localhost:9411/api/v1/spans
# debug: false
# sameSpan: false
# id128bit: true
# datadog:
# localAgentHostPort: "127.0.0.1:8126"
# debug: false
# globalTag: ""
## Create HorizontalPodAutoscaler object.
##
# autoscaling:
# minReplicas: 1
# maxReplicas: 10
# metrics:
# - type: Resource
# resource:
# name: cpu
# targetAverageUtilization: 60
# - type: Resource
# resource:
# name: memory
# targetAverageUtilization: 60
## Timeouts
##
# timeouts:
# ## responding are timeouts for incoming requests to the Traefik instance
# responding:
# readTimeout: 0s
# writeTimeout: 0s
# idleTimeout: 180s
# ## forwarding are timeouts for requests forwarded to the backend servers
# forwarding:
# dialTimeout: 30s
# responseHeaderTimeout: 0s
# forwardAuth:
# entryPoints: ["http", "https"]
# address: https://authserver.com/auth
# trustForwardHeader: true
# Any extra volumes to define for the pod
extraVolumes: []
# - name: example-name
# hostPath:
# path: /path/on/host
# type: DirectoryOrCreate
# Any extra volume mounts to define for the Traefik container
extraVolumeMounts: []
# - name: example-name
# mountPath: /path/in/container
|
ignite | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"ignite.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"ignite.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"ignite.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"ignite.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"ignite.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# account-role.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n name: {{ include \"ignite.fullname\" . }}\nrules:\n- apiGroups:\n - \"\"\n resources: # Here are resources you can access\n - pods\n - endpoints\n verbs: # That is what you can do with them\n - get\n - list\n - watch\n{{- end }}",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"ignite.fullname\" . }}-configmap\ndata:\n ignite-config.xml: |\n <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\n <!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements. See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License. You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n -->\n\n <!--\n Configuration example with Kubernetes IP finder and Ignite persistence enabled.\n WAL files and database files are stored in separate disk drives.\n -->\n <beans xmlns=\"http://www.springframework.org/schema/beans\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xsi:schemaLocation=\"\n http://www.springframework.org/schema/beans\n http://www.springframework.org/schema/beans/spring-beans.xsd\">\n\n <bean class=\"org.apache.ignite.configuration.IgniteConfiguration\">\n\n <property name=\"peerClassLoadingEnabled\" value=\"{{ .Values.peerClassLoadingEnabled | default false }}\"/>\n\n <!-- Ignite Data Storage Configuration -->\n <property name=\"dataStorageConfiguration\">\n <bean class=\"org.apache.ignite.configuration.DataStorageConfiguration\">\n{{- if (.Values.persistence.enabled) }}\n <!-- Ignite Persistent Storage -->\n <property name=\"defaultDataRegionConfiguration\">\n <bean class=\"org.apache.ignite.configuration.DataRegionConfiguration\">\n <property name=\"persistenceEnabled\" value=\"true\"/>\n </bean>\n </property>\n\n <!--\n Sets a path to the root directory where data and indexes are\n to be persisted. It's assumed the directory is on a dedicated disk.\n -->\n <property name=\"storagePath\" value=\"/persistence\"/>\n\n <!--\n Sets a path to the directory where WAL is stored.\n It's assumed the directory is on a dedicated disk.\n -->\n <property name=\"walPath\" value=\"/wal\"/>\n\n <!--\n Sets a path to the directory where WAL archive is stored.\n It's assumed the directory is on the same drive with the WAL files.\n -->\n <property name=\"walArchivePath\" value=\"/wal/archive\"/>\n{{- end }}\n{{ .Values.dataStorage.config | indent 20 }}\n </bean>\n </property>\n\n <!-- Explicitly configure TCP discovery SPI to provide list of initial nodes. -->\n <property name=\"discoverySpi\">\n <bean class=\"org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi\">\n <property name=\"ipFinder\">\n <!--\n Enables Kubernetes IP finder and setting custom namespace and service names.\n -->\n <bean class=\"org.apache.ignite.spi.discovery.tcp.ipfinder.kubernetes.TcpDiscoveryKubernetesIpFinder\">\n <property name=\"namespace\" value=\"{{ .Release.Namespace }}\"/>\n <property name=\"serviceName\" value=\"{{ include \"ignite.fullname\" . }}\"/>\n </bean>\n </property>\n </bean>\n </property>\n </bean>\n </beans>\n",
"# persistence-storage-class.yaml\n{{- if (.Values.persistence.enabled) }}\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\n name: {{ include \"ignite.fullname\" . }}-persistence-storage-class\nprovisioner: {{ .Values.persistence.persistenceVolume.provisioner }}\nparameters:\n{{ toYaml .Values.persistence.persistenceVolume.provisionerParameters | indent 2 }}\n{{- end }}\n",
"# role-binding.yaml\n{{- if .Values.rbac.create }}\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: {{ include \"ignite.fullname\" . }}\nroleRef:\n kind: ClusterRole\n name: {{ include \"ignite.fullname\" . }}\n apiGroup: rbac.authorization.k8s.io\nsubjects:\n- kind: ServiceAccount\n name: {{ include \"ignite.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}",
"# service-account.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ include \"ignite.serviceAccountName\" . }}\n{{- end }}",
"# stateful-set.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ include \"ignite.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"ignite.name\" . }}\n helm.sh/chart: {{ include \"ignite.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ include \"ignite.fullname\" . }}\n serviceName: {{ include \"ignite.fullname\" . }}\n replicas: {{ .Values.replicaCount }}\n template:\n metadata:\n labels:\n app: {{ include \"ignite.fullname\" . }}\n spec:\n serviceAccountName: {{ include \"ignite.serviceAccountName\" . }}\n{{- if .Values.priorityClassName }}\n priorityClassName: \"{{ .Values.priorityClassName }}\"\n{{- end }}\n volumes:\n - name: config-volume\n configMap:\n name: {{ include \"ignite.fullname\" . }}-configmap\n items:\n - key: ignite-config.xml\n path: default-config.xml\n{{- with .Values.extraVolumes }}\n{{- toYaml . | nindent 8 }}\n{{- end }}\n{{- if .Values.extraInitContainers }}\n initContainers:\n{{- toYaml .Values.extraInitContainers | nindent 6 }}\n{{- end }}\n containers:\n{{- if .Values.extraContainers }}\n{{- toYaml .Values.extraContainers | nindent 6 }}\n{{- end }}\n - name: ignite\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n {{- if .Values.image.pullPolicy }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n {{ end -}}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n{{- if .Values.envVars }}\n env:\n{{ toYaml .Values.envVars | indent 10 }}\n{{- else }}\n{{- if .Values.env }}\n env:\n{{- range $name, $value := .Values.env }}\n - name: \"{{ $name }}\"\n value: \"{{ $value }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n{{- if .Values.envFrom }}\n envFrom:\n{{ toYaml .Values.envFrom | indent 10 }}\n{{- end }}\n ports:\n - containerPort: 11211 # JDBC port number.\n - containerPort: 47100 # communication SPI port number.\n - containerPort: 47500 # discovery SPI port number.\n - containerPort: 49112 # JMX port number.\n - containerPort: 10800 # SQL port number.\n - containerPort: 8080 # REST port number.\n - containerPort: 10900 #Thin clients port number.\n volumeMounts:\n{{- if (.Values.persistence.enabled) }}\n - mountPath: \"/wal\"\n name: ignite-wal\n - mountPath: \"/persistence\"\n name: ignite-persistence\n{{- end }}\n - name: config-volume\n mountPath: /opt/ignite/apache-ignite/config\n{{- with .Values.extraVolumeMounts }}\n{{- toYaml . | nindent 8 }}\n{{- end }}\n{{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n volumeClaimTemplates:\n{{- if (.Values.persistence.enabled) }}\n - metadata:\n name: ignite-persistence\n spec:\n accessModes: [ \"ReadWriteOnce\" ]\n storageClassName: \"{{ include \"ignite.fullname\" . }}-persistence-storage-class\"\n resources:\n requests:\n storage: {{ .Values.persistence.persistenceVolume.size }}\n - metadata:\n name: ignite-wal\n spec:\n accessModes: [ \"ReadWriteOnce\" ]\n storageClassName: \"{{ include \"ignite.fullname\" . }}-wal-storage-class\"\n resources:\n requests:\n storage: {{ .Values.persistence.walVolume.size }}\n{{- end }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"ignite.fullname\" . }}\n labels:\n app: {{ include \"ignite.fullname\" . }}\nspec:\n ports:\n - name: jdbc\n port: 11211\n targetPort: 11211\n - name: spi-communication\n port: 47100\n targetPort: 47100\n - name: spi-discovery\n port: 47500\n targetPort: 47500\n - name: jmx\n port: 49112\n targetPort: 49112\n - name: sql\n port: 10800\n targetPort: 10800\n - name: rest\n port: 8080\n targetPort: 8080\n - name: thin-clients\n port: 10900\n targetPort: 10900\n clusterIP: None\n selector:\n app: {{ include \"ignite.fullname\" . }}\n",
"# wal-storage-class.yaml\n{{- if (.Values.persistence.enabled) }}\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\n name: {{ include \"ignite.fullname\" . }}-wal-storage-class\nprovisioner: {{ .Values.persistence.walVolume.provisioner }}\nparameters:\n{{ toYaml .Values.persistence.walVolume.provisionerParameters | indent 2 }}\n{{- end }}\n"
] | # Default values for ignite.
replicaCount: 2
image:
repository: apacheignite/ignite
tag: 2.7.6
pullPolicy: IfNotPresent
nameOverride: ""
fullnameOverride: ""
rbac:
create: true
serviceAccount:
create: true
name:
dataStorage:
config: |-
env:
OPTION_LIBS: "ignite-kubernetes,ignite-rest-http"
IGNITE_QUIET: "false"
JVM_OPTS: "-Djava.net.preferIPv4Stack=true"
peerClassLoadingEnabled: false
## envFrom can be used to pass configmaps or secrets as environment
# envFrom:
# - configMapRef:
# name: env-configmap
# - secretRef:
# name: env-secrets
## Additional init containers to run before the main containers start.
## This could, for example, be used to run a sidecar that chowns the logs storage.
extraInitContainers: []
# - name: volume-mount-hack
# image: busybox
# command: ["sh", "-c", "chown -R 1000:1000 logs"]
## Additional containers to run alongside the pods
## This could, for example, be used to run jmx-exporter
extraContainers: []
# - name: jmxexporter
#   image: sscaling/jmx-prometheus-exporter
persistence:
enabled: false
persistenceVolume:
size: 8Gi
provisioner: kubernetes.io/aws-ebs
provisionerParameters:
type: gp2
fsType: ext4
walVolume:
size: 8Gi
provisioner: kubernetes.io/aws-ebs
provisionerParameters:
type: gp2
fsType: ext4
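## A hedged sketch of persistence on GKE (provisioner and parameters are
## placeholders for your cloud). With persistence enabled the chart creates
## one StorageClass for data and one for WAL, and the StatefulSet claims a
## volume from each:
# persistence:
#   enabled: true
#   persistenceVolume:
#     size: 20Gi
#     provisioner: kubernetes.io/gce-pd
#     provisionerParameters:
#       type: pd-ssd
#   walVolume:
#     size: 10Gi
#     provisioner: kubernetes.io/gce-pd
#     provisionerParameters:
#       type: pd-ssd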
## extraVolumes and extraVolumeMounts allow you to mount other volumes
## Example use cases:
## mount certificates to enable TLS
extraVolumes:
# - name: ignite-keystore
# secret:
# defaultMode: 288
# secretName: ignite-keystore
# - name: ignite-truststore
# secret:
# defaultMode: 288
# secretName: ignite-truststore
# extraVolumeMounts:
# - name: ignite-keystore
# mountPath: /certs/keystore
# readOnly: true
# - name: ignite-truststore
# mountPath: /certs/truststore
# readOnly: true
resources: {}
# We usually recommend not specifying default resources, leaving this as a conscious
# choice for the user. This also increases the chances that the chart runs on environments
# with few resources, such as Minikube. If you do want to specify resources, uncomment the
# following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
priorityClassName: ""
|
node-problem-detector | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"node-problem-detector.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"node-problem-detector.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"node-problem-detector.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/* Create the name of the service account to use */}}\n{{- define \"node-problem-detector.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n{{ default (include \"node-problem-detector.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n{{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the configmap for storing custom monitor definitions\n*/}}\n{{- define \"node-problem-detector.customConfig\" -}}\n{{- $fullname := include \"node-problem-detector.fullname\" . -}}\n{{- printf \"%s-custom-config\" $fullname | replace \"+\" \"_\" | trunc 63 -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for podSecurityPolicy.\n*/}}\n{{- define \"podSecurityPolicy.apiVersion\" -}}\n{{- if semverCompare \">=1.10-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"policy/v1beta1\" -}}\n{{- else -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create -}}\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"node-problem-detector.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"node-problem-detector.name\" . }}\n helm.sh/chart: {{ include \"node-problem-detector.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nrules:\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - get\n- apiGroups:\n - \"\"\n resources:\n - nodes/status\n verbs:\n - patch\n- apiGroups:\n - \"\"\n resources:\n - events\n verbs:\n - create\n - patch\n - update\n{{- end -}}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create -}}\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"node-problem-detector.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"node-problem-detector.name\" . }}\n helm.sh/chart: {{ include \"node-problem-detector.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"node-problem-detector.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\nroleRef:\n kind: ClusterRole\n name: {{ template \"node-problem-detector.fullname\" . }}\n apiGroup: rbac.authorization.k8s.io\n{{- end -}}\n",
"# custom-config-configmap.yaml\napiVersion: v1\ndata:\n{{ .Values.settings.custom_monitor_definitions | toYaml | indent 2 }}\nkind: ConfigMap\nmetadata:\n name: {{ include \"node-problem-detector.customConfig\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"node-problem-detector.name\" . }}\n helm.sh/chart: {{ include \"node-problem-detector.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n",
"# daemonset.yaml\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: {{ include \"node-problem-detector.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"node-problem-detector.name\" . }}\n helm.sh/chart: {{ include \"node-problem-detector.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n {{- range $key, $val := .Values.labels }}\n {{ $key }}: {{ $val | quote }}\n {{- end}}\nspec:\n updateStrategy:\n type: {{ .Values.updateStrategy }}\n{{- if eq .Values.updateStrategy \"RollingUpdate\"}}\n rollingUpdate:\n maxUnavailable: {{ .Values.maxUnavailable }}\n{{- end}}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"node-problem-detector.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app: {{ include \"node-problem-detector.name\" . }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"node-problem-detector.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app: {{ include \"node-problem-detector.name\" . }}\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/custom-config-configmap.yaml\") . | sha256sum }}\n scheduler.alpha.kubernetes.io/critical-pod: ''\n{{- if .Values.annotations }}\n{{ toYaml .Values.annotations | indent 8 }}\n{{- end }}\n spec:\n serviceAccountName: {{ template \"node-problem-detector.serviceAccountName\" . }}\n hostNetwork: {{ .Values.hostNetwork }}\n terminationGracePeriodSeconds: 30\n {{- if .Values.priorityClassName }}\n priorityClassName: {{ .Values.priorityClassName | quote }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | default \"IfNotPresent\" | quote }}\n command:\n - \"/bin/sh\"\n - \"-c\"\n - \"exec /node-problem-detector --logtostderr --config.system-log-monitor={{- range $index, $monitor := .Values.settings.log_monitors }}{{if ne $index 0}},{{end}}{{ $monitor }}{{- end }} {{- if .Values.settings.custom_plugin_monitors }} --custom-plugin-monitors={{- range $index, $monitor := .Values.settings.custom_plugin_monitors }}{{if ne $index 0}},{{end}}{{ $monitor }}{{- end }} {{- end }} --prometheus-address={{ .Values.settings.prometheus_address }} --prometheus-port={{ .Values.settings.prometheus_port }} --k8s-exporter-heartbeat-period={{ .Values.settings.heartBeatPeriod }}\"\n{{- if .Values.securityContext }}\n securityContext:\n{{ toYaml .Values.securityContext | indent 12 }}\n{{- end }}\n env:\n - name: NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n{{- if .Values.env }}\n{{ toYaml .Values.env | indent 12 }}\n{{- end }}\n volumeMounts:\n - name: log\n mountPath: {{ .Values.hostpath.logdir }}\n - name: localtime\n mountPath: /etc/localtime\n readOnly: true\n - name: custom-config\n mountPath: /custom-config\n readOnly: true\n{{- if .Values.extraVolumeMounts }}\n{{ toYaml .Values.extraVolumeMounts | indent 12 }}\n{{- end }}\n ports:\n - containerPort: {{ .Values.settings.prometheus_port }}\n name: exporter\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . 
| indent 8 }}\n {{- end }}\n{{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n{{- end }}\n volumes:\n - name: log\n hostPath:\n path: {{ .Values.hostpath.logdir }}\n - name: localtime\n hostPath:\n path: /etc/localtime\n type: \"FileOrCreate\"\n - name: custom-config\n configMap:\n name: {{ include \"node-problem-detector.customConfig\" . }}\n{{- if .Values.extraVolumes }}\n{{ toYaml .Values.extraVolumes | indent 8 }}\n{{- end }}\n",
"# psp-clusterrole.yaml\n{{- if .Values.rbac.pspEnabled }}\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"node-problem-detector.fullname\" . }}-psp\n labels:\n app.kubernetes.io/name: {{ include \"node-problem-detector.name\" . }}\n helm.sh/chart: {{ include \"node-problem-detector.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nrules:\n- apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames:\n - {{ template \"node-problem-detector.fullname\" . }}\n{{- end }}\n",
"# psp-clusterrolebinding.yaml\n{{- if .Values.rbac.pspEnabled }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"node-problem-detector.fullname\" . }}-psp\n labels:\n app.kubernetes.io/name: {{ include \"node-problem-detector.name\" . }}\n helm.sh/chart: {{ include \"node-problem-detector.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"node-problem-detector.fullname\" . }}-psp\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"node-problem-detector.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}\n",
"# psp.yaml\n{{- if .Values.rbac.pspEnabled }}\napiVersion: {{ template \"podSecurityPolicy.apiVersion\" . }}\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"node-problem-detector.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"node-problem-detector.name\" . }}\n helm.sh/chart: {{ include \"node-problem-detector.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n privileged: true\n allowPrivilegeEscalation: true\n allowedCapabilities:\n - '*'\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n - 'persistentVolumeClaim'\n - 'hostPath'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n rule: 'RunAsAny'\n seLinux:\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'RunAsAny'\n fsGroup:\n rule: 'RunAsAny'\n{{- end }}\n",
"# service.yaml\n{{- if .Values.metrics.serviceMonitor.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"node-problem-detector.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"node-problem-detector.name\" . }}\n helm.sh/chart: {{ include \"node-problem-detector.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app: {{ include \"node-problem-detector.name\" . }}\n namespace: {{ .Release.Namespace }}\nspec:\n type: ClusterIP\n clusterIP: None\n ports:\n - name: exporter\n port: {{ .Values.settings.prometheus_port }}\n protocol: TCP\n selector:\n app: {{ include \"node-problem-detector.name\" . }}\n{{- end }}\n",
"# serviceaccount.yaml\n{{ if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"node-problem-detector.serviceAccountName\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"node-problem-detector.name\" . }}\n helm.sh/chart: {{ include \"node-problem-detector.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end -}}\n",
"# servicemonitor.yaml\n{{- if .Values.metrics.serviceMonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ template \"node-problem-detector.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"node-problem-detector.name\" . }}\n helm.sh/chart: {{ include \"node-problem-detector.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n {{- if .Values.metrics.serviceMonitor.additionalLabels }}\n{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }}\n {{- end }}\n namespace: {{ .Release.Namespace }}\nspec:\n selector:\n matchLabels:\n app: {{ include \"node-problem-detector.name\" . }}\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n endpoints:\n - port: exporter\n path: /metrics\n interval: 60s\n relabelings:\n - action: replace\n targetLabel: node\n sourceLabels:\n - __meta_kubernetes_pod_node_name\n - action: replace\n targetLabel: host_ip\n sourceLabels:\n - __meta_kubernetes_pod_host_ip\n{{- end }}\n"
] | settings:
# Custom monitor definitions to add to Node Problem Detector - to be
# mounted at /custom-config. These are in addition to pre-packaged monitor
# definitions provided within the default docker image available at /config:
# https://github.com/kubernetes/node-problem-detector/tree/master/config
custom_monitor_definitions: {}
# docker-monitor-filelog.json: |
# {
# "plugin": "filelog",
# "pluginConfig": {
# "timestamp": "^time=\"(\\S*)\"",
# "message": "msg=\"([^\n]*)\"",
# "timestampFormat": "2006-01-02T15:04:05.999999999-07:00"
# },
# "logPath": "/var/log/docker.log",
# "lookback": "5m",
# "bufferSize": 10,
# "source": "docker-monitor",
# "conditions": [],
# "rules": [
# {
# "type": "temporary",
# "reason": "CorruptDockerImage",
# "pattern": "Error trying v2 registry: failed to register layer: rename /var/lib/docker/image/(.+) /var/lib/docker/image/(.+): directory not empty.*"
# }
# ]
# }
log_monitors:
- /config/kernel-monitor.json
- /config/docker-monitor.json
# An example of activating a custom log monitor definition in
# Node Problem Detector
# - /custom-config/docker-monitor-filelog.json
custom_plugin_monitors: []
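# An example of activating a hypothetical custom plugin monitor definition
# mounted via custom_monitor_definitions (the file name is illustrative):
# - /custom-config/network-problem-monitor.json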
prometheus_address: 0.0.0.0
prometheus_port: 20257
# The period at which the k8s exporter forcibly syncs with the apiserver
heartBeatPeriod: 5m0s
hostpath:
logdir: /var/log/
image:
repository: k8s.gcr.io/node-problem-detector
tag: v0.8.1
pullPolicy: IfNotPresent
nameOverride: ""
fullnameOverride: ""
rbac:
create: true
pspEnabled: false
# Flag to run Node Problem Detector on the host's network. This is typically
# not recommended, but may be useful for certain use cases.
hostNetwork: false
priorityClassName: ""
securityContext:
privileged: true
resources: {}
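# To specify resources, uncomment the following lines, adjust them as necessary
# (the values below are illustrative), and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi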
annotations: {}
labels: {}
tolerations:
- effect: NoSchedule
operator: Exists
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
affinity: {}
nodeSelector: {}
metrics:
serviceMonitor:
enabled: false
additionalLabels: {}
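# Hypothetical example: add the label a Prometheus Operator instance uses to
# select ServiceMonitors
# release: prometheus-operator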
env:
# - name: FOO
# value: BAR
# - name: POD_NAME
# valueFrom:
# fieldRef:
# fieldPath: metadata.name
extraVolumes: []
extraVolumeMounts: []
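## Hypothetical example: mount the node's machine-id so journald-based
## monitors can resolve the local journal (path depends on the node OS)
# extraVolumes:
# - name: machine-id
# hostPath:
# path: /etc/machine-id
# extraVolumeMounts:
# - name: machine-id
# mountPath: /etc/machine-id
# readOnly: true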
updateStrategy: RollingUpdate
maxUnavailable: 1
|
efs-provisioner | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"efs-provisioner.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"efs-provisioner.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified chart name.\n*/}}\n{{- define \"efs-provisioner.chartname\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" }}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"efs-provisioner.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"efs-provisioner.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create }}\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"efs-provisioner.fullname\" . }}\n labels:\n app: {{ template \"efs-provisioner.name\" . }}\n env: {{ .Values.global.deployEnv }}\n chart: {{ template \"efs-provisioner.chartname\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n annotations:\n{{ toYaml .Values.annotations | indent 4 }}\nrules:\n - apiGroups: [\"\"]\n resources: [\"persistentvolumes\"]\n verbs: [\"get\", \"list\", \"watch\", \"create\", \"delete\"]\n - apiGroups: [\"\"]\n resources: [\"persistentvolumeclaims\"]\n verbs: [\"get\", \"list\", \"watch\", \"update\"]\n - apiGroups: [\"storage.k8s.io\"]\n resources: [\"storageclasses\"]\n verbs: [\"get\", \"list\", \"watch\"]\n - apiGroups: [\"\"]\n resources: [\"events\"]\n verbs: [\"list\", \"watch\", \"create\", \"update\", \"patch\"]\n - apiGroups: [\"\"]\n resources: [\"endpoints\"]\n verbs: [\"get\", \"list\", \"watch\", \"create\", \"update\", \"patch\"]\n {{- if .Values.podSecurityPolicy.enabled }}\n - apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames:\n - {{ template \"efs-provisioner.fullname\" . }}\n {{- end }}\n{{- end }}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create }}\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"efs-provisioner.fullname\" . }}\n labels:\n app: {{ template \"efs-provisioner.name\" . }}\n env: {{ .Values.global.deployEnv }}\n chart: {{ template \"efs-provisioner.chartname\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n annotations:\n{{ toYaml .Values.annotations | indent 4 }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"efs-provisioner.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\nroleRef:\n kind: ClusterRole\n name: {{ template \"efs-provisioner.fullname\" . }}\n apiGroup: rbac.authorization.k8s.io\n{{- end }}\n",
"# deployment.yaml\n{{- if or (ne .Values.efsProvisioner.efsFileSystemId \"fs-12345678\") (.Values.efsProvisioner.dnsName) }}\n{{/*\nThe `efsFileSystemId` value must be set.\n\nThe above `if` condition also prevents the helm integration tests from failing.\nGiven that the helm test infrastructure does not have access to valid\nAWS EFS resources, a deployment that references the example `fs-12345678`\ncreates pods that will never enter a clean, running state.\n\nOmitting the deployment hacks around this limitation.\n*/}}\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n name: {{ template \"efs-provisioner.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"efs-provisioner.name\" . }}\n env: {{ .Values.global.deployEnv }}\n chart: {{ template \"efs-provisioner.chartname\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n annotations:\n{{ toYaml .Values.annotations | indent 4 }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"efs-provisioner.name\" . }}\n release: \"{{ .Release.Name }}\"\n revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}\n strategy:\n type: Recreate\n template:\n metadata:\n {{- if .Values.podAnnotations }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8}}\n {{- end }}\n labels:\n app: {{ template \"efs-provisioner.name\" . }}\n release: \"{{ .Release.Name }}\"\n {{- if .Values.podLabels }}\n{{ toYaml .Values.podLabels | indent 8 }}\n {{- end }}\n spec:\n serviceAccount: {{ template \"efs-provisioner.serviceAccountName\" . }}\n {{- if .Values.priorityClassName }}\n priorityClassName: \"{{ .Values.priorityClassName }}\"\n {{- end }}\n{{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range $pullSecret := .Values.image.pullSecrets }}\n - name: {{ $pullSecret }}\n {{- end }}\n{{- end }}\n containers:\n - name: {{ template \"efs-provisioner.fullname\" . 
}}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n - name: FILE_SYSTEM_ID\n value: {{ .Values.efsProvisioner.efsFileSystemId }}\n - name: AWS_REGION\n value: {{ .Values.efsProvisioner.awsRegion }}\n - name: PROVISIONER_NAME\n value: {{ .Values.efsProvisioner.provisionerName }}\n {{- if .Values.efsProvisioner.dnsName }}\n - name: DNS_NAME\n value: {{ .Values.efsProvisioner.dnsName }}\n {{- end }}\n {{- if .Values.extraEnv }}\n {{ toYaml .Values.extraEnv | nindent 8 }}\n {{- end }}\n {{- if .Values.envFrom }}\n envFrom:\n {{ toYaml .Values.envFrom | nindent 8 }}\n {{- end }}\n resources:\n {{- toYaml .Values.resources | nindent 12 }}\n volumeMounts:\n - name: pv-volume\n subPath: {{ (trimPrefix \"/\" .Values.efsProvisioner.path) }}\n mountPath: /persistentvolumes\n {{- if ne .Values.efsProvisioner.path \"/\" }}\n initContainers:\n - name: \"init-path\"\n image: {{ .Values.busyboxImage.repository }}:{{ .Values.busyboxImage.tag }}\n imagePullPolicy: {{ .Values.busyboxImage.pullPolicy }}\n command: [ \"sh\", \"-c\", \"mkdir -p /efs-vol-root/{{ (trimPrefix \"/\" .Values.efsProvisioner.path) }}\" ]\n resources:\n {{- toYaml .Values.resources | nindent 12 }}\n volumeMounts:\n - name: pv-volume\n mountPath: /efs-vol-root\n {{- end }}\n volumes:\n - name: pv-volume\n nfs:\n {{- if .Values.efsProvisioner.dnsName }}\n server: {{ .Values.efsProvisioner.dnsName }}\n {{- else }}\n server: {{ .Values.efsProvisioner.efsFileSystemId }}.efs.{{ .Values.efsProvisioner.awsRegion }}.amazonaws.com\n {{- end }}\n path: /\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n{{- end }}\n",
"# podsecuritypolicy.yaml\n{{- if .Values.podSecurityPolicy.enabled }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"efs-provisioner.fullname\" . }}\n labels:\n app: {{ template \"efs-provisioner.name\" . }}\n env: {{ .Values.global.deployEnv }}\n chart: {{ template \"efs-provisioner.chartname\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- if .Values.podSecurityPolicy.annotations }}\n annotations:\n{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }}\n{{- end }}\nspec:\n privileged: false\n allowPrivilegeEscalation: false\n requiredDropCapabilities:\n - ALL\n hostNetwork: false\n hostIPC: false\n hostPID: false\n volumes:\n - 'configMap'\n - 'secret'\n - 'nfs'\n runAsUser:\n rule: 'RunAsAny' \n seLinux:\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n - min: 1\n max: 65535\n readOnlyRootFilesystem: true\n{{- end }}",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"efs-provisioner.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"efs-provisioner.name\" . }}\n env: {{ .Values.global.deployEnv }}\n chart: {{ template \"efs-provisioner.chartname\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n annotations:\n {{- if .Values.serviceAccount.annotations -}}\n {{- toYaml .Values.serviceAccount.annotations | nindent 4 -}}\n {{- else -}}\n {{- toYaml .Values.annotations | nindent 4 -}}\n {{- end -}}\n{{- end }}\n",
"# storageclass.yaml\nkind: StorageClass\napiVersion: storage.k8s.io/v1beta1\nmetadata:\n name: {{ .Values.efsProvisioner.storageClass.name }}\n labels:\n app: {{ template \"efs-provisioner.name\" . }}\n env: {{ .Values.global.deployEnv }}\n chart: {{ template \"efs-provisioner.chartname\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n annotations:\n{{- if .Values.efsProvisioner.storageClass.isDefault }}\n storageclass.kubernetes.io/is-default-class: \"true\"\n{{- end }}\n{{- if .Values.annotations }}\n{{ toYaml .Values.annotations | indent 4 }}\n{{- end }}\nprovisioner: {{ .Values.efsProvisioner.provisionerName }}\nparameters:\n{{- if .Values.efsProvisioner.storageClass.gidAllocate.enabled }}\n{{- with .Values.efsProvisioner.storageClass.gidAllocate }}\n gidAllocate: \"true\"\n gidMin: \"{{ .gidMin }}\"\n gidMax: \"{{ .gidMax }}\"\n{{- end }}\n{{- else }}\n gidAllocate: \"false\"\n{{- end }}\nreclaimPolicy: {{ .Values.efsProvisioner.storageClass.reclaimPolicy }}\n{{- if .Values.efsProvisioner.storageClass.mountOptions }}\nmountOptions:\n {{- range .Values.efsProvisioner.storageClass.mountOptions }}\n - {{ . }}\n {{- end }}\n{{- end }}\n"
] | #
# Default values for EFS provisioner service
# https://github.com/kubernetes-incubator/external-storage/tree/master/aws/efs
#
## Deploy environment label, e.g. dev, test, prod
##
global:
deployEnv: dev
## Containers
##
replicaCount: 1
revisionHistoryLimit: 10
image:
repository: quay.io/external_storage/efs-provisioner
tag: v2.4.0
pullPolicy: IfNotPresent
# If specified, use these secrets to access the images
# pullSecrets:
# - registry-secret
busyboxImage:
repository: gcr.io/google_containers/busybox
tag: 1.27
pullPolicy: IfNotPresent
## Extra env variables and envFrom
extraEnv: []
envFrom: []
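## For example (illustrative names):
# extraEnv:
# - name: FOO
# value: BAR
# envFrom:
# - configMapRef:
# name: env-configmap
# - secretRef:
# name: env-secrets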
## Deployment annotations
##
annotations: {}
## Configure provisioner
## https://github.com/kubernetes-incubator/external-storage/tree/master/aws/efs#deployment
##
efsProvisioner:
# If specified, use this DNS name or IP to connect to the EFS
# dnsName: "my-custom-efs-dns.com"
efsFileSystemId: fs-12345678
awsRegion: us-east-2
path: /example-pv
provisionerName: example.com/aws-efs
storageClass:
name: aws-efs
isDefault: false
gidAllocate:
enabled: true
gidMin: 40000
gidMax: 50000
reclaimPolicy: Delete
mountOptions: []
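# For example, the NFS options commonly recommended for EFS:
# - vers=4.1
# - rsize=1048576
# - wsize=1048576
# - hard
# - timeo=600
# - retrans=2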
## Enable RBAC
##
rbac:
# Specifies whether RBAC resources should be created
create: true
## Create or use ServiceAccount
##
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
annotations: {}
# eks.amazonaws.com/role-arn: arn:aws:iam::AWS_ACCOUNT_ID:role/IAM_ROLE_NAME
## Annotations to be added to deployment
##
podAnnotations: {}
# iam.amazonaws.com/role: efs-provisioner-role
## Labels to be added to deployment
##
podLabels: {}
# environment: production
## Node labels for pod assignment
##
nodeSelector: {}
# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# Tolerations for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
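# For example, to tolerate a hypothetical dedicated-node taint:
# - key: dedicated
# operator: Equal
# value: storage
# effect: NoSchedule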
## Configure resources
##
resources: {}
# To specify resources, uncomment the following lines, adjust them as necessary,
# and remove the curly braces after 'resources:'.
# limits:
# cpu: 200m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
priorityClassName: ""
# Configure podsecuritypolicy
# Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: true
annotations: {}
|
redis | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"redis.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nExpand the chart plus release name (used by the chart label)\n*/}}\n{{- define \"redis.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"redis.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for networkpolicy.\n*/}}\n{{- define \"networkPolicy.apiVersion\" -}}\n{{- if semverCompare \">=1.4-0, <1.7-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else -}}\n{{- print \"networking.k8s.io/v1\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiGroup for PodSecurityPolicy.\n*/}}\n{{- define \"podSecurityPolicy.apiGroup\" -}}\n{{- if semverCompare \">=1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"policy\" -}}\n{{- else -}}\n{{- print \"extensions\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for PodSecurityPolicy.\n*/}}\n{{- define \"podSecurityPolicy.apiVersion\" -}}\n{{- if semverCompare \">=1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"policy/v1beta1\" -}}\n{{- else -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Redis image name\n*/}}\n{{- define \"redis.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Redis Sentinel image name\n*/}}\n{{- define \"sentinel.image\" -}}\n{{- $registryName := .Values.sentinel.image.registry -}}\n{{- $repositoryName := .Values.sentinel.image.repository -}}\n{{- $tag := .Values.sentinel.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName 
$repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the metrics image)\n*/}}\n{{- define \"redis.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the init container volume-permissions image)\n*/}}\n{{- define \"redis.volumePermissions.image\" -}}\n{{- $registryName := .Values.volumePermissions.image.registry -}}\n{{- $repositoryName := .Values.volumePermissions.image.repository -}}\n{{- $tag := .Values.volumePermissions.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"redis.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"redis.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGet the password secret.\n*/}}\n{{- define \"redis.secretName\" -}}\n{{- if .Values.existingSecret -}}\n{{- printf \"%s\" .Values.existingSecret -}}\n{{- else -}}\n{{- printf \"%s\" (include \"redis.fullname\" .) 
-}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGet the password key to be retrieved from Redis secret.\n*/}}\n{{- define \"redis.secretPasswordKey\" -}}\n{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}}\n{{- printf \"%s\" .Values.existingSecretPasswordKey -}}\n{{- else -}}\n{{- printf \"redis-password\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn Redis password\n*/}}\n{{- define \"redis.password\" -}}\n{{- if not (empty .Values.global.redis.password) }}\n {{- .Values.global.redis.password -}}\n{{- else if not (empty .Values.password) -}}\n {{- .Values.password -}}\n{{- else -}}\n {{- randAlphaNum 10 -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn sysctl image\n*/}}\n{{- define \"redis.sysctl.image\" -}}\n{{- $registryName := default \"docker.io\" .Values.sysctlImage.registry -}}\n{{- $repositoryName := .Values.sysctlImage.repository -}}\n{{- $tag := default \"buster\" .Values.sysctlImage.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"redis.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.sysctlImage.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.volumePermissions.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.sysctlImage.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.volumePermissions.image.pullSecrets }}\n - name: {{ . 
}}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/* Check if there are rolling tags in the images */}}\n{{- define \"redis.checkRollingTags\" -}}\n{{- if and (contains \"bitnami/\" .Values.image.repository) (not (.Values.image.tag | toString | regexFind \"-r\\\\d+$|sha256:\")) }}\nWARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.\n+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/\n{{- end }}\n{{- if and (contains \"bitnami/\" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind \"-r\\\\d+$|sha256:\")) }}\nWARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.\n+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/\n{{- end }}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class for master\n*/}}\n{{- define \"redis.master.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.master.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.master.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.master.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.master.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.master.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.master.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class for slave\n*/}}\n{{- define \"redis.slave.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.slave.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.slave.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.slave.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.slave.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.slave.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.slave.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"redis.fullname\" . }}\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ndata:\n redis.conf: |-\n{{- if .Values.configmap }}\n # User-supplied configuration:\n{{ tpl .Values.configmap . | indent 4 }}\n{{- end }}\n master.conf: |-\n dir {{ .Values.master.persistence.path }}\n{{- if .Values.master.configmap }}\n # User-supplied master configuration:\n{{ tpl .Values.master.configmap . | indent 4 }}\n{{- end }}\n{{- if .Values.master.disableCommands }}\n{{- range .Values.master.disableCommands }}\n rename-command {{ . }} \"\"\n{{- end }}\n{{- end }}\n replica.conf: |-\n dir {{ .Values.slave.persistence.path }}\n slave-read-only yes\n{{- if .Values.slave.configmap }}\n # User-supplied slave configuration:\n{{ tpl .Values.slave.configmap . | indent 4 }}\n{{- end }}\n{{- if .Values.slave.disableCommands }}\n{{- range .Values.slave.disableCommands }}\n rename-command {{ . }} \"\"\n{{- end }}\n{{- end }}\n{{- if .Values.sentinel.enabled }}\n sentinel.conf: |-\n dir \"/tmp\"\n bind 0.0.0.0\n port {{ .Values.sentinel.port }}\n sentinel monitor {{ .Values.sentinel.masterSet }} {{ template \"redis.fullname\" . }}-master-0.{{ template \"redis.fullname\" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }}\n sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }}\n sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }}\n sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }}\n{{- if .Values.sentinel.configmap }}\n # User-supplied sentinel configuration:\n{{ tpl .Values.sentinel.configmap . | indent 4 }}\n{{- end }}\n{{- end }}\n",
"# headless-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"redis.fullname\" . }}-headless\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: ClusterIP\n clusterIP: None\n ports:\n - name: redis\n port: {{ .Values.redisPort }}\n targetPort: redis\n{{- if .Values.sentinel.enabled }}\n - name: redis-sentinel\n port: {{ .Values.sentinel.port }}\n targetPort: redis-sentinel\n{{- end }}\n selector:\n app: {{ template \"redis.name\" . }}\n release: {{ .Release.Name }}\n",
"# health-configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"redis.fullname\" . }}-health\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ndata:\n ping_readiness_local.sh: |-\n{{- if .Values.usePasswordFile }}\n password_aux=`cat ${REDIS_PASSWORD_FILE}`\n export REDIS_PASSWORD=$password_aux\n{{- end }}\n response=$(\n timeout -s 9 $1 \\\n redis-cli \\\n{{- if .Values.usePassword }}\n -a $REDIS_PASSWORD --no-auth-warning \\\n{{- end }}\n -h localhost \\\n -p $REDIS_PORT \\\n ping\n )\n if [ \"$response\" != \"PONG\" ]; then\n echo \"$response\"\n exit 1\n fi\n ping_liveness_local.sh: |-\n{{- if .Values.usePasswordFile }}\n password_aux=`cat ${REDIS_PASSWORD_FILE}`\n export REDIS_PASSWORD=$password_aux\n{{- end }}\n response=$(\n timeout -s 9 $1 \\\n redis-cli \\\n{{- if .Values.usePassword }}\n -a $REDIS_PASSWORD --no-auth-warning \\\n{{- end }}\n -h localhost \\\n -p $REDIS_PORT \\\n ping\n )\n if [ \"$response\" != \"PONG\" ] && [ \"$response\" != \"LOADING Redis is loading the dataset in memory\" ]; then\n echo \"$response\"\n exit 1\n fi\n{{- if .Values.sentinel.enabled }}\n ping_sentinel.sh: |-\n{{- if .Values.usePasswordFile }}\n password_aux=`cat ${REDIS_PASSWORD_FILE}`\n export REDIS_PASSWORD=$password_aux\n{{- end }}\n response=$(\n timeout -s 9 $1 \\\n redis-cli \\\n{{- if .Values.usePassword }}\n -a $REDIS_PASSWORD --no-auth-warning \\\n{{- end }}\n -h localhost \\\n -p $REDIS_SENTINEL_PORT \\\n ping\n )\n if [ \"$response\" != \"PONG\" ]; then\n echo \"$response\"\n exit 1\n fi\n parse_sentinels.awk: |-\n /ip/ {FOUND_IP=1}\n /port/ {FOUND_PORT=1}\n /runid/ {FOUND_RUNID=1}\n !/ip|port|runid/ {\n if (FOUND_IP==1) {\n IP=$1; FOUND_IP=0;\n }\n else if (FOUND_PORT==1) {\n PORT=$1;\n FOUND_PORT=0;\n } else if (FOUND_RUNID==1) {\n printf \"\\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s\", IP, PORT, $0; FOUND_RUNID=0;\n }\n }\n{{- end }}\n ping_readiness_master.sh: |-\n{{- if .Values.usePasswordFile }}\n password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}`\n export REDIS_MASTER_PASSWORD=$password_aux\n{{- end }}\n response=$(\n timeout -s 9 $1 \\\n redis-cli \\\n{{- if .Values.usePassword }}\n -a $REDIS_MASTER_PASSWORD --no-auth-warning \\\n{{- end }}\n -h $REDIS_MASTER_HOST \\\n -p $REDIS_MASTER_PORT_NUMBER \\\n ping\n )\n if [ \"$response\" != \"PONG\" ]; then\n echo \"$response\"\n exit 1\n fi\n ping_liveness_master.sh: |-\n{{- if .Values.usePasswordFile }}\n password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}`\n export REDIS_MASTER_PASSWORD=$password_aux\n{{- end }}\n response=$(\n timeout -s 9 $1 \\\n redis-cli \\\n{{- if .Values.usePassword }}\n -a $REDIS_MASTER_PASSWORD --no-auth-warning \\\n{{- end }}\n -h $REDIS_MASTER_HOST \\\n -p $REDIS_MASTER_PORT_NUMBER \\\n ping\n )\n if [ \"$response\" != \"PONG\" ] && [ \"$response\" != \"LOADING Redis is loading the dataset in memory\" ]; then\n echo \"$response\"\n exit 1\n fi\n ping_readiness_local_and_master.sh: |-\n script_dir=\"$(dirname \"$0\")\"\n exit_status=0\n \"$script_dir/ping_readiness_local.sh\" $1 || exit_status=$?\n \"$script_dir/ping_readiness_master.sh\" $1 || exit_status=$?\n exit $exit_status\n ping_liveness_local_and_master.sh: |-\n script_dir=\"$(dirname \"$0\")\"\n exit_status=0\n \"$script_dir/ping_liveness_local.sh\" $1 || exit_status=$?\n \"$script_dir/ping_liveness_master.sh\" $1 || exit_status=$?\n exit $exit_status\n",
"# metrics-prometheus.yaml\n{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ template \"redis.fullname\" . }}\n {{- if .Values.metrics.serviceMonitor.namespace }}\n namespace: {{ .Values.metrics.serviceMonitor.namespace }}\n {{- end }}\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- range $key, $value := .Values.metrics.serviceMonitor.selector }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n endpoints:\n - port: metrics\n {{- if .Values.metrics.serviceMonitor.interval }}\n interval: {{ .Values.metrics.serviceMonitor.interval }}\n {{- end }}\n selector:\n matchLabels:\n app: {{ template \"redis.name\" . }}\n release: {{ .Release.Name }}\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n{{- end -}}\n",
"# metrics-svc.yaml\n{{- if .Values.metrics.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"redis.fullname\" . }}-metrics\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- if .Values.metrics.service.labels -}}\n {{ toYaml .Values.metrics.service.labels | nindent 4 }}\n {{- end -}}\n {{- if .Values.metrics.service.annotations }}\n annotations: {{ toYaml .Values.metrics.service.annotations | nindent 4 }}\n {{- end }}\nspec:\n type: {{ .Values.metrics.service.type }}\n {{ if eq .Values.metrics.service.type \"LoadBalancer\" -}} {{ if .Values.metrics.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }}\n {{ end -}}\n {{- end -}}\n ports:\n - name: metrics\n port: 9121\n targetPort: metrics\n selector:\n app: {{ template \"redis.name\" . }}\n release: {{ .Release.Name }}\n{{- end }}\n",
"# networkpolicy.yaml\n{{- if .Values.networkPolicy.enabled }}\nkind: NetworkPolicy\napiVersion: {{ template \"networkPolicy.apiVersion\" . }}\nmetadata:\n name: {{ template \"redis.fullname\" . }}\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n podSelector:\n matchLabels:\n app: {{ template \"redis.name\" . }}\n release: {{ .Release.Name }}\n {{- if .Values.cluster.enabled }}\n policyTypes:\n - Ingress\n - Egress\n egress:\n # Allow dns resolution\n - ports:\n - port: 53\n protocol: UDP\n # Allow outbound connections to other cluster pods\n - ports:\n - port: {{ .Values.redisPort }}\n {{- if .Values.sentinel.enabled }}\n - port: {{ .Values.sentinel.port }}\n {{- end }}\n to:\n - podSelector:\n matchLabels:\n app: {{ template \"redis.name\" . }}\n release: {{ .Release.Name }}\n {{- end }}\n ingress:\n # Allow inbound connections\n - ports:\n - port: {{ .Values.redisPort }}\n {{- if .Values.sentinel.enabled }}\n - port: {{ .Values.sentinel.port }}\n {{- end }}\n {{- if not .Values.networkPolicy.allowExternal }}\n from:\n - podSelector:\n matchLabels:\n {{ template \"redis.fullname\" . }}-client: \"true\"\n - podSelector:\n matchLabels:\n app: {{ template \"redis.name\" . }}\n release: {{ .Release.Name }}\n {{- if .Values.networkPolicy.ingressNSMatchLabels }}\n - namespaceSelector:\n matchLabels:\n {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }}\n {{ $key | quote }}: {{ $value | quote }}\n {{- end }}\n {{- if .Values.networkPolicy.ingressNSPodMatchLabels }}\n podSelector:\n matchLabels:\n {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }}\n {{ $key | quote }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\n {{- end }}\n {{- end }}\n {{- if .Values.metrics.enabled }}\n # Allow prometheus scrapes for metrics\n - ports:\n - port: 9121\n {{- end }}\n{{- end }}\n",
"# prometheusrule.yaml\n{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n name: {{ template \"redis.fullname\" . }}\n{{- with .Values.metrics.prometheusRule.namespace }}\n namespace: {{ . }}\n{{- end }}\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- with .Values.metrics.prometheusRule.additionalLabels }}\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- with .Values.metrics.prometheusRule.rules }}\n groups:\n - name: {{ template \"redis.name\" $ }}\n rules: {{ tpl (toYaml .) $ | nindent 8 }}\n{{- end }}\n{{- end }}\n",
"# psp.yaml\n{{- if .Values.podSecurityPolicy.create }}\napiVersion: {{ template \"podSecurityPolicy.apiVersion\" . }}\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"redis.fullname\" . }}\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\nspec:\n allowPrivilegeEscalation: false\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n - min: {{ .Values.securityContext.fsGroup }}\n max: {{ .Values.securityContext.fsGroup }}\n hostIPC: false\n hostNetwork: false\n hostPID: false\n privileged: false\n readOnlyRootFilesystem: false\n requiredDropCapabilities:\n - ALL\n runAsUser:\n rule: 'MustRunAs'\n ranges:\n - min: {{ .Values.securityContext.runAsUser }}\n max: {{ .Values.securityContext.runAsUser }}\n seLinux:\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n - min: {{ .Values.securityContext.runAsUser }}\n max: {{ .Values.securityContext.runAsUser }}\n volumes:\n - 'configMap'\n - 'secret'\n - 'emptyDir'\n - 'persistentVolumeClaim'\n{{- end }}\n",
"# redis-master-statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ template \"redis.fullname\" . }}-master\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"redis.name\" . }}\n release: {{ .Release.Name }}\n role: master\n serviceName: {{ template \"redis.fullname\" . }}-headless\n template:\n metadata:\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name }}\n role: master\n{{- if .Values.master.podLabels }}\n{{ toYaml .Values.master.podLabels | indent 8 }}\n{{- end }}\n{{- if and .Values.metrics.enabled .Values.metrics.podLabels }}\n{{ toYaml .Values.metrics.podLabels | indent 8 }}\n{{- end }}\n annotations:\n checksum/health: {{ include (print $.Template.BasePath \"/health-configmap.yaml\") . | sha256sum }}\n checksum/configmap: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n checksum/secret: {{ include (print $.Template.BasePath \"/secret.yaml\") . | sha256sum }}\n {{- if .Values.master.podAnnotations }}\n{{ toYaml .Values.master.podAnnotations | indent 8 }}\n {{- end }}\n {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }}\n{{ toYaml .Values.metrics.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n{{- include \"redis.imagePullSecrets\" . | indent 6 }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.securityContext.fsGroup }}\n {{- if .Values.securityContext.sysctls }}\n sysctls:\n{{ toYaml .Values.securityContext.sysctls | indent 8 }}\n {{- end }}\n {{- end }}\n serviceAccountName: \"{{ template \"redis.serviceAccountName\" . }}\"\n {{- if .Values.master.priorityClassName }}\n priorityClassName: \"{{ .Values.master.priorityClassName }}\"\n {{- end }}\n {{- with .Values.master.affinity }}\n affinity:\n{{ tpl (toYaml .) $ | indent 8 }}\n {{- end }}\n {{- if .Values.master.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.master.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.master.tolerations }}\n tolerations:\n{{ toYaml .Values.master.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.master.schedulerName }}\n schedulerName: \"{{ .Values.master.schedulerName }}\"\n {{- end }}\n containers:\n - name: {{ template \"redis.fullname\" . }}\n image: \"{{ template \"redis.image\" . }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n command:\n - /bin/bash\n - -c\n - |\n {{- if (eq (.Values.securityContext.runAsUser | int) 0) }}\n useradd redis\n chown -R redis {{ .Values.master.persistence.path }}\n {{- end }}\n if [[ -n $REDIS_PASSWORD_FILE ]]; then\n password_aux=`cat ${REDIS_PASSWORD_FILE}`\n export REDIS_PASSWORD=$password_aux\n fi\n if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then\n cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf\n fi\n if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then\n cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf\n fi\n ARGS=(\"--port\" \"${REDIS_PORT}\")\n {{- if .Values.usePassword }}\n ARGS+=(\"--requirepass\" \"${REDIS_PASSWORD}\")\n ARGS+=(\"--masterauth\" \"${REDIS_PASSWORD}\")\n {{- else }}\n ARGS+=(\"--protected-mode\" \"no\")\n {{- end }}\n ARGS+=(\"--include\" \"/opt/bitnami/redis/etc/redis.conf\")\n ARGS+=(\"--include\" \"/opt/bitnami/redis/etc/master.conf\")\n {{- if .Values.master.extraFlags }}\n {{- range .Values.master.extraFlags }}\n ARGS+=({{ . | quote }})\n {{- end }}\n {{- end }}\n {{- if .Values.master.command }}\n {{ .Values.master.command }} ${ARGS[@]}\n {{- else }}\n redis-server \"${ARGS[@]}\"\n {{- end }}\n env:\n - name: REDIS_REPLICATION_MODE\n value: master\n {{- if .Values.usePassword }}\n {{- if .Values.usePasswordFile }}\n - name: REDIS_PASSWORD_FILE\n value: \"/opt/bitnami/redis/secrets/redis-password\"\n {{- else }}\n - name: REDIS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"redis.secretName\" . }}\n key: {{ template \"redis.secretPasswordKey\" . }}\n {{- end }}\n {{- else }}\n - name: ALLOW_EMPTY_PASSWORD\n value: \"yes\"\n {{- end }}\n - name: REDIS_PORT\n value: {{ .Values.redisPort | quote }}\n ports:\n - name: redis\n containerPort: {{ .Values.redisPort }}\n {{- if .Values.master.livenessProbe.enabled }}\n livenessProbe:\n initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.master.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}\n exec:\n command:\n - sh\n - -c\n - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }}\n {{- end }}\n {{- if .Values.master.readinessProbe.enabled}}\n readinessProbe:\n initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.master.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }}\n exec:\n command:\n - sh\n - -c\n - /health/ping_readiness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }}\n {{- end }}\n resources:\n{{ toYaml .Values.master.resources | indent 10 }}\n volumeMounts:\n - name: health\n mountPath: /health\n {{- if .Values.usePasswordFile }}\n - name: redis-password\n mountPath: /opt/bitnami/redis/secrets/\n {{- end }}\n - name: redis-data\n mountPath: {{ .Values.master.persistence.path }}\n subPath: {{ .Values.master.persistence.subPath }}\n - name: config\n mountPath: /opt/bitnami/redis/mounted-etc\n - name: redis-tmp-conf\n mountPath: /opt/bitnami/redis/etc/\n {{- if and .Values.cluster.enabled .Values.sentinel.enabled }}\n - name: sentinel\n image: \"{{ template \"sentinel.image\" . }}\"\n imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n command:\n - /bin/bash\n - -c\n - |\n if [[ -n $REDIS_PASSWORD_FILE ]]; then\n password_aux=`cat ${REDIS_PASSWORD_FILE}`\n export REDIS_PASSWORD=$password_aux\n fi\n if [[ ! 
-f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then\n cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf\n {{- if .Values.usePassword }}\n printf \"\\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD\" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf\n {{- if .Values.sentinel.usePassword }}\n printf \"\\nrequirepass $REDIS_PASSWORD\" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf\n {{- end }}\n {{- end }}\n {{- if .Values.sentinel.staticID }}\n printf \"\\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')\" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf\n {{- end }}\n fi\n echo \"Getting information about current running sentinels\"\n # Get information from existing sentinels\n existing_sentinels=$(timeout -s 9 {{ .Values.sentinel.initialCheckTimeout }} redis-cli --raw -h {{ template \"redis.fullname\" . }} -a \"$REDIS_PASSWORD\" -p {{ .Values.sentinel.service.sentinelPort }} SENTINEL sentinels {{ .Values.sentinel.masterSet }})\n echo \"$existing_sentinels\" | awk -f /health/parse_sentinels.awk | tee -a /opt/bitnami/redis-sentinel/etc/sentinel.conf\n\n redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel\n env:\n {{- if .Values.usePassword }}\n {{- if .Values.usePasswordFile }}\n - name: REDIS_PASSWORD_FILE\n value: \"/opt/bitnami/redis/secrets/redis-password\"\n {{- else }}\n - name: REDIS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"redis.secretName\" . }}\n key: {{ template \"redis.secretPasswordKey\" . }}\n {{- end }}\n {{- else }}\n - name: ALLOW_EMPTY_PASSWORD\n value: \"yes\"\n {{- end }}\n - name: REDIS_SENTINEL_PORT\n value: {{ .Values.sentinel.port | quote }}\n ports:\n - name: redis-sentinel\n containerPort: {{ .Values.sentinel.port }}\n {{- if .Values.sentinel.livenessProbe.enabled }}\n livenessProbe:\n initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }}\n exec:\n command:\n - sh\n - -c\n - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }}\n {{- end }}\n {{- if .Values.sentinel.readinessProbe.enabled}}\n readinessProbe:\n initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }}\n exec:\n command:\n - sh\n - -c\n - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }}\n {{- end }}\n resources:\n{{ toYaml .Values.sentinel.resources | indent 10 }}\n volumeMounts:\n - name: health\n mountPath: /health\n {{- if .Values.usePasswordFile }}\n - name: redis-password\n mountPath: /opt/bitnami/redis/secrets/\n {{- end }}\n - name: redis-data\n mountPath: {{ .Values.master.persistence.path }}\n subPath: {{ .Values.master.persistence.subPath }}\n - name: config\n mountPath: /opt/bitnami/redis-sentinel/mounted-etc\n - name: sentinel-tmp-conf\n mountPath: /opt/bitnami/redis-sentinel/etc/\n {{- end }}\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"redis.metrics.image\" . 
}}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n command:\n - /bin/bash\n - -c\n - |\n if [[ -f '/secrets/redis-password' ]]; then\n export REDIS_PASSWORD=$(cat /secrets/redis-password)\n fi\n redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }}\n env:\n - name: REDIS_ALIAS\n value: {{ template \"redis.fullname\" . }}\n {{- if and .Values.usePassword (not .Values.usePasswordFile) }}\n - name: REDIS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"redis.secretName\" . }}\n key: {{ template \"redis.secretPasswordKey\" . }}\n {{- end }}\n volumeMounts:\n {{- if .Values.usePasswordFile }}\n - name: redis-password\n mountPath: /secrets/\n {{- end }}\n ports:\n - name: metrics\n containerPort: 9121\n resources:\n{{ toYaml .Values.metrics.resources | indent 10 }}\n{{- end }}\n {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and ( and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) ) .Values.securityContext.enabled) }}\n {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }}\n initContainers:\n {{- if $needsVolumePermissions }}\n - name: volume-permissions\n image: \"{{ template \"redis.volumePermissions.image\" . }}\"\n imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}\n command: [\"/bin/chown\", \"-R\", \"{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}\", \"{{ .Values.master.persistence.path }}\"]\n securityContext:\n runAsUser: 0\n resources:\n{{ toYaml .Values.volumePermissions.resources | indent 10 }}\n volumeMounts:\n - name: redis-data\n mountPath: {{ .Values.master.persistence.path }}\n subPath: {{ .Values.master.persistence.subPath }}\n {{- end }}\n {{- if .Values.sysctlImage.enabled }}\n - name: init-sysctl\n image: {{ template \"redis.sysctl.image\" . }}\n imagePullPolicy: {{ default \"\" .Values.sysctlImage.pullPolicy | quote }}\n resources:\n{{ toYaml .Values.sysctlImage.resources | indent 10 }}\n {{- if .Values.sysctlImage.mountHostSys }}\n volumeMounts:\n - name: host-sys\n mountPath: /host-sys\n {{- end }}\n command:\n{{ toYaml .Values.sysctlImage.command | indent 10 }}\n securityContext:\n privileged: true\n runAsUser: 0\n {{- end }}\n {{- end }}\n volumes:\n - name: health\n configMap:\n name: {{ template \"redis.fullname\" . }}-health\n defaultMode: 0755\n {{- if .Values.usePasswordFile }}\n - name: redis-password\n secret:\n secretName: {{ template \"redis.secretName\" . }}\n items:\n - key: {{ template \"redis.secretPasswordKey\" . }}\n path: redis-password\n {{- end }}\n - name: config\n configMap:\n name: {{ template \"redis.fullname\" . }}\n {{- if not .Values.master.persistence.enabled }}\n - name: \"redis-data\"\n emptyDir: {}\n {{- else }}\n {{- if .Values.persistence.existingClaim }}\n - name: \"redis-data\"\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.existingClaim }}\n {{- end }}\n {{- end }}\n {{- if .Values.sysctlImage.mountHostSys }}\n - name: host-sys\n hostPath:\n path: /sys\n {{- end }}\n - name: redis-tmp-conf\n emptyDir: {}\n {{- if and .Values.cluster.enabled .Values.sentinel.enabled }}\n - name: sentinel-tmp-conf\n emptyDir: {}\n {{- end }}\n {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) }}\n volumeClaimTemplates:\n - metadata:\n name: redis-data\n labels:\n app: {{ template \"redis.name\" . 
}}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: master\n spec:\n accessModes:\n {{- range .Values.master.persistence.accessModes }}\n - {{ . | quote }}\n {{- end }}\n resources:\n requests:\n storage: {{ .Values.master.persistence.size | quote }}\n {{ include \"redis.master.storageClass\" . }}\n selector:\n {{- if .Values.master.persistence.matchLabels }}\n matchLabels:\n{{ toYaml .Values.master.persistence.matchLabels | indent 12 }}\n {{- end -}}\n {{- if .Values.master.persistence.matchExpressions }}\n matchExpressions:\n{{ toYaml .Values.master.persistence.matchExpressions | indent 12 }}\n {{- end -}}\n {{- end }}\n updateStrategy:\n type: {{ .Values.master.statefulset.updateStrategy }}\n {{- if .Values.master.statefulset.rollingUpdatePartition }}\n {{- if (eq \"Recreate\" .Values.master.statefulset.updateStrategy) }}\n rollingUpdate: null\n {{- else }}\n rollingUpdate:\n partition: {{ .Values.master.statefulset.rollingUpdatePartition }}\n {{- end }}\n {{- end }}\n",
"# redis-master-svc.yaml\n{{- if not .Values.sentinel.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"redis.fullname\" . }}-master\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- if .Values.master.service.labels -}}\n {{ toYaml .Values.master.service.labels | nindent 4 }}\n {{- end -}}\n{{- if .Values.master.service.annotations }}\n annotations: {{ toYaml .Values.master.service.annotations | nindent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.master.service.type }}\n {{- if and (eq .Values.master.service.type \"LoadBalancer\") .Values.master.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.master.service.loadBalancerIP }}\n {{- end }}\n {{- if and (eq .Values.master.service.type \"LoadBalancer\") .Values.master.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n {{- with .Values.master.service.loadBalancerSourceRanges }}\n{{ toYaml . | indent 4 }}\n{{- end }}\n {{- end }}\n ports:\n - name: redis\n port: {{ .Values.master.service.port }}\n targetPort: redis\n {{- if .Values.master.service.nodePort }}\n nodePort: {{ .Values.master.service.nodePort }}\n {{- end }}\n selector:\n app: {{ template \"redis.name\" . }}\n release: {{ .Release.Name }}\n role: master\n{{- end }}\n",
"# redis-role.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: {{ template \"redis.fullname\" . }}\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nrules:\n{{- if .Values.podSecurityPolicy.create }}\n - apiGroups: ['{{ template \"podSecurityPolicy.apiGroup\" . }}']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames: [{{ template \"redis.fullname\" . }}]\n{{- end -}}\n{{- if .Values.rbac.role.rules }}\n{{ toYaml .Values.rbac.role.rules | indent 2 }}\n{{- end -}}\n{{- end -}}\n",
"# redis-rolebinding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: {{ template \"redis.fullname\" . }}\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"redis.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"redis.serviceAccountName\" . }}\n{{- end -}}\n",
"# redis-serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"redis.serviceAccountName\" . }}\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- end -}}\n",
"# redis-slave-statefulset.yaml\n{{- if .Values.cluster.enabled }}\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ template \"redis.fullname\" . }}-slave\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n{{- if .Values.slave.updateStrategy }}\n strategy:\n{{ toYaml .Values.slave.updateStrategy | indent 4 }}\n{{- end }}\n replicas: {{ .Values.cluster.slaveCount }}\n serviceName: {{ template \"redis.fullname\" . }}-headless\n selector:\n matchLabels:\n app: {{ template \"redis.name\" . }}\n release: {{ .Release.Name }}\n role: slave\n template:\n metadata:\n labels:\n app: {{ template \"redis.name\" . }}\n release: {{ .Release.Name }}\n chart: {{ template \"redis.chart\" . }}\n role: slave\n {{- if .Values.slave.podLabels }}\n{{ toYaml .Values.slave.podLabels | indent 8 }}\n {{- end }}\n {{- if and .Values.metrics.enabled .Values.metrics.podLabels }}\n{{ toYaml .Values.metrics.podLabels | indent 8 }}\n {{- end }}\n annotations:\n checksum/health: {{ include (print $.Template.BasePath \"/health-configmap.yaml\") . | sha256sum }}\n checksum/configmap: {{ include (print $.Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n checksum/secret: {{ include (print $.Template.BasePath \"/secret.yaml\") . | sha256sum }}\n {{- if .Values.slave.podAnnotations }}\n{{ toYaml .Values.slave.podAnnotations | indent 8 }}\n {{- end }}\n {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }}\n{{ toYaml .Values.metrics.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n{{- include \"redis.imagePullSecrets\" . | indent 6 }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.securityContext.fsGroup }}\n {{- if .Values.securityContext.sysctls }}\n sysctls:\n{{ toYaml .Values.securityContext.sysctls | indent 8 }}\n {{- end }}\n {{- end }}\n serviceAccountName: \"{{ template \"redis.serviceAccountName\" . }}\"\n {{- if .Values.slave.priorityClassName }}\n priorityClassName: \"{{ .Values.slave.priorityClassName }}\"\n {{- end }}\n {{- if .Values.slave.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.slave.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.slave.tolerations }}\n tolerations:\n{{ toYaml .Values.slave.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.slave.schedulerName }}\n schedulerName: \"{{ .Values.slave.schedulerName }}\"\n {{- end }}\n {{- with .Values.slave.affinity }}\n affinity:\n{{ tpl (toYaml .) $ | indent 8 }}\n {{- end }}\n containers:\n - name: {{ template \"redis.fullname\" . }}\n image: {{ template \"redis.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n command:\n - /bin/bash\n - -c\n - |\n {{- if (eq (.Values.securityContext.runAsUser | int) 0) }}\n useradd redis\n chown -R redis {{ .Values.slave.persistence.path }}\n {{- end }}\n if [[ -n $REDIS_PASSWORD_FILE ]]; then\n password_aux=`cat ${REDIS_PASSWORD_FILE}`\n export REDIS_PASSWORD=$password_aux\n fi\n if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then\n password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}`\n export REDIS_MASTER_PASSWORD=$password_aux\n fi\n if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then\n cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf\n fi\n if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then\n cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf\n fi\n ARGS=(\"--port\" \"${REDIS_PORT}\")\n ARGS+=(\"--slaveof\" \"${REDIS_MASTER_HOST}\" \"${REDIS_MASTER_PORT_NUMBER}\")\n {{- if .Values.usePassword }}\n ARGS+=(\"--requirepass\" \"${REDIS_PASSWORD}\")\n ARGS+=(\"--masterauth\" \"${REDIS_MASTER_PASSWORD}\")\n {{- else }}\n ARGS+=(\"--protected-mode\" \"no\")\n {{- end }}\n ARGS+=(\"--include\" \"/opt/bitnami/redis/etc/redis.conf\")\n ARGS+=(\"--include\" \"/opt/bitnami/redis/etc/replica.conf\")\n {{- if .Values.slave.extraFlags }}\n {{- range .Values.slave.extraFlags }}\n ARGS+=({{ . | quote }})\n {{- end }}\n {{- end }}\n {{- if .Values.slave.command }}\n {{ .Values.slave.command }} \"${ARGS[@]}\"\n {{- else }}\n redis-server \"${ARGS[@]}\"\n {{- end }}\n env:\n - name: REDIS_REPLICATION_MODE\n value: slave\n - name: REDIS_MASTER_HOST\n value: {{ template \"redis.fullname\" . }}-master-0.{{ template \"redis.fullname\" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}\n - name: REDIS_PORT\n value: {{ .Values.redisPort | quote }}\n - name: REDIS_MASTER_PORT_NUMBER\n value: {{ .Values.redisPort | quote }}\n {{- if .Values.usePassword }}\n {{- if .Values.usePasswordFile }}\n - name: REDIS_PASSWORD_FILE\n value: \"/opt/bitnami/redis/secrets/redis-password\"\n - name: REDIS_MASTER_PASSWORD_FILE\n value: \"/opt/bitnami/redis/secrets/redis-password\"\n {{- else }}\n - name: REDIS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"redis.secretName\" . }}\n key: {{ template \"redis.secretPasswordKey\" . }}\n - name: REDIS_MASTER_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"redis.secretName\" . }}\n key: {{ template \"redis.secretPasswordKey\" . 
}}\n {{- end }}\n {{- else }}\n - name: ALLOW_EMPTY_PASSWORD\n value: \"yes\"\n {{- end }}\n ports:\n - name: redis\n containerPort: {{ .Values.redisPort }}\n {{- if .Values.slave.livenessProbe.enabled }}\n livenessProbe:\n initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.slave.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}}\n exec:\n command:\n - sh\n - -c\n {{- if .Values.sentinel.enabled }}\n - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }}\n {{- else }}\n - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }}\n {{- end }}\n {{- end }}\n\n {{- if .Values.slave.readinessProbe.enabled }}\n readinessProbe:\n initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.slave.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }}\n exec:\n command:\n - sh\n - -c\n {{- if .Values.sentinel.enabled }}\n - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }}\n {{- else }}\n - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }}\n {{- end }}\n {{- end }}\n resources:\n{{ toYaml .Values.slave.resources | indent 10 }}\n volumeMounts:\n - name: health\n mountPath: /health\n {{- if .Values.usePasswordFile }}\n - name: redis-password\n mountPath: /opt/bitnami/redis/secrets/\n {{- end }}\n - name: redis-data\n mountPath: /data\n - name: config\n mountPath: /opt/bitnami/redis/mounted-etc\n - name: redis-tmp-conf\n mountPath: /opt/bitnami/redis/etc\n {{- if and .Values.cluster.enabled .Values.sentinel.enabled }}\n - name: sentinel\n image: \"{{ template \"sentinel.image\" . }}\"\n imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n command:\n - /bin/bash\n - -c\n - |\n if [[ -n $REDIS_PASSWORD_FILE ]]; then\n password_aux=`cat ${REDIS_PASSWORD_FILE}`\n export REDIS_PASSWORD=$password_aux\n fi\n if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]];then\n cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf\n {{- if .Values.usePassword }}\n printf \"\\nsentinel auth-pass {{ .Values.sentinel.masterSet }} $REDIS_PASSWORD\" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf\n {{- if .Values.sentinel.usePassword }}\n printf \"\\nrequirepass $REDIS_PASSWORD\" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf\n {{- end }}\n {{- end }}\n {{- if .Values.sentinel.staticID }}\n printf \"\\nsentinel myid $(echo $HOSTNAME | openssl sha1 | awk '{ print $2 }')\" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf\n {{- end }}\n fi\n\n redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel\n env:\n {{- if .Values.usePassword }}\n {{- if .Values.usePasswordFile }}\n - name: REDIS_PASSWORD_FILE\n value: \"/opt/bitnami/redis/secrets/redis-password\"\n {{- else }}\n - name: REDIS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"redis.secretName\" . }}\n key: {{ template \"redis.secretPasswordKey\" . 
}}\n {{- end }}\n {{- else }}\n - name: ALLOW_EMPTY_PASSWORD\n value: \"yes\"\n {{- end }}\n - name: REDIS_SENTINEL_PORT\n value: {{ .Values.sentinel.port | quote }}\n ports:\n - name: redis-sentinel\n containerPort: {{ .Values.sentinel.port }}\n {{- if .Values.sentinel.livenessProbe.enabled }}\n livenessProbe:\n initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }}\n exec:\n command:\n - sh\n - -c\n - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }}\n {{- end }}\n {{- if .Values.sentinel.readinessProbe.enabled}}\n readinessProbe:\n initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }}\n exec:\n command:\n - sh\n - -c\n - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }}\n {{- end }}\n resources:\n{{ toYaml .Values.sentinel.resources | indent 10 }}\n volumeMounts:\n - name: health\n mountPath: /health\n {{- if .Values.usePasswordFile }}\n - name: redis-password\n mountPath: /opt/bitnami/redis/secrets/\n {{- end }}\n - name: redis-data\n mountPath: {{ .Values.master.persistence.path }}\n subPath: {{ .Values.master.persistence.subPath }}\n - name: config\n mountPath: /opt/bitnami/redis-sentinel/mounted-etc\n - name: sentinel-tmp-conf\n mountPath: /opt/bitnami/redis-sentinel/etc\n {{- end }}\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"redis.metrics.image\" . }}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n command:\n - /bin/bash\n - -c\n - |\n if [[ -f '/secrets/redis-password' ]]; then\n export REDIS_PASSWORD=$(cat /secrets/redis-password)\n fi\n redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }}\n env:\n - name: REDIS_ALIAS\n value: {{ template \"redis.fullname\" . }}\n {{- if and .Values.usePassword (not .Values.usePasswordFile) }}\n - name: REDIS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"redis.secretName\" . }}\n key: {{ template \"redis.secretPasswordKey\" . }}\n {{- end }}\n volumeMounts:\n {{- if .Values.usePasswordFile }}\n - name: redis-password\n mountPath: /secrets/\n {{- end }}\n ports:\n - name: metrics\n containerPort: 9121\n resources:\n{{ toYaml .Values.metrics.resources | indent 10 }}\n{{- end }}\n {{- $needsVolumePermissions := and .Values.volumePermissions.enabled (and .Values.slave.persistence.enabled .Values.securityContext.enabled) }}\n {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }}\n initContainers:\n {{- if $needsVolumePermissions }}\n - name: volume-permissions\n image: \"{{ template \"redis.volumePermissions.image\" . 
}}\"\n imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}\n command: [\"/bin/chown\", \"-R\", \"{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}\", \"{{ .Values.slave.persistence.path }}\"]\n securityContext:\n runAsUser: 0\n resources:\n{{ toYaml .Values.volumePermissions.resources | indent 10 }}\n volumeMounts:\n - name: redis-data\n mountPath: {{ .Values.slave.persistence.path }}\n subPath: {{ .Values.slave.persistence.subPath }}\n {{- end }}\n {{- if .Values.sysctlImage.enabled }}\n - name: init-sysctl\n image: {{ template \"redis.sysctl.image\" . }}\n imagePullPolicy: {{ default \"\" .Values.sysctlImage.pullPolicy | quote }}\n resources:\n{{ toYaml .Values.sysctlImage.resources | indent 10 }}\n {{- if .Values.sysctlImage.mountHostSys }}\n volumeMounts:\n - name: host-sys\n mountPath: /host-sys\n {{- end }}\n command:\n{{ toYaml .Values.sysctlImage.command | indent 10 }}\n securityContext:\n privileged: true\n runAsUser: 0\n {{- end }}\n {{- end }}\n volumes:\n - name: health\n configMap:\n name: {{ template \"redis.fullname\" . }}-health\n defaultMode: 0755\n {{- if .Values.usePasswordFile }}\n - name: redis-password\n secret:\n secretName: {{ template \"redis.secretName\" . }}\n items:\n - key: {{ template \"redis.secretPasswordKey\" . }}\n path: redis-password\n {{- end }}\n - name: config\n configMap:\n name: {{ template \"redis.fullname\" . }}\n {{- if .Values.sysctlImage.mountHostSys }}\n - name: host-sys\n hostPath:\n path: /sys\n {{- end }}\n - name: sentinel-tmp-conf\n emptyDir: {}\n - name: redis-tmp-conf\n emptyDir: {}\n {{- if not .Values.slave.persistence.enabled }}\n - name: redis-data\n emptyDir: {}\n {{- else }}\n volumeClaimTemplates:\n - metadata:\n name: redis-data\n labels:\n app: {{ template \"redis.name\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: slave\n spec:\n accessModes:\n {{- range .Values.slave.persistence.accessModes }}\n - {{ . | quote }}\n {{- end }}\n resources:\n requests:\n storage: {{ .Values.slave.persistence.size | quote }}\n {{ include \"redis.slave.storageClass\" . }}\n selector:\n {{- if .Values.slave.persistence.matchLabels }}\n matchLabels:\n{{ toYaml .Values.slave.persistence.matchLabels | indent 12 }}\n {{- end -}}\n {{- if .Values.slave.persistence.matchExpressions }}\n matchExpressions:\n{{ toYaml .Values.slave.persistence.matchExpressions | indent 12 }}\n {{- end -}}\n {{- end }}\n updateStrategy:\n type: {{ .Values.slave.statefulset.updateStrategy }}\n {{- if .Values.slave.statefulset.rollingUpdatePartition }}\n {{- if (eq \"Recreate\" .Values.slave.statefulset.updateStrategy) }}\n rollingUpdate: null\n {{- else }}\n rollingUpdate:\n partition: {{ .Values.slave.statefulset.rollingUpdatePartition }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# redis-slave-svc.yaml\n{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"redis.fullname\" . }}-slave\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- if .Values.slave.service.labels -}}\n {{ toYaml .Values.slave.service.labels | nindent 4 }}\n {{- end -}}\n{{- if .Values.slave.service.annotations }}\n annotations:\n{{ toYaml .Values.slave.service.annotations | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.slave.service.type }}\n {{- if and (eq .Values.slave.service.type \"LoadBalancer\") .Values.slave.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }}\n {{- end }}\n {{- if and (eq .Values.slave.service.type \"LoadBalancer\") .Values.slave.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n {{- with .Values.slave.service.loadBalancerSourceRanges }}\n{{ toYaml . | indent 4 }}\n{{- end }}\n {{- end }}\n ports:\n - name: redis\n port: {{ .Values.slave.service.port }}\n targetPort: redis\n {{- if .Values.slave.service.nodePort }}\n nodePort: {{ .Values.slave.service.nodePort }}\n {{- end }}\n selector:\n app: {{ template \"redis.name\" . }}\n release: {{ .Release.Name }}\n role: slave\n{{- end }}\n",
"# redis-with-sentinel-svc.yaml\n{{- if .Values.sentinel.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"redis.fullname\" . }}\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- if .Values.sentinel.service.labels }}\n {{ toYaml .Values.sentinel.service.labels | nindent 4 }}\n {{- end }}\n{{- if .Values.sentinel.service.annotations }}\n annotations:\n{{ toYaml .Values.sentinel.service.annotations | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.sentinel.service.type }}\n {{ if eq .Values.sentinel.service.type \"LoadBalancer\" -}} {{ if .Values.sentinel.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }}\n {{ end -}}\n {{- end -}}\n ports:\n - name: redis\n port: {{ .Values.sentinel.service.redisPort }}\n targetPort: redis\n {{- if .Values.sentinel.service.redisNodePort }}\n nodePort: {{ .Values.sentinel.service.redisNodePort }}\n {{- end }}\n - name: redis-sentinel\n port: {{ .Values.sentinel.service.sentinelPort }}\n targetPort: redis-sentinel\n {{- if .Values.sentinel.service.sentinelNodePort }}\n nodePort: {{ .Values.sentinel.service.sentinelNodePort }}\n {{- end }}\n selector:\n app: {{ template \"redis.name\" . }}\n release: {{ .Release.Name }}\n{{- end }}\n",
"# secret.yaml\n{{- if and .Values.usePassword (not .Values.existingSecret) -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"redis.fullname\" . }}\n labels:\n app: {{ template \"redis.name\" . }}\n chart: {{ template \"redis.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n redis-password: {{ include \"redis.password\" . | b64enc | quote }}\n{{- end -}}\n"
] | ## Global Docker image parameters
## Please note that this will override the image parameters, including those of dependencies, configured to use the global value
## Currently available global Docker image parameters: imageRegistry and imagePullSecrets
##
global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
redis: {}
## Bitnami Redis image version
## ref: https://hub.docker.com/r/bitnami/redis/tags/
##
image:
registry: docker.io
repository: bitnami/redis
## Bitnami Redis image tag
## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links
##
tag: 5.0.7-debian-10-r32
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override redis.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override redis.fullname template
##
# fullnameOverride:
## Cluster settings
cluster:
enabled: true
slaveCount: 2
## Use Redis Sentinel in the Redis pods. This disables the separate master and slave
## services and creates a single Redis service exposing both the Redis and Sentinel ports;
## see the commented sketch below.
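## A minimal sketch of enabling Sentinel (illustrative override values, not the chart defaults):
# cluster:
#   enabled: true
#   slaveCount: 3
# sentinel:
#   enabled: true
#   masterSet: mymaster
#   quorum: 2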
sentinel:
enabled: false
## Require password authentication on the sentinel itself
## ref: https://redis.io/topics/sentinel
usePassword: true
## Bitnami Redis Sentinel image version
## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/
##
image:
registry: docker.io
repository: bitnami/redis-sentinel
## Bitnami Redis Sentinel image tag
## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links
##
tag: 5.0.7-debian-10-r27
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
masterSet: mymaster
initialCheckTimeout: 5
quorum: 2
downAfterMilliseconds: 60000
failoverTimeout: 18000
parallelSyncs: 1
port: 26379
## Additional Redis configuration for the sentinel nodes
## ref: https://redis.io/topics/config
##
configmap:
## Enable or disable static sentinel IDs for each replica
## If disabled, each sentinel will generate a random ID at startup
## If enabled, each replica will keep a constant ID across restarts
##
staticID: false
## Configure extra options for Redis Sentinel liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 5
## Redis Sentinel resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
## Redis Sentinel Service properties
service:
## Redis Sentinel Service type
type: ClusterIP
sentinelPort: 26379
redisPort: 6379
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# sentinelNodePort:
# redisNodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels: {}
loadBalancerIP:
## Specifies the Kubernetes Cluster's Domain Name.
##
clusterDomain: cluster.local
networkPolicy:
## Specifies whether a NetworkPolicy should be created
##
enabled: false
## The Policy model to apply. When set to false, only pods with the correct
## client label will have network access to the port Redis is listening
## on. When true, Redis will accept connections from any source
## (with the correct destination port).
##
# allowExternal: true
## Allow connections from other namespaces. Set the label on the namespace and, optionally, on the pods.
##
ingressNSMatchLabels: {}
ingressNSPodMatchLabels: {}
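## For example, to admit traffic from pods labelled redis-client=true running in
## namespaces labelled redis-access=true (label names are illustrative):
# ingressNSMatchLabels:
#   redis-access: "true"
# ingressNSPodMatchLabels:
#   redis-client: "true"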
serviceAccount:
## Specifies whether a ServiceAccount should be created
##
create: false
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the fullname template
name:
rbac:
## Specifies whether RBAC resources should be created
##
create: false
role:
## Rules to create. They follow the Kubernetes Role specification
# rules:
# - apiGroups:
# - extensions
# resources:
# - podsecuritypolicies
# verbs:
# - use
# resourceNames:
# - gce.unprivileged
rules: []
## Redis pod Security Context
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## sysctl settings for master and slave pods
##
## Uncomment the setting below to increase the net.core.somaxconn value
##
# sysctls:
# - name: net.core.somaxconn
# value: "10000"
## Use password authentication
usePassword: true
## Redis password (both master and slave)
## Defaults to a random 10-character alphanumeric string if not set and usePassword is true
## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run
##
password: ""
## Use existing secret (ignores previous password)
# existingSecret:
## Password key to be retrieved from Redis secret
##
# existingSecretPasswordKey:
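## For example, to take the password from a pre-created secret instead of the
## `password` value above (secret and key names below are hypothetical):
# existingSecret: my-redis-secret
# existingSecretPasswordKey: redis-password
## which could be created beforehand with:
## kubectl create secret generic my-redis-secret --from-literal=redis-password=s3cr3t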
## Mount secrets as files instead of environment variables
usePasswordFile: false
## Persist data to a persistent volume (Redis Master)
persistence: {}
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, the PVC must be created manually before the volume will be bound
# existingClaim:
# Redis port
redisPort: 6379
##
## Redis Master parameters
##
master:
## Redis command arguments
##
## Can be used to specify command line arguments, for example:
##
command: "/run.sh"
## Additional Redis configuration for the master nodes
## ref: https://redis.io/topics/config
##
configmap:
## Redis additional command line flags
##
## Can be used to specify command line flags, for example:
##
## extraFlags:
## - "--maxmemory-policy volatile-ttl"
## - "--repl-backlog-size 1024mb"
extraFlags: []
## List of Redis commands to disable
##
## Can be used to disable Redis commands for security reasons.
## Commands will be completely disabled by renaming each to an empty string.
## ref: https://redis.io/topics/security#disabling-of-specific-commands
##
disableCommands:
- FLUSHDB
- FLUSHALL
## Redis Master additional pod labels and annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: {}
podAnnotations: {}
## Redis Master resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Configure extra options for Redis Master liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 5
## Redis Master Node selectors and tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
##
# nodeSelector: {"beta.kubernetes.io/arch": "amd64"}
# tolerations: []
## Redis Master pod/node affinity/anti-affinity
##
affinity: {}
## Redis Master Service properties
service:
## Redis Master Service type
type: ClusterIP
port: 6379
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels: {}
loadBalancerIP:
# loadBalancerSourceRanges: ["10.0.0.0/8"]
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## The path the volume will be mounted at, useful when using different
## Redis images.
path: /data
## The subdirectory of the volume to mount to; useful in dev environments
## or when sharing one PV across multiple services.
subPath: ""
## redis data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
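## For example, to request a named class explicitly (the class name is illustrative):
# storageClass: "fast-ssd"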
accessModes:
- ReadWriteOnce
size: 8Gi
## Persistent Volume selectors
## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector
matchLabels: {}
matchExpressions: {}
## Update strategy; can be set to RollingUpdate (the default) or OnDelete.
## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
statefulset:
updateStrategy: RollingUpdate
## Partition update strategy
## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
# rollingUpdatePartition:
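## For example, setting the partition to 1 updates only pods with an ordinal >= 1,
## leaving pod 0 on the previous revision (the value is illustrative):
# rollingUpdatePartition: 1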
## Redis Master pod priorityClassName
# priorityClassName: {}
##
## Redis Slave properties
## Note: service.type is a mandatory parameter
## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis Master
##
slave:
## Slave Service properties
service:
## Redis Slave Service type
type: ClusterIP
## Redis port
port: 6379
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels: {}
loadBalancerIP:
# loadBalancerSourceRanges: ["10.0.0.0/8"]
## Redis slave port
port: 6379
## Can be used to specify command line arguments, for example:
##
command: "/run.sh"
## Additional Redis configuration for the slave nodes
## ref: https://redis.io/topics/config
##
configmap:
## Redis extra flags
extraFlags: []
## List of Redis commands to disable
disableCommands:
- FLUSHDB
- FLUSHALL
## Redis Slave pod/node affinity/anti-affinity
##
affinity: {}
## Configure extra options for Redis Slave liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 10
successThreshold: 1
failureThreshold: 5
## Redis Slave resource requests and limits
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
## Redis slave selectors and tolerations for pod assignment
# nodeSelector: {"beta.kubernetes.io/arch": "amd64"}
# tolerations: []
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Redis slave pod Annotation and Labels
podLabels: {}
podAnnotations: {}
## Redis slave pod priorityClassName
# priorityClassName: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## The path the volume will be mounted at, useful when using different
## Redis images.
path: /data
## The subdirectory of the volume to mount to; useful in dev environments
## or when sharing one PV across multiple services.
subPath: ""
## redis data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessModes:
- ReadWriteOnce
size: 8Gi
## Persistent Volume selectors
## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector
matchLabels: {}
matchExpressions: {}
## Update strategy; can be set to RollingUpdate (the default) or OnDelete.
## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
statefulset:
updateStrategy: RollingUpdate
## Partition update strategy
## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
# rollingUpdatePartition:
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/redis-exporter
tag: 1.4.0-debian-10-r3
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
## Extra arguments for Metrics exporter, for example:
## extraArgs:
## check-keys: myKey,myOtherKey
# extraArgs: {}
## Metrics exporter pod Annotation and Labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9121"
# podLabels: {}
# Enable this if you're using https://github.com/coreos/prometheus-operator
serviceMonitor:
enabled: false
## Specify a namespace if needed
# namespace: monitoring
# fallback to the prometheus default unless specified
# interval: 10s
## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr)
## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1)
## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters)
selector:
prometheus: kube-prometheus
## Custom PrometheusRule to be defined
## The value is evaluated as a template, so it can depend on, for example, .Release or .Chart
## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
prometheusRule:
enabled: false
additionalLabels: {}
namespace: ""
rules: []
## These are just example rules; please adapt them to your needs.
## Make sure to constrain the rules to the current redis service.
# - alert: RedisDown
# expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0
# for: 2m
# labels:
# severity: error
# annotations:
# summary: Redis instance {{ "{{ $instance }}" }} down
# description: Redis instance {{ "{{ $instance }}" }} is down.
# - alert: RedisMemoryHigh
# expr: >
# redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100
# /
# redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"}
# > 90 <= 100
# for: 2m
# labels:
# severity: error
# annotations:
# summary: Redis instance {{ "{{ $instance }}" }} is using too much memory
# description: Redis instance {{ "{{ $instance }}" }} is using {{ "{{ $value }}" }}% of its available memory.
# - alert: RedisKeyEviction
# expr: increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0
# for: 1s
# labels:
# severity: error
# annotations:
# summary: Redis instance {{ "{{ $instance }}" }} has evicted keys
# description: Redis instance {{ "{{ $instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes.
## Metrics exporter pod priorityClassName
# priorityClassName: {}
service:
type: ClusterIP
## Use loadBalancerIP to request a specific static IP,
## otherwise leave blank
# loadBalancerIP:
annotations: {}
labels: {}
##
## Init containers parameters:
## volumePermissions: change the owner of the persistent volume mountpoint to runAsUser:fsGroup
##
volumePermissions:
enabled: false
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
resources: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
## Redis config file
## ref: https://redis.io/topics/config
##
configmap: |-
# Enable AOF https://redis.io/topics/persistence#append-only-file
appendonly yes
# Disable RDB persistence, AOF persistence already enabled.
save ""
## Sysctl InitContainer
## used to perform sysctl operations to modify kernel settings (sometimes needed to avoid warnings)
sysctlImage:
enabled: false
command: []
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
mountHostSys: false
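## For example, a sketch that raises net.core.somaxconn at pod start from the
## privileged init container (the command and value are illustrative):
# enabled: true
# command:
#   - /bin/sh
#   - -c
#   - echo 10000 > /proc/sys/net/core/somaxconn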
resources: {}
# resources:
# requests:
# memory: 128Mi
# cpu: 100m
## PodSecurityPolicy configuration
## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
##
podSecurityPolicy:
## Specifies whether a PodSecurityPolicy should be created
##
create: false
|
prometheus-rabbitmq-exporter | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-rabbitmq-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"prometheus-rabbitmq-exporter.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"prometheus-rabbitmq-exporter.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"prometheus-rabbitmq-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-rabbitmq-exporter.name\" . }}\n chart: {{ template \"prometheus-rabbitmq-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"prometheus-rabbitmq-exporter.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"prometheus-rabbitmq-exporter.name\" . }}\n release: {{ .Release.Name }}\n annotations:\n{{ toYaml .Values.annotations | indent 8 }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n - name: RABBIT_URL\n value: \"{{ .Values.rabbitmq.url }}\"\n - name: RABBIT_USER\n value: \"{{ .Values.rabbitmq.user }}\"\n {{- if .Values.rabbitmq.existingPasswordSecret }}\n - name: RABBIT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: \"{{ .Values.rabbitmq.existingPasswordSecret }}\"\n key: password\n {{- else }}\n - name: RABBIT_PASSWORD\n value: \"{{ .Values.rabbitmq.password }}\"\n {{- end }}\n - name: PUBLISH_PORT\n value: \"{{ .Values.service.internalPort }}\"\n - name: LOG_LEVEL\n value: \"{{ .Values.loglevel }}\"\n - name: RABBIT_CAPABILITIES\n value: \"{{ .Values.rabbitmq.capabilities }}\"\n - name: INCLUDE_QUEUES\n value: \"{{ .Values.rabbitmq.include_queues }}\"\n - name: INCLUDE_VHOST\n value: \"{{ .Values.rabbitmq.include_vhost }}\"\n - name: SKIP_QUEUES\n value: \"{{ .Values.rabbitmq.skip_queues }}\"\n - name: SKIPVERIFY\n value: \"{{ .Values.rabbitmq.skip_verify }}\"\n - name: SKIP_VHOST\n value: \"{{ .Values.rabbitmq.skip_vhost }}\"\n - name: RABBIT_EXPORTERS\n value: \"{{ .Values.rabbitmq.exporters }}\"\n - name: OUTPUT_FORMAT\n value: \"{{ .Values.rabbitmq.output_format }}\"\n - name: RABBIT_TIMEOUT\n value: \"{{ .Values.rabbitmq.timeout }}\"\n - name: MAX_QUEUES\n value: \"{{ .Values.rabbitmq.max_queues }}\"\n ports:\n - containerPort: {{ .Values.service.internalPort }}\n name: publish\n livenessProbe:\n httpGet:\n path: /\n port: publish\n readinessProbe:\n httpGet:\n path: /\n port: publish\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n securityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop: [\"all\"]\n readOnlyRootFilesystem: true\n runAsNonRoot: true\n runAsUser: 10002\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# prometheusrule.yaml\n{{- if .Values.prometheus.rules.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n name: {{ template \"prometheus-rabbitmq-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-rabbitmq-exporter.name\" . }}\n chart: {{ template \"prometheus-rabbitmq-exporter.chart\" . }}\n heritage: {{ .Release.Service }}\n {{- if .Values.prometheus.rules.additionalLabels }}\n{{ toYaml .Values.prometheus.rules.additionalLabels | indent 4 }}\n {{- end }}\nspec:\n groups:\n - name: rabbitmq\n rules:\n - alert: RabbitmqDown\n expr: |\n absent(rabbitmq_up == 1)\n for: 1m\n labels:\n severity: critical\n - alert: RabbitmqNotRunning\n expr: |\n rabbitmq_running{self=\"1\"} != 1\n for: 1m\n labels:\n severity: critical\n - alert: RabbitmqMessages\n expr: |\n rabbitmq_queue_messages > 250\n for: 10m\n labels:\n severity: warning\n - alert: RabbitmqMessages\n expr: |\n rabbitmq_queue_messages > 500\n for: 10m\n labels:\n severity: critical\n - alert: RabbitmqPartition\n expr: |\n rabbitmq_partitions{self=\"1\"} != 0\n for: 1m\n labels:\n severity: critical\n{{- end }}",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"prometheus-rabbitmq-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-rabbitmq-exporter.name\" . }}\n chart: {{ template \"prometheus-rabbitmq-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.externalPort }}\n targetPort: publish\n protocol: TCP\n name: rabbitmq-exporter\n selector:\n app: {{ template \"prometheus-rabbitmq-exporter.name\" . }}\n release: {{ .Release.Name }}\n",
"# servicemonitor.yaml\n{{- if .Values.prometheus.monitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ template \"prometheus-rabbitmq-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-rabbitmq-exporter.name\" . }}\n chart: {{ template \"prometheus-rabbitmq-exporter.chart\" . }}\n heritage: {{ .Release.Service }}\n {{- if .Values.prometheus.monitor.additionalLabels }}\n{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }}\n {{- end }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"prometheus-rabbitmq-exporter.name\" . }}\n release: {{ .Release.Name }}\n endpoints:\n - port: rabbitmq-exporter\n {{- if .Values.prometheus.monitor.interval }}\n interval: {{ .Values.prometheus.monitor.interval }}\n {{- end }}\n {{- if .Values.prometheus.monitor.namespace }}\n namespaceSelector:\n matchNames:\n {{- range .Values.prometheus.monitor.namespace }}\n - {{ . }}\n {{- end }}\n {{- end }}\n{{- end }}\n"
] | # Default values for prometheus-rabbitmq-exporter.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: kbudde/rabbitmq-exporter
tag: v0.29.0
pullPolicy: IfNotPresent
service:
type: ClusterIP
externalPort: 9419
internalPort: 9419
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
loglevel: info
rabbitmq:
url: http://myrabbit:15672
user: guest
password: guest
# If existingPasswordSecret is set then password is ignored
existingPasswordSecret: ~
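# For example (hypothetical secret name; the chart reads the key "password"):
# existingPasswordSecret: rabbitmq-credentials
# which could be created beforehand with:
#   kubectl create secret generic rabbitmq-credentials --from-literal=password=guest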
capabilities: bert,no_sort
include_queues: ".*"
include_vhost: ".*"
skip_queues: "^$"
skip_verify: "false"
skip_vhost: "^$"
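# For example, to export only queues beginning with "prod-" while skipping
# RabbitMQ's internal amq.* queues (the patterns are illustrative):
# include_queues: "^prod-.*"
# skip_queues: "^amq\."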
exporters: "exchange,node,overview,queue"
output_format: "TTY"
timeout: 30
max_queues: 0
annotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/path: "/metrics"
# prometheus.io/port: 9419
prometheus:
monitor:
enabled: false
additionalLabels: {}
interval: 15s
namespace: []
rules:
enabled: false
additionalLabels: {}
|
influxdb | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"influxdb.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"influxdb.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"influxdb.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCommon labels\n*/}}\n{{- define \"influxdb.labels\" -}}\nhelm.sh/chart: {{ include \"influxdb.chart\" . }}\n{{ include \"influxdb.selectorLabels\" . }}\n{{- if .Chart.AppVersion }}\napp.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n{{- end }}\napp.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end -}}\n\n{{/*\nSelector labels\n*/}}\n{{- define \"influxdb.selectorLabels\" -}}\napp.kubernetes.io/name: {{ include \"influxdb.name\" . }}\napp.kubernetes.io/instance: {{ .Release.Name }}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"influxdb.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"influxdb.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# backup-cronjob.yaml\n{{- if .Values.backup.enabled }}\napiVersion: batch/v1beta1\nkind: CronJob\nmetadata:\n name: {{ include \"influxdb.fullname\" . }}-backup\n labels:\n {{- include \"influxdb.labels\" . | nindent 4 }}\n app.kubernetes.io/component: backup\n annotations:\n {{- toYaml .Values.backup.annotations | nindent 4 }}\nspec:\n schedule: {{.Values.backup.schedule | quote }}\n concurrencyPolicy: Forbid\n jobTemplate:\n spec:\n template:\n metadata:\n labels:\n {{- include \"influxdb.selectorLabels\" . | nindent 12 }}\n spec:\n restartPolicy: OnFailure\n volumes:\n - name: backups\n emptyDir: {}\n {{- if .Values.backup.gcs }}\n {{- if .Values.backup.gcs.serviceAccountSecret }}\n - name: google-cloud-key\n secret:\n secretName: {{ .Values.backup.gcs.serviceAccountSecret | quote }}\n {{- end }}\n {{- end }}\n serviceAccountName: {{ include \"influxdb.serviceAccountName\" . }}\n initContainers:\n - name: influxdb-backup\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n volumeMounts:\n - name: backups\n mountPath: /backups\n command:\n - /bin/sh\n args:\n - '-c'\n - |\n influxd backup -host {{ include \"influxdb.fullname\" . }}.{{ .Release.Namespace }}.svc:{{ .Values.config.rpc.bind_address | default 8088 }} -portable /backups/backup_$(date +%Y%m%d_%H%M%S)\n containers:\n {{- if .Values.backup.gcs }}\n - name: gsutil-cp\n image: google/cloud-sdk:alpine\n command:\n - /bin/sh\n args:\n - '-c'\n - |\n if [ -n \"$KEY_FILE\" ]; then\n gcloud auth activate-service-account --key-file $KEY_FILE\n fi\n gsutil -m cp -r \"$SRC_URL\" \"$DST_URL\"\n volumeMounts:\n - name: backups\n mountPath: /backups\n {{- if .Values.backup.gcs.serviceAccountSecretKey}}\n - name: google-cloud-key\n mountPath: /var/secrets/google/\n {{- end }}\n env:\n - name: SRC_URL\n value: /backups\n - name: DST_URL\n value: {{ .Values.backup.gcs.destination}}\n {{- if .Values.backup.gcs.serviceAccountSecretKey}}\n - name: KEY_FILE\n value: /var/secrets/google/{{ .Values.backup.gcs.serviceAccountSecretKey }}\n {{- end }}\n {{- end }}\n {{- if .Values.backup.azure }}\n - name: azure-cli\n image: microsoft/azure-cli\n command:\n - /bin/sh\n args:\n - '-c'\n - |\n az storage container create --name \"$DST_CONTAINER\"\n az storage blob upload-batch --destination \"$DST_CONTAINER\" --destination-path \"$DST_PATH\" --source \"$SRC_URL\"\n volumeMounts:\n - name: backups\n mountPath: /backups\n env:\n - name: SRC_URL\n value: /backups\n - name: DST_CONTAINER\n value: {{ .Values.backup.azure.destination_container }}\n - name: DST_PATH\n value: {{ .Values.backup.azure.destination_path }}\n - name: AZURE_STORAGE_CONNECTION_STRING\n valueFrom:\n secretKeyRef:\n name: {{ .Values.backup.azure.storageAccountSecret }}\n key: connection-string\n {{- end }}\n{{- end }}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"influxdb.fullname\" . }}\n labels:\n {{- include \"influxdb.labels\" . | nindent 4 }}\ndata:\n influxdb.conf: |+\n reporting-disabled = {{ .Values.config.reporting_disabled | default false }}\n bind-address = \":{{ .Values.config.rpc.bind_address | default 8088 }}\"\n\n [meta]\n dir = \"/var/lib/influxdb/meta\"\n {{- range $key, $value := index .Values.config.meta }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n {{- if .Values.enterprise.enabled }}\n internal-shared-secret = \"{{ sha256sum .Values.enterprise.meta.seed }}\"\n meta-auth-enabled = {{ .Values.config.meta.authEnabled }}\n {{- end }}\n\n [data]\n dir = \"/var/lib/influxdb/data\"\n wal-dir = \"/var/lib/influxdb/wal\"\n {{- range $key, $value := index .Values.config.data }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n [coordinator]\n {{- range $key, $value := index .Values.config.coordinator }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n [retention]\n {{- range $key, $value := index .Values.config.retention }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n [shard-precreation]\n {{- range $key, $value := index .Values.config.shard_precreation }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n [monitor]\n {{- range $key, $value := index .Values.config.monitor }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n [subscriber]\n {{- range $key, $value := index .Values.config.subscriber }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n [http]\n {{- range $key, $value := index .Values.config.http }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n # TODO: allow multiple graphite listeners\n\n [[graphite]]\n {{- range $key, $value := index .Values.config.graphite }}\n {{ $key }} = {{ $value }}\n {{- end }}\n {{- if .Values.config.graphite.templates }}\n templates = [\n {{- range .Values.config.graphite.templates }}\n {{ quote . }},\n {{- end }}\n ]\n {{- end }}\n\n # TODO: allow multiple collectd listeners with templates\n\n [[collectd]]\n {{- range $key, $value := index .Values.config.collectd }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n # TODO: allow multiple opentsdb listeners with templates\n\n [[opentsdb]]\n {{- range $key, $value := index .Values.config.opentsdb }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n # TODO: allow multiple udp listeners with templates\n\n [[udp]]\n {{- range $key, $value := index .Values.config.udp }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n [continuous_queries]\n {{- range $key, $value := index .Values.config.continuous_queries }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n [logging]\n {{- range $key, $value := index .Values.config.logging }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n {{ if .Values.enterprise.enabled -}}\n [enterprise]\n license-key = {{ .Values.enterprise.licensekey | quote }}\n\n [hinted-handoff]\n enabled = true\n dir = \"/var/lib/influxdb/hh\"\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\napiVersion: networking.k8s.io/v1beta1\nkind: Ingress\nmetadata:\n name: {{ include \"influxdb.fullname\" . }}\n labels:\n {{- include \"influxdb.labels\" . | nindent 4 }}\n annotations:\n{{ toYaml .Values.ingress.annotations | indent 4 }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n - hosts:\n - {{ .Values.ingress.hostname | quote }}\n secretName: {{ .Values.ingress.secretName }}\n{{- end }}\n rules:\n - host: {{ .Values.ingress.hostname }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ include \"influxdb.fullname\" . }}\n servicePort: 8086\n{{- end -}}\n",
"# init-config.yaml\n{{- if .Values.initScripts.enabled -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"influxdb.fullname\" . }}-init\n labels:\n {{- include \"influxdb.labels\" . | nindent 4 }}\ndata:\n{{ toYaml .Values.initScripts.scripts | indent 2 }}\n{{- end -}}\n",
"# meta-configmap.yaml\n{{ if .Values.enterprise.enabled -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"influxdb.fullname\" . }}-meta\n labels:\n {{- include \"influxdb.labels\" . | nindent 4 }}\n app.kubernetes.io/component: meta\ndata:\n influxdb-meta.conf: |+\n reporting-disabled = {{ .Values.config.reporting_disabled | default false }}\n bind-address = \":{{ .Values.config.meta.bind_address | default 8091 }}\"\n\n [enterprise]\n license-key = {{ .Values.enterprise.licensekey | quote }}\n\n [meta]\n dir = \"/var/lib/influxdb/meta\"\n {{- range $key, $value := index .Values.config.meta }}\n {{ $key }} = {{ $value }}\n {{- end }}\n {{- if .Values.enterprise.enabled }}\n meta-auth-enabled = {{ .Values.config.meta.authEnabled }}\n {{- end }}\n\n [logging]\n {{- range $key, $value := index .Values.config.logging }}\n {{ $key }} = {{ $value }}\n {{- end }}\n\n [tls]\n {{- range $key, $value := index .Values.config.tls }}\n {{ $key }} = {{ $value }}\n {{- end }}\n{{- end }}\n",
"# meta-service.yaml\n{{ if .Values.enterprise.enabled -}}\napiVersion: v1\nkind: Service\nmetadata:\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\n name: {{ include \"influxdb.fullname\" . }}-meta\n labels:\n {{- include \"influxdb.labels\" . | nindent 4 }}\n app.kubernets.io/component: meta\nspec:\n type: ClusterIP\n clusterIP: None\n # publishNotReadyAddresses is used for service discovery of meta and data nodes by querying the service's SRV record.\n publishNotReadyAddresses: true\n ports:\n - name: meta\n port: {{ .Values.config.meta.bind_address | default 8091 }}\n targetPort: meta\n selector:\n {{- include \"influxdb.selectorLabels\" . | nindent 4 }}\n app.kubernets.io/component: meta\n{{- end }}\n",
"# meta-statefulset.yaml\n{{- if .Values.enterprise.enabled }}\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ include \"influxdb.fullname\" . }}-meta\n labels:\n {{- include \"influxdb.labels\" . | nindent 4 }}\n app.kubernetes.io/component: meta\nspec:\n replicas: {{ .Values.enterprise.meta.clusterSize }}\n selector:\n matchLabels:\n {{- include \"influxdb.selectorLabels\" . | nindent 6 }}\n app.kubernetes.io/component: meta\n serviceName: \"{{ include \"influxdb.fullname\" . }}-meta\"\n template:\n metadata:\n labels:\n {{- include \"influxdb.selectorLabels\" . | nindent 8 }}\n app.kubernetes.io/component: meta\n {{- if .Values.podAnnotations }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n {{- end}}\n {{- end }}\n serviceAccountName: {{ include \"influxdb.serviceAccountName\" . }}\n containers:\n - name: \"{{ include \"influxdb.fullname\" . }}-meta\"\n image: \"{{ .Values.image.repository }}:{{ .Values.enterprise.meta.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n resources:\n{{ toYaml .Values.enterprise.meta.resources | indent 10 }}\n ports:\n - name: udp\n containerPort: {{ .Values.config.udp.bind_address | default 8089 }}\n - name: rpc\n containerPort: {{ .Values.config.rpc.bind_address | default 8088 }}\n - name: meta\n containerPort: {{ .Values.config.meta.bind_address | default 8091 }}\n {{- if .Values.env }}\n env:\n{{ toYaml .Values.env | indent 10 }}\n # Values.env's HOSTNAME isn't fundamentally different from $HOSTNAME, but this way we get a distinguished name for InfluxDB at runtime.\n - name: INFLUXDB_HOSTNAME\n value: \"$(_HOSTNAME).{{ include \"influxdb.fullname\" . }}-meta\"\n {{- end }}\n livenessProbe:\n httpGet:\n path: /ping\n port: meta\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds | default 30 }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds | default 5 }}\n readinessProbe:\n httpGet:\n path: /ping\n port: meta\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds | default 5 }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds | default 1 }}\n {{- if .Values.startupProbe.enabled }}\n startupProbe:\n httpGet:\n path: /ping\n port: meta\n failureThreshold: {{ .Values.startupProbe.failureThreshold | default 6 }}\n periodSeconds: {{ .Values.startupProbe.periodSeconds | default 5 }}\n {{- end }}\n volumeMounts:\n - name: {{ include \"influxdb.fullname\" . }}-meta\n mountPath: /var/lib/influxdb\n - name: config\n mountPath: /etc/influxdb\n {{- if .Values.initScripts.enabled }}\n - name: init\n mountPath: /docker-entrypoint-initdb.d\n {{- end }}\n volumes:\n - name: config\n configMap:\n name: {{ include \"influxdb.fullname\" . }}-meta\n {{- if .Values.initScripts.enabled }}\n - name: init\n configMap:\n name: {{ include \"influxdb.fullname\" . }}-init\n {{- end }}\n {{- if (not .Values.persistence.enabled ) }}\n - name: {{ include \"influxdb.fullname\" . 
}}-meta\n emptyDir: {}\n {{- end }}\n {{- if .Values.schedulerName }}\n schedulerName: \"{{ .Values.schedulerName }}\"\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end -}}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.persistence.enabled }}\n volumeClaimTemplates:\n - metadata:\n name: {{ include \"influxdb.fullname\" . }}-meta\n annotations:\n {{- range $key, $value := .Values.persistence.annotations }}\n {{ $key }}: \"{{ $value }}\"\n {{- end }}\n spec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote}}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{- if .Values.persistence.storageClass }}\n {{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n {{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n {{- end }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# post-install-set-auth.yaml\n{{- if .Values.setDefaultUser.enabled -}}\napiVersion: batch/v1\nkind: Job\nmetadata:\n name: {{ include \"influxdb.fullname\" . }}-set-auth\n labels:\n {{- include \"influxdb.labels\" . | nindent 4 }}\n annotations:\n \"helm.sh/hook\": post-install\n \"helm.sh/hook-delete-policy\": {{ .Values.setDefaultUser.hookDeletePolicy }}\nspec:\n activeDeadlineSeconds: {{ .Values.setDefaultUser.activeDeadlineSeconds }}\n backoffLimit: {{ .Values.setDefaultUser.backoffLimit }}\n template:\n metadata:\n labels:\n {{- include \"influxdb.selectorLabels\" . | nindent 8 }}\n spec:\n containers:\n - name: {{ include \"influxdb.fullname\" . }}-set-auth\n image: \"{{ .Values.setDefaultUser.image }}\"\n env:\n - name: INFLUXDB_USER\n valueFrom:\n secretKeyRef:\n {{- if .Values.setDefaultUser.user.existingSecret }}\n name: {{ .Values.setDefaultUser.user.existingSecret -}}\n {{ else }}\n name: {{ include \"influxdb.fullname\" . }}-auth\n {{- end }}\n key: influxdb-user\n - name: INFLUXDB_PASSWORD\n valueFrom:\n secretKeyRef:\n {{- if .Values.setDefaultUser.user.existingSecret }}\n name: {{ .Values.setDefaultUser.user.existingSecret -}}\n {{ else }}\n name: {{ include \"influxdb.fullname\" . }}-auth\n {{- end }}\n key: influxdb-password\n args:\n - \"/bin/sh\"\n - \"-c\"\n - |\n curl -X POST http://{{ include \"influxdb.fullname\" . }}:{{ .Values.config.http.bind_address | default 8086 }}/query \\\n --data-urlencode \\\n \"q=CREATE USER \\\"${INFLUXDB_USER}\\\" WITH PASSWORD '${INFLUXDB_PASSWORD}' {{ .Values.setDefaultUser.user.privileges }}\"\n restartPolicy: {{ .Values.setDefaultUser.restartPolicy }}\n{{- end -}}\n",
"# secret.yaml\n{{- if .Values.setDefaultUser.enabled -}}\n{{- if not (.Values.setDefaultUser.user.existingSecret) -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ include \"influxdb.fullname\" . }}-auth\n labels:\n {{- include \"influxdb.labels\" . | nindent 4 }}\ndata:\n {{- if .Values.setDefaultUser.user.password }}\n influxdb-password: {{ .Values.setDefaultUser.user.password | b64enc | quote }}\n {{- else }}\n influxdb-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{- end }}\n influxdb-user: {{ .Values.setDefaultUser.user.username | b64enc | quote }}\n{{- end -}}\n{{- end -}}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\n name: {{ include \"influxdb.fullname\" . }}\n labels:\n {{- include \"influxdb.labels\" . | nindent 4 }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - name: api\n port: {{ .Values.config.http.bind_address | default 8086 }}\n targetPort: api\n - name: rpc\n port: {{ .Values.config.rpc.bind_address | default 8088 }}\n targetPort: rpc\n {{- if .Values.config.graphite.enabled }}\n - name: graphite\n port: {{ .Values.config.graphite.bind_address | default 2003 }}\n targetPort: graphite\n {{- end }}\n {{- if .Values.config.collectd.enabled }}\n - name: collectd\n port: {{ .Values.config.collectd.bind_address | default 25826 }}\n targetPort: collectd\n {{- end }}\n {{- if .Values.config.udp.enabled }}\n - name: udp\n port: {{ .Values.config.udp.bind_address | default 8089 }}\n targetPort: udp\n {{- end }}\n {{- if .Values.config.opentsdb.enabled }}\n - name: opentsdb\n port: {{ .Values.config.opentsdb.bind_address | default 4242 }}\n targetPort: opentsdb\n {{- end }}\n selector:\n {{- include \"influxdb.selectorLabels\" . | nindent 4 }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ include \"influxdb.serviceAccountName\" . }}\n labels:\n {{- include \"influxdb.labels\" . | nindent 4 }}\n {{- with .Values.serviceAccount.annotations }}\n annotations:\n {{- toYaml . | nindent 4 }}\n {{- end }}\n{{- end -}}\n",
"# statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ include \"influxdb.fullname\" . }}\n labels:\n {{- include \"influxdb.labels\" . | nindent 4 }}\nspec:\n {{- if .Values.enterprise.enabled }}\n replicas: {{ .Values.enterprise.clusterSize }}\n {{ else }}\n replicas: 1\n {{- end}}\n selector:\n matchLabels:\n {{- include \"influxdb.selectorLabels\" . | nindent 6 }}\n serviceName: \"{{ include \"influxdb.fullname\" . }}\"\n template:\n metadata:\n labels:\n {{- include \"influxdb.selectorLabels\" . | nindent 8 }}\n {{- if .Values.podAnnotations }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n {{- if .Values.schedulerName }}\n schedulerName: \"{{ .Values.schedulerName }}\"\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end -}}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n {{- end}}\n {{- end }}\n serviceAccountName: {{ include \"influxdb.serviceAccountName\" . }}\n containers:\n - name: {{ include \"influxdb.fullname\" . }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n ports:\n - name: api\n containerPort: {{ .Values.config.http.bind_address | default 8086 }}\n {{- if .Values.config.graphite.enabled }}\n - name: graphite\n containerPort: {{ .Values.config.graphite.bind_address | default 2003 }}\n {{- end }}\n {{- if .Values.config.collectd.enabled }}\n - name: collectd\n containerPort: {{ .Values.config.collectd.bind_address | default 25826 }}\n {{- end }}\n {{- if .Values.config.udp.enabled }}\n - name: udp\n containerPort: {{ .Values.config.udp.bind_address | default 8089 }}\n {{- end }}\n {{- if .Values.config.opentsdb.enabled }}\n - name: opentsdb\n containerPort: {{ .Values.config.opentsdb.bind_address | default 4242 }}\n {{- end }}\n {{- if .Values.enterprise.enabled }}\n - name: rpc\n containerPort: {{ .Values.config.rpc.bind_address | default 8088 }}\n - name: meta\n containerPort: {{ .Values.config.meta.bind_address | default 8091 }}\n {{- end }}\n {{- if .Values.env }}\n env:\n{{ toYaml .Values.env | indent 10 }}\n {{- if .Values.enterprise.enabled }}\n - name: INFLUXDB_HOSTNAME # Values.env's HOSTNAME isn't fundamentally different from $HOSTNAME, but this way weg get a distinguished name at runtime.\n value: \"$(_HOSTNAME).{{ include \"influxdb.fullname\" . }}\"\n {{- end }}\n {{- end }}\n livenessProbe:\n httpGet:\n path: /ping\n port: api\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds | default 30 }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds | default 5 }}\n readinessProbe:\n httpGet:\n path: /ping\n port: api\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds | default 5 }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds | default 1 }}\n {{- if .Values.startupProbe.enabled }}\n startupProbe:\n httpGet:\n path: /ping\n port: api\n failureThreshold: {{ .Values.startupProbe.failureThreshold | default 6 }}\n periodSeconds: {{ .Values.startupProbe.periodSeconds | default 5 }}\n {{- end }}\n volumeMounts:\n - name: {{ include \"influxdb.fullname\" . 
}}-data\n mountPath: /var/lib/influxdb\n - name: config\n mountPath: /etc/influxdb\n {{- if .Values.initScripts.enabled }}\n - name: init\n mountPath: /docker-entrypoint-initdb.d\n {{- end }}\n volumes:\n - name: config\n configMap:\n name: {{ include \"influxdb.fullname\" . }}\n {{- if .Values.initScripts.enabled }}\n - name: init\n configMap:\n name: {{ include \"influxdb.fullname\" . }}-init\n {{- end }}\n {{- if (not .Values.persistence.enabled ) }}\n - name: {{ include \"influxdb.fullname\" . }}-data\n emptyDir: {}\n {{- end }}\n{{- if .Values.persistence.enabled }}\n volumeClaimTemplates:\n - metadata:\n name: {{ include \"influxdb.fullname\" . }}-data\n annotations:\n {{- range $key, $value := .Values.persistence.annotations }}\n {{ $key }}: \"{{ $value }}\"\n {{- end }}\n spec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote}}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{- if .Values.persistence.storageClass }}\n {{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n {{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n {{- end }}\n {{- end }}\n{{- end }}\n"
] | ## influxdb image version
## ref: https://hub.docker.com/r/library/influxdb/tags/
image:
repository: "influxdb"
tag: "1.7.9-alpine"
pullPolicy: IfNotPresent
## If specified, use these secrets to access the images
# pullSecrets:
# - registry-secret
serviceAccount:
create: true
name:
annotations: {}
## Customize liveness, readiness and startup probes
## ref: https://docs.influxdata.com/influxdb/v1.7/tools/api/#ping-http-endpoint
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
##
livenessProbe: {}
# initialDelaySeconds: 30
# timeoutSeconds: 5
readinessProbe: {}
# initialDelaySeconds: 5
# timeoutSeconds: 1
startupProbe:
enabled: false
# failureThreshold: 6
# periodSeconds: 5
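# A minimal sketch of overriding the probes; the numbers mirror the template
# defaults (livenessProbe 30/5, readinessProbe 5/1, startupProbe 6/5), so only
# slow-starting or heavily loaded instances should need different values:
# livenessProbe:
#   initialDelaySeconds: 30
#   timeoutSeconds: 5
# startupProbe:
#   enabled: true
#   failureThreshold: 6
#   periodSeconds: 5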
## Specify a service type
## NodePort is default
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
## Add annotations to service
# annotations: {}
type: ClusterIP
## Persist data to a persistent volume
##
persistence:
enabled: true
## influxdb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
annotations:
accessMode: ReadWriteOnce
size: 8Gi
## Deploy InfluxDB Enterprise - License required
## ref: https://www.influxdata.com/products/influxdb-enterprise/
enterprise:
enabled: false
licensekey: ""
clusterSize: 4
meta:
image:
## This image contains the enterprise meta node package for clustering.
## It is meant to be used in conjunction with the influxdb:data package of the same version.
## ref: https://hub.docker.com/_/influxdb
tag: meta
clusterSize: 3
## seed is hashed and used as `internal-shared-secret` for Meta service.
seed: dead-beef-cafe-bae
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources: {}
# resources:
# requests:
# memory: 512Mi
# cpu: 2
# limits:
# memory: 1Gi
# cpu: 4
## Create default user through Kubernetes job
## Defaults indicated below
##
setDefaultUser:
enabled: false
## Image of the container used for job
## Default: appropriate/curl:latest
##
image: appropriate/curl:latest
## Deadline for the job so it does not retry forever.
## Default: activeDeadlineSeconds: 300
##
activeDeadlineSeconds: 300
## Specify the number of retries before considering job as failed.
## https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#pod-backoff-failure-policy
##
backoffLimit: 6
## Hook delete policy for helm.
## Default: hookDeletePolicy: hook-succeeded
##
hookDeletePolicy: hook-succeeded
## Restart policy for job
## Default: OnFailure
restartPolicy: OnFailure
user:
## The user name
## Default: "admin"
username: "admin"
## User password
## single quotes must be escaped (\')
## Default: (Randomly generated 10 characters of AlphaNum)
# password:
## The user name and password are obtained from an existing secret. The expected
## keys are `influxdb-user` and `influxdb-password`.
## If set, the username and password values above are ignored.
# existingSecret: influxdb-auth
## User privileges
## Default: "WITH ALL PRIVILEGES"
privileges: "WITH ALL PRIVILEGES"
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources: {}
# requests:
# memory: 256Mi
# cpu: 0.1
# limits:
# memory: 16Gi
# cpu: 8
# Annotations to be added to InfluxDB pods
podAnnotations: {}
ingress:
enabled: false
tls: false
# secretName: my-tls-cert # only needed if tls above is true
hostname: influxdb.foobar.com
annotations:
# kubernetes.io/ingress.class: "nginx"
# kubernetes.io/tls-acme: "true"
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
## The InfluxDB image uses several environment variables to automatically
## configure certain parts of the server.
## Ref: https://hub.docker.com/_/influxdb/
env: {}
# - name: INFLUXDB_DB
# value: "demo"
## InfluxDB configuration
## ref: https://docs.influxdata.com/influxdb/v1.7/administration/config
config:
reporting_disabled: false
rpc: {}
meta: {}
data: {}
coordinator: {}
retention: {}
shard_precreation: {}
monitor: {}
http: {}
logging: {}
subscriber: {}
graphite: {}
collectd: {}
opentsdb: {}
udp: {}
continuous_queries: {}
tls: {}
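# A minimal sketch (assumed settings) of how these maps are consumed:
# configmap.yaml renders every key/value pair verbatim as `key = value` under
# the matching TOML section, so string values must carry their own quotes:
# config:
#   http:
#     auth-enabled: true
#   logging:
#     level: '"info"'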
# Allow executing custom init scripts
#
# If the container finds any files with the extensions .sh or .iql inside of the
# /docker-entrypoint-initdb.d folder, it will execute them. The order they are
# executed in is determined by the shell. This is usually alphabetical order.
initScripts:
enabled: false
scripts:
init.iql: |+
CREATE DATABASE "telegraf" WITH DURATION 30d REPLICATION 1 NAME "rp_30d"
backup:
enabled: false
schedule: "0 0 * * *"
annotations: {}
## Google Cloud Storage
# gcs:
# serviceAccountSecret: influxdb-backup-key
# serviceAccountSecretKey: key.json
# destination: gs://bucket/influxdb
## Azure
## The secret is expected to store the connection string in its `connection-string` field.
## An existing container will be used, or a private one within the storage account will be created.
# azure:
# storageAccountSecret: influxdb-backup-azure-key
# destination_container: influxdb-container
# destination_path: ""
|
newrelic-infrastructure | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"newrelic.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"newrelic.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if ne $name .Release.Name -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s\" $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/* Generate mode label */}}\n{{- define \"newrelic.mode\" }}\n{{- if .Values.privileged -}}\nprivileged\n{{- else -}}\nunprivileged\n{{- end }}\n{{- end -}}\n\n{{/* Generate basic labels */}}\n{{- define \"newrelic.labels\" }}\napp: {{ template \"newrelic.name\" . }}\nchart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\nheritage: {{.Release.Service }}\nrelease: {{.Release.Name }}\nmode: {{ template \"newrelic.mode\" . }}\n{{- end }}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"newrelic.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"newrelic.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"newrelic.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the image name depending on the \"privileged\" flag\n*/}}\n{{- define \"newrelic.image\" -}}\n{{- if .Values.privileged -}}\n\"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n{{- else -}}\n\"{{ .Values.image.repository }}:{{ .Values.image.tag }}-unprivileged\"\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the licenseKey\n*/}}\n{{- define \"newrelic.licenseKey\" -}}\n{{- if .Values.global}}\n {{- if .Values.global.licenseKey }}\n {{- .Values.global.licenseKey -}}\n {{- else -}}\n {{- .Values.licenseKey | default \"\" -}}\n {{- end -}}\n{{- else -}}\n {{- .Values.licenseKey | default \"\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the cluster\n*/}}\n{{- define \"newrelic.cluster\" -}}\n{{- if .Values.global -}}\n {{- if .Values.global.cluster -}}\n {{- .Values.global.cluster -}}\n {{- else -}}\n {{- .Values.cluster | default \"\" -}}\n {{- end -}}\n{{- else -}}\n {{- .Values.cluster | default \"\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the customSecretName\n*/}}\n{{- define \"newrelic.customSecretName\" -}}\n{{- if .Values.global }}\n {{- if .Values.global.customSecretName }}\n {{- .Values.global.customSecretName -}}\n {{- else -}}\n {{- .Values.customSecretName | default \"\" -}}\n {{- end -}}\n{{- else -}}\n {{- .Values.customSecretName | default \"\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the customSecretLicenseKey\n*/}}\n{{- define \"newrelic.customSecretLicenseKey\" -}}\n{{- if .Values.global }}\n {{- if .Values.global.customSecretLicenseKey }}\n {{- .Values.global.customSecretLicenseKey -}}\n {{- else -}}\n {{- .Values.customSecretLicenseKey | default \"\" -}}\n {{- end -}}\n{{- else -}}\n {{- .Values.customSecretLicenseKey | default \"\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturns if the template should render, it checks if the required values\nlicenseKey and cluster are 
set.\n*/}}\n{{- define \"newrelic.areValuesValid\" -}}\n{{- $cluster := include \"newrelic.cluster\" . -}}\n{{- $licenseKey := include \"newrelic.licenseKey\" . -}}\n{{- $customSecretName := include \"newrelic.customSecretName\" . -}}\n{{- $customSecretLicenseKey := include \"newrelic.customSecretLicenseKey\" . -}}\n{{- and (or $licenseKey (and $customSecretName $customSecretLicenseKey)) $cluster}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n labels: {{ include \"newrelic.labels\" . | indent 4 }}\n name: {{ template \"newrelic.fullname\" . }}\nrules:\n - apiGroups: [\"\"]\n resources:\n - \"nodes\"\n - \"nodes/metrics\"\n - \"nodes/stats\"\n - \"nodes/proxy\"\n - \"pods\"\n - \"services\"\n - \"secrets\"\n verbs: [\"get\", \"list\"]\n - nonResourceURLs: [\"/metrics\"]\n verbs: [\"get\"]\n{{- if .Values.rbac.pspEnabled }}\n - apiGroups:\n - extensions\n resources:\n - podsecuritypolicies\n resourceNames:\n - privileged-{{ template \"newrelic.fullname\" . }}\n verbs:\n - use\n{{- end -}}\n{{- end -}}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n labels: {{ include \"newrelic.labels\" . | indent 4 }}\n name: {{ template \"newrelic.fullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"newrelic.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"newrelic.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n",
"# configmap.yaml\n{{ if .Values.config }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n namespace: {{ .Release.Namespace }}\n labels: {{ include \"newrelic.labels\" . | indent 4 }}\n name: {{ template \"newrelic.fullname\" . }}\ndata:\n newrelic-infra.yml: |\n{{ toYaml .Values.config | indent 6 }}\n{{ end }}\n{{ if .Values.integrations_config }}\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n namespace: {{ .Release.Namespace }}\n labels: {{ include \"newrelic.labels\" . | indent 4 }}\n name: {{ template \"newrelic.fullname\" . }}-integrations-cfg\ndata:\n{{ range .Values.integrations_config -}}\n{{ .name | indent 2 }}: |\n ---\n{{ toYaml .data | indent 4 }}\n{{ end }}\n{{ end }}\n",
"# daemonset.yaml\n{{- if (include \"newrelic.areValuesValid\" .) }}\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n namespace: {{ .Release.Namespace }}\n labels: {{ include \"newrelic.labels\" . | indent 4 }}\n name: {{ template \"newrelic.fullname\" . }}\nspec:\n updateStrategy:\n type: {{ .Values.updateStrategy }}\n selector:\n matchLabels:\n app: {{ template \"newrelic.name\" . }}\n release: {{.Release.Name }}\n template:\n metadata:\n {{- if .Values.podAnnotations }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8}}\n {{- end }}\n labels:\n app: {{ template \"newrelic.name\" . }}\n release: {{.Release.Name }}\n {{- if .Values.podLabels}}\n{{ toYaml .Values.podLabels | indent 8 }}\n {{- end }}\n mode: {{ template \"newrelic.mode\" . }}\n spec:\n serviceAccountName: {{ template \"newrelic.serviceAccountName\" . }}\n {{- if .Values.privileged }}\n hostNetwork: true\n dnsPolicy: ClusterFirstWithHostNet\n {{- end }}\n containers:\n - name: {{ template \"newrelic.name\" . }}\n image: {{ template \"newrelic.image\" . }}\n imagePullPolicy: \"{{ .Values.image.pullPolicy }}\"\n securityContext:\n {{- if .Values.privileged }}\n privileged: true\n {{- else }}\n runAsUser: 1000 # nri-agent\n allowPrivilegeEscalation: false\n readOnlyRootFilesystem: true\n {{- end }}\n env:\n - name: NRIA_LICENSE_KEY\n valueFrom:\n secretKeyRef:\n {{- if (include \"newrelic.licenseKey\" .) }}\n name: {{ template \"newrelic.fullname\" . }}-config\n key: license\n {{- else }}\n name: {{ include \"newrelic.customSecretName\" . }}\n key: {{ include \"newrelic.customSecretLicenseKey\" . }}\n {{- end }}\n - name: \"CLUSTER_NAME\"\n value: {{ include \"newrelic.cluster\" . }}\n {{- if .Values.disableKubeStateMetrics }}\n - name: \"DISABLE_KUBE_STATE_METRICS\"\n value: {{ .Values.disableKubeStateMetrics | quote }}\n {{- end }}\n {{- if .Values.kubeStateMetricsUrl }}\n - name: \"KUBE_STATE_METRICS_URL\"\n value: {{ .Values.kubeStateMetricsUrl | quote }}\n {{- end }}\n {{- if .Values.kubeStateMetricsPodLabel }}\n - name: \"KUBE_STATE_METRICS_POD_LABEL\"\n value: {{ .Values.kubeStateMetricsPodLabel | quote }}\n {{- end }}\n {{- if .Values.kubeStateMetricsTimeout }}\n - name: TIMEOUT\n value: {{ .Values.kubeStateMetricsTimeout | quote }}\n {{- end }}\n {{- if .Values.kubeStateMetricsScheme }}\n - name: \"KUBE_STATE_METRICS_SCHEME\"\n value: {{ .Values.kubeStateMetricsScheme | quote }}\n {{- end }}\n {{- if .Values.kubeStateMetricsPort }}\n - name: \"KUBE_STATE_METRICS_PORT\"\n value: {{ .Values.kubeStateMetricsPort | quote }}\n {{- end }}\n {{- if .Values.etcdTlsSecretName }}\n - name: ETCD_TLS_SECRET_NAME\n value: {{ .Values.etcdTlsSecretName | quote }}\n {{- end }}\n {{- if .Values.etcdTlsSecretNamespace }}\n - name: ETCD_TLS_SECRET_NAMESPACE\n value: {{ .Values.etcdTlsSecretNamespace | quote }}\n {{- end }}\n {{- if .Values.etcdEndpointUrl }}\n - name: \"ETCD_ENDPOINT_URL\"\n value: {{ .Values.etcdEndpointUrl | quote }}\n {{- end }}\n {{- if .Values.apiServerSecurePort }}\n - name: \"API_SERVER_SECURE_PORT\"\n value: {{ .Values.apiServerSecurePort | quote }}\n {{- end }}\n {{- if .Values.apiServerEndpointUrl }}\n - name: \"API_SERVER_ENDPOINT_URL\"\n value: {{ .Values.apiServerEndpointUrl | quote }}\n {{- end }}\n {{- if .Values.schedulerEndpointUrl }}\n - name: \"SCHEDULER_ENDPOINT_URL\"\n value: {{ .Values.schedulerEndpointUrl | quote }}\n {{- end }}\n {{- if .Values.controllerManagerEndpointUrl }}\n - name: \"CONTROLLER_MANAGER_ENDPOINT_URL\"\n value: {{ .Values.controllerManagerEndpointUrl | quote }}\n 
{{- end }}\n {{- if .Values.eventQueueDepth }}\n - name: \"NRIA_EVENT_QUEUE_DEPTH\"\n value: {{ .Values.eventQueueDepth | quote }}\n {{- end }}\n - name: \"NRIA_DISPLAY_NAME\"\n valueFrom:\n fieldRef:\n apiVersion: \"v1\"\n fieldPath: \"spec.nodeName\"\n - name: \"NRK8S_NODE_NAME\"\n valueFrom:\n fieldRef:\n apiVersion: \"v1\"\n fieldPath: \"spec.nodeName\"\n - name: \"NRIA_CUSTOM_ATTRIBUTES\"\n value: {{ .Values.customAttribues }}\n - name: \"NRIA_PASSTHROUGH_ENVIRONMENT\"\n value: \"KUBERNETES_SERVICE_HOST,KUBERNETES_SERVICE_PORT,CLUSTER_NAME,CADVISOR_PORT,NRK8S_NODE_NAME,KUBE_STATE_METRICS_URL,KUBE_STATE_METRICS_POD_LABEL,TIMEOUT,ETCD_TLS_SECRET_NAME,ETCD_TLS_SECRET_NAMESPACE,API_SERVER_SECURE_PORT,KUBE_STATE_METRICS_SCHEME,KUBE_STATE_METRICS_PORT,SCHEDULER_ENDPOINT_URL,ETCD_ENDPOINT_URL,CONTROLLER_MANAGER_ENDPOINT_URL,API_SERVER_ENDPOINT_URL,DISABLE_KUBE_STATE_METRICS\"\n {{- if .Values.verboseLog }}\n - name: NRIA_VERBOSE\n value: \"1\"\n {{- end }}\n {{- if .Values.logFile }}\n - name: NRIA_LOG_FILE\n value: {{ .Values.logFile }}\n {{- end }}\n volumeMounts:\n {{- if .Values.config }}\n - name: config\n mountPath: /etc/newrelic-infra.yml\n subPath: newrelic-infra.yml\n {{- end }}\n {{- if .Values.integrations_config }}\n - name: nri-integrations-cfg-volume\n mountPath: /etc/newrelic-infra/integrations.d/\n {{- end }}\n {{- if .Values.privileged }}\n - name: dev\n mountPath: /dev\n - name: host-docker-socket\n mountPath: /var/run/docker.sock\n - name: log\n mountPath: /var/log\n - name: host-volume\n mountPath: /host\n readOnly: true\n {{- else }}\n - mountPath: /var/db/newrelic-infra/data\n name: tmpfs-data\n - mountPath: /var/db/newrelic-infra/user_data\n name: tmpfs-user-data\n - mountPath: /tmp\n name: tmpfs-tmp\n - mountPath: /var/cache/nr-kubernetes\n name: tmpfs-cache\n {{- end }}\n {{- if .Values.resources }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- end }}\n volumes:\n {{- if .Values.privileged }}\n - name: dev\n hostPath:\n path: /dev\n - name: host-docker-socket\n hostPath:\n path: /var/run/docker.sock\n - name: log\n hostPath:\n path: /var/log\n - name: host-volume\n hostPath:\n path: /\n {{- else }}\n - name: tmpfs-data\n emptyDir: {}\n - name: tmpfs-user-data\n emptyDir: {}\n - name: tmpfs-tmp\n emptyDir: {}\n - name: tmpfs-cache\n emptyDir: {}\n {{- end }}\n {{- if .Values.config }}\n - name: config\n configMap:\n name: {{ template \"newrelic.fullname\" . }}\n items:\n - key: newrelic-infra.yml\n path: newrelic-infra.yml\n {{- end }}\n {{- if .Values.integrations_config }}\n - name: nri-integrations-cfg-volume\n configMap:\n name: {{ template \"newrelic.fullname\" . }}-integrations-cfg\n {{- end }}\n {{- if $.Values.priorityClassName }}\n priorityClassName: {{ $.Values.priorityClassName }}\n {{- end }}\n {{- if $.Values.nodeSelector }}\n nodeSelector:\n{{ toYaml $.Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n{{- end }}\n",
"# podsecuritypolicy.yaml\n{{- if .Values.rbac.pspEnabled }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: privileged-{{ template \"newrelic.fullname\" . }}\nspec:\n allowedCapabilities:\n - '*'\n fsGroup:\n rule: RunAsAny\n privileged: true\n runAsUser:\n rule: RunAsAny\n seLinux:\n rule: RunAsAny\n supplementalGroups:\n rule: RunAsAny\n volumes:\n - '*'\n hostPID: true\n hostIPC: true\n hostNetwork: true\n hostPorts:\n - min: 1\n max: 65536\n{{- end }}\n",
"# secret.yaml\n{{- $licenseKey := include \"newrelic.licenseKey\" . -}}\n{{- if $licenseKey }}\napiVersion: v1\nkind: Secret\nmetadata:\n namespace: {{ .Release.Namespace }}\n labels: {{ include \"newrelic.labels\" . | indent 4 }}\n name: {{ template \"newrelic.fullname\" . }}-config\ntype: Opaque\ndata:\n license: {{ $licenseKey | b64enc }}\n{{- end }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"newrelic.name\" . }}\n chart: {{ template \"newrelic.chart\" . }}\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\n name: {{ template \"newrelic.serviceAccountName\" . }}\n{{- end -}}\n"
] | # - Specify either the New Relic license key or the secret which
# contains it.
#
# - Specify the Kubernetes cluster name.
# https://docs.newrelic.com/docs/kubernetes-monitoring-integration
#
# licenseKey:
# customSecretName:
# customSecretLicenseKey:
# cluster:
#
# IMPORTANT: the previous values can also be set as global so that they
# can be shared by other newrelic products' charts
#
# global:
# licenseKey:
# customSecretName:
# customSecretLicenseKey:
# cluster:
#
# kubeStateMetricsUrl - if provided, the discovery process for the kube-state-metrics endpoint won't be triggered
# Only HTTP is accepted. This is an example value: http://172.17.0.3:8080
# disableKubeStateMetrics - disables KSM parsing by the DaemonSet. Defaults to "false" if not provided.
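# A minimal sketch (assumed names) of supplying the license via an existing
# secret instead of a plain licenseKey; daemonset.yaml reads NRIA_LICENSE_KEY
# from this secret whenever licenseKey is unset:
# cluster: my-cluster
# customSecretName: newrelic-license
# customSecretLicenseKey: license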
verboseLog: false
# This can be set; the default is shown below
# logFile: /var/log/nr-infra.log
image:
repository: newrelic/infrastructure-k8s
tag: 1.21.0
pullPolicy: IfNotPresent
resources:
limits:
memory: 300M
requests:
cpu: 100m
memory: 150M
privileged: true
rbac:
# Specifies whether RBAC resources should be created
create: true
pspEnabled: false
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# If you wish to provide additional labels to apply to the pod(s), specify
# them here
# podLabels:
# If you wish to provide your own newrelic-infra.yml file, include it under config:
# the sample config file is included here as an example. Some options have
# been omitted because they are handled either by variables, or a secret. They
# are display_name, license_key, log_file and verbose.
# config:
#
# New Relic Infrastructure configuration file
#
# Lines that begin with # are comment lines and are ignored by the
# Infrastructure agent. If options have command line equivalents, New Relic
# will use the command line option to override any value set in this file.
#
#
# Option : proxy
# Value : Useful if your firewall rules require the agent to use a
# proxy URL (HTTP or HTTPS) to communicate with New Relic.
# Default: none
#
# proxy: https://user:password@hostname:port
#
# Option : Optional custom attributes
# Use optional key-value pairs to build filter sets, group your results,
# annotate your Insights data, etc.
#
# custom_attributes:
# environment: production
# service: login service
# team: alpha-team
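# A minimal sketch (assumed settings) of passing such a file through `config:`;
# the chart renders it into a ConfigMap and mounts it at /etc/newrelic-infra.yml:
# config:
#   proxy: https://user:password@hostname:port
#   custom_attributes:
#     environment: production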
#
# Pod scheduling priority
# Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# priorityClassName: high-priority
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# These are default tolerations to be able to run the New Relic Kubernetes
# integration.
tolerations:
- operator: "Exists"
effect: "NoSchedule"
- operator: "Exists"
effect: "NoExecute"
updateStrategy: RollingUpdate
# Custom attributes to be passed to the New Relic agent
customAttribues: "'{\"clusterName\":\"$(CLUSTER_NAME)\"}'"
# etcdTlsSecretName: newrelic-infra-etcd-tls-secret
etcdTlsSecretNamespace: default
# If you wish to monitor services running on Kubernetes you can provide integrations
# configuration under integrations_config. You just need to create a new entry where
# the "name" is the filename of the configuration file and the data is the content of
# the integration configuration. The name must end in ".yaml" as this will be the
# filename generated and the Infrastructure agent only looks for YAML files. The data
# part is the actual integration configuration as described in the spec here:
# https://docs.newrelic.com/docs/integrations/integrations-sdk/file-specifications/integration-configuration-file-specifications-agent-v180
# For example, if you wanted to monitor a Redis instance that has a label "app=redis"
# you could do so by adding the following entry:
# integrations_config:
# - name: nri-redis.yaml
# data:
# discovery:
# command:
# # Run NRI Discovery for Kubernetes
# # https://github.com/newrelic/nri-discovery-kubernetes
# exec: /var/db/newrelic-infra/nri-discovery-kubernetes
# match:
# label.app: redis
# integrations:
# - name: nri-redis
# env:
# # using the discovered IP as the hostname address
# HOSTNAME: ${discovery.ip}
# PORT: 6379
# labels:
# env: test
# For more details on monitoring services on Kubernetes see
# https://docs.newrelic.com/docs/integrations/kubernetes-integration/link-apps-services/monitor-services-running-kubernetes
integrations_config: {}
|
home-assistant | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"home-assistant.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"home-assistant.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"home-assistant.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# appdaemon-ingress.yaml\n{{- if and (.Values.appdaemon.enabled) (.Values.appdaemon.ingress.enabled) }}\n{{- $fullName := include \"home-assistant.fullname\" . -}}\n{{- $servicePort := .Values.appdaemon.service.port -}}\n{{- $ingressPath := .Values.appdaemon.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}-appdaemon\n labels:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n helm.sh/chart: {{ include \"home-assistant.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- with .Values.appdaemon.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.appdaemon.ingress.tls }}\n tls:\n {{- range .Values.appdaemon.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.appdaemon.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: {{ $servicePort }}\n {{- end }}\n{{- end }}\n",
"# configurator-ingress.yaml\n{{- if and (.Values.configurator.enabled) (.Values.configurator.ingress.enabled) }}\n{{- $fullName := include \"home-assistant.fullname\" . -}}\n{{- $servicePort := .Values.configurator.service.port -}}\n{{- $ingressPath := .Values.configurator.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}-configurator\n labels:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n helm.sh/chart: {{ include \"home-assistant.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- with .Values.configurator.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.configurator.ingress.tls }}\n tls:\n {{- range .Values.configurator.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.configurator.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: {{ $servicePort }}\n {{- end }}\n{{- end }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"home-assistant.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n helm.sh/chart: {{ include \"home-assistant.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n replicas: 1\n strategy:\n type: {{ .Values.strategyType }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n {{- if .Values.podAnnotations }}\n annotations:\n {{- range $key, $value := .Values.podAnnotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- end }}\n spec:\n {{- with .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range . }}\n - name: {{ . }}\n {{- end }}\n {{- end }}\n {{- if .Values.hostNetwork }}\n hostNetwork: {{ .Values.hostNetwork }}\n dnsPolicy: ClusterFirstWithHostNet\n {{- end }}\n initContainers:\n {{- if .Values.git.enabled }}\n - name: git-sync\n image: \"{{ .Values.git.image.repository }}:{{ .Values.git.image.tag }}\"\n imagePullPolicy: {{ .Values.git.image.pullPolicy }}\n {{- if .Values.git.command }}\n command:\n {{- range .Values.git.command }}\n - {{ . | quote }}\n {{- end }}\n {{- else }}\n command: ['sh', '-c', '[ \"$(ls {{ .Values.git.syncPath }})\" ] || git clone {{ .Values.git.repo }} {{ .Values.git.syncPath }}']\n {{- end }}\n volumeMounts:\n - mountPath: /config\n name: config\n - mountPath: {{ .Values.git.keyPath }}\n name: git-secret\n {{- if .Values.extraVolumeMounts }}{{ toYaml .Values.extraVolumeMounts | trim | nindent 8 }}{{ end }}\n {{- if .Values.usePodSecurityContext }}\n securityContext:\n runAsUser: {{ default 0 .Values.runAsUser }}\n {{- if and (.Values.runAsUser) (.Values.fsGroup) }}\n {{- if not (eq .Values.runAsUser 0.0) }}\n fsGroup: {{ .Values.fsGroup }}\n {{- end }}\n {{- end }}\n {{- end }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: api\n containerPort: {{ .Values.service.port }}\n protocol: TCP\n {{- range .Values.service.additionalPorts }}\n - name: {{ .name }}\n containerPort: {{ .targetPort }}\n {{- end }}\n {{- if .Values.probes.liveness.enabled }}\n livenessProbe:\n httpGet:\n path: /\n port: api\n scheme: {{ .Values.probes.liveness.scheme }}\n initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}\n failureThreshold: {{ .Values.probes.liveness.failureThreshold }}\n timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}\n {{- end }}\n {{- if .Values.probes.readiness.enabled }}\n readinessProbe:\n httpGet:\n path: /\n port: api\n scheme: {{ .Values.probes.readiness.scheme }}\n initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}\n failureThreshold: {{ .Values.probes.readiness.failureThreshold }}\n timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}\n {{- end }}\n {{- if .Values.probes.startup.enabled }}\n startupProbe:\n httpGet:\n path: /\n port: api\n scheme: {{ .Values.probes.startup.scheme }}\n failureThreshold: {{ .Values.probes.startup.failureThreshold }}\n periodSeconds: {{ .Values.probes.startup.periodSeconds }}\n {{- end }}\n env:\n {{- range $key, $value := .Values.extraEnv }}\n - name: {{ $key }}\n 
value: {{ $value }}\n {{- end }}\n {{- range $name, $opts := .Values.extraEnvSecrets }}\n - name: {{ $name }}\n valueFrom:\n secretKeyRef:\n name: {{ $opts.secret }}\n key: {{ $opts.key }}\n {{- end }}\n envFrom:\n {{- range .Values.extraSecretForEnvFrom }}\n - secretRef:\n name: {{ . }}\n {{- end }}\n volumeMounts:\n - mountPath: /config\n name: config\n {{- if .Values.zwave.enabled }}\n - mountPath: /dev/ttyACM0\n name: ttyacm\n {{- end }}\n {{- range .Values.hostMounts }}\n {{- if .mountPath }}\n - mountPath: {{ .mountPath }}\n {{- else }}\n - mountPath: {{ .hostPath }}\n {{- end }}\n name: {{ .name }}\n {{- end }}\n {{- if .Values.git.enabled }}\n - mountPath: {{ .Values.git.keyPath }}\n name: git-secret\n {{- end }}\n {{- if .Values.extraVolumeMounts }}{{ toYaml .Values.extraVolumeMounts | trim | nindent 10 }}{{ end }}\n {{- if .Values.usePodSecurityContext }}\n securityContext:\n runAsUser: {{ default 0 .Values.runAsUser }}\n {{- if and (.Values.runAsUser) (.Values.fsGroup) }}\n {{- if not (eq .Values.runAsUser 0.0) }}\n fsGroup: {{ .Values.fsGroup }}\n {{- end }}\n {{- end }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- if or .Values.zwave.enabled (.Values.hostMounts) }}\n securityContext:\n privileged: true\n {{- end }}\n {{- if .Values.configurator.enabled }}\n - name: configurator\n image: \"{{ .Values.configurator.image.repository }}:{{ .Values.configurator.image.tag }}\"\n imagePullPolicy: {{ .Values.configurator.image.pullPolicy }}\n ports:\n - name: configurator\n containerPort: {{ .Values.configurator.service.port }}\n protocol: TCP\n env:\n {{- if .Values.configurator.hassApiPassword }}\n - name: HC_HASS_API_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"home-assistant.fullname\" . }}-configurator\n key: hass-api-password\n {{- end }}\n {{- if and (.Values.configurator.username) (.Values.configurator.password) }}\n - name: HC_USERNAME\n valueFrom:\n secretKeyRef:\n name: {{ template \"home-assistant.fullname\" . }}-configurator\n key: username\n - name: HC_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"home-assistant.fullname\" . 
}}-configurator\n key: password\n {{- end }}\n {{- if .Values.configurator.hassApiUrl }}\n - name: HC_HASS_API\n value: \"{{ .Values.configurator.hassApiUrl }}\"\n {{- else }}\n - name: HC_HASS_API\n value: \"http://127.0.0.1:8123/api/\"\n {{- end }}\n {{- if .Values.configurator.basepath }}\n - name: HC_BASEPATH\n value: \"{{ .Values.configurator.basepath }}\"\n {{- end }}\n {{- if .Values.configurator.enforceBasepath }}\n - name: HC_ENFORCE_BASEPATH\n value: \"{{ .Values.configurator.enforceBasepath }}\"\n {{- end }}\n {{- if and (.Values.git.enabled) (.Values.git.user.name) }}\n - name: GIT_AUTHOR_NAME\n value: {{ .Values.git.user.name }}\n - name: GIT_COMMITTER_NAME\n value: {{ .Values.git.user.name }}\n {{ end }}\n {{- if and (.Values.git.enabled) (.Values.git.user.email) }}\n - name: GIT_AUTHOR_EMAIL\n value: {{ .Values.git.user.email }}\n - name: GIT_COMMITTER_EMAIL\n value: {{ .Values.git.user.email }}\n {{ end }}\n {{- range $key, $value := .Values.configurator.extraEnv }}\n - name: {{ $key }}\n value: {{ $value }}\n {{- end }}\n volumeMounts:\n - mountPath: /config\n name: config\n {{- if .Values.git.enabled }}\n - mountPath: {{ .Values.git.keyPath }}\n name: git-secret\n {{- end }}\n {{- if .Values.extraVolumeMounts }}{{ toYaml .Values.extraVolumeMounts | trim | nindent 10 }}{{ end }}\n {{- if .Values.usePodSecurityContext }}\n securityContext:\n runAsUser: {{ default 0 .Values.runAsUser }}\n {{- if and (.Values.runAsUser) (.Values.fsGroup) }}\n {{- if not (eq .Values.runAsUser 0.0) }}\n fsGroup: {{ .Values.fsGroup }}\n {{- end }}\n {{- end }}\n {{- end }}\n resources:\n{{ toYaml .Values.configurator.resources | indent 12 }}\n {{- end }}\n {{- if .Values.vscode.enabled }}\n - name: vscode\n image: \"{{ .Values.vscode.image.repository }}:{{ .Values.vscode.image.tag }}\"\n imagePullPolicy: {{ .Values.vscode.image.pullPolicy }}\n workingDir: {{ .Values.vscode.hassConfig }}\n args:\n - --port={{ .Values.vscode.service.port }}\n {{- if not (.Values.vscode.password) }}\n - --auth=none\n {{- end }}\n {{- if .Values.vscode.vscodePath }}\n - --extensions-dir={{ .Values.vscode.vscodePath }}\n - --user-data-dir={{ .Values.vscode.vscodePath }}\n - {{ .Values.vscode.hassConfig }}\n {{- end }}\n ports:\n - name: vscode\n containerPort: {{ .Values.vscode.service.port }}\n protocol: TCP\n env:\n {{- if .Values.vscode.password }}\n - name: PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"home-assistant.fullname\" . 
}}-vscode\n key: password\n {{- end }}\n {{- if and (.Values.git.enabled) (.Values.git.user.name) }}\n - name: GIT_AUTHOR_NAME\n value: {{ .Values.git.user.name }}\n - name: GIT_COMMITTER_NAME\n value: {{ .Values.git.user.name }}\n {{ end }}\n {{- if and (.Values.git.enabled) (.Values.git.user.email) }}\n - name: GIT_AUTHOR_EMAIL\n value: {{ .Values.git.user.email }}\n - name: GIT_COMMITTER_EMAIL\n value: {{ .Values.git.user.email }}\n {{ end }}\n {{- range $key, $value := .Values.vscode.extraEnv }}\n - name: {{ $key }}\n value: {{ $value }}\n {{- end }}\n volumeMounts:\n - mountPath: /config\n name: config\n {{- if .Values.git.enabled }}\n - mountPath: {{ .Values.git.keyPath }}\n name: git-secret\n {{- end }}\n {{- if .Values.extraVolumeMounts }}{{ toYaml .Values.extraVolumeMounts | trim | nindent 10 }}{{ end }}\n {{- if .Values.usePodSecurityContext }}\n securityContext:\n runAsUser: {{ default 0 .Values.runAsUser }}\n {{- if and (.Values.runAsUser) (.Values.fsGroup) }}\n {{- if not (eq .Values.runAsUser 0.0) }}\n fsGroup: {{ .Values.fsGroup }}\n {{- end }}\n {{- end }}\n {{- end }}\n resources:\n{{ toYaml .Values.vscode.resources | indent 12 }}\n {{- end }}\n {{- if .Values.appdaemon.enabled }}\n - name: appdaemon\n image: \"{{ .Values.appdaemon.image.repository }}:{{ .Values.appdaemon.image.tag }}\"\n imagePullPolicy: {{ .Values.appdaemon.image.pullPolicy }}\n ports:\n - name: appdaemon\n containerPort: {{ .Values.appdaemon.service.port }}\n protocol: TCP\n env:\n - name: HA_URL\n value: \"http://localhost:{{ .Values.service.port }}\"\n {{- if .Values.appdaemon.ingress.enabled }}\n - name: DASH_URL\n #value: http{{ if .Values.appdaemon.ingress.tls }}s{{ end }}://{{ index .Values.appdaemon.ingress.hosts 0 }}\n value: http://0.0.0.0:{{ .Values.appdaemon.service.port }}\n {{- end }}\n {{- if .Values.appdaemon.haToken }}\n - name: TOKEN\n valueFrom:\n secretKeyRef:\n name: {{ template \"home-assistant.fullname\" . }}-appdaemon\n key: token\n {{- end }}\n {{- if and (.Values.git.enabled) (.Values.git.user.name) }}\n - name: GIT_AUTHOR_NAME\n value: {{ .Values.git.user.name }}\n - name: GIT_COMMITTER_NAME\n value: {{ .Values.git.user.name }}\n {{ end }}\n {{- if and (.Values.git.enabled) (.Values.git.user.email) }}\n - name: GIT_AUTHOR_EMAIL\n value: {{ .Values.git.user.email }}\n - name: GIT_COMMITTER_EMAIL\n value: {{ .Values.git.user.email }}\n {{ end }}\n {{- range $key, $value := .Values.vscode.extraEnv }}\n - name: {{ $key }}\n value: {{ $value }}\n {{- end }}\n volumeMounts:\n - mountPath: /ha-conf\n name: config\n - mountPath: /conf\n subPath: appdaemon\n name: config\n {{- if .Values.extraVolumeMounts }}{{ toYaml .Values.extraVolumeMounts | trim | nindent 10 }}{{ end }}\n {{- if .Values.usePodSecurityContext }}\n securityContext:\n runAsUser: {{ default 0 .Values.runAsUser }}\n {{- if and (.Values.runAsUser) (.Values.fsGroup) }}\n {{- if not (eq .Values.runAsUser 0.0) }}\n fsGroup: {{ .Values.fsGroup }}\n {{- end }}\n {{- end }}\n {{- end }}\n resources:\n{{ toYaml .Values.appdaemon.resources | indent 12 }}\n {{- end }}\n volumes:\n - name: config\n {{- if .Values.persistence.enabled }}\n {{- if .Values.persistence.hostPath }}\n hostPath:\n path: {{.Values.persistence.hostPath}}\n type: Directory\n {{- else }}\n persistentVolumeClaim:\n claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template \"home-assistant.fullname\" . 
}}{{- end }}\n {{- end }}\n {{- else }}\n emptyDir: {}\n {{ end }}\n {{- if .Values.zwave.enabled }}\n - name: ttyacm\n hostPath:\n path: /dev/{{.Values.zwave.device}}\n {{- end }}\n {{- range .Values.hostMounts }}\n - name: {{ .name }}\n hostPath:\n path: {{.hostPath}}\n {{- if .type }}\n type: {{ .type }}\n {{- end }}\n {{- end }}\n {{- if .Values.git.enabled }}\n - name: git-secret\n secret:\n defaultMode: 256\n secretName: {{ .Values.git.secret }}\n {{ end }}\n {{- if .Values.extraVolumes }}{{ toYaml .Values.extraVolumes | trim | nindent 6 }}{{ end }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"home-assistant.fullname\" . -}}\n{{- $servicePort := .Values.service.port -}}\n{{- $ingressPath := .Values.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n helm.sh/chart: {{ include \"home-assistant.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- with .Values.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . | quote }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: {{ $servicePort }}\n {{- end }}\n{{- end }}\n",
"# pvc.yaml\n{{- if .Values.persistence.enabled -}}\n{{- if not .Values.persistence.existingClaim -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"home-assistant.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n helm.sh/chart: {{ include \"home-assistant.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n{{- end -}}",
"# secret.yaml\n{{- if .Values.configurator.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"home-assistant.fullname\" . }}-configurator\n labels:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n helm.sh/chart: {{ include \"home-assistant.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ntype: Opaque\ndata:\n {{- if .Values.configurator.hassApiPassword }}\n hass-api-password: {{ .Values.configurator.hassApiPassword | b64enc | quote }}\n {{- end }}\n {{- if .Values.configurator.username }}\n username: {{ .Values.configurator.username | b64enc | quote }}\n {{- end }}\n {{- if .Values.configurator.password }}\n password: {{ .Values.configurator.password | b64enc | quote }}\n {{- end }}\n{{- end }}\n---\n{{- if .Values.vscode.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"home-assistant.fullname\" . }}-vscode\n labels:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n helm.sh/chart: {{ include \"home-assistant.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ntype: Opaque\ndata:\n {{- if .Values.vscode.password }}\n password: {{ .Values.vscode.password | b64enc | quote }}\n {{- end }}\n{{- end }}\n---\n{{- if .Values.appdaemon.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"home-assistant.fullname\" . }}-appdaemon\n labels:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n helm.sh/chart: {{ include \"home-assistant.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ntype: Opaque\ndata:\n {{- if .Values.appdaemon.haToken }}\n token: {{ .Values.appdaemon.haToken | b64enc | quote }}\n {{- end }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"home-assistant.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n helm.sh/chart: {{ include \"home-assistant.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- if .Values.service.labels }}\n{{ toYaml .Values.service.labels | indent 4 }}\n{{- end }}\n{{- with .Values.service.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if (or (eq .Values.service.type \"ClusterIP\") (empty .Values.service.type)) }}\n type: ClusterIP\n {{- if .Values.service.clusterIP }}\n clusterIP: {{ .Values.service.clusterIP }}\n {{end}}\n{{- else if eq .Values.service.type \"LoadBalancer\" }}\n type: {{ .Values.service.type }}\n {{- if .Values.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n {{- end }}\n {{- if .Values.service.externalTrafficPolicy }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}\n {{- end }}\n {{- if .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}\n {{- end -}}\n{{- else }}\n type: {{ .Values.service.type }}\n{{- end }}\n{{- if .Values.service.externalIPs }}\n externalIPs:\n{{ toYaml .Values.service.externalIPs | indent 4 }}\n{{- end }}\n publishNotReadyAddresses: {{ .Values.service.publishNotReadyAddresses }}\n ports:\n - name: {{ .Values.service.portName }}\n port: {{ .Values.service.port }}\n protocol: TCP\n targetPort: 8123\n{{ if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePort))) }}\n nodePort: {{.Values.service.nodePort}}\n{{ end }}\n{{- if .Values.configurator.enabled }}\n - name: configurator\n port: {{ .Values.configurator.service.port }}\n protocol: TCP\n targetPort: 3218\n{{ if (and (eq .Values.configurator.service.type \"NodePort\") (not (empty .Values.configurator.service.nodePort))) }}\n nodePort: {{.Values.configurator.service.nodePort}}\n{{ end }}\n{{- end }}\n{{- if .Values.vscode.enabled }}\n - name: vscode\n port: {{ .Values.vscode.service.port }}\n protocol: TCP\n targetPort: {{ .Values.vscode.service.port }}\n{{ if (and (eq .Values.vscode.service.type \"NodePort\") (not (empty .Values.vscode.service.nodePort))) }}\n nodePort: {{.Values.vscode.service.nodePort}}\n{{ end }}\n{{- end }}\n{{- if .Values.appdaemon.enabled }}\n - name: appdaemon\n port: {{ .Values.appdaemon.service.port }}\n protocol: TCP\n targetPort: 5050\n{{ if (and (eq .Values.appdaemon.service.type \"NodePort\") (not (empty .Values.appdaemon.service.nodePort))) }}\n nodePort: {{.Values.appdaemon.service.nodePort}}\n{{ end }}\n{{- end }}\n{{- if .Values.service.additionalPorts }}\n {{- .Values.service.additionalPorts | toYaml | indent 4 }}\n{{- end }}\n selector:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n",
"# servicemonitor.yaml\n{{- if and ( .Values.monitoring.serviceMonitor.enabled ) ( .Values.monitoring.enabled ) }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n{{- if .Values.monitoring.serviceMonitor.labels }}\n labels:\n{{ toYaml .Values.monitoring.serviceMonitor.labels | indent 4}}\n{{- end }}\n name: {{ template \"home-assistant.fullname\" . }}-prometheus-exporter\n{{- if .Values.monitoring.serviceMonitor.namespace }}\n namespace: {{ .Values.monitoring.serviceMonitor.namespace }}\n{{- end }}\nspec:\n endpoints:\n - targetPort: {{ .Values.service.portName }}\n path: /api/prometheus\n{{- if .Values.monitoring.serviceMonitor.interval }}\n interval: {{ .Values.monitoring.serviceMonitor.interval }}\n{{- end }}\n{{- if .Values.monitoring.serviceMonitor.bearerTokenFile }}\n bearerTokenFile: {{ .Values.monitoring.serviceMonitor.bearerTokenFile }}\n{{- end }}\n{{- if .Values.monitoring.serviceMonitor.bearerTokenSecret }}\n bearerTokenSecret:\n name: {{ .Values.monitoring.serviceMonitor.bearerTokenSecret.name }}\n key: {{ .Values.monitoring.serviceMonitor.bearerTokenSecret.key }}\n {{- if .Values.monitoring.serviceMonitor.bearerTokenSecret.optional }}\n optional: {{ .Values.monitoring.serviceMonitor.bearerTokenSecret.optional }}\n {{- end }}\n{{- end }}\n jobLabel: {{ template \"home-assistant.fullname\" . }}-prometheus-exporter\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n{{- end }}\n",
"# vscode-ingress.yaml\n{{- if and (.Values.vscode.enabled) (.Values.vscode.ingress.enabled) }}\n{{- $fullName := include \"home-assistant.fullname\" . -}}\n{{- $servicePort := .Values.vscode.service.port -}}\n{{- $ingressPath := .Values.vscode.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}-vscode\n labels:\n app.kubernetes.io/name: {{ include \"home-assistant.name\" . }}\n helm.sh/chart: {{ include \"home-assistant.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- with .Values.vscode.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.vscode.ingress.tls }}\n tls:\n {{- range .Values.vscode.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.vscode.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: {{ $servicePort }}\n {{- end }}\n{{- end }}\n"
] | # Default values for home-assistant.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: homeassistant/home-assistant
tag: 0.108.7
pullPolicy: IfNotPresent
pullSecrets: []
# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate
# Probes configuration
probes:
liveness:
enabled: true
scheme: HTTP
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10
readiness:
enabled: true
scheme: HTTP
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10
startup:
enabled: false
scheme: HTTP
failureThreshold: 30
periodSeconds: 10
service:
type: ClusterIP
port: 8123
portName: api
additionalPorts: []
# - name: homematicproxy
# port: 2001
# targetPort: 2001
annotations: {}
labels: {}
clusterIP: ""
## List of IP addresses at which the home-assistant service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
# nodePort: 30000
publishNotReadyAddresses: false
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- home-assistant.local
tls: []
# - secretName: home-assistant-tls
# hosts:
# - home-assistant.local
hostNetwork: false
persistence:
enabled: true
## home-assistant data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
##
## If you want to use a volume on the host machine instead of a PVC:
# hostPath: /path/to/the/config/folder
accessMode: ReadWriteOnce
size: 5Gi
## Additional hass container environment variables
## For instance, to add an http_proxy
##
extraEnv: {}
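# Example (a minimal sketch; the proxy URL is an illustrative placeholder):
# extraEnv:
#   HTTP_PROXY: http://proxy.example.com:3128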
## Additional hass container environment variable from k8s secrets
## For instance, to add a password
## You can use `!env_var` in the Home Assistant configuration to reference these variables
extraEnvSecrets:
# Example
# This will set ${MQTT_PASSWORD} to the 'password' key from the 'mqtt' secret
# MQTT_PASSWORD:
# secret: mqtt
# key: password
## If you'd like to provide your own Kubernetes Secret object instead of passing your values
## individually, pass in the name of a created + populated Secret.
## All secrets will be mounted as environment variables, with each key/value mapping to a
## corresponding environment variable.
##
extraSecretForEnvFrom: []
# - home-assistant-secrets
# Enable pod security context (must be `true` if runAsUser or fsGroup are set)
usePodSecurityContext: true
# Set runAsUser to 1000 to let home-assistant run as the non-root user 'hass', which exists in the 'runningman84/alpine-homeassistant' docker image.
# When setting runAsUser to a different value than 0 also set fsGroup to the same value:
# runAsUser: <defaults to 0>
# fsGroup: <will be omitted in deployment if runAsUser is 0>
git:
enabled: false
## we just use the hass-configurator container image
## you can use any image which has git and openssh installed
##
image:
repository: causticlab/hass-configurator-docker
tag: 0.3.5-x86_64
pullPolicy: IfNotPresent
## Specify the command that runs in the git-sync container to pull in configuration.
# command: []
# Committer settings
user:
name: ""
email: ""
# repo:
secret: git-creds
syncPath: /config
keyPath: /root/.ssh
zwave:
enabled: false
device: ttyACM0
# Mount devices or folders from the host machine. Can be used for USB device mounting.
hostMounts: []
# Example
# - name: zha
# hostPath: /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_6120245D-if01-port0
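# Optional per-entry fields read by the deployment template (illustrative values):
#   mountPath: /dev/zha   # where to mount inside the container; defaults to hostPath when omitted
#   type: CharDevice      # hostPath type; only rendered when set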
configurator:
enabled: false
## hass-configurator container image
##
image:
repository: causticlab/hass-configurator-docker
tag: 0.3.5-x86_64
pullPolicy: IfNotPresent
## URL for the home assistant API endpoint
# hassApiUrl: http://home-assistant:8123/api/
## home assistant API password
# hassApiPassword:
## path where the home assistant configuration is stored
basepath: /config
## don't allow switching out of the base path
enforceBasepath: true
## username for basic auth for accessing the configurator
# username:
## password (sha256-hash) for basic auth for accessing the configurator
## For example "test" would be "{sha256}9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"
# password:
## Additional hass-configurator container environment variables
## For instance, to add an http_proxy
##
extraEnv: {}
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- home-assistant.local
tls: []
# - secretName: home-assistant-tls
# hosts:
# - home-assistant.local
service:
type: ClusterIP
port: 3218
annotations: {}
labels: {}
clusterIP: ""
## List of IP addresses at which the hass-configurator service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
# externalTrafficPolicy: Local
# nodePort: 30000
## Add support for Prometheus
# The prometheus component has to be enabled in configuration.yaml
# https://www.home-assistant.io/components/prometheus/
monitoring:
enabled: false
serviceMonitor:
# When set to true, and if the Prometheus Operator is installed, a ServiceMonitor is used to configure scraping
enabled: true
# Set the namespace the ServiceMonitor should be deployed in
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the ServiceMonitor; use these to define your scrape label for the Prometheus Operator
# labels:
# Set bearerTokenFile for home assistant auth (use long-lived access tokens)
# bearerTokenFile:
# Set bearerTokenSecret for home assistant auth (use long-lived access tokens)
# bearerTokenSecret:
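# Example (illustrative secret name/key; the ServiceMonitor template reads name, key and optional):
# bearerTokenSecret:
#   name: home-assistant-token
#   key: token
#   optional: false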
vscode:
enabled: false
## code-server container image
##
image:
repository: codercom/code-server
tag: 3.1.1
pullPolicy: IfNotPresent
## VSCode password
# password:
## path where the home assistant configuration is stored
hassConfig: /config
## path where the VS Code data should reside
vscodePath: /config/.vscode
## Additional hass-vscode container environment variables
## For instance, to add an http_proxy
##
extraEnv: {}
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- home-assistant.local
tls: []
# - secretName: home-assistant-tls
# hosts:
# - home-assistant.local
service:
type: ClusterIP
port: 80
annotations: {}
labels: {}
clusterIP: ""
## List of IP addresses at which the hass-vscode service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
# nodePort: 30000
appdaemon:
enabled: false
## appdaemon container image
##
image:
repository: acockburn/appdaemon
tag: 3.0.5
pullPolicy: IfNotPresent
## Home Assistant API token
# haToken:
## Additional appdaemon container environment variables
## For instance, to add an http_proxy
##
extraEnv: {}
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- appdaemon.local
tls: []
# - secretName: appdaemon-tls
# hosts:
# - appdaemon.local
service:
type: ClusterIP
port: 5050
annotations: {}
labels: {}
clusterIP: ""
## List of IP addresses at which the hass-appdaemon service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
# nodePort: 30000
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
# Any extra volumes to define for the pod
extraVolumes: []
# - name: example-name
# hostPath:
# path: /path/on/host
# type: DirectoryOrCreate
# Any extra volume mounts to define for the containers
extraVolumeMounts: []
# - name: example-name
# mountPath: /path/in/container
|
openldap | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"openldap.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"openldap.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"openldap.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n\n{{/*\nGenerate chart secret name\n*/}}\n{{- define \"openldap.secretName\" -}}\n{{ default (include \"openldap.fullname\" .) .Values.existingSecret }}\n{{- end -}}\n",
"# configmap-customldif.yaml\n#\n# A ConfigMap spec for openldap slapd that map directly to files under\n# /container/service/slapd/assets/config/bootstrap/ldif/custom\n#\n{{- if .Values.customLdifFiles }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"openldap.fullname\" . }}-customldif\n labels:\n app: {{ template \"openldap.name\" . }}\n chart: {{ template \"openldap.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\ndata:\n{{- range $key, $val := .Values.customLdifFiles }}\n {{ $key }}: |-\n{{ $val | indent 4}}\n{{- end }}\n{{- end }}\n",
"# configmap-env.yaml\n#\n# A ConfigMap spec for openldap slapd that map directly to env variables in the Pod.\n# List of environment variables supported is from the docker image:\n# https://github.com/osixia/docker-openldap#beginner-guide\n# Note that passwords are defined as secrets\n#\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"openldap.fullname\" . }}-env\n labels:\n app: {{ template \"openldap.name\" . }}\n chart: {{ template \"openldap.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\ndata:\n{{ toYaml .Values.env | indent 2 }}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"openldap.fullname\" . }}\n labels:\n app: {{ template \"openldap.name\" . }}\n chart: {{ template \"openldap.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\nspec:\n replicas: {{ .Values.replicaCount }}\n{{- if .Values.strategy }}\n strategy:\n{{ toYaml .Values.strategy | indent 4 }}\n{{- end }}\n selector:\n matchLabels:\n app: {{ template \"openldap.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n annotations:\n checksum/configmap-env: {{ include (print $.Template.BasePath \"/configmap-env.yaml\") . | sha256sum }}\n{{- if .Values.customLdifFiles}}\n checksum/configmap-customldif: {{ include (print $.Template.BasePath \"/configmap-customldif.yaml\") . | sha256sum }}\n{{- end }}\n{{- if .Values.podAnnotations}}\n{{ toYaml .Values.podAnnotations | indent 8}}\n{{- end }}\n labels:\n app: {{ template \"openldap.name\" . }}\n release: {{ .Release.Name }}\n spec:\n {{- if or .Values.customLdifFiles .Values.tls.enabled }}\n initContainers:\n {{- end }}\n {{- if .Values.customLdifFiles }}\n - name: {{ .Chart.Name }}-init-ldif\n image: busybox\n command: ['sh', '-c', 'cp /customldif/* /ldifworkingdir']\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n volumeMounts:\n - name: customldif\n mountPath: /customldif\n - name: ldifworkingdir\n mountPath: /ldifworkingdir\n resources:\n{{ toYaml .Values.initResources | indent 10 }}\n {{- end }}\n {{- if .Values.tls.enabled }}\n - name: {{ .Chart.Name }}-init-tls\n image: busybox\n command: ['sh', '-c', 'cp /tls/* /certs']\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n volumeMounts:\n - name: tls\n mountPath: /tls\n - name: certs\n mountPath: /certs\n resources:\n{{ toYaml .Values.initResources | indent 10 }}\n {{- if .Values.tls.CA.enabled }}\n - name: {{ .Chart.Name }}-init-catls\n image: busybox\n command: ['sh', '-c', 'cp /catls/ca.crt /certs']\n volumeMounts:\n - name: catls\n mountPath: /catls\n - name: certs\n mountPath: /certs\n resources:\n{{ toYaml .Values.initResources | indent 10 }}\n {{- end }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n args:\n - -l\n - {{ .Values.logLevel }}\n{{- if .Values.customLdifFiles }}\n - --copy-service\n{{- end }}\n ports:\n - name: ldap-port\n containerPort: 389\n - name: ssl-ldap-port\n containerPort: 636\n envFrom:\n - configMapRef:\n name: {{ template \"openldap.fullname\" . }}-env\n - secretRef:\n name: {{ template \"openldap.secretName\" . 
}}\n volumeMounts:\n - name: data\n mountPath: /var/lib/ldap\n subPath: data\n - name: data\n mountPath: /etc/ldap/slapd.d\n subPath: config-data\n {{- if .Values.customLdifFiles }}\n - name: ldifworkingdir\n mountPath: /container/service/slapd/assets/config/bootstrap/ldif/custom\n {{- end }}\n {{- if .Values.tls.enabled }}\n - name: certs\n mountPath: /container/service/slapd/assets/certs\n {{- end }}\n env:\n {{- if .Values.tls.enabled }}\n - name: LDAP_TLS_CRT_FILENAME\n value: tls.crt\n - name: LDAP_TLS_KEY_FILENAME\n value: tls.key\n {{- if .Values.tls.CA.enabled }}\n - name: LDAP_TLS_CA_CRT_FILENAME\n value: ca.crt\n {{- end }}\n {{- end }}\n livenessProbe:\n tcpSocket:\n port: ldap-port\n initialDelaySeconds: 20\n periodSeconds: 10\n failureThreshold: 10\n readinessProbe:\n tcpSocket:\n port: ldap-port\n initialDelaySeconds: 20\n periodSeconds: 10\n failureThreshold: 10\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n volumes:\n {{- if .Values.customLdifFiles }}\n - name: customldif\n configMap:\n name: {{ template \"openldap.fullname\" . }}-customldif\n - name: ldifworkingdir\n emptyDir: {}\n {{- end }}\n {{- if .Values.tls.enabled }}\n - name: tls\n secret:\n secretName: {{ .Values.tls.secret }}\n {{- if .Values.tls.CA.enabled }}\n - name: catls\n secret:\n secretName: {{ .Values.tls.CA.secret }}\n {{- end }}\n {{- end }}\n - name: certs\n emptyDir:\n medium: Memory\n - name: data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.existingClaim | default (include \"openldap.fullname\" .) }}\n {{- else }}\n emptyDir: {}\n {{- end -}}\n",
"# openldap-test-runner.yaml\n{{- if .Values.test.enabled -}}\napiVersion: v1\nkind: Pod\nmetadata:\n name: \"{{ template \"openldap.fullname\" . }}-test-{{ randAlphaNum 5 | lower }}\"\n labels:\n app: {{ template \"openldap.name\" . }}\n chart: {{ template \"openldap.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\n annotations:\n \"helm.sh/hook\": test-success\nspec:\n initContainers:\n - name: test-framework\n image: {{ .Values.test.image.repository }}:{{ .Values.test.image.tag }}\n command:\n - \"bash\"\n - \"-c\"\n - |\n set -ex\n # copy bats to tools dir\n cp -R /usr/local/libexec/ /tools/bats/\n volumeMounts:\n - mountPath: /tools\n name: tools\n containers:\n - name: {{ .Release.Name }}-test\n image: {{ .Values.test.image.repository }}:{{ .Values.test.image.tag }}\n envFrom:\n - secretRef:\n name: {{ template \"openldap.secretName\" . }}\n command: [\"/tools/bats/bats\", \"-t\", \"/tests/run.sh\"]\n volumeMounts:\n - mountPath: /tests\n name: tests\n readOnly: true\n - mountPath: /tools\n name: tools\n volumes:\n - name: tests\n configMap:\n name: {{ template \"openldap.fullname\" . }}-tests\n - name: tools\n emptyDir: {}\n restartPolicy: Never\n{{- end -}}\n",
"# openldap-tests.yaml\n{{- if .Values.test.enabled -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"openldap.fullname\" . }}-tests\n labels:\n app: {{ template \"openldap.name\" . }}\n chart: {{ template \"openldap.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\ndata:\n run.sh: |-\n @test \"Testing connecting to slapd server\" {\n # Ideally, this should be in the docker image, but there is not a generic image we can use\n # with bats and ldap-utils installed. It is not worth for now to push an image for this.\n apt-get update && apt-get install -y ldap-utils\n ldapsearch -x -H ldap://{{ template \"openldap.fullname\" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.service.ldapPort }} -b \"dc=example,dc=org\" -D \"cn=admin,dc=example,dc=org\" -w $LDAP_ADMIN_PASSWORD\n }\n{{- end -}}\n",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"openldap.fullname\" . }}\n labels:\n app: {{ template \"openldap.name\" . }}\n chart: {{ template \"openldap.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# secret.yaml\n{{ if not .Values.existingSecret }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"openldap.fullname\" . }}\n labels:\n app: {{ template \"openldap.name\" . }}\n chart: {{ template \"openldap.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\ntype: Opaque\ndata:\n LDAP_ADMIN_PASSWORD: {{ .Values.adminPassword | default (randAlphaNum 32) | b64enc | quote }}\n LDAP_CONFIG_PASSWORD: {{ .Values.configPassword | default (randAlphaNum 32) | b64enc | quote }}\n{{ end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\n name: {{ template \"openldap.fullname\" . }}\n labels:\n app: {{ template \"openldap.name\" . }}\n chart: {{ template \"openldap.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.extraLabels }}\n{{ toYaml .Values.extraLabels | indent 4 }}\n{{- end }}\nspec:\n {{- with .Values.service.clusterIP }}\n clusterIP: {{ . | quote }}\n {{- end }}\n{{- if .Values.service.externalIPs }}\n externalIPs:\n{{ toYaml .Values.service.externalIPs | indent 4 }}\n{{- end }}\n{{- if .Values.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP | quote }}\n{{- end }}\n{{- if .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}\n{{- end }}\n ports:\n - name: ldap-port\n protocol: TCP\n port: {{ .Values.service.ldapPort }}\n targetPort: ldap-port\n - name: ssl-ldap-port\n protocol: TCP\n port: {{ .Values.service.sslLdapPort }}\n targetPort: ssl-ldap-port\n selector:\n app: {{ template \"openldap.name\" . }}\n release: {{ .Release.Name }}\n type: {{ .Values.service.type }}\n"
] | # Default values for openldap.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
# Define the deployment strategy - IMPORTANT: set rollingUpdate: null when using the Recreate strategy.
# This prevents merging with existing map keys, which is forbidden.
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 1
# maxUnavailable: 0
#
# or
#
# type: Recreate
# rollingUpdate: null
image:
# From repository https://github.com/osixia/docker-openldap
repository: osixia/openldap
tag: 1.2.4
pullPolicy: IfNotPresent
# Specifies an existing secret to be used for the admin and config user passwords
existingSecret: ""
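# The secret must provide the LDAP_ADMIN_PASSWORD and LDAP_CONFIG_PASSWORD keys,
# mirroring the generated secret in templates/secret.yaml. Example (illustrative name):
# existingSecret: openldap-passwords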
# settings for enabling TLS
tls:
enabled: false
secret: "" # The name of a kubernetes.io/tls type secret to use for TLS
CA:
enabled: false
secret: "" # The name of a generic secret to use for custom CA certificate (ca.crt)
## Add additional labels to all resources
extraLabels: {}
## Add additional annotations to pods
podAnnotations: {}
service:
annotations: {}
ldapPort: 389
sslLdapPort: 636 # Only used if tls.enabled is true
## List of IP addresses at which the service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
loadBalancerIP: ""
loadBalancerSourceRanges: []
type: ClusterIP
# Default configuration for openldap as environment variables. These get injected directly into the container.
# Use the env variables from https://github.com/osixia/docker-openldap#beginner-guide
env:
LDAP_ORGANISATION: "Example Inc."
LDAP_DOMAIN: "example.org"
LDAP_BACKEND: "hdb"
LDAP_TLS: "true"
LDAP_TLS_ENFORCE: "false"
LDAP_REMOVE_CONFIG_AFTER_SETUP: "true"
# Default Passwords to use, stored as a secret. If unset, passwords are auto-generated.
# You can override these at install time with
# helm install openldap --set adminPassword=<passwd>,configPassword=<passwd>
# adminPassword: admin
# configPassword: config
# Custom openldap configuration files used to override default settings
# customLdifFiles:
# 01-default-users.ldif: |-
# Predefine users here
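# A minimal sketch of a working LDIF entry (illustrative; match the dc= components
# to your LDAP_DOMAIN):
# customLdifFiles:
#   01-default-users.ldif: |-
#     dn: ou=people,dc=example,dc=org
#     objectClass: organizationalUnit
#     ou: people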
## Persist data to a persistent volume
persistence:
enabled: false
## database data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
# existingClaim: ""
resources: {}
# requests:
# cpu: "100m"
# memory: "256Mi"
# limits:
# cpu: "500m"
# memory: "512Mi"
initResources: {}
# requests:
# cpu: "100m"
# memory: "128Mi"
# limits:
# cpu: "100m"
# memory: "128Mi"
nodeSelector: {}
tolerations: []
affinity: {}
## test container details
test:
enabled: false
image:
repository: dduportal/bats
tag: 0.4.0
# Set the container log level
# Valid log levels: none, error, warning, info (default), debug, trace
logLevel: info
|
aerospike | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"aerospike.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"aerospike.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate aerospike mesh setup\n*/}}\n{{- define \"aerospike.mesh\" -}}\n {{- $fullname := include \"aerospike.fullname\" . -}}\n {{- range $i, $e := until (.Values.replicaCount|int) }}\n {{ printf \"mesh-seed-address-port %s-%d.%s 3002\" $fullname $i $fullname }}\n {{- end -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"aerospike.fullname\" . }}\n labels:\n app: {{ template \"aerospike.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- with .Values.labels }}\n {{ toYaml . | nindent 4 }}\n {{- end }}\ndata:\n aerospike.conf: |\n # aerospike configuration\n {{- $mesh := include \"aerospike.mesh\" . }}\n {{ .Values.confFile |replace \"#REPLACE_THIS_LINE_WITH_MESH_CONFIG\" $mesh | indent 4}}\n",
"# nodeport.yaml\n{{- if .Values.service.nodePort.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: {{ template \"aerospike.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- with .Values.labels }}\n {{ toYaml . | nindent 4 }}\n {{- end }}\n name: {{ template \"aerospike.name\" . }}-nodeport\nspec:\n ports:\n - name: client\n nodePort: {{ .Values.service.nodePort.port }}\n port: 3000\n protocol: TCP\n targetPort: 3000\n selector:\n app: {{ template \"aerospike.name\" . }}\n release: {{ .Release.Name }}\n type: NodePort\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"aerospike.fullname\" . }}\n labels:\n app: {{ template \"aerospike.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- with .Values.labels }}\n {{ toYaml . | nindent 4 }}\n {{- end }}\n annotations:\n {{- with .Values.service.annotations }}\n {{ tpl (toYaml .) $ | nindent 4 }}\n {{- end }}\nspec:\n # so the mesh peer-finder works\n publishNotReadyAddresses: true\n {{ if .Values.service.clusterIP }}\n clusterIP: {{ .Values.service.clusterIP | quote }}\n {{ end }}\n type: {{ .Values.service.type }}\n {{ if eq .Values.service.type \"LoadBalancer\" -}} {{ if .Values.service.loadBalancerIP -}}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n {{ end -}}\n {{- if .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n {{ toYaml .Values.service.loadBalancerSourceRanges | nindent 2}}\n {{ end -}}\n {{- end -}}\n ports:\n - port: 3000\n protocol: TCP\n name: client\n - port: 3002\n protocol: TCP\n name: mesh\n {{ if .Values.metrics.enabled }}\n - port: 9145\n protocol: TCP\n name: metrics\n {{- end }}\n selector:\n app: {{ template \"aerospike.name\" . }}\n release: {{ .Release.Name }}\n",
"# servicemonitor.yaml\n{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata: \n name: {{ template \"aerospike.fullname\" . }}\n labels:\n app: {{ template \"aerospike.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- with .Values.labels }}\n {{ toYaml . | nindent 4 }}\n {{- end }}\nspec:\n endpoints:\n - path: /metrics\n port: metrics\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n selector:\n matchLabels:\n app: {{ template \"aerospike.name\" . }}\n targetLabels:\n {{ range .Values.metrics.serviceMonitor.targetLabels }}\n - {{ . }}\n {{- end }}\n{{- end }}\n",
"# statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ template \"aerospike.fullname\" . }}\n labels:\n app: {{ template \"aerospike.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- with .Values.labels }}\n {{ toYaml . | nindent 4 }}\n {{- end }}\n {{- with .Values.annotations }}\n annotations:\n {{ toYaml . | nindent 4 }}\n {{- end }}\nspec:\n serviceName: {{ template \"aerospike.fullname\" . }}\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"aerospike.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"aerospike.name\" . }}\n release: {{ .Release.Name }}\n annotations:\n checksum/config: {{ .Values.confFile | sha256sum }}\n spec:\n terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}\n containers:\n - name: {{ template \"aerospike.fullname\" . }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n {{ if .Values.command }}\n command:\n {{ toYaml .Values.command | nindent 10 }}\n {{ end }}\n {{ if .Values.args }}\n args:\n {{ toYaml .Values.args | nindent 10 }}\n {{ end }}\n ports:\n - containerPort: 3000\n name: clients\n - containerPort: 3002\n name: mesh\n - containerPort: 3003\n name: info\n readinessProbe:\n tcpSocket:\n port: 3000\n initialDelaySeconds: 15\n timeoutSeconds: 1\n volumeMounts:\n - name: config-volume\n mountPath: /etc/aerospike\n {{- range $pv := .Values.persistentVolume }}\n - name: {{ $pv.name | quote }}\n mountPath: {{ $pv.mountPath | quote }}\n {{- end }}\n resources:\n {{ toYaml .Values.resources | nindent 10 }}\n {{ if .Values.metrics.enabled }}\n - name: {{ template \"aerospike.fullname\" . }}-metrics\n image: \"{{ .Values.metrics.image.repository }}:{{ .Values.metrics.image.tag }}\"\n ports:\n - containerPort: 9145\n name: metrics\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n {{ toYaml .Values.nodeSelector | nindent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n {{ toYaml .Values.affinity | nindent 8 }}\n {{- end }}\n {{- if .Values.tolerations}}\n tolerations:\n {{ toYaml .Values.tolerations | nindent 8 }}\n {{- end }}\n volumes:\n - name: config-volume\n configMap:\n name: {{ template \"aerospike.fullname\" . }}\n items:\n - key: aerospike.conf\n path: aerospike.conf\n {{ if .Values.image.pullSecret }}\n imagePullSecrets:\n - name: {{ .Values.image.pullSecret }}\n {{- end }}\n volumeClaimTemplates:\n {{- range $pv := .Values.persistentVolume }}\n - metadata:\n name: {{ $pv.name | quote }}\n spec:\n {{ toYaml $pv.template | nindent 6 }}\n {{- end }}\n"
] | # Default values for aerospike.
terminationGracePeriodSeconds: 30
replicaCount: 1
nodeSelector: {}
image:
repository: aerospike/aerospike-server
tag: 4.5.0.5
pullPolicy: IfNotPresent
# Pass a custom command. This is the equivalent of Entrypoint in Docker.
command: []
# Pass custom args. This is the equivalent of Cmd in Docker.
args: []
# Set to an empty object {} if no volumes need to be created
# See confFile below, which expects data files under /opt/aerospike/data
persistentVolume: {}
# - mountPath: /opt/aerospike/data
# name: aerospike-data
# template:
# accessModes: [ "ReadWriteOnce" ]
# # storageClassName: "standard"
# resources:
# requests:
# storage: "36G"
# selector:
# matchLabels:
# diskname: "aerospike-data"
service:
type: ClusterIP
# Provide any additional annotations which may be required.
# The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
annotations: {}
loadBalancerIP:
clusterIP: None
# This field takes a list of IP CIDR ranges, which Kubernetes will use to configure firewall exceptions
# loadBalancerSourceRanges:
# - 10.0.0.0/8
nodePort: {}
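# Example (illustrative port; templates/nodeport.yaml reads 'enabled' and 'port'):
# nodePort:
#   enabled: true
#   port: 30000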
# Turns on a sidecar that scrapes 'localhost:3000' and exposes metrics on port 9145
# A docker image built from this repo works well: https://github.com/alicebob/asprom
# but you will need to build/host it yourself
metrics:
serviceMonitor: {}
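# Example (illustrative image; the statefulset reads metrics.enabled and metrics.image,
# the ServiceMonitor reads serviceMonitor.enabled and serviceMonitor.targetLabels):
# metrics:
#   enabled: true
#   image:
#     repository: yourrepo/asprom
#     tag: latest
#   serviceMonitor:
#     enabled: true
#     targetLabels: []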
labels: {}
annotations: {}
tolerations: []
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
confFile: |-
#default config file
service {
user root
group root
paxos-protocol v5
paxos-single-replica-limit 1
pidfile /var/run/aerospike/asd.pid
service-threads 4
transaction-queues 4
transaction-threads-per-queue 4
proto-fd-max 15000
}
logging {
file /var/log/aerospike/aerospike.log {
context any info
}
console {
context any info
}
}
network {
service {
address any
port 3000
}
heartbeat {
address any
interval 150
#REPLACE_THIS_LINE_WITH_MESH_CONFIG
mode mesh
port 3002
timeout 20
protocol v3
}
fabric {
port 3001
}
info {
port 3003
}
}
namespace test {
replication-factor 2
memory-size 1G
default-ttl 5d
storage-engine device {
file /opt/aerospike/data/test.dat
filesize 4G
}
}
|
voyager | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"voyager.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"voyager.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" $name .Release.Name | trunc 63 -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"voyager.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"voyager.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# apiregistration.yaml\n{{- $ca := genCA \"svc-cat-ca\" 3650 }}\n{{- $cn := include \"voyager.fullname\" . -}}\n{{- $altName1 := printf \"%s.%s\" $cn .Release.Namespace }}\n{{- $altName2 := printf \"%s.%s.svc\" $cn .Release.Namespace }}\n{{- $cert := genSignedCert $cn nil (list $altName1 $altName2) 3650 $ca }}\napiVersion: apiregistration.k8s.io/v1beta1\nkind: APIService\nmetadata:\n name: v1beta1.admission.voyager.appscode.com\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"voyager.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nspec:\n group: admission.voyager.appscode.com\n version: v1beta1\n service:\n namespace: {{ .Release.Namespace }}\n name: {{ template \"voyager.fullname\" . }}\n caBundle: {{ b64enc $ca.Cert }}\n groupPriorityMinimum: {{ .Values.apiserver.groupPriorityMinimum }}\n versionPriority: {{ .Values.apiserver.versionPriority }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"voyager.fullname\" . }}-apiserver-cert\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"voyager.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\ntype: Opaque\ndata:\n tls.crt: {{ b64enc $cert.Cert }}\n tls.key: {{ b64enc $cert.Key }}\n---\n{{ if .Values.rbac.create }}\n# to read the config for terminating authentication\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: {{ template \"voyager.fullname\" . }}-apiserver-extension-server-authentication-reader\n namespace: kube-system\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"voyager.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nroleRef:\n kind: Role\n apiGroup: rbac.authorization.k8s.io\n name: extension-apiserver-authentication-reader\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"voyager.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n---\n# to delegate authentication and authorization\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"voyager.fullname\" . }}-apiserver-auth-delegator\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"voyager.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nroleRef:\n kind: ClusterRole\n apiGroup: rbac.authorization.k8s.io\n name: system:auth-delegator\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"voyager.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{ end }}",
"# cluster-role-binding.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"voyager.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"voyager.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"voyager.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"voyager.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{ end }}\n",
"# cluster-role.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"voyager.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"voyager.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nrules:\n- apiGroups:\n - apiextensions.k8s.io\n resources:\n - customresourcedefinitions\n verbs:\n - \"*\"\n- apiGroups:\n - extensions\n resources:\n - thirdpartyresources\n verbs:\n - \"*\"\n- apiGroups:\n - voyager.appscode.com\n resources: [\"*\"]\n verbs: [\"*\"]\n- apiGroups:\n - monitoring.coreos.com\n resources:\n - servicemonitors\n verbs: [\"get\", \"create\", \"update\", \"patch\"]\n- apiGroups:\n - apps\n resources:\n - deployments\n verbs: [\"*\"]\n- apiGroups:\n - extensions\n resources:\n - deployments\n - daemonsets\n - ingresses\n verbs: [\"*\"]\n- apiGroups: [\"\"]\n resources:\n - replicationcontrollers\n - services\n - endpoints\n - configmaps\n verbs: [\"*\"]\n- apiGroups: [\"\"]\n resources:\n - secrets\n verbs: [\"get\", \"list\", \"watch\", \"create\", \"update\", \"patch\"]\n- apiGroups: [\"\"]\n resources:\n - namespaces\n verbs: [\"get\", \"list\", \"watch\"]\n- apiGroups: [\"\"]\n resources:\n - events\n verbs: [\"create\"]\n- apiGroups: [\"\"]\n resources:\n - pods\n verbs: [\"list\", \"watch\", \"delete\", \"deletecollection\"]\n- apiGroups: [\"\"]\n resources:\n - nodes\n verbs: [\"list\", \"watch\", \"get\"]\n- apiGroups: [\"\"]\n resources:\n - serviceaccounts\n verbs: [\"get\", \"create\", \"delete\", \"patch\"]\n- apiGroups:\n - rbac.authorization.k8s.io\n resources:\n - rolebindings\n - roles\n verbs: [\"get\", \"create\", \"delete\", \"patch\"]\n{{ end }}\n",
"# deployment.yaml\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"voyager.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"voyager.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: \"{{ template \"voyager.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: \"{{ template \"voyager.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n{{- if and .Values.criticalAddon (eq .Release.Namespace \"kube-system\") }}\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n{{- end }}\n spec:\n serviceAccountName: {{ template \"voyager.serviceAccountName\" . }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.imagePullSecrets | indent 6 }}\n {{- end }}\n containers:\n - name: voyager\n image: {{ .Values.dockerRegistry }}/voyager:{{ .Values.imageTags.voyager }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n args:\n - run\n - --cloud-provider={{ .Values.cloudProvider }}\n - --cloud-config={{ .Values.cloudConfig }}\n - --v={{ .Values.logLevel }}\n - --rbac={{ .Values.rbac.create }}\n - --ingress-class={{ .Values.ingressClass }}\n - --operator-service={{ template \"voyager.fullname\" . }}\n - --docker-registry={{ .Values.dockerRegistry }}\n - --haproxy-image-tag={{ .Values.imageTags.haproxy }}\n - --exporter-image-tag={{ .Values.imageTags.voyager }}\n - --secure-port=8443\n - --audit-log-path=-\n - --tls-cert-file=/var/serving-cert/tls.crt\n - --tls-private-key-file=/var/serving-cert/tls.key\n ports:\n - containerPort: 8443\n - containerPort: 56790\n - containerPort: 56791\n readinessProbe:\n httpGet:\n path: /healthz\n port: 8443\n scheme: HTTPS\n volumeMounts:\n - mountPath: /var/serving-cert\n name: serving-cert\n{{- if .Values.persistence.enabled }}\n - mountPath: {{ dir .Values.cloudConfig | quote }}\n name: cloudconfig\n readOnly: true\n{{- end }}\n volumes:\n - name: serving-cert\n secret:\n defaultMode: 420\n secretName: {{ template \"voyager.fullname\" . }}-apiserver-cert\n{{- if .Values.persistence.enabled }}\n - hostPath:\n path: {{ .Values.persistence.hostPath | quote }}\n name: cloudconfig\n{{- end -}}\n{{- if and .Values.criticalAddon (eq .Release.Namespace \"kube-system\") }}\n tolerations:\n - key: CriticalAddonsOnly\n operator: Exists\n{{- end -}}\n{{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n{{- end -}}\n",
"# service-account.yaml\n{{ if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"voyager.serviceAccountName\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"voyager.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\n{{ end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"voyager.fullname\" . }}\n labels:\n app: \"{{ template \"voyager.name\" . }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n ports:\n # Port used to expose admission webhook apiserver\n - name: admission\n port: 443\n targetPort: 8443\n # Port used to expose Prometheus metrics for the operator\n - name: ops\n port: 56790\n targetPort: 56790\n # Port used to respond to Let's Encrypt HTTP challenges\n - name: acme\n port: 56791\n targetPort: 56791\n selector:\n app: \"{{ template \"voyager.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n",
"# user-roles.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: appscode:voyager:edit\n labels:\n rbac.authorization.k8s.io/aggregate-to-admin: \"true\"\n rbac.authorization.k8s.io/aggregate-to-edit: \"true\"\nrules:\n- apiGroups:\n - voyager.appscode.com\n resources:\n - certificates\n - ingresses\n verbs:\n - create\n - delete\n - deletecollection\n - get\n - list\n - patch\n - update\n - watch\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: appscode:voyager:view\n labels:\n rbac.authorization.k8s.io/aggregate-to-view: \"true\"\nrules:\n- apiGroups:\n - voyager.appscode.com\n resources:\n - certificates\n - ingresses\n verbs:\n - get\n - list\n - watch\n{{ end }}\n",
"# validating-webhook-configuration.yaml\n{{- if .Values.apiserver.enableAdmissionWebhook }}\napiVersion: admissionregistration.k8s.io/v1beta1\nkind: ValidatingWebhookConfiguration\nmetadata:\n name: admission.voyager.appscode.com\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"voyager.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nwebhooks:\n- name: admission.voyager.appscode.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.voyager.appscode.com/v1beta1/admissionreviews\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - operations:\n - CREATE\n - UPDATE\n apiGroups:\n - voyager.appscode.com\n apiVersions:\n - \"*\"\n resources:\n - \"*\"\n - operations:\n - CREATE\n - UPDATE\n apiGroups:\n - extensions\n apiVersions:\n - v1beta1\n resources:\n - ingresses\n failurePolicy: Fail\n{{ end }}\n"
] | ##
## Voyager chart configuration
##
# Docker registry containing Voyager & HAProxy images
dockerRegistry: appscode
## Tags for Docker images
imageTags:
## Docker image tag containing Voyager
voyager: 6.0.0
## Docker image tag containing HAProxy binary
haproxy: 1.7.10-6.0.0
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
##
# imagePullSecrets:
# - name: myRegistryKeySecretName
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
imagePullPolicy: IfNotPresent
## Name of the cloud provider the cluster runs on (e.g. aws, gce, azure).
cloudProvider:
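## e.g. for a cluster running on Google Cloud (illustrative value):
# cloudProvider: gce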
## The path to the cloud provider configuration file. Empty string for no configuration file.
## e.g. for Azure, use /etc/kubernetes/azure.json
cloudConfig: ''
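## e.g. uncomment for the Azure path noted above:
# cloudConfig: /etc/kubernetes/azure.json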
## Installs the Voyager operator as a critical addon
## https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
criticalAddon: false
## Log level for Voyager
logLevel: 3
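## When enabled, persistence mounts the host directory that contains the
## cloud config file (see cloudConfig above) into the operator pod.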
persistence:
enabled: false
hostPath: /etc/kubernetes
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
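## e.g. to pin the operator to Linux nodes (illustrative label):
# nodeSelector:
#   beta.kubernetes.io/os: linux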
## Install Default RBAC roles and bindings
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# This flag can be set to 'voyager' to handle only ingress objects
# annotated with kubernetes.io/ingress.class=voyager.
ingressClass:
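## e.g. uncomment to watch only the 'voyager' ingress class:
# ingressClass: voyager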
apiserver:
# groupPriorityMinimum is the minimum priority the group should have. Please see
# https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L58-L64
# for more information on proper values of this field.
groupPriorityMinimum: 10000
# versionPriority is the ordering of this API inside of the group. Please see
# https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L66-L70
# for more information on proper values of this field
versionPriority: 15
# enableAdmissionWebhook is used to configure the apiserver as a validating webhook for Voyager CRDs
enableAdmissionWebhook: false
# CA certificate used by the main Kubernetes API server
ca:
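## One way to obtain the cluster CA (a sketch; assumes kubectl access and GNU base64):
# kubectl config view --raw --minify --flatten \
#   -o jsonpath='{.clusters[].cluster.certificate-authority-data}' | base64 --decode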
|
pgadmin | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"pgadmin.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"pgadmin.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"pgadmin.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCommon labels\n*/}}\n{{- define \"pgadmin.labels\" -}}\napp.kubernetes.io/name: {{ include \"pgadmin.name\" . }}\nhelm.sh/chart: {{ include \"pgadmin.chart\" . }}\napp.kubernetes.io/instance: {{ .Release.Name }}\n{{- if .Chart.AppVersion }}\napp.kubernetes.io/version: {{ .Chart.AppVersion | quote }}\n{{- end }}\napp.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end -}}\n\n{{/*\nGenerate chart secret name\n*/}}\n{{- define \"pgadmin.secretName\" -}}\n{{ default (include \"pgadmin.fullname\" .) .Values.existingSecret }}\n{{- end -}}\n\n{{/*\nDefines a JSON file containing server definitions. This allows connection information to be pre-loaded into the instance of pgAdmin in the container. Note that server definitions are only loaded on first launch, i.e. when the configuration database is created, and not on subsequent launches using the same configuration database.\n*/}}\n{{- define \"pgadmin.serverDefinitions\" -}}\n{\n \"Servers\": {\n{{ .Values.serverDefinitions.servers | indent 4 }}\n }\n}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.9-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"apps/v1beta2\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for ingress.\n*/}}\n{{- define \"ingress.apiVersion\" -}}\n{{- if .Capabilities.APIVersions.Has \"networking.k8s.io/v1beta1\" }}\n{{- print \"networking.k8s.io/v1beta1\" -}}\n{{- else -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\n{{- $fullName := include \"pgadmin.fullname\" . -}}\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ $fullName }}\n labels:\n {{- include \"pgadmin.labels\" . | nindent 4 }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"pgadmin.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n{{- if .Values.strategy }}\n strategy:\n {{- .Values.strategy | toYaml | nindent 4 }}\n{{- end }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"pgadmin.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n spec:\n initContainers:\n - name: init-pgadmin\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command: [\"/bin/chown\", \"-R\", \"5050:5050\", \"/var/lib/pgadmin\"]\n volumeMounts:\n - name: pgadmin-data\n mountPath: /var/lib/pgadmin\n securityContext:\n runAsUser: 0\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - name: http\n containerPort: 80\n protocol: TCP\n {{- if .Values.livenessProbe }}\n livenessProbe:\n httpGet:\n path: /misc/ping\n port: 80\n {{- .Values.livenessProbe | toYaml | nindent 12 }}\n {{- end }}\n {{- if .Values.readinessProbe }}\n readinessProbe:\n httpGet:\n path: /misc/ping\n port: 80\n {{- .Values.readinessProbe | toYaml | nindent 12 }}\n {{- end }}\n env:\n - name: PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION\n value: !!string {{ .Values.env.enhanced_cookie_protection }}\n - name: PGADMIN_DEFAULT_EMAIL\n value: {{ .Values.env.email }}\n - name: PGADMIN_DEFAULT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ $fullName }}\n key: password\n volumeMounts:\n - name: pgadmin-data\n mountPath: /var/lib/pgadmin\n {{- if .Values.serverDefinitions.enabled }}\n - name: definitions\n mountPath: /pgadmin4/servers.json\n subPath: servers.json\n {{- end }}\n resources:\n {{- .Values.resources | toYaml | nindent 12 }}\n volumes:\n - name: pgadmin-data\n {{- if .Values.persistentVolume.enabled }}\n persistentVolumeClaim:\n claimName: {{ if .Values.persistentVolume.existingClaim }}{{ .Values.persistentVolume.existingClaim }}{{- else }}{{ $fullName }}{{- end }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- if .Values.serverDefinitions.enabled }}\n - name: definitions\n secret:\n secretName: {{ template \"pgadmin.secretName\" . }}\n items:\n - key: servers.json\n path: servers.json\n {{- end }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n {{- .Values.iamgePullSecrets | toYaml | nindent 8 }}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n {{- .Values.nodeSelector | toYaml | nindent 8 }}\n {{- end }}\n {{- if .Values.securityContext }}\n securityContext:\n {{- .Values.securityContext | toYaml | nindent 8 }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n {{- .Values.affinity | toYaml | nindent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n {{- .Values.tolerations | toYaml | nindent 8 }}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"pgadmin.fullname\" . -}}\napiVersion: {{ template \"ingress.apiVersion\" . }}\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n {{- include \"pgadmin.labels\" . | nindent 4 }}\n {{- if .Values.ingress.annotations }}\n annotations:\n {{- .Values.ingress.annotations | toYaml | nindent 4 }}\n {{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . | quote }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ .host | quote }}\n http:\n paths:\n {{- range .paths }}\n - path: {{ . }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: http\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# pvc.yaml\n{{- if and .Values.persistentVolume.enabled (not .Values.persistentVolume.existingClaim) }}\n{{- $fullName := include \"pgadmin.fullname\" . -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ $fullName }}\n labels:\n {{- include \"pgadmin.labels\" . | nindent 4 }}\n {{- if .Values.persistentVolume.annotations }}\n annotations:\n {{- .Values.persistentVolume.annotaions | toYaml | nindent 4 }}\n {{- end }}\nspec:\n accessModes:\n {{- .Values.persistentVolume.accessModes | toYaml | nindent 4 }}\n{{- if .Values.persistentVolume.storageClass }}\n{{- if (eq \"-\" .Values.persistentVolume.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistentVolume.storageClass }}\"\n{{- end }}\n{{- end }}\n resources:\n requests:\n storage: {{ .Values.persistentVolume.size }}\n{{- end }}\n",
"# secrets.yaml\n{{- $fullName := include \"pgadmin.fullname\" . -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ $fullName }}\n labels:\n {{- include \"pgadmin.labels\" . | nindent 4 }}\ntype: Opaque\ndata:\n password: {{ default \"SuperSecret\" .Values.env.password | b64enc | quote }}\n{{- if .Values.serverDefinitions.enabled }}\n servers.json: {{ include \"pgadmin.serverDefinitions\" . | b64enc | quote }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"pgadmin.fullname\" . }}\n labels:\n {{- include \"pgadmin.labels\" . | nindent 4 }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: http\n protocol: TCP\n name: http\n selector:\n app.kubernetes.io/name: {{ include \"pgadmin.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n",
"# test-connection.yaml\napiVersion: v1\nkind: Pod\nmetadata:\n name: \"{{ include \"pgadmin.fullname\" . }}-test-connection\"\n labels:\n{{ include \"pgadmin.labels\" . | indent 4 }}\n annotations:\n \"helm.sh/hook\": test-success\nspec:\n containers:\n - name: wget\n image: busybox\n command: ['wget']\n args: ['{{ include \"pgadmin.fullname\" . }}:{{ .Values.service.port }}']\n restartPolicy: Never\n"
] | # Default values for pgadmin.
replicaCount: 1
## pgAdmin container image
##
image:
repository: dpage/pgadmin4
tag: 4.18
pullPolicy: IfNotPresent
service:
type: ClusterIP
port: 80
## Strategy used to replace old Pods by new ones
## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
##
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 0
# maxUnavailable: 1
## Server definitions will be loaded at launch time. This allows connection
## information to be pre-loaded into the instance of pgAdmin in the container.
## Ref: https://www.pgadmin.org/docs/pgadmin4/4.13/import_export_servers.html
##
serverDefinitions:
## If true, server definitions will be created
##
enabled: false
servers: |-
# "1": {
# "Name": "Minimally Defined Server",
# "Group": "Servers",
# "Port": 5432,
# "Username": "postgres",
# "Host": "localhost",
# "SSLMode": "prefer",
# "MaintenanceDB": "postgres"
# }
ingress:
## If true, pgAdmin Ingress will be created
##
enabled: false
## pgAdmin Ingress annotations
##
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
## pgAdmin Ingress hostnames with optional path
## Must be provided if Ingress is enabled
hosts:
- host: chart-example.local
paths: []
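## e.g. with an explicit path (illustrative host):
# hosts:
#   - host: pgadmin.example.com
#     paths: ["/"]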
## pgAdmin Ingress TLS configuration
## Secrets must be manually created in the namespace
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
## pgAdmin startup configuration
## Values in here get injected as environment variables
##
env:
email: [email protected]
password: SuperSecret
## If True, allows pgAdmin to create session cookies based on IP address
## Ref: https://www.pgadmin.org/docs/pgadmin4/4.18/config_py.html
##
enhanced_cookie_protection: "False"
persistentVolume:
## If true, pgAdmin will create/use a Persistent Volume Claim
## If false, use emptyDir
##
enabled: true
## pgAdmin Persistent Volume Claim annotations
##
annotations: {}
## pgAdmin Persistent Volume access modes
## Must match those of existing PV or dynamic provisioner
## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
accessModes:
- ReadWriteOnce
## pgAdmin Persistent Volume Size
##
size: 10Gi
## pgAdmin Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
# existingClaim: ""
## Security context to be added to pgAdmin pods
##
securityContext:
runAsUser: 5050
runAsGroup: 5050
fsGroup: 5050
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## pgAdmin readiness and liveness probe initial delay and timeout
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
##
livenessProbe:
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 3
readinessProbe:
initialDelaySeconds: 30
periodSeconds: 60
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 3
## Node labels for pgAdmin pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations: []
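## e.g. to tolerate a hypothetical dedicated-node taint:
# tolerations:
#   - key: "dedicated"
#     operator: "Equal"
#     value: "pgadmin"
#     effect: "NoSchedule"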
## Pod affinity
##
affinity: {}
|
kubedb | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"kubedb.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"kubedb.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"kubedb.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"kubedb.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# apiregistration.yaml\n{{- $ca := genCA \"svc-cat-ca\" 3650 }}\n{{- $cn := include \"kubedb.fullname\" . -}}\n{{- $altName1 := printf \"%s.%s\" $cn .Release.Namespace }}\n{{- $altName2 := printf \"%s.%s.svc\" $cn .Release.Namespace }}\n{{- $cert := genSignedCert $cn nil (list $altName1 $altName2) 3650 $ca }}\napiVersion: apiregistration.k8s.io/v1beta1\nkind: APIService\nmetadata:\n name: v1alpha1.admission.kubedb.com\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"kubedb.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nspec:\n group: admission.kubedb.com\n version: v1alpha1\n service:\n namespace: {{ .Release.Namespace }}\n name: {{ template \"kubedb.fullname\" . }}\n caBundle: {{ b64enc $ca.Cert }}\n groupPriorityMinimum: {{ .Values.apiserver.groupPriorityMinimum }}\n versionPriority: {{ .Values.apiserver.versionPriority }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"kubedb.fullname\" . }}-apiserver-cert\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"kubedb.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\ntype: Opaque\ndata:\n tls.crt: {{ b64enc $cert.Cert }}\n tls.key: {{ b64enc $cert.Key }}\n---\n{{ if .Values.rbac.create }}\n# to read the config for terminating authentication\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: {{ template \"kubedb.fullname\" . }}-apiserver-extension-server-authentication-reader\n namespace: kube-system\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"kubedb.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nroleRef:\n kind: Role\n apiGroup: rbac.authorization.k8s.io\n name: extension-apiserver-authentication-reader\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"kubedb.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n---\n# to delegate authentication and authorization\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"kubedb.fullname\" . }}-apiserver-auth-delegator\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"kubedb.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nroleRef:\n kind: ClusterRole\n apiGroup: rbac.authorization.k8s.io\n name: system:auth-delegator\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"kubedb.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{ end }}",
"# cluster-role-binding.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"kubedb.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"kubedb.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"kubedb.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"kubedb.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{ end }}\n",
"# cluster-role.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"kubedb.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"kubedb.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nrules:\n- apiGroups:\n - apiextensions.k8s.io\n resources:\n - customresourcedefinitions\n verbs: [\"create\", \"delete\", \"get\", \"list\"]\n- apiGroups:\n - rbac.authorization.k8s.io\n resources:\n - rolebindings\n - roles\n verbs: [\"create\", \"delete\", \"get\", \"patch\"]\n- apiGroups:\n - \"\"\n resources:\n - services\n verbs: [\"create\", \"delete\", \"get\", \"patch\"]\n- apiGroups:\n - \"\"\n resources:\n - secrets\n - serviceaccounts\n verbs: [\"create\", \"delete\", \"get\", \"patch\"]\n- apiGroups:\n - apps\n resources:\n - deployments\n - statefulsets\n verbs: [\"create\", \"delete\", \"get\", \"patch\", \"update\"]\n- apiGroups:\n - batch\n resources:\n - jobs\n verbs: [\"create\", \"delete\", \"get\", \"list\", \"watch\"]\n- apiGroups:\n - storage.k8s.io\n resources:\n - storageclasses\n verbs: [\"get\"]\n- apiGroups:\n - \"\"\n resources:\n - pods\n verbs: [\"deletecollection\", \"get\", \"list\", \"patch\", \"watch\"]\n- apiGroups:\n - \"\"\n resources:\n - persistentvolumeclaims\n verbs: [\"create\", \"delete\", \"get\", \"list\", \"patch\", \"watch\"]\n- apiGroups:\n - \"\"\n resources:\n - configmaps\n verbs: [\"create\", \"delete\", \"get\", \"update\"]\n- apiGroups:\n - \"\"\n resources:\n - events\n verbs: [\"create\"]\n- apiGroups:\n - \"\"\n resources:\n - nodes\n verbs: [\"get\", \"list\", \"watch\"]\n- apiGroups:\n - kubedb.com\n resources:\n - '*'\n verbs: ['*']\n- apiGroups:\n - monitoring.coreos.com\n resources:\n - servicemonitors\n verbs: [\"create\", \"delete\", \"get\", \"list\", \"update\"]\n{{ end }}\n",
"# deployment.yaml\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"kubedb.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"kubedb.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\n initializers:\n pending: []\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: \"{{ template \"kubedb.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: \"{{ template \"kubedb.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n{{- if and .Values.criticalAddon (eq .Release.Namespace \"kube-system\") }}\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n{{- end }}\n spec:\n serviceAccountName: {{ template \"kubedb.serviceAccountName\" . }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.imagePullSecrets | indent 6 }}\n {{- end }}\n containers:\n - name: operator\n image: {{ .Values.dockerRegistry }}/operator:{{ .Values.imageTags.operator }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n args:\n - run\n - --v=3\n - --docker-registry={{ .Values.dockerRegistry }}\n - --exporter-tag={{ .Values.imageTags.exporter }}\n - --governing-service=kubedb\n - --rbac={{ .Values.rbac.create }}\n env:\n - name: OPERATOR_NAMESPACE\n valueFrom:\n fieldRef:\n apiVersion: v1\n fieldPath: metadata.namespace\n - name: server\n image: {{ .Values.dockerRegistry }}/kubedb-server:{{ .Values.imageTags.apiserver }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n args:\n - run\n - --v=3\n - --secure-port=8443\n - --audit-log-path=-\n - --tls-cert-file=/var/serving-cert/tls.crt\n - --tls-private-key-file=/var/serving-cert/tls.key\n ports:\n - containerPort: 8443\n volumeMounts:\n - mountPath: /var/serving-cert\n name: serving-cert\n readinessProbe:\n httpGet:\n path: /healthz\n port: 8443\n scheme: HTTPS\n volumes:\n - name: serving-cert\n secret:\n defaultMode: 420\n secretName: {{ template \"kubedb.fullname\" . }}-apiserver-cert\n{{- if and .Values.criticalAddon (eq .Release.Namespace \"kube-system\") }}\n tolerations:\n - key: CriticalAddonsOnly\n operator: Exists\n{{- end -}}\n",
"# service-account.yaml\n{{ if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"kubedb.serviceAccountName\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"kubedb.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\n{{ end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"kubedb.fullname\" . }}\n labels:\n app: \"{{ template \"kubedb.name\" . }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n ports:\n - name: api\n port: 443\n targetPort: 8443\n selector:\n app: \"{{ template \"kubedb.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n",
"# user-roles.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: kubedb:core:admin\n labels:\n rbac.authorization.k8s.io/aggregate-to-admin: \"true\"\nrules:\n- apiGroups:\n - kubedb.com\n resources:\n - dormantdatabases\n - elasticsearches\n - memcacheds\n - mongodbs\n - mysqls\n - postgreses\n - redises\n - snapshots\n verbs:\n - create\n - delete\n - deletecollection\n - get\n - list\n - patch\n - update\n - watch\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: kubedb:core:edit\n labels:\n rbac.authorization.k8s.io/aggregate-to-edit: \"true\"\nrules:\n- apiGroups:\n - kubedb.com\n resources:\n - elasticsearches\n - memcacheds\n - mongodbs\n - mysqls\n - postgreses\n - redises\n - snapshots\n verbs:\n - create\n - delete\n - deletecollection\n - get\n - list\n - patch\n - update\n - watch\n- apiGroups:\n - kubedb.com\n resources:\n - dormantdatabases\n verbs:\n - get\n - list\n - watch\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: kubedb:core:view\n labels:\n rbac.authorization.k8s.io/aggregate-to-view: \"true\"\nrules:\n- apiGroups:\n - kubedb.com\n resources:\n - dormantdatabases\n - elasticsearches\n - memcacheds\n - mongodbs\n - mysqls\n - postgreses\n - redises\n - snapshots\n verbs:\n - get\n - list\n - watch\n{{ end }}\n",
"# validating-webhook-configuration.yaml\n{{- if .Values.apiserver.enableAdmissionWebhook }}\napiVersion: admissionregistration.k8s.io/v1beta1\nkind: ValidatingWebhookConfiguration\nmetadata:\n name: admission.kubedb.com\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"kubedb.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nwebhooks:\n- name: elasticsearch.admission.kubedb.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.kubedb.com/v1alpha1/elasticsearchreviews\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - apiGroups: [\"kubedb.com\"]\n apiVersions: [\"*\"]\n resources: [\"elasticsearches\"]\n operations: [\"CREATE\", \"UPDATE\", \"DELETE\"]\n failurePolicy: Fail\n- name: postgres.admission.kubedb.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.kubedb.com/v1alpha1/postgresreviews\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - apiGroups: [\"kubedb.com\"]\n apiVersions: [\"*\"]\n resources: [\"postgreses\"]\n operations: [\"CREATE\", \"UPDATE\", \"DELETE\"]\n failurePolicy: Fail\n- name: mysql.admission.kubedb.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.kubedb.com/v1alpha1/mysqlreviews\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - apiGroups: [\"kubedb.com\"]\n apiVersions: [\"*\"]\n resources: [\"mysqls\"]\n operations: [\"CREATE\", \"UPDATE\", \"DELETE\"]\n failurePolicy: Fail\n- name: mongodb.admission.kubedb.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.kubedb.com/v1alpha1/mongodbreviews\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - apiGroups: [\"kubedb.com\"]\n apiVersions: [\"*\"]\n resources: [\"mongodbs\"]\n operations: [\"CREATE\", \"UPDATE\", \"DELETE\"]\n failurePolicy: Fail\n- name: redis.admission.kubedb.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.kubedb.com/v1alpha1/redisreviews\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - apiGroups: [\"kubedb.com\"]\n apiVersions: [\"*\"]\n resources: [\"redises\"]\n operations: [\"CREATE\", \"UPDATE\", \"DELETE\"]\n failurePolicy: Fail\n- name: memcached.admission.kubedb.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.kubedb.com/v1alpha1/memcachedreviews\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - apiGroups: [\"kubedb.com\"]\n apiVersions: [\"*\"]\n resources: [\"memcacheds\"]\n operations: [\"CREATE\", \"UPDATE\", \"DELETE\"]\n failurePolicy: Fail\n- name: snapshot.admission.kubedb.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.kubedb.com/v1alpha1/snapshotreviews\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - apiGroups: [\"kubedb.com\"]\n apiVersions: [\"*\"]\n resources: [\"snapshots\"]\n operations: [\"CREATE\", \"UPDATE\"]\n failurePolicy: Fail\n{{ end }}\n"
] | ##
## KubeDB chart configuration
##
# Docker registry containing KubeDB images
dockerRegistry: kubedb
## Tags for Docker images
imageTags:
## Docker image tag containing KubeDB operator
operator: 0.8.0-beta.2
## Docker image tag containing KubeDB exporter
exporter: 0.8.0-beta.2
## Docker image tag containing KubeDB apiserver
apiserver: 0.1.0-beta.2
# Declare variables to be passed into your templates.
replicaCount: 1
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
##
# imagePullSecrets:
# - name: myRegistryKeySecretName
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
imagePullPolicy: IfNotPresent
## Installs the KubeDB operator as a critical addon
## https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
criticalAddon: false
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
apiserver:
# groupPriorityMinimum is the minimum priority the group should have. Please see
# https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L58-L64
# for more information on proper values of this field.
groupPriorityMinimum: 10000
# versionPriority is the ordering of this API inside of the group. Please see
# https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L66-L70
# for more information on proper values of this field
versionPriority: 15
# enableAdmissionWebhook is used to configure the apiserver as an admission webhook for KubeDB CRDs
enableAdmissionWebhook: false
# CA certificate used by the main Kubernetes API server
ca:
|
stash | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"stash.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"stash.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"stash.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"stash.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# apiregistration.yaml\n{{- $ca := genCA \"svc-cat-ca\" 3650 }}\n{{- $cn := include \"stash.fullname\" . -}}\n{{- $altName1 := printf \"%s.%s\" $cn .Release.Namespace }}\n{{- $altName2 := printf \"%s.%s.svc\" $cn .Release.Namespace }}\n{{- $cert := genSignedCert $cn nil (list $altName1 $altName2) 3650 $ca }}\napiVersion: apiregistration.k8s.io/v1beta1\nkind: APIService\nmetadata:\n name: v1alpha1.admission.stash.appscode.com\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"stash.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nspec:\n group: admission.stash.appscode.com\n version: v1alpha1\n service:\n namespace: {{ .Release.Namespace }}\n name: {{ template \"stash.fullname\" . }}\n caBundle: {{ b64enc $ca.Cert }}\n groupPriorityMinimum: {{ .Values.apiserver.groupPriorityMinimum }}\n versionPriority: {{ .Values.apiserver.versionPriority }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"stash.fullname\" . }}-apiserver-cert\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"stash.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\ntype: Opaque\ndata:\n tls.crt: {{ b64enc $cert.Cert }}\n tls.key: {{ b64enc $cert.Key }}\n---\n{{ if .Values.rbac.create }}\n# to read the config for terminating authentication\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: {{ template \"stash.fullname\" . }}-apiserver-extension-server-authentication-reader\n namespace: kube-system\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"stash.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nroleRef:\n kind: Role\n apiGroup: rbac.authorization.k8s.io\n name: extension-apiserver-authentication-reader\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"stash.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n---\n# to delegate authentication and authorization\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"stash.fullname\" . }}-apiserver-auth-delegator\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"stash.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nroleRef:\n kind: ClusterRole\n apiGroup: rbac.authorization.k8s.io\n name: system:auth-delegator\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"stash.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{ end }}",
"# cluster-role-binding.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"stash.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"stash.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"stash.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"stash.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{ end }}\n",
"# cluster-role.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"stash.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"stash.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nrules:\n- apiGroups:\n - apiextensions.k8s.io\n resources:\n - customresourcedefinitions\n verbs:\n - \"*\"\n- apiGroups:\n - extensions\n resources:\n - thirdpartyresources\n verbs:\n - \"*\"\n- apiGroups:\n - stash.appscode.com\n resources: [\"*\"]\n verbs: [\"*\"]\n- apiGroups:\n - apps\n resources:\n - deployments\n - statefulsets\n verbs: [\"get\", \"list\", \"watch\", \"patch\"]\n- apiGroups:\n - batch\n resources:\n - jobs\n - cronjobs\n verbs: [\"get\", \"list\", \"watch\", \"create\", \"delete\", \"patch\"]\n- apiGroups:\n - extensions\n resources:\n - replicasets\n - daemonsets\n verbs: [\"get\", \"list\", \"watch\", \"patch\"]\n- apiGroups: [\"\"]\n resources:\n - namespaces\n - replicationcontrollers\n verbs: [\"get\", \"list\", \"watch\", \"patch\"]\n- apiGroups: [\"\"]\n resources:\n - configmaps\n verbs: [\"create\", \"update\", \"get\", \"delete\"]\n- apiGroups: [\"\"]\n resources:\n - secrets\n verbs: [\"get\"]\n- apiGroups: [\"\"]\n resources:\n - events\n verbs: [\"create\"]\n- apiGroups: [\"\"]\n resources:\n - nodes\n verbs: [\"list\"]\n- apiGroups: [\"\"]\n resources:\n - pods\n verbs: [\"get\", \"create\", \"list\", \"delete\", \"deletecollection\"]\n- apiGroups: [\"\"]\n resources:\n - serviceaccounts\n verbs: [\"get\", \"create\", \"patch\", \"delete\"]\n- apiGroups:\n - rbac.authorization.k8s.io\n resources:\n - clusterroles\n - roles\n - rolebindings\n verbs: [\"get\", \"create\", \"delete\", \"patch\"]\n{{ end }}\n",
"# deployment.yaml\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"stash.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"stash.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: \"{{ template \"stash.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: \"{{ template \"stash.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n{{- if and .Values.criticalAddon (eq .Release.Namespace \"kube-system\") }}\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n{{- end }}\n spec:\n serviceAccountName: {{ template \"stash.serviceAccountName\" . }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.imagePullSecrets | indent 6 }}\n {{- end }}\n containers:\n - name: operator\n image: {{ .Values.operator.image }}:{{ .Values.operator.tag }}\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n args:\n - run\n - --v=3\n - --rbac={{ .Values.rbac.create }}\n - --docker-registry={{ .Values.dockerRegistry }}\n - --secure-port=8443\n - --audit-log-path=-\n - --tls-cert-file=/var/serving-cert/tls.crt\n - --tls-private-key-file=/var/serving-cert/tls.key\n ports:\n - containerPort: 8443\n - containerPort: 56790\n readinessProbe:\n httpGet:\n path: /healthz\n port: 8443\n scheme: HTTPS\n volumeMounts:\n - mountPath: /var/serving-cert\n name: serving-cert\n - name: pushgateway\n image: '{{ .Values.pushgateway.image }}:{{ .Values.pushgateway.tag }}'\n imagePullPolicy: {{ .Values.imagePullPolicy }}\n args:\n - -web.listen-address=:56789\n - -persistence.file=/var/pv/pushgateway.dat\n ports:\n - containerPort: 56789\n volumeMounts:\n - mountPath: /var/pv\n name: data-volume\n - mountPath: /tmp\n name: stash-scratchdir\n volumes:\n - emptyDir: {}\n name: data-volume\n - emptyDir: {}\n name: stash-scratchdir\n - name: serving-cert\n secret:\n defaultMode: 420\n secretName: {{ template \"stash.fullname\" . }}-apiserver-cert\n{{- if and .Values.criticalAddon (eq .Release.Namespace \"kube-system\") }}\n tolerations:\n - key: CriticalAddonsOnly\n operator: Exists\n{{- end -}}\n",
"# mutating-webhook-configuration.yaml\n{{- if .Values.apiserver.enableMutatingWebhook }}\napiVersion: admissionregistration.k8s.io/v1beta1\nkind: MutatingWebhookConfiguration\nmetadata:\n name: admission.stash.appscode.com\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"stash.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nwebhooks:\n- name: deployment.admission.stash.appscode.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.stash.appscode.com/v1alpha1/deployments\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - operations:\n - CREATE\n - UPDATE\n apiGroups:\n - apps\n - extensions\n apiVersions:\n - \"*\"\n resources:\n - deployments\n failurePolicy: Fail\n- name: daemonset.admission.stash.appscode.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.stash.appscode.com/v1alpha1/daemonsets\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - operations:\n - CREATE\n - UPDATE\n apiGroups:\n - apps\n - extensions\n apiVersions:\n - \"*\"\n resources:\n - daemonsets\n failurePolicy: Fail\n- name: statefulset.admission.stash.appscode.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.stash.appscode.com/v1alpha1/statefulsets\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - operations:\n - CREATE\n apiGroups:\n - apps\n apiVersions:\n - \"*\"\n resources:\n - statefulsets\n failurePolicy: Fail\n- name: replicationcontroller.admission.stash.appscode.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.stash.appscode.com/v1alpha1/replicationcontrollers\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - operations:\n - CREATE\n - UPDATE\n apiGroups:\n - \"\"\n apiVersions:\n - \"*\"\n resources:\n - replicationcontrollers\n failurePolicy: Fail\n- name: replicaset.admission.stash.appscode.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.stash.appscode.com/v1alpha1/replicasets\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - operations:\n - CREATE\n - UPDATE\n apiGroups:\n - apps\n - extensions\n apiVersions:\n - \"*\"\n resources:\n - replicasets\n failurePolicy: Fail\n{{ end }}\n",
"# service-account.yaml\n{{ if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"stash.serviceAccountName\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"stash.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\n{{ end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"stash.fullname\" . }}\n labels:\n app: \"{{ template \"stash.name\" . }}\"\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n ports:\n # Port used to expose admission webhook apiserver\n - name: admission\n port: 443\n targetPort: 8443\n # Port used to expose Prometheus pushgateway\n - name: pushgateway\n port: 56789\n protocol: TCP\n targetPort: 56789\n # Port used to expose Prometheus metrics for the operator\n - name: ops\n port: 56790\n protocol: TCP\n targetPort: 56790\n selector:\n app: \"{{ template \"stash.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n",
"# user-roles.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: appscode:stash:edit\n labels:\n rbac.authorization.k8s.io/aggregate-to-admin: \"true\"\n rbac.authorization.k8s.io/aggregate-to-edit: \"true\"\nrules:\n- apiGroups:\n - stash.appscode.com\n resources:\n - restics\n - recoveries\n verbs:\n - create\n - delete\n - deletecollection\n - get\n - list\n - patch\n - update\n - watch\n---\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: appscode:stash:view\n labels:\n rbac.authorization.k8s.io/aggregate-to-view: \"true\"\nrules:\n- apiGroups:\n - stash.appscode.com\n resources:\n - restics\n - recoveries\n verbs:\n - get\n - list\n - watch\n{{ end }}",
"# validating-webhook-configuration.yaml\n{{- if .Values.apiserver.enableValidatingWebhook }}\napiVersion: admissionregistration.k8s.io/v1beta1\nkind: ValidatingWebhookConfiguration\nmetadata:\n name: admission.stash.appscode.com\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"stash.name\" . }}\"\n heritage: \"{{ .Release.Service }}\"\n release: \"{{ .Release.Name }}\"\nwebhooks:\n- name: restic.admission.stash.appscode.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.stash.appscode.com/v1alpha1/restics\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - operations:\n - CREATE\n - UPDATE\n apiGroups:\n - stash.appscode.com\n apiVersions:\n - \"*\"\n resources:\n - restics\n failurePolicy: Fail\n- name: recovery.admission.stash.appscode.com\n clientConfig:\n service:\n namespace: default\n name: kubernetes\n path: /apis/admission.stash.appscode.com/v1alpha1/recoveries\n caBundle: {{ b64enc .Values.apiserver.ca }}\n rules:\n - operations:\n - CREATE\n - UPDATE\n apiGroups:\n - stash.appscode.com\n apiVersions:\n - \"*\"\n resources:\n - recoveries\n failurePolicy: Fail\n{{ end }}\n"
] | # Default values for stash.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
operator:
image: appscode/stash
tag: 0.7.0-rc.1
pushgateway:
image: prom/pushgateway
tag: v0.4.0
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
##
# imagePullSecrets:
# - name: myRegistryKeySecretName
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
imagePullPolicy: IfNotPresent
## Installs the Stash operator as a critical addon
## https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
criticalAddon: false
## Install Default RBAC roles and bindings
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
apiserver:
# groupPriorityMinimum is the minimum priority the group should have. Please see
# https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L58-L64
# for more information on proper values of this field.
groupPriorityMinimum: 10000
# versionPriority is the ordering of this API inside of the group. Please see
# https://github.com/kubernetes/kube-aggregator/blob/release-1.9/pkg/apis/apiregistration/v1beta1/types.go#L66-L70
# for more information on proper values of this field
versionPriority: 15
# enableAdmissionWebhook is used to configure the apiserver as a validating webhook for Stash CRDs
enableAdmissionWebhook: false
# CA certificate used by the main Kubernetes API server
ca:
|
rabbitmq | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"rabbitmq.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"rabbitmq.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"rabbitmq.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the proper RabbitMQ plugin list\n*/}}\n{{- define \"rabbitmq.plugins\" -}}\n{{- $plugins := .Values.rabbitmq.plugins | replace \" \" \", \" -}}\n{{- if .Values.rabbitmq.extraPlugins -}}\n{{- $extraPlugins := .Values.rabbitmq.extraPlugins | replace \" \" \", \" -}}\n{{- printf \"[%s, %s].\" $plugins $extraPlugins | indent 4 -}}\n{{- else -}}\n{{- printf \"[%s].\" $plugins | indent 4 -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper RabbitMQ image name\n*/}}\n{{- define \"rabbitmq.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper metrics image name\n*/}}\n{{- define \"rabbitmq.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGet the password secret.\n*/}}\n{{- define \"rabbitmq.secretPasswordName\" -}}\n {{- if .Values.rabbitmq.existingPasswordSecret -}}\n {{- printf \"%s\" .Values.rabbitmq.existingPasswordSecret -}}\n {{- else -}}\n {{- printf \"%s\" (include \"rabbitmq.fullname\" .) 
-}}\n {{- end -}}\n{{- end -}}\n\n{{/*\nGet the erlang secret.\n*/}}\n{{- define \"rabbitmq.secretErlangName\" -}}\n {{- if .Values.rabbitmq.existingErlangSecret -}}\n {{- printf \"%s\" .Values.rabbitmq.existingErlangSecret -}}\n {{- else -}}\n {{- printf \"%s\" (include \"rabbitmq.fullname\" .) -}}\n {{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"rabbitmq.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.volumePermissions.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.volumePermissions.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the init container volume-permissions image)\n*/}}\n{{- define \"rabbitmq.volumePermissions.image\" -}}\n{{- $registryName := .Values.volumePermissions.image.registry -}}\n{{- $repositoryName := .Values.volumePermissions.image.repository -}}\n{{- $tag := .Values.volumePermissions.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"rabbitmq.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" 
.Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCompile all warnings into a single message, and call fail.\n*/}}\n{{- define \"rabbitmq.validateValues\" -}}\n{{- $messages := list -}}\n{{- $messages := append $messages (include \"rabbitmq.validateValues.ldap\" .) -}}\n{{- $messages := without $messages \"\" -}}\n{{- $message := join \"\\n\" $messages -}}\n\n{{- if $message -}}\n{{- printf \"\\nVALUES VALIDATION:\\n%s\" $message | fail -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nValidate values of rabbitmq - LDAP support\n*/}}\n{{- define \"rabbitmq.validateValues.ldap\" -}}\n{{- if .Values.ldap.enabled }}\n{{- if not (and .Values.ldap.server .Values.ldap.port .Values.ldap.user_dn_pattern) }}\nrabbitmq: LDAP\n Invalid LDAP configuration. When enabling LDAP support, the parameters \"ldap.server\",\n \"ldap.port\", and \"ldap. user_dn_pattern\" are mandatory. Please provide them:\n\n $ helm install {{ .Release.Name }} stable/rabbitmq \\\n --set ldap.enabled=true \\\n --set ldap.server=\"lmy-ldap-server\" \\\n --set ldap.port=\"389\" \\\n --set user_dn_pattern=\"cn=${username},dc=example,dc=org\"\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nRenders a value that contains template.\nUsage:\n{{ include \"rabbitmq.tplValue\" (dict \"value\" .Values.path.to.the.Value \"context\" $) }}\n*/}}\n{{- define \"rabbitmq.tplValue\" -}}\n {{- if typeIs \"string\" .value }}\n {{- tpl .value .context }}\n {{- else }}\n {{- tpl (.value | toYaml) .context }}\n {{- end }}\n{{- end -}}\n",
"# certs.yaml\n{{- if and (not .Values.rabbitmq.tls.existingSecret) ( .Values.rabbitmq.tls.enabled) }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}-certs\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: kubernetes.io/tls\ndata:\n ca.crt:\n {{ required \"A valid .Values.rabbitmq.tls.caCertificate entry required!\" .Values.rabbitmq.tls.caCertificate | b64enc | quote }}\n tls.crt:\n {{ required \"A valid .Values.rabbitmq.tls.serverCertificate entry required!\" .Values.rabbitmq.tls.serverCertificate| b64enc | quote }}\n tls.key:\n {{ required \"A valid .Values.rabbitmq.tls.serverKey entry required!\" .Values.rabbitmq.tls.serverKey | b64enc | quote }}\n{{- end }}\n",
"# configuration.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}-config\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ndata:\n enabled_plugins: |-\n{{ template \"rabbitmq.plugins\" . }}\n rabbitmq.conf: |-\n ##username and password\n default_user={{.Values.rabbitmq.username}}\n default_pass=CHANGEME\n{{ .Values.rabbitmq.configuration | indent 4 }}\n{{ .Values.rabbitmq.extraConfiguration | indent 4 }}\n{{- if .Values.rabbitmq.tls.enabled }}\n ssl_options.verify={{ .Values.rabbitmq.tls.sslOptionsVerify }}\n listeners.ssl.default={{ .Values.service.tlsPort }}\n ssl_options.fail_if_no_peer_cert={{ .Values.rabbitmq.tls.failIfNoPeerCert }}\n ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem\n ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem\n ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem\n{{- end }}\n{{- if .Values.ldap.enabled }}\n auth_backends.1 = rabbit_auth_backend_ldap\n auth_backends.2 = internal\n auth_ldap.servers.1 = {{ .Values.ldap.server }}\n auth_ldap.port = {{ .Values.ldap.port }}\n auth_ldap.user_dn_pattern = {{ .Values.ldap.user_dn_pattern }}\n{{- if .Values.ldap.tls.enabled }}\n auth_ldap.use_ssl = true\n{{- end }}\n{{- end }}\n\n{{ if .Values.rabbitmq.advancedConfiguration}}\n advanced.config: |-\n{{ .Values.rabbitmq.advancedConfiguration | indent 4 }}\n{{- end }}\n",
"# healthchecks.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}-healthchecks\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ndata:\n rabbitmq-health-check: |-\n #!/bin/sh\n START_FLAG=/opt/bitnami/rabbitmq/var/lib/rabbitmq/.start\n if [ -f ${START_FLAG} ]; then\n rabbitmqctl node_health_check\n RESULT=$?\n if [ $RESULT -ne 0 ]; then\n rabbitmqctl status\n exit $?\n fi\n rm -f ${START_FLAG}\n exit ${RESULT}\n fi\n rabbitmq-api-check $1 $2\n rabbitmq-api-check: |-\n #!/bin/sh\n set -e\n URL=$1\n EXPECTED=$2\n ACTUAL=$(curl --silent --show-error --fail \"${URL}\")\n echo \"${ACTUAL}\"\n test \"${EXPECTED}\" = \"${ACTUAL}\"",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: \"{{ template \"rabbitmq.fullname\" . }}\"\n labels:\n app: \"{{ template \"rabbitmq.name\" . }}\"\n chart: \"{{ template \"rabbitmq.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n annotations:\n {{- if .Values.ingress.tls }}\n ingress.kubernetes.io/secure-backends: \"true\"\n {{- end }}\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- if .Values.ingress.hostName }}\n - host: {{ .Values.ingress.hostName }}\n http:\n {{- else }}\n - http:\n {{- end }}\n paths:\n - path: {{ .Values.ingress.path }}\n backend:\n serviceName: {{ template \"rabbitmq.fullname\" . }}\n servicePort: {{ .Values.service.managerPort }}\n{{- if .Values.ingress.tls }}\n tls:\n - hosts:\n {{- if .Values.ingress.hostName }}\n - {{ .Values.ingress.hostName }}\n secretName: {{ .Values.ingress.tlsSecret }}\n {{- else}}\n - secretName: {{ .Values.ingress.tlsSecret }}\n {{- end }}\n{{- end }}\n{{- end }}\n",
"# networkpolicy.yaml\n{{- if .Values.networkPolicy.enabled }}\nkind: NetworkPolicy\napiVersion: networking.k8s.io/v1\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n podSelector:\n matchLabels:\n app: {{ template \"rabbitmq.name\" . }}\n release: {{ .Release.Name | quote }}\n ingress:\n # Allow inbound connections\n\n - ports:\n - port: 4369 # EPMD\n - port: {{ .Values.service.port }}\n - port: {{ .Values.service.tlsPort }}\n - port: {{ .Values.service.distPort }}\n - port: {{ .Values.service.managerPort }}\n\n {{- if not .Values.networkPolicy.allowExternal }}\n from:\n - podSelector:\n matchLabels:\n {{ template \"rabbitmq.fullname\" . }}-client: \"true\"\n {{- with .Values.networkPolicy.additionalRules }}\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- end }}\n\n # Allow prometheus scrapes\n - ports:\n - port: {{ .Values.metrics.port }}\n{{- end }}\n",
"# pdb.yaml\n{{- if .Values.podDisruptionBudget -}}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"rabbitmq.name\" . }}\n release: \"{{ .Release.Name }}\"\n{{ toYaml .Values.podDisruptionBudget | indent 2 }}\n{{- end -}}\n",
"# prometheusrule.yaml\n{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: PrometheusRule\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}\n{{- with .Values.metrics.prometheusRule.namespace }}\n namespace: {{ . }}\n{{- end }}\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n{{- with .Values.metrics.prometheusRule.additionalLabels }}\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- with .Values.metrics.prometheusRule.rules }}\n groups:\n - name: {{ template \"rabbitmq.name\" $ }}\n rules: {{ tpl (toYaml .) $ | nindent 8 }}\n{{- end }}\n{{- end }}\n",
"# role.yaml\n{{- if .Values.rbacEnabled }}\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}-endpoint-reader\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nrules:\n- apiGroups: [\"\"]\n resources: [\"endpoints\"]\n verbs: [\"get\"]\n{{- end }}\n",
"# rolebinding.yaml\n{{- if .Values.rbacEnabled }}\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}-endpoint-reader\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"rabbitmq.fullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"rabbitmq.fullname\" . }}-endpoint-reader\n{{- end }}\n",
"# secrets.yaml\n{{- if or (not .Values.rabbitmq.existingErlangSecret) (not .Values.rabbitmq.existingPasswordSecret) }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n {{ if not .Values.rabbitmq.existingPasswordSecret }}{{ if .Values.rabbitmq.password }}\n rabbitmq-password: {{ .Values.rabbitmq.password | b64enc | quote }}\n {{ else }}\n rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}{{ end }}\n {{ if not .Values.rabbitmq.existingErlangSecret }}{{ if .Values.rabbitmq.erlangCookie }}\n rabbitmq-erlang-cookie: {{ .Values.rabbitmq.erlangCookie | b64enc | quote }}\n {{ else }}\n rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }}\n {{ end }}{{ end }}\n{{- end }}\n{{- range $key, $value := .Values.extraSecrets }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ $key }}\n labels:\n app: {{ template \"rabbitmq.name\" $ }}\n chart: {{ template \"rabbitmq.chart\" $ }}\n release: \"{{ $.Release.Name }}\"\n heritage: \"{{ $.Release.Service }}\"\ntype: Opaque\nstringData:\n{{ $value | toYaml | nindent 2 }}\n{{- end }}\n",
"# serviceaccount.yaml\n{{- if .Values.rbacEnabled }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- end }}\n",
"# servicemonitor.yaml\n{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}\n {{- if .Values.metrics.serviceMonitor.namespace }}\n namespace: {{ .Values.metrics.serviceMonitor.namespace }}\n {{- end }}\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n heritage: \"{{ .Release.Service }}\"\n release: {{ if .Values.metrics.serviceMonitor.release }}\"{{ .Values.metrics.serviceMonitor.release }}\"{{ else }}\"{{ .Release.Name }}\"{{ end }}\n {{- if .Values.metrics.serviceMonitor.additionalLabels }}\n{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }}\n {{- end }}\nspec:\n endpoints:\n - port: metrics\n interval: {{ .Values.metrics.serviceMonitor.interval }}\n {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}\n scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}\n {{- end }}\n honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}\n {{- if .Values.metrics.serviceMonitor.relabellings }}\n metricRelabelings:\n{{ toYaml .Values.metrics.serviceMonitor.relabellings | indent 6 }}\n {{- end }}\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n selector:\n matchLabels:\n app: {{ template \"rabbitmq.name\" . }}\n release: \"{{ .Release.Name }}\"\n{{- end }}\n",
"# statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n serviceName: {{ template \"rabbitmq.fullname\" . }}-headless\n podManagementPolicy: {{ .Values.podManagementPolicy }}\n replicas: {{ .Values.replicas }}\n updateStrategy:\n type: {{ .Values.updateStrategy.type }}\n {{- if (eq \"Recreate\" .Values.updateStrategy.type) }}\n rollingUpdate: null\n {{- end }}\n selector:\n matchLabels:\n app: {{ template \"rabbitmq.name\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n release: \"{{ .Release.Name }}\"\n chart: {{ template \"rabbitmq.chart\" . }}\n {{- if .Values.podLabels }}\n{{ toYaml .Values.podLabels | indent 8 }}\n {{- end }}\n annotations:\n {{- if or (not .Values.rabbitmq.existingErlangSecret) (not .Values.rabbitmq.existingPasswordSecret) }}\n checksum/secret: {{ include (print $.Template.BasePath \"/secrets.yaml\") . | sha256sum }}\n {{- end }}\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n {{- if .Values.schedulerName }}\n schedulerName: \"{{ .Values.schedulerName }}\"\n {{- end }}\n{{- include \"rabbitmq.imagePullSecrets\" . | indent 6 }}\n {{- if .Values.rbacEnabled}}\n serviceAccountName: {{ template \"rabbitmq.fullname\" . }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity: {{- include \"rabbitmq.tplValue\" (dict \"value\" .Values.affinity \"context\" .) | nindent 8 }}\n {{- end }}\n {{- if .Values.priorityClassName }}\n priorityClassName: {{ .Values.priorityClassName }}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n terminationGracePeriodSeconds: 10\n {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.securityContext.enabled }}\n initContainers:\n - name: volume-permissions\n image: \"{{ template \"rabbitmq.volumePermissions.image\" . }}\"\n imagePullPolicy: {{ default \"\" .Values.volumePermissions.image.pullPolicy | quote }}\n command: [\"/bin/chown\", \"-R\", \"{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}\", \"{{ .Values.persistence.path }}\"]\n securityContext:\n runAsUser: 0\n resources:\n{{ toYaml .Values.volumePermissions.resources | indent 10 }}\n volumeMounts:\n - name: data\n mountPath: \"{{ .Values.persistence.path }}\"\n {{- end }}\n containers:\n - name: rabbitmq\n image: {{ template \"rabbitmq.image\" . 
}}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n command:\n - bash\n - -ec\n - |\n mkdir -p /opt/bitnami/rabbitmq/.rabbitmq/\n mkdir -p /opt/bitnami/rabbitmq/etc/rabbitmq/\n touch /opt/bitnami/rabbitmq/var/lib/rabbitmq/.start\n #persist the erlang cookie in both places for server and cli tools\n echo $RABBITMQ_ERL_COOKIE > /opt/bitnami/rabbitmq/var/lib/rabbitmq/.erlang.cookie\n cp /opt/bitnami/rabbitmq/var/lib/rabbitmq/.erlang.cookie /opt/bitnami/rabbitmq/.rabbitmq/\n #change permission so only the user has access to the cookie file\n chmod 600 /opt/bitnami/rabbitmq/.rabbitmq/.erlang.cookie /opt/bitnami/rabbitmq/var/lib/rabbitmq/.erlang.cookie\n #copy the mounted configuration to both places\n cp /opt/bitnami/rabbitmq/conf/* /opt/bitnami/rabbitmq/etc/rabbitmq\n # Apply resources limits\n {{- if .Values.rabbitmq.setUlimitNofiles }}\n ulimit -n \"${RABBITMQ_ULIMIT_NOFILES}\"\n {{- end }}\n #replace the default password that is generated\n sed -i \"/CHANGEME/cdefault_pass=${RABBITMQ_PASSWORD//\\\\/\\\\\\\\}\" /opt/bitnami/rabbitmq/etc/rabbitmq/rabbitmq.conf\n {{- if and .Values.persistence.enabled .Values.forceBoot.enabled }}\n if [ -d \"{{ .Values.persistence.path }}/mnesia/${RABBITMQ_NODENAME}\" ]; then rabbitmqctl force_boot; fi\n {{- end }}\n exec rabbitmq-server\n {{- if .Values.resources }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n {{- end }}\n volumeMounts:\n - name: config-volume\n mountPath: /opt/bitnami/rabbitmq/conf\n - name: healthchecks\n mountPath: /usr/local/sbin/rabbitmq-api-check\n subPath: rabbitmq-api-check\n - name: healthchecks\n mountPath: /usr/local/sbin/rabbitmq-health-check\n subPath: rabbitmq-health-check\n {{- if .Values.rabbitmq.tls.enabled }}\n - name: {{ template \"rabbitmq.fullname\" . }}-certs\n mountPath: /opt/bitnami/rabbitmq/certs\n {{- end }}\n - name: data\n mountPath: \"{{ .Values.persistence.path }}\"\n {{- if .Values.rabbitmq.loadDefinition.enabled }}\n - name: load-definition-volume\n mountPath: /app\n readOnly: true\n {{- end }}\n ports:\n - name: epmd\n containerPort: 4369\n - name: amqp\n containerPort: {{ .Values.service.port }}\n {{- if .Values.rabbitmq.tls.enabled }}\n - name: amqp-ssl\n containerPort: {{ .Values.service.tlsPort }}\n {{- end }}\n - name: dist\n containerPort: {{ .Values.service.distPort }}\n - name: stats\n containerPort: {{ .Values.service.managerPort }}\n{{- if .Values.service.extraContainerPorts }}\n{{ toYaml .Values.service.extraContainerPorts | indent 8 }}\n{{- end }}\n {{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n exec:\n command:\n - sh\n - -c\n - rabbitmq-api-check \"http://{{ .Values.rabbitmq.username }}:$RABBITMQ_PASSWORD@127.0.0.1:{{ .Values.service.managerPort }}/api/healthchecks/node\" '{\"status\":\"ok\"}'\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n {{- end }}\n {{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n exec:\n command:\n - sh\n - -c\n - rabbitmq-health-check \"http://{{ .Values.rabbitmq.username }}:$RABBITMQ_PASSWORD@127.0.0.1:{{ .Values.service.managerPort }}/api/healthchecks/node\" '{\"status\":\"ok\"}'\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n {{- end }}\n {{- if and (gt (.Values.replicas | int) 1) ( eq .Values.rabbitmq.clustering.rebalance true) }}\n lifecycle:\n postStart:\n exec:\n command:\n - /bin/sh\n - -c\n - until rabbitmqctl cluster_status >/dev/null; do echo Waiting for\n cluster readiness...; sleep 5 ; done; rabbitmq-queues rebalance \"all\"\n {{- end }}\n env:\n - name: BITNAMI_DEBUG\n value: {{ ternary \"true\" \"false\" .Values.image.debug | quote }}\n - name: MY_POD_IP\n valueFrom:\n fieldRef:\n fieldPath: status.podIP\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: MY_POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: K8S_SERVICE_NAME\n value: \"{{ template \"rabbitmq.fullname\" . }}-headless\"\n - name: K8S_ADDRESS_TYPE\n value: {{ .Values.rabbitmq.clustering.address_type }}\n {{- if (eq \"hostname\" .Values.rabbitmq.clustering.address_type) }}\n - name: RABBITMQ_NODENAME\n value: \"rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.rabbitmq.clustering.k8s_domain }}\"\n - name: K8S_HOSTNAME_SUFFIX\n value: \".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.rabbitmq.clustering.k8s_domain }}\"\n {{- else }}\n - name: RABBITMQ_NODENAME\n {{- if .Values.rabbitmq.rabbitmqClusterNodeName }}\n value: {{ .Values.rabbitmq.rabbitmqClusterNodeName | quote }}\n {{- else }}\n value: \"rabbit@$(MY_POD_NAME)\"\n {{- end }}\n {{- end }}\n {{- if .Values.ldap.enabled }}\n - name: RABBITMQ_LDAP_ENABLE\n value: \"yes\"\n - name: RABBITMQ_LDAP_TLS\n value: {{ ternary \"yes\" \"no\" .Values.ldap.tls.enabled | quote }}\n - name: RABBITMQ_LDAP_SERVER\n value: {{ .Values.ldap.server }}\n - name: RABBITMQ_LDAP_SERVER_PORT\n value: {{ .Values.ldap.port | quote }}\n - name: RABBITMQ_LDAP_USER_DN_PATTERN\n value: {{ .Values.ldap.user_dn_pattern }}\n {{- end }}\n - name: RABBITMQ_LOGS\n value: {{ .Values.rabbitmq.logs | quote }}\n - name: RABBITMQ_ULIMIT_NOFILES\n value: {{ .Values.rabbitmq.ulimitNofiles | quote }}\n {{- if and .Values.rabbitmq.maxAvailableSchedulers }}\n - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS\n value: {{ printf \"+S %s:%s\" (toString .Values.rabbitmq.maxAvailableSchedulers) (toString .Values.rabbitmq.onlineSchedulers) -}}\n {{- end }}\n - name: RABBITMQ_USE_LONGNAME\n value: \"true\"\n - name: RABBITMQ_ERL_COOKIE\n valueFrom:\n secretKeyRef:\n name: {{ template \"rabbitmq.secretErlangName\" . }}\n key: rabbitmq-erlang-cookie\n - name: RABBITMQ_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"rabbitmq.secretPasswordName\" . }}\n key: rabbitmq-password\n {{- range $key, $value := .Values.rabbitmq.env }}\n - name: {{ $key }}\n value: {{ $value | quote }}\n {{- end }}\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"rabbitmq.metrics.image\" . }}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n env:\n - name: RABBIT_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"rabbitmq.secretPasswordName\" . 
}}\n key: rabbitmq-password\n - name: RABBIT_URL\n value: \"http://{{ .Values.metrics.rabbitmqAddress }}:{{ .Values.service.managerPort }}\"\n - name: RABBIT_USER\n value: {{ .Values.rabbitmq.username }}\n - name: PUBLISH_PORT\n value: \"{{ .Values.metrics.port }}\"\n {{ if .Values.metrics.capabilities }}\n - name: RABBIT_CAPABILITIES\n value: \"{{ .Values.metrics.capabilities }}\"\n {{- end }}\n {{- range $key, $value := .Values.metrics.env }}\n - name: {{ $key }}\n value: {{ $value | quote }}\n {{- end }}\n ports:\n - name: metrics\n containerPort: {{ .Values.metrics.port }}\n {{- if .Values.metrics.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}\n timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}\n periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }}\n failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }}\n successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }}\n {{- end }}\n {{- if .Values.metrics.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}\n timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}\n periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }}\n failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }}\n successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }}\n {{- end }}\n resources:\n{{ toYaml .Values.metrics.resources | indent 10 }}\n{{- end }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.securityContext.fsGroup }}\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- if .Values.securityContext.extra }}\n {{- toYaml .Values.securityContext.extra | nindent 8 }}\n {{- end }}\n {{- end }}\n volumes:\n {{- if .Values.rabbitmq.tls.enabled }}\n - name: {{ template \"rabbitmq.fullname\" . }}-certs\n secret:\n secretName: {{ if .Values.rabbitmq.tls.existingSecret }}{{ .Values.rabbitmq.tls.existingSecret }}{{- else }}{{ template \"rabbitmq.fullname\" . }}-certs{{- end }}\n items:\n - key: ca.crt\n path: ca_certificate.pem\n - key: tls.crt\n path: server_certificate.pem\n - key: tls.key\n path: server_key.pem\n {{- end }}\n - name: config-volume\n configMap:\n name: {{ template \"rabbitmq.fullname\" . }}-config\n items:\n - key: rabbitmq.conf\n path: rabbitmq.conf\n {{- if .Values.rabbitmq.advancedConfiguration}}\n - key: advanced.config\n path: advanced.config\n {{- end }}\n - key: enabled_plugins\n path: enabled_plugins\n - name: healthchecks\n configMap:\n name: {{ template \"rabbitmq.fullname\" . }}-healthchecks\n items:\n - key: rabbitmq-health-check\n path: rabbitmq-health-check\n mode: 111\n - key: rabbitmq-api-check\n path: rabbitmq-api-check\n mode: 111\n {{- if .Values.rabbitmq.loadDefinition.enabled }}\n - name: load-definition-volume\n secret:\n secretName: {{ .Values.rabbitmq.loadDefinition.secretName | quote }}\n {{- end }}\n {{- if not .Values.persistence.enabled }}\n - name: data\n emptyDir: {}\n {{- else if .Values.persistence.existingClaim }}\n - name: data\n persistentVolumeClaim:\n {{- with .Values.persistence.existingClaim }}\n claimName: {{ tpl . $ }}\n {{- end }}\n {{- else }}\n volumeClaimTemplates:\n - metadata:\n name: data\n labels:\n app: {{ template \"rabbitmq.name\" . 
}}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n spec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{ include \"rabbitmq.storageClass\" . }}\n {{- end }}\n",
"# svc-headless.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}-headless\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n clusterIP: None\n ports:\n - name: epmd\n port: 4369\n targetPort: epmd\n - name: amqp\n port: {{ .Values.service.port }}\n targetPort: amqp\n{{- if .Values.rabbitmq.tls.enabled }}\n - name: amqp-tls\n port: {{ .Values.service.tlsPort }}\n targetPort: amqp-tls\n{{- end }}\n - name: dist\n port: {{ .Values.service.distPort }}\n targetPort: dist\n - name: stats\n port: {{ .Values.service.managerPort }}\n targetPort: stats\n selector:\n app: {{ template \"rabbitmq.name\" . }}\n release: \"{{ .Release.Name }}\"\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"rabbitmq.fullname\" . }}\n labels:\n app: {{ template \"rabbitmq.name\" . }}\n chart: {{ template \"rabbitmq.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- if or .Values.service.annotations .Values.metrics.enabled }}\n annotations:\n{{- end }}\n{{- if .Values.service.annotations }}\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\n{{- if .Values.metrics.enabled }}\n{{ toYaml .Values.metrics.annotations | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n{{- if and (eq .Values.service.type \"LoadBalancer\") .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n {{ with .Values.service.loadBalancerSourceRanges }}\n{{ toYaml . | indent 4 }}\n{{- end }}\n{{- end }}\n {{- if (and (eq .Values.service.type \"LoadBalancer\") (not (empty .Values.service.loadBalancerIP))) }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n {{- end }}\n ports:\n - name: epmd\n port: 4369\n targetPort: epmd\n {{- if (eq .Values.service.type \"ClusterIP\") }}\n nodePort: null\n {{- end }}\n - name: amqp\n port: {{ .Values.service.port }}\n targetPort: amqp\n {{- if (eq .Values.service.type \"ClusterIP\") }}\n nodePort: null\n {{- else if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePort))) }}\n nodePort: {{ .Values.service.nodePort }}\n {{- end }}\n {{- if .Values.rabbitmq.tls.enabled }}\n - name: amqp-ssl\n port: {{ .Values.service.tlsPort }}\n targetPort: amqp-ssl\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodeTlsPort))) }}\n nodePort: {{ .Values.service.nodeTlsPort }}\n {{- end }}\n {{- end }}\n - name: dist\n port: {{ .Values.service.distPort }}\n targetPort: dist\n {{- if (eq .Values.service.type \"ClusterIP\") }}\n nodePort: null\n {{- end }}\n - name: stats\n port: {{ .Values.service.managerPort }}\n targetPort: stats\n {{- if (eq .Values.service.type \"ClusterIP\") }}\n nodePort: null\n {{- end }}\n{{- if .Values.metrics.enabled }}\n - name: metrics\n port: {{ .Values.metrics.port }}\n targetPort: metrics\n {{- if (eq .Values.service.type \"ClusterIP\") }}\n nodePort: null\n {{- end }}\n{{- end }}\n{{- if .Values.service.extraPorts }}\n{{ toYaml .Values.service.extraPorts | indent 2 }}\n{{- end }}\n selector:\n app: {{ template \"rabbitmq.name\" . }}\n release: \"{{ .Release.Name }}\"\n"
] | ## Global Docker image parameters
## Please note that this will override the image parameters, including those of dependencies, that are configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami RabbitMQ image version
## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/
##
image:
registry: docker.io
repository: bitnami/rabbitmq
tag: 3.8.2-debian-10-r30
  ## set to true if you would like to see extra information in the logs
  ## it turns on BASH and NAMI debugging in minideb
## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
debug: false
  ## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override rabbitmq.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override rabbitmq.fullname template
##
# fullnameOverride:
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Does your cluster have RBAC enabled? Assumed yes by default.
rbacEnabled: true
## RabbitMQ nodes should be initialized one at a time when the cluster is built for the
## first time; therefore the default value of podManagementPolicy is 'OrderedReady'.
## Once a node has joined the cluster, on reboot it waits for a response from another
## node of the same cluster, except for the node that was stopped last.
## If the cluster shuts down gracefully, you do not need to change podManagementPolicy,
## because the first pod of the statefulset is always the last one of the cluster to stop.
## However, if the last node to stop is not the first pod of the statefulset (e.g. due to
## a failure), you must change podManagementPolicy to 'Parallel'.
## ref : https://www.rabbitmq.com/clustering.html#restarting
##
podManagementPolicy: OrderedReady
## section of specific values for rabbitmq
rabbitmq:
## RabbitMQ application username
## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
##
username: user
## RabbitMQ application password
## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
##
# password:
# existingPasswordSecret: name-of-existing-secret
## Erlang cookie to determine whether different nodes are allowed to communicate with each other
## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
##
# erlangCookie:
# existingErlangSecret: name-of-existing-secret
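  ## A minimal sketch of creating such a secret up front (hypothetical secret name;
  ## the statefulset expects the keys `rabbitmq-password` and `rabbitmq-erlang-cookie`):
  ##   kubectl create secret generic my-rabbitmq-credentials \
  ##     --from-literal=rabbitmq-password=CHANGEME \
  ##     --from-literal=rabbitmq-erlang-cookie=SOMECOOKIE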
## Node name to cluster with. e.g.: `clusternode@hostname`
## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
##
# rabbitmqClusterNodeName:
## Value for the RABBITMQ_LOGS environment variable
## ref: https://www.rabbitmq.com/logging.html#log-file-location
##
logs: '-'
## RabbitMQ Max File Descriptors
## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits
##
setUlimitNofiles: true
ulimitNofiles: '65536'
## RabbitMQ maximum available scheduler threads and online scheduler threads
## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads
##
maxAvailableSchedulers: 2
onlineSchedulers: 1
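  ## Note: these two values are rendered into RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS
  ## as "+S <maxAvailableSchedulers>:<onlineSchedulers>" by the statefulset template.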
## Plugins to enable
plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s"
## Extra plugins to enable
## Use this instead of `plugins` to add new plugins
extraPlugins: "rabbitmq_auth_backend_ldap"
## Clustering settings
clustering:
address_type: hostname
k8s_domain: cluster.local
    ## Rebalance the queue master distribution across the cluster when a new replica is created
## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance
rebalance: false
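    ## When replicas > 1 and rebalance is true, a postStart hook waits for the
    ## cluster to respond and then runs `rabbitmq-queues rebalance "all"`.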
loadDefinition:
enabled: false
secretName: load-definition
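  ## Sketch of a full load-definition setup (assuming the default secret name above):
  ## set `loadDefinition.enabled: true`, ship the JSON via `extraSecrets` (see the
  ## example at the end of this file) and point the management plugin at the mounted
  ## file through `extraConfiguration`:
  ##   management.load_definitions = /app/load_definition.json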
## environment variables to configure rabbitmq
## ref: https://www.rabbitmq.com/configure.html#customise-environment
env: {}
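  ## e.g. (hedged example; each key/value is passed through verbatim as a container
  ## environment variable):
  # env:
  #   RABBITMQ_CTL_ERL_ARGS: "-proto_dist inet6_tcp"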
## Configuration file content: required cluster configuration
  ## Do not override unless you know what you are doing. To add more configuration, use `extraConfiguration` or `advancedConfiguration` instead
configuration: |-
## Clustering
cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
cluster_formation.node_cleanup.interval = 10
cluster_formation.node_cleanup.only_log_warning = true
cluster_partition_handling = autoheal
# queue master locator
queue_master_locator=min-masters
    # allow remote (non-loopback) connections for the guest user
loopback_users.guest = false
## Configuration file content: extra configuration
## Use this instead of `configuration` to add more configuration
extraConfiguration: |-
#disk_free_limit.absolute = 50MB
#management.load_definitions = /app/load_definition.json
## Configuration file content: advanced configuration
  ## Use this as additional configuration in the classic config format (Erlang term configuration format)
##
## If you set LDAP with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines.
## advancedConfiguration: |-
## [{
## rabbitmq_auth_backend_ldap,
## [{
## ssl_options,
## [{
## verify, verify_none
## }, {
## fail_if_no_peer_cert,
## false
## }]
## ]}
## }].
##
advancedConfiguration: |-
  ## Enable TLS encryption for connections to RabbitMQ
## ref: https://www.rabbitmq.com/ssl.html
##
tls:
enabled: false
failIfNoPeerCert: true
sslOptionsVerify: verify_peer
caCertificate: |-
serverCertificate: |-
serverKey: |-
# existingSecret: name-of-existing-secret-to-rabbitmq
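    ## A minimal sketch for providing your own TLS secret (hypothetical names; the
    ## statefulset mounts the keys `ca.crt`, `tls.crt` and `tls.key` from it):
    ##   kubectl create secret generic my-rabbitmq-tls \
    ##     --from-file=ca.crt=./ca.crt \
    ##     --from-file=tls.crt=./server.crt \
    ##     --from-file=tls.key=./server.key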
## LDAP configuration
##
ldap:
enabled: false
server: ""
port: "389"
user_dn_pattern: cn=${username},dc=example,dc=org
tls:
    # If you enable TLS/SSL, you can set advanced options using the advancedConfiguration parameter.
enabled: false
## Kubernetes service type
service:
type: ClusterIP
## Node port
## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
##
# nodePort: 30672
## Set the LoadBalancerIP
##
# loadBalancerIP:
## Node port Tls
##
# nodeTlsPort: 30671
## Amqp port
## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
##
port: 5672
## Amqp Tls port
##
tlsPort: 5671
## Dist port
## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
##
distPort: 25672
## RabbitMQ Manager port
## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables
##
managerPort: 15672
## Service annotations
annotations: {}
# service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
## Load Balancer sources
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
##
# loadBalancerSourceRanges:
# - 10.10.10.0/24
## Extra ports to expose
# extraPorts:
## Extra ports to be included in container spec, primarily informational
# extraContainerPorts:
# Additional pod labels to apply
podLabels: {}
## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
extra: {}
persistence:
  ## this enables PVC templates that create one volume per pod
enabled: true
## rabbitmq data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
## Existing PersistentVolumeClaims
## The value is evaluated as a template
## So, for example, the name can depend on .Release or .Chart
# existingClaim: ""
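  ## e.g. (hypothetical claim name; works because the value is passed through `tpl`):
  # existingClaim: "{{ .Release.Name }}-rabbitmq-data"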
# If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well.
size: 8Gi
# persistence directory, maps to the rabbitmq data directory
path: /opt/bitnami/rabbitmq/var/lib/rabbitmq
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
networkPolicy:
## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
enabled: false
## The Policy model to apply. When set to false, only pods with the correct
## client label will have network access to the ports RabbitMQ is listening
## on. When true, RabbitMQ will accept connections from any source
## (with the correct destination port).
##
allowExternal: true
## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed.
##
# additionalRules:
# - matchLabels:
# - role: frontend
# - matchExpressions:
# - key: role
# operator: In
# values:
# - frontend
## Replica count, set to 1 to provide a default available cluster
replicas: 1
## Pod priority
## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
# priorityClassName: ""
## updateStrategy for RabbitMQ statefulset
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy:
type: RollingUpdate
## Node labels and tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
nodeSelector: {}
tolerations: []
affinity: {}
podDisruptionBudget: {}
# maxUnavailable: 1
# minAvailable: 1
## annotations for rabbitmq pods
podAnnotations: {}
## Configure the ingress resource that allows you to access the
## RabbitMQ installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
  ## The hostname to be covered by this ingress record.
  ## Note: the chart's ingress template supports a single host, set via `hostName`
## hostName: foo.bar.com
path: /
## Set this to true in order to enable TLS on the ingress record
  ## A side effect of this will be that the backend rabbitmq service will be connected at port 443
tls: false
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: myTlsSecret
## Ingress annotations done as key:value pairs
## If you're using kube-lego, you will want to add:
## kubernetes.io/tls-acme: true
##
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: true
## The following settings configure the frequency of the liveness and readiness probes
livenessProbe:
enabled: true
initialDelaySeconds: 120
timeoutSeconds: 20
periodSeconds: 30
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 10
timeoutSeconds: 20
periodSeconds: 30
failureThreshold: 3
successThreshold: 1
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/rabbitmq-exporter
tag: 0.29.0-debian-10-r28
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## environment variables to configure rabbitmq_exporter
## ref: https://github.com/kbudde/rabbitmq_exporter#configuration
env: {}
## Metrics exporter port
port: 9419
## RabbitMQ address to connect to (from the same Pod, usually the local loopback address).
## If your Kubernetes cluster does not support IPv6, you can change to `127.0.0.1` in order to force IPv4.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/#networking
rabbitmqAddress: localhost
## Comma-separated list of extended scraping capabilities supported by the target RabbitMQ server
## ref: https://github.com/kbudde/rabbitmq_exporter#extended-rabbitmq-capabilities
capabilities: "bert,no_sort"
resources: {}
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9419"
livenessProbe:
enabled: true
initialDelaySeconds: 15
timeoutSeconds: 5
periodSeconds: 30
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
timeoutSeconds: 5
periodSeconds: 30
failureThreshold: 3
successThreshold: 1
## Prometheus Service Monitor
## ref: https://github.com/coreos/prometheus-operator
## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
serviceMonitor:
## If the operator is installed in your cluster, set to true to create a Service Monitor Entry
enabled: false
## Specify the namespace in which the serviceMonitor resource will be created
# namespace: ""
## Specify the interval at which metrics should be scraped
interval: 30s
## Specify the timeout after which the scrape is ended
# scrapeTimeout: 30s
## Specify Metric Relabellings to add to the scrape endpoint
# relabellings:
## Specify honorLabels parameter to add the scrape endpoint
honorLabels: false
    ## Specify the release label for the ServiceMonitor. Sometimes a custom value is needed for the Prometheus Operator to pick it up
# release: ""
## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
additionalLabels: {}
## Custom PrometheusRule to be defined
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
prometheusRule:
enabled: false
additionalLabels: {}
namespace: ""
rules: []
    ## List of rules, rendered as a template by Helm.
    ## These are just example rules inspired by https://awesome-prometheus-alerts.grep.to/rules.html
    ## Please adapt them to your needs.
    ## Make sure to constrain the rules to the current rabbitmq service.
    ## Also make sure to escape anything that looks like a Helm template.
# - alert: RabbitmqDown
# expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0
# for: 5m
# labels:
# severity: error
# annotations:
# summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }})
# description: RabbitMQ node down
# - alert: ClusterDown
# expr: |
# sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"})
# < {{ .Values.replicas }}
# for: 5m
# labels:
# severity: error
# annotations:
# summary: Cluster down (instance {{ "{{ $labels.instance }}" }})
# description: |
# Less than {{ .Values.replicas }} nodes running in RabbitMQ cluster
# VALUE = {{ "{{ $value }}" }}
# - alert: ClusterPartition
# expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . }}"} > 0
# for: 5m
# labels:
# severity: error
# annotations:
# summary: Cluster partition (instance {{ "{{ $labels.instance }}" }})
# description: |
# Cluster partition
# VALUE = {{ "{{ $value }}" }}
# - alert: OutOfMemory
# expr: |
# rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . }}"}
# / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . }}"}
# * 100 > 90
# for: 5m
# labels:
# severity: warning
# annotations:
# summary: Out of memory (instance {{ "{{ $labels.instance }}" }})
# description: |
    # Memory available for RabbitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }}
# LABELS: {{ "{{ $labels }}" }}
# - alert: TooManyConnections
# expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" . }}"} > 1000
# for: 5m
# labels:
# severity: warning
# annotations:
# summary: Too many connections (instance {{ "{{ $labels.instance }}" }})
# description: |
# RabbitMQ instance has too many connections (> 1000)
# VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }}
##
## Init containers parameters:
## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
##
volumePermissions:
enabled: false
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
resources: {}
## forceBoot: executes 'rabbitmqctl force_boot' to force-boot a cluster that was shut down
## unexpectedly in an unknown order.
## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot
##
forceBoot:
enabled: false
## Optionally specify extra secrets to be created by the chart.
## This can be useful when combined with `rabbitmq.loadDefinition` to automatically create the secret containing the definitions to be loaded.
##
extraSecrets: {}
# load-definition:
# load_definition.json: |
# {
# ...
# }
|
openvpn | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"openvpn.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 24 -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"openvpn.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"openvpn.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# certs-pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: {{ template \"openvpn.fullname\" . }}\n labels:\n app: {{ template \"openvpn.name\" . }}\n chart: {{ template \"openvpn.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# config-ccd.yaml\n{{- if .Values.openvpn.ccd.enabled }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"openvpn.fullname\" . }}-ccd\n labels:\n app: {{ template \"openvpn.name\" . }}\n chart: {{ template \"openvpn.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n{{ toYaml .Values.openvpn.ccd.config | indent 2 }}\n{{- end }}\n",
"# config-openvpn.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"openvpn.fullname\" . }}\n labels:\n app: {{ template \"openvpn.name\" . }}\n chart: {{ template \"openvpn.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n setup-certs.sh: |-\n #!/bin/bash\n EASY_RSA_LOC=\"/etc/openvpn/certs\"\n cd $EASY_RSA_LOC\n SERVER_CERT=\"${EASY_RSA_LOC}/pki/issued/server.crt\"\n if [ -e \"$SERVER_CERT\" ]\n then\n echo \"found existing certs - reusing\"\n {{- if .Values.openvpn.useCrl }}\n if [ ! -e ${EASY_RSA_LOC}/crl.pem ]\n then\n echo \"generating missed crl file\"\n ./easyrsa gen-crl\n cp ${EASY_RSA_LOC}/pki/crl.pem ${EASY_RSA_LOC}/crl.pem\n chmod 644 ${EASY_RSA_LOC}/crl.pem\n fi\n {{- end }}\n {{- if .Values.openvpn.taKey }}\n if [ ! -e ${EASY_RSA_LOC}/pki/ta.key ]\n then\n echo \"generating missed ta.key\"\n openvpn --genkey --secret ${EASY_RSA_LOC}/pki/ta.key\n fi\n {{- end }}\n else\n cp -R /usr/share/easy-rsa/* $EASY_RSA_LOC\n ./easyrsa init-pki\n echo \"ca\\n\" | ./easyrsa build-ca nopass\n ./easyrsa build-server-full server nopass\n ./easyrsa gen-dh\n {{- if .Values.openvpn.useCrl }}\n ./easyrsa gen-crl\n cp ${EASY_RSA_LOC}/pki/crl.pem ${EASY_RSA_LOC}/crl.pem # Note: the pki/ directory is inaccessible after openvpn drops privileges, so we cp crl.pem to ${EASY_RSA_LOC} to allow CRL updates without a restart\n chmod 644 ${EASY_RSA_LOC}/crl.pem\n {{- end }}\n {{- if .Values.openvpn.taKey }}\n openvpn --genkey --secret ${EASY_RSA_LOC}/pki/ta.key\n {{- end }}\n fi\n\n\n newClientCert.sh: |-\n #!/bin/bash\n EASY_RSA_LOC=\"/etc/openvpn/certs\"\n cd $EASY_RSA_LOC\n MY_IP_ADDR=\"$2\"\n ./easyrsa build-client-full $1 nopass\n cat >${EASY_RSA_LOC}/pki/$1.ovpn <<EOF\n client\n nobind\n dev tun\n{{- if eq .Values.service.type \"NodePort\" }}\n remote ${MY_IP_ADDR} {{ .Values.service.nodePort }} {{ .Values.openvpn.OVPN_PROTO }}\n{{- else }}\n remote ${MY_IP_ADDR} {{ .Values.service.externalPort }} {{ .Values.openvpn.OVPN_PROTO }}\n{{- end }}\n {{ if .Values.openvpn.cipher }}\n cipher {{ .Values.openvpn.cipher }}\n {{- end }}\n {{ if .Values.openvpn.redirectGateway }}\n redirect-gateway def1\n {{- end }}\n {{ if .Values.openvpn.clientConf }}\n{{ indent 6 .Values.openvpn.clientConf }}\n {{- end }}\n <key>\n `cat ${EASY_RSA_LOC}/pki/private/$1.key`\n </key>\n <cert>\n `cat ${EASY_RSA_LOC}/pki/issued/$1.crt`\n </cert>\n <ca>\n `cat ${EASY_RSA_LOC}/pki/ca.crt`\n </ca>\n {{- if .Values.openvpn.taKey }}\n <tls-auth>\n `cat ${EASY_RSA_LOC}/pki/ta.key`\n </tls-auth>\n key-direction 1\n {{- end }}\n EOF\n cat pki/$1.ovpn\n\n revokeClientCert.sh: |-\n #!/bin/bash\n EASY_RSA_LOC=\"/etc/openvpn/certs\"\n cd $EASY_RSA_LOC\n ./easyrsa revoke $1\n ./easyrsa gen-crl\n cp ${EASY_RSA_LOC}/pki/crl.pem ${EASY_RSA_LOC}\n chmod 644 ${EASY_RSA_LOC}/crl.pem\n\n configure.sh: |-\n #!/bin/sh\n\n cidr2mask() {\n # Number of args to shift, 255..255, first non-255 byte, zeroes\n set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0\n [ $1 -gt 1 ] && shift \"$1\" || shift\n echo ${1-0}.${2-0}.${3-0}.${4-0}\n }\n\n cidr2net() {\n local i ip mask netOctets octets\n ip=\"${1%/*}\"\n mask=\"${1#*/}\"\n octets=$(echo \"$ip\" | tr '.' 
'\\n')\n\n for octet in $octets; do\n i=$((i+1))\n if [ $i -le $(( mask / 8)) ]; then\n netOctets=\"$netOctets.$octet\"\n elif [ $i -eq $(( mask / 8 +1 )) ]; then\n netOctets=\"$netOctets.$((((octet / ((256 / ((2**((mask % 8)))))))) * ((256 / ((2**((mask % 8))))))))\"\n else\n netOctets=\"$netOctets.0\"\n fi\n done\n\n echo ${netOctets#.}\n }\n\n /etc/openvpn/setup/setup-certs.sh\n{{ if .Values.openvpn.istio.enabled }}\n iptables -t nat -A PREROUTING -s {{ .Values.openvpn.OVPN_NETWORK }}/{{ .Values.openvpn.OVPN_SUBNET }} -i tun0 -p tcp -j REDIRECT --to-ports {{ .Values.openvpn.istio.proxy.port }}\n{{ end }}\n\n{{ range .Values.openvpn.iptablesExtra }}\n iptables {{ . }}\n{{ end }}\n\n iptables -t nat -A POSTROUTING -s {{ .Values.openvpn.OVPN_NETWORK }}/{{ .Values.openvpn.OVPN_SUBNET }} -o eth0 -j MASQUERADE\n mkdir -p /dev/net\n if [ ! -c /dev/net/tun ]; then\n mknod /dev/net/tun c 10 200\n fi\n\n if [ \"$DEBUG\" == \"1\" ]; then\n echo ========== ${OVPN_CONFIG} ==========\n cat \"${OVPN_CONFIG}\"\n echo ====================================\n fi\n\n intAndIP=\"$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $5 \"-\" $7}')\"\n int=\"${intAndIP%-*}\"\n ip=\"${intAndIP#*-}\"\n cidr=\"$(ip addr show dev \"$int\" | awk -vip=\"$ip\" '($2 ~ ip) {print $2}')\"\n\n NETWORK=\"$(cidr2net $cidr)\"\n NETMASK=\"$(cidr2mask ${cidr#*/})\"\n DNS=$(cat /etc/resolv.conf | grep -v '^#' | grep nameserver | awk '{print $2}')\n SEARCH=$(cat /etc/resolv.conf | grep -v '^#' | grep search | awk '{$1=\"\"; print $0}')\n FORMATTED_SEARCH=\"\"\n for DOMAIN in $SEARCH; do\n FORMATTED_SEARCH=\"${FORMATTED_SEARCH}push \\\"dhcp-option DOMAIN-SEARCH ${DOMAIN}\\\"\\n\"\n done\n cp -f /etc/openvpn/setup/openvpn.conf /etc/openvpn/\n sed 's|OVPN_K8S_SEARCH|'\"${FORMATTED_SEARCH}\"'|' -i /etc/openvpn/openvpn.conf\n sed 's|OVPN_K8S_DNS|'\"${DNS}\"'|' -i /etc/openvpn/openvpn.conf\n sed 's|NETWORK|'\"${NETWORK}\"'|' -i /etc/openvpn/openvpn.conf\n sed 's|NETMASK|'\"${NETMASK}\"'|' -i /etc/openvpn/openvpn.conf\n\n # exec openvpn process so it receives lifecycle signals\n exec openvpn --config /etc/openvpn/openvpn.conf\n openvpn.conf: |-\n server {{ .Values.openvpn.OVPN_NETWORK }} {{ .Values.openvpn.OVPN_SUBNET }}\n verb 3\n{{ if .Values.openvpn.useCrl }}\n crl-verify /etc/openvpn/certs/crl.pem\n{{ end }}\n key /etc/openvpn/certs/pki/private/server.key\n ca /etc/openvpn/certs/pki/ca.crt\n cert /etc/openvpn/certs/pki/issued/server.crt\n dh /etc/openvpn/certs/pki/dh.pem\n{{ if .Values.openvpn.taKey }}\n tls-auth /etc/openvpn/certs/pki/ta.key 0\n{{ end }}\n\n{{ if .Values.openvpn.cipher }}\n cipher {{ .Values.openvpn.cipher }}\n{{ end }}\n key-direction 0\n keepalive 10 60\n persist-key\n persist-tun\n\n proto {{ .Values.openvpn.OVPN_PROTO }}\n port {{ .Values.service.internalPort }}\n dev tun0\n status /tmp/openvpn-status.log\n\n user nobody\n group nogroup\n\n{{ if .Values.openvpn.ccd.enabled }}\n client-config-dir /etc/openvpn/ccd\n{{ end }}\n\n{{ if .Values.openvpn.DEFAULT_ROUTE_ENABLED }}\n push \"route NETWORK NETMASK\"\n{{ end }}\n{{ if and (.Values.openvpn.OVPN_K8S_POD_NETWORK) (.Values.openvpn.OVPN_K8S_POD_SUBNET) }}\n push \"route {{ .Values.openvpn.OVPN_K8S_POD_NETWORK }} {{ .Values.openvpn.OVPN_K8S_POD_SUBNET }}\"\n{{ end }}\n{{ if and (.Values.openvpn.OVPN_K8S_SVC_NETWORK) (.Values.openvpn.OVPN_K8S_SVC_SUBNET) }}\n push \"route {{ .Values.openvpn.OVPN_K8S_SVC_NETWORK }} {{ .Values.openvpn.OVPN_K8S_SVC_SUBNET }}\"\n{{ end }}\n\n{{ if .Values.openvpn.dhcpOptionDomain }}\n OVPN_K8S_SEARCH\n{{ end }}\n push \"dhcp-option DNS 
OVPN_K8S_DNS\"\n\n {{- if .Values.openvpn.serverConf }}\n{{ indent 6 .Values.openvpn.serverConf }}\n {{- end -}}\n",
"# openvpn-deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"openvpn.fullname\" . }}\n labels:\n app: {{ template \"openvpn.name\" . }}\n chart: {{ template \"openvpn.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n{{- if .Values.updateStrategy }}\n strategy:\n{{ toYaml .Values.updateStrategy | indent 4 }}\n{{- end }}\n selector:\n matchLabels:\n app: {{ template \"openvpn.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"openvpn.name\" . }}\n release: {{ .Release.Name }}\n annotations:\n checksum/config: {{ include (print .Template.BasePath \"/config-openvpn.yaml\") . | sha256sum }}\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n{{- if .Values.ipForwardInitContainer }}\n initContainers:\n - args:\n - -c\n - sysctl -w net.ipv4.ip_forward=1\n command:\n - /bin/sh\n image: busybox:1.29\n imagePullPolicy: IfNotPresent\n name: sysctl\n resources:\n requests:\n cpu: 1m\n memory: 1Mi\n securityContext:\n privileged: true\n{{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command: [\"/etc/openvpn/setup/configure.sh\"]\n ports:\n - containerPort: {{ .Values.service.internalPort }}\n {{- if .Values.service.hostPort }}\n hostPort: {{ .Values.service.hostPort }}\n {{- end }}\n name: openvpn\n securityContext:\n capabilities:\n add:\n - NET_ADMIN\n readinessProbe:\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n exec:\n command:\n - nc\n {{- if eq .Values.openvpn.OVPN_PROTO \"udp\" }}\n - -u\n {{- end }}\n - -z\n - 127.0.0.1\n - \"{{ .Values.service.internalPort }}\"\n resources:\n requests:\n cpu: \"{{ .Values.resources.requests.cpu }}\"\n memory: \"{{ .Values.resources.requests.memory }}\"\n limits:\n cpu: \"{{ .Values.resources.limits.cpu }}\"\n memory: \"{{ .Values.resources.limits.memory }}\"\n volumeMounts:\n - mountPath: /etc/openvpn/setup\n name: openvpn\n readOnly: false\n - mountPath: /etc/openvpn/certs\n {{- if .Values.persistence.subPath }}\n subPath: {{ .Values.persistence.subPath }}\n {{- end }}\n name: certs\n readOnly: {{ if .Values.openvpn.keystoreSecret }}true{{ else }}false{{ end }}\n {{- if .Values.openvpn.ccd.enabled }}\n - mountPath: /etc/openvpn/ccd\n name: openvpn-ccd\n {{- end }}\n volumes:\n - name: openvpn\n configMap:\n name: {{ template \"openvpn.fullname\" . }}\n defaultMode: 0775\n {{- if .Values.openvpn.ccd.enabled }}\n - name: openvpn-ccd\n configMap:\n name: {{ template \"openvpn.fullname\" . }}-ccd\n defaultMode: 0775\n {{- end }}\n - name: certs\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template \"openvpn.fullname\" . 
}}{{- end }}\n {{- else if .Values.openvpn.keystoreSecret }}\n secret:\n secretName: \"{{ .Values.openvpn.keystoreSecret }}\"\n defaultMode: 0600\n items:\n - key: \"server.key\"\n path: \"pki/private/server.key\"\n - key: \"ca.crt\"\n path: \"pki/ca.crt\"\n - key: \"server.crt\"\n path: \"pki/issued/server.crt\"\n - key: \"dh.pem\"\n path: \"pki/dh.pem\"\n {{- if .Values.openvpn.useCrl }}\n - key: \"crl.pem\"\n path: \"crl.pem\"\n mode: 0644\n {{- end }}\n {{- if .Values.openvpn.taKey }}\n - key: \"ta.key\"\n path: \"pki/ta.key\"\n {{- end }}\n {{- else }}\n emptyDir: {}\n {{- end -}}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n {{ toYaml .Values.nodeSelector }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n\n{{- if .Values.imagePullSecretName }}\n imagePullSecrets:\n - name: {{ .Values.imagePullSecretName }}\n{{- end -}}\n\n",
"# openvpn-service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"openvpn.fullname\" . }}\n labels:\n app: {{ template \"openvpn.name\" . }}\n chart: {{ template \"openvpn.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.service.externalIPs }}\n externalIPs:\n{{ toYaml .Values.service.externalIPs | indent 4 }}\n{{- end }}\n ports:\n - name: openvpn\n port: {{ .Values.service.externalPort }}\n targetPort: {{ .Values.service.internalPort }}\n protocol: {{ .Values.openvpn.OVPN_PROTO | upper }}\n{{- if and (eq \"NodePort\" .Values.service.type) (hasKey .Values.service \"nodePort\") }}\n nodePort: {{ .Values.service.nodePort }}\n{{- end }}\n selector:\n app: {{ template \"openvpn.name\" . }}\n release: {{ .Release.Name }}\n type: {{ .Values.service.type }}\n{{- if .Values.service.clusterIP }}\n clusterIP: {{ .Values.service.clusterIP }}\n{{- end }}\n{{- if .Values.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n{{- end }}\n{{- if .Values.service.loadBalancerSourceRanges }}\n loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }}\n{{- end }}\n"
] | # Default values for openvpn.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
updateStrategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 1
# maxUnavailable: 0
# For supporting pulling from private registries
imagePullSecretName:
image:
repository: jfelten/openvpn-docker
tag: 1.1.0
pullPolicy: IfNotPresent
service:
type: LoadBalancer
externalPort: 443
internalPort: 443
# hostPort: 443
externalIPs: []
nodePort: 32085
# clusterIP: None
# loadBalancerSourceRanges: [0.0.0.0/0]
# loadBalancerIP: 10.0.0.1
## Here annotations can be added to the openvpn service
# annotations:
# external-dns.alpha.kubernetes.io/hostname: vpn.example.com
annotations: {}
## Here annotations can be added to the openvpn pod
# podAnnotations:
# backup.ark.heptio.com/backup-volumes: certs
podAnnotations: {}
# Add a privileged init container to enable IPv4 forwarding
ipForwardInitContainer: false
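# (when true, the chart adds a privileged busybox init container that runs "sysctl -w net.ipv4.ip_forward=1" at pod start)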
resources:
limits:
cpu: 300m
memory: 128Mi
requests:
cpu: 300m
memory: 128Mi
readinessProbe:
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 2
persistence:
enabled: true
# subPath: openvpn
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
## openvpn data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 2M
openvpn:
# Network allocated for openvpn clients (default: 10.240.0.0).
OVPN_NETWORK: 10.240.0.0
# Network subnet allocated for openvpn clients (default: 255.255.0.0).
OVPN_SUBNET: 255.255.0.0
# Protocol used by openvpn: tcp or udp (default: udp).
OVPN_PROTO: tcp
# Kubernetes pod network (optional).
OVPN_K8S_POD_NETWORK: "10.0.0.0"
# Kubernetes pod network subnet (optional).
OVPN_K8S_POD_SUBNET: "255.0.0.0"
# Kubernetes service network (optional).
# Define openvpn.OVPN_K8S_SVC_NETWORK and openvpn.OVPN_K8S_SVC_SUBNET if a separate route to the Kubernetes service subnet is needed
# OVPN_K8S_SVC_NETWORK:
# Kubernetes service network subnet (optional).
# OVPN_K8S_SVC_SUBNET:
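# Hypothetical example, assuming the common kubeadm service CIDR 10.96.0.0/12:
# OVPN_K8S_SVC_NETWORK: "10.96.0.0"
# OVPN_K8S_SVC_SUBNET: "255.240.0.0"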
# Set the default route, which openvpn determines from the network routes inside the openvpn pod
DEFAULT_ROUTE_ENABLED: true
# Server certificate data
# keystoreSecret:
# secret with openvpn certificates. If specified, certificates are taken from the secret
# create the secret with a command such as:
# kubectl create secret generic openvpn-keystore-secret --from-file=./server.key --from-file=./ca.crt --from-file=./server.crt --from-file=./dh.pem [--from-file=./crl.pem] [--from-file=./ta.key]
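# then reference it by name, e.g. for the secret created by the command above:
# keystoreSecret: openvpn-keystore-secret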
# Push a `dhcp-option DOMAIN` config
dhcpOptionDomain: true
# Redirect all client traffic through VPN
redirectGateway: true
# Use/generate certificate revocation list
useCrl: false
# Use/generate a ta.key (https://openvpn.net/community-resources/hardening-openvpn-security/)
taKey: false
# Override default cipher
# cipher: AES-256-CBC
# Lines appended to the end of the server configuration file
# serverConf: |
# max-clients 100
# client-to-client
# Lines appended to the end of the client configuration file
# Example: if all of your clients are Ubuntu (18.04+) you may need to install
# the update-systemd-resolved package (apt install update-systemd-resolved), then
# set the following to make sure systemd-resolved routes DNS requests correctly:
# clientConf: |
# script-security 2
# up /etc/openvpn/update-systemd-resolved
# up-restart
# down /etc/openvpn/update-systemd-resolved
# down-pre
# Enable istio support for openvpn connections
istio:
enabled: false
proxy:
port: 15001
iptablesExtra: []
# - -A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
# - -A FORWARD -m conntrack --ctstate NEW -d 10.240.0.0/255.255.0.0 -j ACCEPT
# - -A FORWARD -j REJECT
# Enable CCD support
ccd:
enabled: false
config: {}
# johndoe: "ifconfig-push 10.240.100.10 10.240.100.11"
# janedoe: "ifconfig-push 10.240.100.20 10.240.100.21"
nodeSelector: {}
tolerations: []
|
k8s-spot-termination-handler | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"k8s-spot-termination-handler.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"k8s-spot-termination-handler.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"k8s-spot-termination-handler.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"k8s-spot-termination-handler.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"k8s-spot-termination-handler.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"k8s-spot-termination-handler.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"k8s-spot-termination-handler.name\" . }}\n helm.sh/chart: {{ template \"k8s-spot-termination-handler.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nrules:\n # For draining nodes\n - apiGroups:\n - \"\"\n resources:\n - nodes\n verbs:\n - get\n - list\n - patch\n - apiGroups:\n - \"\"\n resources:\n - pods\n verbs:\n - get\n - list\n - apiGroups:\n - extensions\n resources:\n - replicasets\n - daemonsets\n verbs:\n - get\n - list\n - apiGroups:\n - apps\n resources:\n - statefulsets\n verbs:\n - get\n - list\n - apiGroups:\n - \"\"\n resources:\n - pods/eviction\n verbs:\n - create\n{{- end}}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"k8s-spot-termination-handler.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"k8s-spot-termination-handler.name\" . }}\n helm.sh/chart: {{ template \"k8s-spot-termination-handler.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"k8s-spot-termination-handler.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"k8s-spot-termination-handler.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace | quote }}\n{{- end}}\n",
"# daemonset.yaml\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: {{ template \"k8s-spot-termination-handler.fullname\" . }}\n namespace: {{ .Release.Namespace | quote }}\n labels:\n app.kubernetes.io/name: {{ template \"k8s-spot-termination-handler.name\" . }}\n helm.sh/chart: {{ template \"k8s-spot-termination-handler.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ template \"k8s-spot-termination-handler.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n {{- if .Values.updateStrategy }}\n updateStrategy:\n type: {{ .Values.updateStrategy }}\n {{- if and .Values.updateStrategy (eq .Values.updateStrategy \"RollingUpdate\") }}\n rollingUpdate:\n maxUnavailable: {{ default 1 .Values.maxUnavailable }}\n {{- end }}\n {{- end }}\n template:\n metadata:\n {{- if .Values.podAnnotations }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n labels:\n app.kubernetes.io/name: {{ template \"k8s-spot-termination-handler.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n spec:\n hostNetwork: {{ .Values.hostNetwork }}\n serviceAccountName: {{ template \"k8s-spot-termination-handler.serviceAccountName\" . }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.imagePullSecrets | indent 8 }}\n {{- end }}\n {{- if .Values.podSecurityContext }}\n securityContext:\n{{ toYaml .Values.podSecurityContext | indent 8 }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n {{- with .Values.verbose }}\n - name: VERBOSE\n value: {{ . | quote }}\n {{- end }}\n {{- with .Values.noticeUrl }}\n - name: NOTICE_URL\n value: {{ . | quote }}\n {{- end }}\n {{- if not .Values.enableLogspout }}\n - name: LOGSPOUT\n value: \"ignore\"\n {{- end }}\n {{- with .Values.slackUrl }}\n - name: SLACK_URL\n value: {{ . | quote }}\n {{- end }}\n {{- with .Values.detachAsg }}\n - name: DETACH_ASG\n value: {{ . | quote }}\n {{- end }}\n {{- with .Values.gracePeriod }}\n - name: GRACE_PERIOD\n value: {{ . | quote }}\n {{- end }}\n - name: POLL_INTERVAL\n value: {{ .Values.pollInterval | quote }}\n - name: CLUSTER\n value: {{ .Values.clusterName | quote }}\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n {{- if .Values.envFromSecret }}\n envFrom:\n - secretRef:\n name: {{ .Values.envFromSecret }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- if .Values.priorityClassName }}\n priorityClassName: {{ .Values.priorityClassName }}\n {{- end }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"k8s-spot-termination-handler.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace | quote }}\n labels:\n app.kubernetes.io/name: {{ template \"k8s-spot-termination-handler.name\" . }}\n helm.sh/chart: {{ template \"k8s-spot-termination-handler.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end }}\n"
] | # Default values for k8s-spot-termination-handler.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
image:
repository: kubeaws/kube-spot-termination-notice-handler
tag: 1.13.7-1
pullPolicy: IfNotPresent
## Optional array of imagePullSecrets containing private registry credentials
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# - name: secretName
# URL of EC2 spot instance termination notice endpoint
noticeUrl: http://169.254.169.254/latest/meta-data/spot/termination-time
# Poll the metadata every pollInterval seconds for termination events:
pollInterval: 5
# Set VERBOSE=1 to get more output
# verbose: 1
# Send notifications to a Slack webhook URL - replace with your own value and uncomment:
# slackUrl: https://hooks.slack.com/services/EXAMPLE123/EXAMPLE123/example1234567
# Set the cluster name to be reported in a Slack message
# clusterName: test
# Silence logspout by default - set to true to enable logs arriving in logspout
enableLogspout: false
# Trigger instance removal from AutoScaling Group on termination notice
detachAsg: false
# Grace period for node draining
gracePeriod: 120
## The name of a secret in the same Kubernetes namespace which contains values to be added to the environment
## This can be useful for auth tokens, etc
envFromSecret: ""
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 10m
# memory: 32Mi
# Add a priority class to the daemonset
priorityClassName: ""
nodeSelector: {}
# "node-role.kubernetes.io/spot-worker": "true"
tolerations: []
# - key: "dedicated"
# operator: "Equal"
# value: "gpu"
# effect: "NoSchedule"
affinity: {}
hostNetwork: true
# annotations to be added to pods
podAnnotations: {}
# If the spot handler was installed before Kubernetes version 1.6
# then you need to explicitly set the value below; otherwise
# you will have to manually cycle your pods every time you perform the update.
# Default value for Kubernetes v1.5 and before was "OnDelete".
updateStrategy: RollingUpdate
maxUnavailable: 1
podSecurityContext: {}
|
spotify-docker-gc | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"spotify-docker-gc.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"spotify-docker-gc.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"spotify-docker-gc.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# daemonset.yaml\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: {{ include \"spotify-docker-gc.name\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"spotify-docker-gc.name\" . }}\n helm.sh/chart: {{ include \"spotify-docker-gc.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n kubernetes.io/cluster-service: \"true\"\n addonmanager.kubernetes.io/mode: Reconcile\nspec:\n {{- if semverCompare \"^1.6-0\" .Capabilities.KubeVersion.GitVersion }}\n updateStrategy:\n type: RollingUpdate\n {{- end }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"spotify-docker-gc.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"spotify-docker-gc.name\" . }}\n helm.sh/chart: {{ include \"spotify-docker-gc.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n kubernetes.io/cluster-service: \"true\"\n spec:\n {{- if .Values.serviceAccount }}\n serviceAccountName: {{ .Values.serviceAccount }}\n {{- end }}\n {{- if .Values.imagePullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.imagePullSecrets | indent 8 }}\n {{- end }}\n containers:\n - name: {{ include \"spotify-docker-gc.name\" . }}\n image: \"{{ .Values.image.registry }}/{{ .Values.image.org }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command: [\"/bin/sh\"]\n args: [\"-c\", '\n {{- if .Values.exclude }}\n echo \"{{ .Values.exclude.images }}\" | tr \" \" \"\\n\" > /etc/docker-gc-exclude &&\n echo \"{{ .Values.exclude.containers }}\" | tr \" \" \"\\n\" > /etc/docker-gc-exclude-containers &&\n {{- end }}\n touch {{ .Values.cron.log }} &&\n echo \"{{ .Values.cron.schedule }} /docker-gc >> {{ .Values.cron.log }} 2>&1\" | crontab - &&\n crond -L {{ .Values.cron.log }} &&\n tail -f {{ .Values.cron.log }}' ]\n env:\n - name: GRACE_PERIOD_SECONDS\n value: \"{{ .Values.env.gracePeriodSeconds }}\"\n - name: DOCKER_API_VERSION\n value: \"{{ .Values.env.dockerAPIVersion }}\"\n volumeMounts:\n - name: docker-socket\n mountPath: /var/run/docker.sock\n {{- if .Values.resources }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n {{- end }}\n volumes:\n - name: docker-socket\n hostPath:\n path: /var/run/docker.sock\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n"
] | image:
registry: docker.io
org: spotify
repository: docker-gc
tag: latest
pullPolicy: "IfNotPresent"
cron:
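# standard five-field cron syntax; the default "0 0 * * *" runs docker-gc daily at midnight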
schedule: "0 0 * * *"
log: /var/log/crond.log
env:
# grace period in seconds before garbage collecting
gracePeriodSeconds: "0"
# docker (client) api version to use in container, to match node host (server) api version
# dockerAPIVersion: "1.23"
# List any image or container exclusions here
# exclude:
# images: |-
# spotify/cassandra:latest
# redis:.*
# 9681260c3ad5
# containers: |-
# mariadb-data
# inimitable_quokka
# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
#
# imagePullSecrets:
# - name: myRegistryKeySecretName
## Resource requirements for spotify-docker-gc container
## Ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
##
resources: {}
## Node tolerations for spotify-docker-gc scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
## Node labels for spotify-docker-gc pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
|
terracotta | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"terracotta.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"terracotta.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"terracotta.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"terracotta.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"terracotta.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# config.yaml\n{{- $root := . -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"terracotta.fullname\" . }}-configuration\n labels:\n app: {{ template \"terracotta.name\" . }}\n chart: {{ template \"terracotta.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ndata:\n tc-config.xml: |\n <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n <tc-config xmlns=\"http://www.terracotta.org/config\">\n <plugins>\n <config>\n <ohr:offheap-resources xmlns:ohr=\"http://www.terracotta.org/config/offheap-resource\">\n {{- range .Values.offheaps }}\n <ohr:resource name=\"{{ .name }}\" unit=\"{{ .unit }}\">{{ .size }}</ohr:resource>\n {{- end}}\n </ohr:offheap-resources>\n </config>\n </plugins>\n <servers>\n {{ range $i := until (int .Values.replicaCount) }}\n <server host=\"{{ template \"terracotta.fullname\" $root}}-{{ $i }}.{{ template \"terracotta.fullname\" $root}}\" name=\"{{ template \"terracotta.fullname\" $root}}-{{ $i }}\" bind=\"0.0.0.0\">\n <logs>stdout:</logs>\n <tsa-port bind=\"0.0.0.0\">9410</tsa-port>\n <tsa-group-port bind=\"0.0.0.0\">9430</tsa-group-port>\n </server>\n {{ end }}\n </servers>\n <failover-priority>\n <availability/>\n </failover-priority>\n </tc-config>\n",
"# role.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: {{ template \"terracotta.fullname\" . }}\n labels:\n app: {{ template \"terracotta.name\" . }}\n chart: {{ template \"terracotta.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n verbs:\n - get\n - list\n{{- end -}}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: {{ template \"terracotta.fullname\" . }}\n labels:\n app: {{ template \"terracotta.name\" . }}\n chart: {{ template \"terracotta.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"terracotta.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"terracotta.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{ end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"terracotta.fullname\" . }}\n labels:\n app: {{ template \"terracotta.name\" . }}\n chart: {{ template \"terracotta.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n annotations:\n # see https://github.com/kubernetes/kubernetes/issues/39363 , to have dns entries available immediately\n service.alpha.kubernetes.io/tolerate-unready-endpoints: \"{{ .Values.tolerateUnreadyEndpoints }}\"\nspec:\n type: {{ .Values.service.type }}\n {{- if .Values.service.clusterIP }}\n clusterIP: {{ .Values.service.clusterIP }}\n {{- end }}\n ports:\n - name: terracotta-port\n port: {{ .Values.service.terracottaPort }}\n - name: sync-port\n port: {{ .Values.service.syncPort }}\n selector:\n app: {{ template \"terracotta.name\" . }}\n release: \"{{ .Release.Name }}\"\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"terracotta.serviceAccountName\" . }}\n labels:\n app: {{ template \"terracotta.name\" . }}\n chart: {{ template \"terracotta.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- end -}}\n",
"# statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ template \"terracotta.fullname\" . }}\n labels:\n app: {{ template \"terracotta.name\" . }}\n chart: {{ template \"terracotta.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"terracotta.name\" . }}\n release: \"{{ .Release.Name }}\"\n serviceName: {{ template \"terracotta.fullname\" . }}\n template:\n metadata:\n labels:\n app: {{ template \"terracotta.name\" . }}\n release: \"{{ .Release.Name }}\"\n spec:\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n initContainers:\n # be careful with busybox versions : https://github.com/docker-library/busybox/issues/48\n - name: init-{{ template \"terracotta.fullname\" . }}\n image: busybox:1.28\n # check service name resolution works fine; if it can't resolve the service, a split brain could occur\n command: ['sh', '-c', 'until nslookup {{ include \"terracotta.fullname\" . }}; do echo \"waiting for {{ include \"terracotta.fullname\" . }} to resolve\"; sleep 2; done;']\n containers:\n - name: {{ template \"terracotta.fullname\" . }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n command: [\"bin/start-tc-server.sh\"]\n args: [\"-f\", \"/config/tc-config.xml\", \"-n\", \"$(POD_NAME)\"]\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n ports:\n - name: terracotta-port\n containerPort: 9410\n - name: sync-port\n containerPort: 9430\n volumeMounts:\n - name: config-volume\n mountPath: /config\n env:\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n volumes:\n - name: config-volume\n configMap:\n name: {{ template \"terracotta.fullname\" . }}-configuration\n"
] | # Terracotta image definitions are available at: https://github.com/Terracotta-OSS/docker
image:
repository: terracotta/terracotta-server-oss
tag: 5.6.0
pullPolicy: Always
website: "https://github.com/Terracotta-OSS/docker"
sampleEhcacheClientImage:
repository: terracotta/sample-ehcache-client
tag: 5.6.0
replicaCount: 2
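# each replica becomes a <server> entry in the tc-config.xml rendered by the chart's ConfigMap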
# specify as many offheap resources as you want, provided you have enough resources on the node
# and you have caching clients using those resources for their clustered tiers!
offheaps:
- name: offheap-1
unit: MB
size: 512
- name: offheap-2
unit: MB
size: 256
service:
type: ClusterIP
terracottaPort: 9410
syncPort: 9430
# "None" makes this a headless service; still useful for the StatefulSet that relies on it
clusterIP: "None"
tolerateUnreadyEndpoints: true
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
|
burrow | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"burrow.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"burrow.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"burrow.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nFormulate the how the seeds feed is populated.\n*/}}\n{{- define \"burrow.seeds\" -}}\n{{- if (and .Values.peer.ingress.enabled (not (eq (len .Values.peer.ingress.hosts) 0))) -}}\n{{- $host := index .Values.peer.ingress.hosts 0 -}}\n{{- range $index, $val := $.Values.validators -}}\n{{- $addr := $val.nodeAddress | lower -}}\n{{- $node := printf \"%03d\" $index -}}\ntcp://{{ $addr }}@{{ $node }}.{{ $host }}:{{ $.Values.config.Tendermint.ListenPort }},\n{{- end -}}\n{{- if not (eq (len .Values.chain.extraSeeds) 0) -}}\n{{- range .Values.chain.extraSeeds -}},{{ . }}{{- end -}}\n{{- end -}}\n{{- else -}}\n{{- range $index, $val := $.Values.validators -}}\n{{- $addr := $val.nodeAddress | lower -}}\n{{- $node := printf \"%03d\" $index -}}\ntcp://{{ $addr }}@{{ template \"burrow.fullname\" $ }}-peer-{{ $node }}:{{ $.Values.config.Tendermint.ListenPort }},\n{{- end -}}\n{{- if not (eq (len .Values.chain.extraSeeds) 0) -}}\n{{- range .Values.chain.extraSeeds -}},{{ . }}{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n",
"# _settings.yaml\n{{- define \"settings\" -}}\n{{- range $.Values.environment.secrets }}\n- name: {{ .name }}\n valueFrom:\n secretKeyRef:\n name: {{ .location }}\n key: {{ .key }}\n{{- end }}\n{{- range $key, $val := $.Values.environment.inline }}\n- name: {{ $key }}\n value: {{ $val | quote }}\n{{- end }}\n{{- end -}}\n",
"# configmap.yaml\n{{- $config := .Values.config }}\n{{- $pp := dict \"Tendermint\" (dict \"PersistentPeers\" (include \"burrow.seeds\" .)) }}\nkind: ConfigMap\napiVersion: v1\nmetadata:\n labels:\n app: {{ template \"burrow.name\" . }}\n chart: {{ template \"burrow.chart\" $ }}\n heritage: {{ $.Release.Service }}\n release: {{ $.Release.Name }}\n name: {{ template \"burrow.fullname\" . }}-config\ndata:\n burrow.json: |-\n{{ toJson (mergeOverwrite $config $pp) | indent 4 }}",
"# contracts.yaml\n{{- if .Values.contracts.enabled }}\n{{- $refDir := printf \"/ref\" }}\napiVersion: batch/v1\nkind: Job\nmetadata:\n name: {{ template \"burrow.fullname\" $ }}-contracts\n namespace: {{ $.Release.Namespace | quote }}\n labels:\n app: {{ template \"burrow.name\" $ }}\n chart: {{ template \"burrow.chart\" $ }}\n heritage: {{ $.Release.Service }}\n release: {{ $.Release.Name }}\n annotations:\n \"helm.sh/hook\": \"post-install\"\n \"helm.sh/hook-delete-policy\": before-hook-creation,hook-succeeded\nspec:\n template:\n spec:\n # we always want burrow & solc installed\n initContainers:\n - name: burrow\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n command: ['sh', '-c', 'cp /usr/local/bin/* /tmp']\n volumeMounts:\n - name: bin\n mountPath: /tmp\n containers:\n - name: contracts-deploy\n image: \"{{ .Values.contracts.image }}:{{ $.Values.contracts.tag }}\"\n imagePullPolicy: Always\n volumeMounts:\n - name: bin\n mountPath: /usr/local/bin/\n - mountPath: {{ $refDir }}\n name: ref-dir\n env:\n - name: CHAIN_URL_GRPC\n value: {{ template \"burrow.fullname\" $ }}-grpc:{{ .Values.config.RPC.GRPC.ListenPort }}\n{{- include \"settings\" . | indent 8 }}\n command: [\"/bin/sh\", \"-c\", \"{{ .Values.contracts.deploy }}\"]\n restartPolicy: Never\n volumes:\n - name: bin\n emptyDir: {}\n - name: ref-dir\n projected:\n sources:\n - configMap:\n name: {{ template \"burrow.fullname\" $ }}-config\n - configMap:\n name: {{ template \"burrow.fullname\" $ }}-genesis\n backoffLimit: 0\n{{- end }}",
"# deployments.yaml\n{{- $workDir := printf \"/work\" }}\n{{- $refDir := printf \"/ref\" }}\n{{- $keysDir := printf \"/keys\" }}\n{{- range $index, $val := $.Values.validators }}\n{{- $nodeNumber := printf \"%03d\" $index }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n labels:\n app: {{ template \"burrow.name\" $ }}\n chart: {{ template \"burrow.chart\" $ }}\n heritage: {{ $.Release.Service }}\n release: {{ $.Release.Name }}\n nodeNumber: {{ $nodeNumber | quote }}\n name: {{ template \"burrow.fullname\" $ }}-{{ $nodeNumber }}\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: {{ template \"burrow.name\" $ }}\n release: {{ $.Release.Name }}\n nodeNumber: {{ $nodeNumber | quote }}\n template:\n metadata:\n{{- if (or $.Values.podAnnotations $.Values.config.RPC.Metrics.Enabled) }}\n annotations:\n{{- if $.Values.config.RPC.Metrics.Enabled }}\n prometheus.io/scrape: \"true\"\n prometheus.io/port: {{ $.Values.config.RPC.Metrics.ListenPort | quote }}\n prometheus.io/path: {{ $.Values.config.RPC.Metrics.MetricsPath }}\n{{- end }}\n{{- if $.Values.podAnnotations }}\n{{ toYaml $.Values.podAnnotations | indent 8 }}\n{{- end }}\n{{- end }}\n labels:\n app: {{ template \"burrow.name\" $ }}\n release: {{ $.Release.Name }}\n nodeNumber: {{ $nodeNumber | quote }}\n{{- if $.Values.podLabels }}\n{{ toYaml $.Values.podLabels | indent 8 }}\n{{- end }}\n spec:\n initContainers:\n - name: init-keys\n image: busybox\n imagePullPolicy: IfNotPresent\n workingDir: {{ $keysDir }}\n volumeMounts:\n - name: keys-dir\n mountPath: {{ $keysDir }}\n - name: work-dir\n mountPath: {{ $workDir }}\n command:\n - 'sh'\n - '-xc'\n - |-\n mkdir -p {{ $workDir }}/.burrow/config && \\\n cp node_key.json {{ $workDir }}/.burrow/config/node_key.json && \\\n chmod 600 {{ $workDir }}/.burrow/config/node_key.json\n{{- if $.Values.chain.restore.enabled }}\n - name: retrieve\n image: appropriate/curl\n imagePullPolicy: {{ $.Values.image.pullPolicy }}\n workingDir: {{ $workDir }}\n command:\n - curl\n args:\n - -o\n - dumpFile\n - {{ $.Values.chain.restore.dumpURL }}\n volumeMounts:\n - name: work-dir\n mountPath: {{ $workDir }}\n - name: restore\n image: \"{{ $.Values.image.repository }}:{{ $.Values.image.tag }}\"\n imagePullPolicy: {{ $.Values.image.pullPolicy }}\n workingDir: {{ $workDir }}\n command:\n - burrow\n args:\n - restore\n - --config\n - \"{{ $refDir }}/burrow.json\"\n - --genesis\n - \"{{ $refDir }}/genesis.json\"\n - --silent\n - dumpFile\n - --address\n - {{ $val.address | quote }}\n - --moniker\n - {{ printf \"%s-validator-%s\" $.Values.organization $nodeNumber | quote }}\n volumeMounts:\n - mountPath: {{ $workDir }}\n name: work-dir\n - mountPath: {{ $refDir }}\n name: ref-dir\n{{- end }}\n containers:\n - name: node\n image: \"{{ $.Values.image.repository }}:{{ $.Values.image.tag }}\"\n imagePullPolicy: {{ $.Values.image.pullPolicy }}\n workingDir: {{ $workDir }}\n command:\n - burrow\n args:\n - start\n - --config\n - \"{{ $refDir }}/burrow.json\"\n - --genesis\n - \"{{ $refDir }}/genesis.json\"\n - --address\n - {{ $val.address | quote }}\n - --moniker\n - {{ printf \"%s-validator-%s\" $.Values.organization $nodeNumber | quote }}\n{{- if (and $.Values.peer.ingress.enabled (not (eq (len $.Values.peer.ingress.hosts) 0))) }}\n - --external-address\n - \"{{ $nodeNumber }}.{{ index $.Values.peer.ingress.hosts 0 }}:{{ $.Values.config.Tendermint.ListenPort }}\"\n{{- end }}\n{{- range $key, $value := $.Values.extraArgs }}\n - --{{ $key }}={{ $value }}\n{{- end }}\n env:\n{{- include \"settings\" $ | indent 10 
}}\n volumeMounts:\n - name: ref-dir\n mountPath: {{ $refDir }}\n - name: work-dir\n mountPath: {{ $workDir }}\n - name: keys-dir\n mountPath: {{ $keysDir }}/data\n - name: keys-dir-names\n mountPath: {{ $keysDir }}/names\n ports:\n - name: peer\n protocol: TCP\n containerPort: {{ $.Values.config.Tendermint.ListenPort }}\n{{- if $.Values.config.RPC.GRPC.Enabled }}\n - name: grpc\n protocol: TCP\n containerPort: {{ $.Values.config.RPC.GRPC.ListenPort }}\n{{- end }}\n{{- if $.Values.config.RPC.Info.Enabled }}\n - name: info\n protocol: TCP\n containerPort: {{ $.Values.config.RPC.Info.ListenPort }}\n{{- end }}\n{{- if $.Values.config.RPC.Metrics.Enabled }}\n - name: metrics\n protocol: TCP\n containerPort: {{ $.Values.config.RPC.Metrics.ListenPort }}\n{{- end }}\n{{- if not $.Values.chain.testing }}\n{{- if $.Values.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: {{ $.Values.livenessProbe.path }}\n port: info\n scheme: HTTP\n initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }}\n timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }}\n periodSeconds: {{ $.Values.livenessProbe.periodSeconds }}\n{{- end }}\n{{- if $.Values.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: {{ $.Values.readinessProbe.path }}\n port: info\n scheme: HTTP\n initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }}\n{{- end }}\n{{- end }}\n{{- if $.Values.resources }}\n resources:\n{{ toYaml $.Values.resources | indent 12 }}\n{{- end }}\n restartPolicy: Always\n volumes:\n - name: ref-dir\n projected:\n sources:\n - configMap:\n name: {{ template \"burrow.fullname\" $ }}-config\n - configMap:\n name: {{ template \"burrow.fullname\" $ }}-genesis\n - name: keys-dir\n projected:\n sources:\n - secret:\n name: {{ template \"burrow.fullname\" $ }}-keys-{{ $nodeNumber }}\n - name: keys-dir-names\n emptyDir: {}\n - name: work-dir\n{{- if $.Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"burrow.fullname\" $ }}-{{ $nodeNumber }}\n{{- else }}\n emptyDir: {}\n{{- end }}\n securityContext:\n fsGroup: 101\n runAsUser: 1000\n{{- if $.Values.affinity }}\n affinity:\n{{ toYaml $.Values.affinity | indent 8 }}\n{{- end }}\n{{- if $.Values.nodeSelector }}\n nodeSelector:\n{{ toYaml $.Values.nodeSelector | indent 8 }}\n{{- end }}\n{{- if $.Values.tolerations }}\n tolerations:\n{{ toYaml $.Values.tolerations | indent 8 }}\n{{- end }}\n{{- end }}\n",
"# ingress-grpc.yaml\n{{- if .Values.grpc.ingress.enabled -}}\n{{- $serviceName := printf \"%s-grpc\" (include \"burrow.fullname\" .) -}}\n{{- $servicePort := .Values.config.RPC.GRPC.ListenPort -}}\napiVersion: networking.k8s.io/v1beta1\nkind: Ingress\nmetadata:\n labels:\n app: {{ template \"burrow.name\" . }}\n chart: {{ template \"burrow.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"burrow.fullname\" . }}-grpc\n annotations:\n {{- range $key, $value := .Values.grpc.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range $host := .Values.grpc.ingress.hosts }}\n - host: {{ $host }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n {{- if .Values.grpc.ingress.tls }}\n tls:\n{{ toYaml .Values.grpc.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n",
"# ingress-info.yaml\n{{- if .Values.info.ingress.enabled -}}\n{{- $serviceName := printf \"%s-info\" (include \"burrow.fullname\" .) -}}\n{{- $servicePort := $.Values.config.info.ListenPort -}}\n{{- $pathLeader := .Values.info.ingress.pathLeader -}}\n{{- $partialIngress := .Values.info.ingress.partial -}}\napiVersion: networking.k8s.io/v1beta1\nkind: Ingress\nmetadata:\n labels:\n app: {{ template \"burrow.name\" . }}\n chart: {{ template \"burrow.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"burrow.fullname\" . }}-info\n annotations:\n{{ toYaml .Values.info.ingress.annotations | indent 4 }}\nspec:\n rules:\n{{- range $host := .Values.info.ingress.hosts }}\n - host: {{ $host }}\n http:\n paths:\n{{- if $partialIngress }}\n - path: {{ $pathLeader }}/account\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n - path: {{ $pathLeader }}/block\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n{{- else }}\n - path: {{ $pathLeader }}\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n{{- end -}}\n{{- end -}}\n{{- if .Values.info.ingress.tls }}\n tls:\n{{ toYaml .Values.info.ingress.tls | indent 4 }}\n{{- end -}}\n{{- end -}}\n",
"# pvc.yaml\n{{- if .Values.persistence.enabled }}\n{{- range $index, $val := $.Values.validators }}\n{{- $nodeNumber := printf \"%03d\" $index }}\n---\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"burrow.fullname\" $ }}-{{ $nodeNumber }}\n labels:\n app: {{ template \"burrow.name\" $ }}\n release: {{ $.Release.Name }}\n nodeNumber: {{ $nodeNumber | quote }}\n annotations:\n {{- range $key, $value := $.Values.persistence.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n accessModes:\n - {{ $.Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ $.Values.persistence.size | quote }}\n{{- if $.Values.persistence.storageClass }}\n{{- if (eq \"-\" $.Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ $.Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# service-grpc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: {{ template \"burrow.name\" . }}\n chart: {{ template \"burrow.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n name: {{ template \"burrow.fullname\" . }}-grpc\nspec:\n type: {{ .Values.grpc.service.type }}\n{{- if .Values.grpc.service.loadBalance }}\n sessionAffinity: ClientIP\n sessionAffinityConfig:\n clientIP:\n timeoutSeconds: 600\n{{- end }}\n ports:\n - name: grpc\n port: {{ $.Values.config.RPC.GRPC.ListenPort }}\n targetPort: grpc\n protocol: TCP\n selector:\n app: {{ template \"burrow.name\" . }}\n release: {{ .Release.Name }}\n{{- if not .Values.grpc.service.loadBalance }}\n nodeNumber: {{ .Values.grpc.service.node | quote }}\n{{- end }}\n",
"# service-info.yaml\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n app: {{ template \"burrow.name\" . }}\n chart: {{ template \"burrow.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n name: {{ template \"burrow.fullname\" . }}-info\nspec:\n type: {{ .Values.info.service.type }}\n{{- if .Values.info.service.loadBalance }}\n sessionAffinity: ClientIP\n{{- end }}\n ports:\n - name: info\n port: {{ $.Values.config.RPC.Info.ListenPort }}\n targetPort: info\n protocol: TCP\n selector:\n app: {{ template \"burrow.name\" . }}\n release: {{ .Release.Name }}\n{{- if not .Values.info.service.loadBalance }}\n nodeNumber: {{ .Values.info.service.node | quote }}\n{{- end }}\n",
"# service-peers.yaml\n{{- $peerIngress := and .Values.peer.ingress.enabled (not (eq (len .Values.peer.ingress.hosts) 0)) -}}\n{{- range $index, $val := $.Values.validators }}\n{{- $nodeNumber := printf \"%03d\" $index }}\n---\nkind: Service\napiVersion: v1\nmetadata:\n{{- if $peerIngress }}\n annotations:\n external-dns.alpha.kubernetes.io/hostname: \"{{ $nodeNumber }}.{{ index $.Values.peer.ingress.hosts 0 }}\"\n external-dns.alpha.kubernetes.io/ttl: \"120\"\n{{- end }}\n labels:\n app: {{ template \"burrow.name\" $ }}\n chart: {{ template \"burrow.chart\" $ }}\n release: {{ $.Release.Name }}\n heritage: {{ $.Release.Service }}\n nodeNumber: {{ $nodeNumber | quote }}\n name: {{ template \"burrow.fullname\" $ }}-peer-{{ $nodeNumber }}\nspec:\n{{- if $peerIngress }}\n type: LoadBalancer\n externalTrafficPolicy: Local\n{{- else }}\n type: {{ $.Values.peer.service.type }}\n{{- end }}\n ports:\n - name: peer\n port: {{ $.Values.config.Tendermint.ListenPort }}\n targetPort: peer\n protocol: TCP\n selector:\n app: {{ template \"burrow.name\" $ }}\n release: {{ $.Release.Name }}\n nodeNumber: {{ $nodeNumber | quote }}\n{{- end }}\n",
"# test-config.yaml\n{{- if .Values.chain.testing }}\nkind: ConfigMap\napiVersion: v1\nmetadata:\n labels:\n app: {{ template \"burrow.name\" . }}\n chart: {{ template \"burrow.chart\" $ }}\n heritage: {{ $.Release.Service }}\n release: {{ $.Release.Name }}\n name: {{ template \"burrow.fullname\" . }}-genesis\ndata:\n genesis.json: |\n {\"GenesisTime\":\"2018-12-20T09:43:49.505674605Z\",\"ChainName\":\"agreements.network\",\"Params\":{\"ProposalThreshold\":3},\"GlobalPermissions\":{\"Base\":{\"Perms\":\"send | call | createContract | createAccount | bond | name | proposal | input | batch | hasBase | hasRole\",\"SetBit\":\"root | send | call | createContract | createAccount | bond | name | proposal | input | batch | hasBase | setBase | unsetBase | setGlobal | hasRole | addRole | removeRole\"}},\"Accounts\":[{\"Address\":\"744630EA9A7CBD310AE7B8EDAFCBF94E54D23F37\",\"PublicKey\":{\"CurveType\":\"ed25519\",\"PublicKey\":\"D0FCF06BC69C9A046D7249CDDBE5CC287349C8EB7C160A58680D807CB849BC7A\"},\"Amount\":9999999999,\"Name\":\"Validator_0\",\"Permissions\":{\"Base\":{\"Perms\":\"bond\",\"SetBit\":\"bond\"}}},{\"Address\":\"2C1B7046183387E63C17898235D3C0FDE4943BC7\",\"PublicKey\":{\"CurveType\":\"ed25519\",\"PublicKey\":\"7630B56CD8CAB7E2181EA4ADB4288466F035A0F74710E1E8E7EE4E4101C43BF0\"},\"Amount\":9999999999,\"Name\":\"Validator_1\",\"Permissions\":{\"Base\":{\"Perms\":\"bond\",\"SetBit\":\"bond\"}}},{\"Address\":\"C5291CE95749A2DE1D992946B683280D75EDBE8C\",\"PublicKey\":{\"CurveType\":\"ed25519\",\"PublicKey\":\"8912F216635661071EF50019D5AF6CA4AF878BF5216AE2582AD39D7F644A3AAB\"},\"Amount\":9999999999,\"Name\":\"Validator_2\",\"Permissions\":{\"Base\":{\"Perms\":\"bond\",\"SetBit\":\"bond\"}}},{\"Address\":\"A5BCAF761B774A61FADA691AB40C4E9A20D82B7B\",\"PublicKey\":{\"CurveType\":\"ed25519\",\"PublicKey\":\"E005A1D989A98B6A7910DA06A3942158676C83A057C98A08A9B5285E8E960A8B\"},\"Amount\":9999999999,\"Name\":\"Validator_3\",\"Permissions\":{\"Base\":{\"Perms\":\"bond\",\"SetBit\":\"bond\"}}}],\"Validators\":[{\"Address\":\"744630EA9A7CBD310AE7B8EDAFCBF94E54D23F37\",\"PublicKey\":{\"CurveType\":\"ed25519\",\"PublicKey\":\"D0FCF06BC69C9A046D7249CDDBE5CC287349C8EB7C160A58680D807CB849BC7A\"},\"Amount\":9999999998,\"NodeAddress\":\"9367CCE15205DC38DA61F5B348AF2AFEED2FE77A\",\"Name\":\"Validator_0\",\"UnbondTo\":[{\"Address\":\"744630EA9A7CBD310AE7B8EDAFCBF94E54D23F37\",\"PublicKey\":{\"CurveType\":\"ed25519\",\"PublicKey\":\"D0FCF06BC69C9A046D7249CDDBE5CC287349C8EB7C160A58680D807CB849BC7A\"},\"Amount\":9999999998}]},{\"Address\":\"2C1B7046183387E63C17898235D3C0FDE4943BC7\",\"PublicKey\":{\"CurveType\":\"ed25519\",\"PublicKey\":\"7630B56CD8CAB7E2181EA4ADB4288466F035A0F74710E1E8E7EE4E4101C43BF0\"},\"Amount\":9999999998,\"NodeAddress\":\"5B624373E8EE692ACDAF408F5B8E0831E78FEC50\",\"Name\":\"Validator_1\",\"UnbondTo\":[{\"Address\":\"2C1B7046183387E63C17898235D3C0FDE4943BC7\",\"PublicKey\":{\"CurveType\":\"ed25519\",\"PublicKey\":\"7630B56CD8CAB7E2181EA4ADB4288466F035A0F74710E1E8E7EE4E4101C43BF0\"},\"Amount\":9999999998}]},{\"Address\":\"C5291CE95749A2DE1D992946B683280D75EDBE8C\",\"PublicKey\":{\"CurveType\":\"ed25519\",\"PublicKey\":\"8912F216635661071EF50019D5AF6CA4AF878BF5216AE2582AD39D7F644A3AAB\"},\"Amount\":9999999998,\"NodeAddress\":\"C13AEAC6523429A1ED244255D2BBAA7CB4AB7CB4\",\"Name\":\"Validator_2\",\"UnbondTo\":[{\"Address\":\"C5291CE95749A2DE1D992946B683280D75EDBE8C\",\"PublicKey\":{\"CurveType\":\"ed25519\",\"PublicKey\":\"8912F216635661071EF50019D5AF6CA4AF878BF5216AE2582AD39D7F644A3AAB\"},\"Amount\"
:9999999998}]},{\"Address\":\"A5BCAF761B774A61FADA691AB40C4E9A20D82B7B\",\"PublicKey\":{\"CurveType\":\"ed25519\",\"PublicKey\":\"E005A1D989A98B6A7910DA06A3942158676C83A057C98A08A9B5285E8E960A8B\"},\"Amount\":9999999998,\"NodeAddress\":\"A85AE5C27FEDEFA57F425B7762A1BB5CCA095E64\",\"Name\":\"Validator_3\",\"UnbondTo\":[{\"Address\":\"A5BCAF761B774A61FADA691AB40C4E9A20D82B7B\",\"PublicKey\":{\"CurveType\":\"ed25519\",\"PublicKey\":\"E005A1D989A98B6A7910DA06A3942158676C83A057C98A08A9B5285E8E960A8B\"},\"Amount\":9999999998}]}]}\n{{- end }}\n",
"# test-secret.yaml\n\n{{- $nodeKeys := dict }}\n{{- $_ := set $nodeKeys \"Validator_0\" \"eyJwcml2X2tleSI6eyJ0eXBlIjoidGVuZGVybWludC9Qcml2S2V5RWQyNTUxOSIsInZhbHVlIjoibmtUeEJlTm55b2UvR3VSZmVva0RBbWRsSlVaa0dFMWJUMU5nalV3N0lieEs3WEE4RHdybFByMW9YS0s1Nk9VNllOUHZDSXR6T1ZwQm9GSWNyQngvemc9PSJ9fQ==\" }}\n{{- $_ := set $nodeKeys \"Validator_1\" \"eyJwcml2X2tleSI6eyJ0eXBlIjoidGVuZGVybWludC9Qcml2S2V5RWQyNTUxOSIsInZhbHVlIjoiNTltUUZFbUdGVFpnOHQxa0M5SHljYVpIL3ViWjlEVkd5NHdGWUlOaFh5dEZVYkc4NlZvUFdwTE4rWGU3UGhLODZzODNTQm5WUGw4eitvUXp3RXV0Q2c9PSJ9fQ==\" }}\n{{- $_ := set $nodeKeys \"Validator_2\" \"eyJwcml2X2tleSI6eyJ0eXBlIjoidGVuZGVybWludC9Qcml2S2V5RWQyNTUxOSIsInZhbHVlIjoiTC9VQkx6eWd4Yk1udmRoM1ltTlJldTVFN2VFSUhUcDdkNGMzeG0ycnhHbUtrbHZidTNPZEZJaUtqRHBIZFQycUN3WDN3dkJzWU41MDRISHpFckEwR0E9PSJ9fQ==\" }}\n{{- $_ := set $nodeKeys \"Validator_3\" \"eyJwcml2X2tleSI6eyJ0eXBlIjoidGVuZGVybWludC9Qcml2S2V5RWQyNTUxOSIsInZhbHVlIjoiUUIvOUkycCtJQzRwY0pGZWNhY3Q3d2JjMlBsYmI0SjkwVkRlRll0MTIzcE0xWFZpUWg1dWo2K2dsUDhpd0JMNzlQQkdqa051QTRtTnRqOGV6b0djL3c9PSJ9fQ==\" }}\n\n{{- $privKeys := dict }}\n{{- $_ := set $privKeys \"Validator_0\" \"eyJDdXJ2ZVR5cGUiOiJlZDI1NTE5IiwiQWRkcmVzcyI6Ijc0NDYzMEVBOUE3Q0JEMzEwQUU3QjhFREFGQ0JGOTRFNTREMjNGMzciLCJQdWJsaWNLZXkiOiJEMEZDRjA2QkM2OUM5QTA0NkQ3MjQ5Q0REQkU1Q0MyODczNDlDOEVCN0MxNjBBNTg2ODBEODA3Q0I4NDlCQzdBIiwiQWRkcmVzc0hhc2giOiJnby1jcnlwdG8tMC41LjAiLCJQcml2YXRlS2V5Ijp7IkNyeXB0byI6Im5vbmUiLCJQbGFpbiI6IjY1REEzMzYxMjRBOTYzNzYwMDYzNjJFN0MzQjQ3MEZGNkQ3MTA1RDZFNkQwQTVCQTczQzkyRjY0NkU2NzAwOENEMEZDRjA2QkM2OUM5QTA0NkQ3MjQ5Q0REQkU1Q0MyODczNDlDOEVCN0MxNjBBNTg2ODBEODA3Q0I4NDlCQzdBIn19\" }}\n{{- $_ := set $privKeys \"Validator_1\" \"eyJDdXJ2ZVR5cGUiOiJlZDI1NTE5IiwiQWRkcmVzcyI6IjJDMUI3MDQ2MTgzMzg3RTYzQzE3ODk4MjM1RDNDMEZERTQ5NDNCQzciLCJQdWJsaWNLZXkiOiI3NjMwQjU2Q0Q4Q0FCN0UyMTgxRUE0QURCNDI4ODQ2NkYwMzVBMEY3NDcxMEUxRThFN0VFNEU0MTAxQzQzQkYwIiwiQWRkcmVzc0hhc2giOiJnby1jcnlwdG8tMC41LjAiLCJQcml2YXRlS2V5Ijp7IkNyeXB0byI6Im5vbmUiLCJQbGFpbiI6IkVFQUQyMkEyMDM1OUM2NTU2Q0I3OUY4NEVDMEUxNkQyNjYyQ0M2RTVERkIxMjY2MkI3MEI3MzcyRTQxMDcxNUI3NjMwQjU2Q0Q4Q0FCN0UyMTgxRUE0QURCNDI4ODQ2NkYwMzVBMEY3NDcxMEUxRThFN0VFNEU0MTAxQzQzQkYwIn19\" }}\n{{- $_ := set $privKeys \"Validator_2\" \"eyJDdXJ2ZVR5cGUiOiJlZDI1NTE5IiwiQWRkcmVzcyI6IkM1MjkxQ0U5NTc0OUEyREUxRDk5Mjk0NkI2ODMyODBENzVFREJFOEMiLCJQdWJsaWNLZXkiOiI4OTEyRjIxNjYzNTY2MTA3MUVGNTAwMTlENUFGNkNBNEFGODc4QkY1MjE2QUUyNTgyQUQzOUQ3RjY0NEEzQUFCIiwiQWRkcmVzc0hhc2giOiJnby1jcnlwdG8tMC41LjAiLCJQcml2YXRlS2V5Ijp7IkNyeXB0byI6Im5vbmUiLCJQbGFpbiI6IjIxMzlCQjU4MkE1MDQ1MTAyNjRDMzk0NDQ5ODVDRTREM0JEOTM5OEE0NjA2ODRCQUI1QTI4RjU4QzhFMjQ2QUY4OTEyRjIxNjYzNTY2MTA3MUVGNTAwMTlENUFGNkNBNEFGODc4QkY1MjE2QUUyNTgyQUQzOUQ3RjY0NEEzQUFCIn19\" }}\n{{- $_ := set $privKeys \"Validator_3\" \"eyJDdXJ2ZVR5cGUiOiJlZDI1NTE5IiwiQWRkcmVzcyI6IkE1QkNBRjc2MUI3NzRBNjFGQURBNjkxQUI0MEM0RTlBMjBEODJCN0IiLCJQdWJsaWNLZXkiOiJFMDA1QTFEOTg5QTk4QjZBNzkxMERBMDZBMzk0MjE1ODY3NkM4M0EwNTdDOThBMDhBOUI1Mjg1RThFOTYwQThCIiwiQWRkcmVzc0hhc2giOiJnby1jcnlwdG8tMC41LjAiLCJQcml2YXRlS2V5Ijp7IkNyeXB0byI6Im5vbmUiLCJQbGFpbiI6IkMyN0U3RjA2MDI0M0QwODg4NzYwNTgxMTgyMzYwNjFEMkZERjlCNjU4MkJCNUU0MTdGOEQ3MTVCRDc1RkU5RUFFMDA1QTFEOTg5QTk4QjZBNzkxMERBMDZBMzk0MjE1ODY3NkM4M0EwNTdDOThBMDhBOUI1Mjg1RThFOTYwQThCIn19\" }}\n\n{{- if .Values.chain.testing }}\n{{- range $index, $val := $.Values.validators }}\n{{- $nodeNumber := printf \"%03d\" $index }}\n---\napiVersion: v1\nkind: Secret\nmetadata:\n labels:\n app: {{ template \"burrow.name\" $ }}\n chart: {{ template \"burrow.chart\" $ }}\n heritage: {{ $.Release.Service }}\n release: {{ $.Release.Name }}\n nodeNumber: {{ $nodeNumber | quote }}\n 
name: {{ template \"burrow.fullname\" $ }}-keys-{{ $nodeNumber }}\ntype: Opaque\ndata:\n {{ printf \"%s.json\" $val.address }}: {{ index $privKeys ($val.name) }}\n node_key.json: {{ index $nodeKeys ($val.name) }}\n{{- end }}\n{{- end }}\n"
] | image:
repository: hyperledger/burrow
tag: 0.29.0
pullPolicy: IfNotPresent
chain:
logLevel: info
extraSeeds: []
testing: false
restore:
enabled: false
dumpURL: ""
config:
BurrowDir: ".burrow"
Tendermint:
Seeds: ""
SeedMode: false
ListenHost: "0.0.0.0"
ListenPort: "26656"
ExternalAddress: ""
Moniker: ""
Keys:
GRPCServiceEnabled: true
AllowBadFilePermissions: true
RemoteAddress: ""
KeysDirectory: "/keys"
RPC:
Info:
Enabled: true
ListenHost: "0.0.0.0"
ListenPort: "26658"
Profiler:
Enabled: false
ListenHost: "0.0.0.0"
ListenPort: "6060"
GRPC:
Enabled: true
ListenHost: "0.0.0.0"
ListenPort: "10997"
Metrics:
Enabled: true
ListenHost: "0.0.0.0"
ListenPort: "9102"
MetricsPath: "/metrics"
BlockSampleSize: 100
Logging:
ExcludeTrace: true
NonBlocking: true
RootSink:
Output:
OutputType: "stderr"
Format: "json"
validators:
- name: Validator_0
address: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
nodeAddress: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
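# the addresses above are placeholders; replace them with your validator's account
# address and tendermint node address before deploying a real network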
contracts:
# wait required to ensure chain readiness
enabled: false
image: ""
tag: ""
deploy: ""
extraArgs: {}
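# entries are rendered as extra --key=value flags on "burrow start", e.g. (hypothetical flag):
# extraArgs:
#   log-level: debug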
environment:
inline: {}
secrets: []
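# hypothetical example -- inline entries become plain env vars, secrets are wired via secretKeyRef
# (fields per the chart's _settings.yaml helper: name, location = secret name, key):
# environment:
#   inline:
#     DEPLOY_ENV: staging
#   secrets:
#     - name: API_TOKEN
#       location: my-burrow-secret
#       key: token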
organization: "user"
persistence:
enabled: true
size: 80Gi
storageClass: standard
accessMode: ReadWriteOnce
persistentVolumeReclaimPolicy: "Retain"
peer:
service:
type: ClusterIP
ingress:
enabled: false
hosts: []
grpc:
service:
type: ClusterIP
loadBalance: true
ingress:
enabled: false
hosts: []
annotations: {}
tls: {}
info:
service:
type: ClusterIP
loadBalance: true
ingress:
enabled: false
# a partial ingress exposes only
# the /account and /block paths outside the cluster
partial: false
pathLeader: "/"
annotations: {}
hosts: []
tls: {}
# resources:
# limits:
# cpu: 500m
# memory: 1Gi
# requests:
# cpu: 100m
# memory: 256Mi
livenessProbe:
enabled: true
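# block_seen_time_within makes /status report failure when burrow has not seen a new block within that window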
path: /status?block_seen_time_within=10m
initialDelaySeconds: 240
timeoutSeconds: 1
periodSeconds: 30
readinessProbe:
enabled: true
path: /status
initialDelaySeconds: 5
podAnnotations: {}
podLabels: {}
# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# Tolerations for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
|
openebs | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"openebs.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"openebs.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"openebs.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"openebs.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"openebs.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"openebs.fullname\" . }}\n labels:\n app: {{ template \"openebs.name\" . }}\n chart: {{ template \"openebs.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nrules:\n- apiGroups: [\"*\"]\n resources: [\"nodes\", \"nodes/proxy\"]\n verbs: [\"*\"]\n- apiGroups: [\"*\"]\n resources: [\"namespaces\", \"services\", \"pods\", \"pods/exec\", \"deployments\", \"deployments/finalizers\", \"replicationcontrollers\", \"replicasets\", \"events\", \"endpoints\", \"configmaps\", \"secrets\", \"jobs\", \"cronjobs\" ]\n verbs: [\"*\"]\n- apiGroups: [\"*\"]\n resources: [\"statefulsets\", \"daemonsets\"]\n verbs: [\"*\"]\n- apiGroups: [\"*\"]\n resources: [\"resourcequotas\", \"limitranges\"]\n verbs: [\"list\", \"watch\"]\n- apiGroups: [\"*\"]\n resources: [\"ingresses\", \"horizontalpodautoscalers\", \"verticalpodautoscalers\", \"poddisruptionbudgets\", \"certificatesigningrequests\"]\n verbs: [\"list\", \"watch\"]\n- apiGroups: [\"*\"]\n resources: [\"storageclasses\", \"persistentvolumeclaims\", \"persistentvolumes\"]\n verbs: [\"*\"]\n- apiGroups: [\"volumesnapshot.external-storage.k8s.io\"]\n resources: [\"volumesnapshots\", \"volumesnapshotdatas\"]\n verbs: [\"get\", \"list\", \"watch\", \"create\", \"update\", \"patch\", \"delete\"]\n- apiGroups: [\"apiextensions.k8s.io\"]\n resources: [\"customresourcedefinitions\"]\n verbs: [ \"get\", \"list\", \"create\", \"update\", \"delete\", \"patch\"]\n- apiGroups: [\"openebs.io\"]\n resources: [ \"*\"]\n verbs: [\"*\" ]\n- apiGroups: [\"cstor.openebs.io\"]\n resources: [ \"*\"]\n verbs: [\"*\" ]\n- apiGroups: [\"coordination.k8s.io\"]\n resources: [\"leases\"]\n verbs: [\"get\", \"watch\", \"list\", \"delete\", \"update\", \"create\"]\n- apiGroups: [\"admissionregistration.k8s.io\"]\n resources: [\"validatingwebhookconfigurations\", \"mutatingwebhookconfigurations\"]\n verbs: [\"get\", \"create\", \"list\", \"delete\", \"update\", \"patch\"]\n- nonResourceURLs: [\"/metrics\"]\n verbs: [\"get\"]\n{{- end }}\n",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"openebs.fullname\" . }}\n labels:\n app: {{ template \"openebs.name\" . }}\n chart: {{ template \"openebs.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"openebs.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"openebs.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}\n",
"# cm-node-disk-manager.yaml\n{{- if .Values.ndm.enabled }}\n# This is the node-disk-manager related config.\n# It can be used to customize the disks probes and filters\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"openebs.fullname\" . }}-ndm-config\n labels:\n app: {{ template \"openebs.name\" . }}\n chart: {{ template \"openebs.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: ndm-config\n openebs.io/component-name: ndm-config\ndata:\n # udev-probe is default or primary probe which should be enabled to run ndm\n # filterconfigs contains configs of filters - in the form of include\n # and exclude comma separated strings\n node-disk-manager.config: |\n probeconfigs:\n - key: udev-probe\n name: udev probe\n state: true\n - key: seachest-probe\n name: seachest probe\n state: {{ .Values.ndm.probes.enableSeachest }}\n - key: smart-probe\n name: smart probe\n state: true\n filterconfigs:\n - key: os-disk-exclude-filter\n name: os disk exclude filter\n state: {{ .Values.ndm.filters.enableOsDiskExcludeFilter }}\n exclude: \"/,/etc/hosts,/boot\"\n - key: vendor-filter\n name: vendor filter\n state: {{ .Values.ndm.filters.enableVendorFilter }}\n include: \"\"\n exclude: \"{{ .Values.ndm.filters.excludeVendors }}\"\n - key: path-filter\n name: path filter\n state: {{ .Values.ndm.filters.enablePathFilter }}\n include: \"{{ .Values.ndm.filters.includePaths }}\"\n exclude: \"{{ .Values.ndm.filters.excludePaths }}\"\n---\n{{- end }}\n",
"# daemonset-ndm.yaml\n{{- if .Values.ndm.enabled }}\napiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n name: {{ template \"openebs.fullname\" . }}-ndm\n labels:\n app: {{ template \"openebs.name\" . }}\n chart: {{ template \"openebs.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: ndm\n openebs.io/component-name: ndm\n openebs.io/version: {{ .Values.release.version }}\nspec:\n updateStrategy:\n type: \"RollingUpdate\"\n selector:\n matchLabels:\n app: {{ template \"openebs.name\" . }}\n release: {{ .Release.Name }}\n component: ndm\n template:\n metadata:\n labels:\n app: {{ template \"openebs.name\" . }}\n release: {{ .Release.Name }}\n component: ndm\n openebs.io/component-name: ndm\n name: openebs-ndm\n openebs.io/version: {{ .Values.release.version }}\n spec:\n serviceAccountName: {{ template \"openebs.serviceAccountName\" . }}\n hostNetwork: true\n containers:\n - name: {{ template \"openebs.name\" . }}-ndm\n image: \"{{ .Values.image.repository }}{{ .Values.ndm.image }}:{{ .Values.ndm.imageTag }}\"\n args:\n - -v=4\n{{- if .Values.featureGates.enabled }}\n{{- if .Values.featureGates.GPTBasedUUID.enabled }}\n - --feature-gates={{ .Values.featureGates.GPTBasedUUID.featureGateFlag }}\n{{- end}}\n{{- end}}\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n securityContext:\n privileged: true\n env:\n # namespace in which NDM is installed will be passed to NDM Daemonset\n # as environment variable\n - name: NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n # pass hostname as env variable using downward API to the NDM container\n - name: NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n{{- if .Values.ndm.sparse }}\n{{- if .Values.ndm.sparse.path }}\n # specify the directory where the sparse files need to be created.\n # if not specified, then sparse files will not be created.\n - name: SPARSE_FILE_DIR\n value: \"{{ .Values.ndm.sparse.path }}\"\n{{- end }}\n{{- if .Values.ndm.sparse.size }}\n # Size(bytes) of the sparse file to be created.\n - name: SPARSE_FILE_SIZE\n value: \"{{ .Values.ndm.sparse.size }}\"\n{{- end }}\n{{- if .Values.ndm.sparse.count }}\n # Specify the number of sparse files to be created\n - name: SPARSE_FILE_COUNT\n value: \"{{ .Values.ndm.sparse.count }}\"\n{{- end }}\n{{- end }}\n # Process name used for matching is limited to the 15 characters\n # present in the pgrep output.\n # So fullname can be used here with pgrep (cmd is < 15 chars).\n livenessProbe:\n exec:\n command:\n - pgrep\n - \"ndm\"\n initialDelaySeconds: {{ .Values.ndm.healthCheck.initialDelaySeconds }}\n periodSeconds: {{ .Values.ndm.healthCheck.periodSeconds }}\n volumeMounts:\n - name: config\n mountPath: /host/node-disk-manager.config\n subPath: node-disk-manager.config\n readOnly: true\n - name: udev\n mountPath: /run/udev\n - name: procmount\n mountPath: /host/proc\n readOnly: true\n - name: basepath\n mountPath: /var/openebs/ndm\n{{- if .Values.ndm.sparse }}\n{{- if .Values.ndm.sparse.path }}\n - name: sparsepath\n mountPath: {{ .Values.ndm.sparse.path }}\n{{- end }}\n{{- end }}\n volumes:\n - name: config\n configMap:\n name: {{ template \"openebs.fullname\" . 
}}-ndm-config\n - name: udev\n hostPath:\n path: /run/udev\n type: Directory\n # mount /proc (to access mount file of process 1 of host) inside container\n # to read mount-point of disks and partitions\n - name: procmount\n hostPath:\n path: /proc\n type: Directory\n - name: basepath\n hostPath:\n path: \"{{ .Values.varDirectoryPath.baseDir }}/ndm\"\n type: DirectoryOrCreate\n{{- if .Values.ndm.sparse }}\n{{- if .Values.ndm.sparse.path }}\n - name: sparsepath\n hostPath:\n path: {{ .Values.ndm.sparse.path }}\n{{- end }}\n{{- end }}\n # By default the node-disk-manager will be run on all kubernetes nodes\n # If you would like to limit this to only some nodes, say the nodes\n # that have storage attached, you could label those node and use\n # nodeSelector.\n #\n # e.g. label the storage nodes with - \"openebs.io/nodegroup\"=\"storage-node\"\n # kubectl label node <node-name> \"openebs.io/nodegroup\"=\"storage-node\"\n #nodeSelector:\n # \"openebs.io/nodegroup\": \"storage-node\"\n{{- if .Values.ndm.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.ndm.nodeSelector | indent 8 }}\n{{- end }}\n{{- if .Values.ndm.tolerations }}\n tolerations:\n{{ toYaml .Values.ndm.tolerations | indent 8 }}\n{{- end }}\n{{- end }}\n",
"# deployment-admission-server.yaml\n{{- if .Values.webhook.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"openebs.fullname\" . }}-admission-server\n labels:\n app: admission-webhook\n chart: {{ template \"openebs.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: admission-webhook\n openebs.io/component-name: admission-webhook\n openebs.io/version: {{ .Values.release.version }}\nspec:\n replicas: {{ .Values.webhook.replicas }}\n strategy:\n type: \"Recreate\"\n rollingUpdate: null\n selector:\n matchLabels:\n app: admission-webhook\n template:\n metadata:\n labels:\n app: admission-webhook\n name: admission-webhook\n release: {{ .Release.Name }}\n openebs.io/version: {{ .Values.release.version }}\n openebs.io/component-name: admission-webhook\n spec:\n{{- if .Values.webhook.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.webhook.nodeSelector | indent 8 }}\n{{- end }}\n{{- if .Values.webhook.tolerations }}\n tolerations:\n{{ toYaml .Values.webhook.tolerations | indent 8 }}\n{{- end }}\n{{- if .Values.webhook.affinity }}\n affinity:\n{{ toYaml .Values.webhook.affinity | indent 8 }}\n{{- end }}\n serviceAccountName: {{ template \"openebs.serviceAccountName\" . }}\n containers:\n - name: admission-webhook\n image: \"{{ .Values.image.repository }}{{ .Values.webhook.image }}:{{ .Values.webhook.imageTag }}\"\n imagePullPolicy: Always \n args:\n - -alsologtostderr\n - -v=2\n - 2>&1\n env:\n - name: OPENEBS_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n # Process name used for matching is limited to the 15 characters\n # present in the pgrep output.\n # So fullname can't be used here with pgrep (>15 chars).A regular expression\n # Anchor `^` : matches any string that starts with `admission-serve`\n # `.*`: matche any string that has `admission-serve` followed by zero or more char\n # that matches the entire command name has to specified.\n livenessProbe:\n exec:\n command:\n - sh\n - -c\n - test `pgrep -c \"^admission-serve.*\"` = 1\n initialDelaySeconds: {{ .Values.webhook.healthCheck.initialDelaySeconds }}\n periodSeconds: {{ .Values.webhook.healthCheck.periodSeconds }}\n{{- end }}\n",
"# deployment-local-provisioner.yaml\n{{- if .Values.localprovisioner.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"openebs.fullname\" . }}-localpv-provisioner\n labels:\n app: {{ template \"openebs.name\" . }}\n chart: {{ template \"openebs.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: localpv-provisioner\n openebs.io/component-name: openebs-localpv-provisioner\n openebs.io/version: {{ .Values.release.version }}\nspec:\n replicas: {{ .Values.localprovisioner.replicas }}\n strategy:\n type: \"Recreate\"\n rollingUpdate: null\n selector:\n matchLabels:\n app: {{ template \"openebs.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"openebs.name\" . }}\n release: {{ .Release.Name }}\n component: localpv-provisioner\n name: openebs-localpv-provisioner\n openebs.io/component-name: openebs-localpv-provisioner\n openebs.io/version: {{ .Values.release.version }}\n spec:\n serviceAccountName: {{ template \"openebs.serviceAccountName\" . }}\n containers:\n - name: {{ template \"openebs.name\" . }}-localpv-provisioner\n image: \"{{ .Values.image.repository }}{{ .Values.localprovisioner.image }}:{{ .Values.localprovisioner.imageTag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n # OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s\n # based on this address. This is ignored if empty.\n # This is supported for openebs provisioner version 0.5.2 onwards\n #- name: OPENEBS_IO_K8S_MASTER\n # value: \"http://10.128.0.12:8080\"\n # OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s\n # based on this config. This is ignored if empty.\n # This is supported for openebs provisioner version 0.5.2 onwards\n #- name: OPENEBS_IO_KUBE_CONFIG\n # value: \"/home/ubuntu/.kube/config\"\n # OPENEBS_NAMESPACE is the namespace that this provisioner will\n # lookup to find maya api service\n - name: OPENEBS_NAMESPACE\n value: \"{{ .Release.Namespace }}\"\n - name: NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # OPENEBS_SERVICE_ACCOUNT provides the service account of this pod as\n # environment variable\n - name: OPENEBS_SERVICE_ACCOUNT\n valueFrom:\n fieldRef:\n fieldPath: spec.serviceAccountName\n # OPENEBS_IO_BASE_PATH is the environment variable that provides the\n # default base path on the node where host-path PVs will be provisioned.\n - name: OPENEBS_IO_ENABLE_ANALYTICS\n value: \"{{ .Values.analytics.enabled }}\"\n - name: OPENEBS_IO_BASE_PATH\n value: \"{{ .Values.localprovisioner.basePath }}\"\n - name: OPENEBS_IO_HELPER_IMAGE\n value: \"{{ .Values.image.repository }}{{ .Values.helper.image }}:{{ .Values.helper.imageTag }}\"\n - name: OPENEBS_IO_INSTALLER_TYPE\n value: \"charts-helm\"\n # Process name used for matching is limited to the 15 characters\n # present in the pgrep output.\n # So fullname can't be used here with pgrep (>15 chars).A regular expression\n # that matches the entire command name has to specified.\n # Anchor `^` : matches any string that starts with `provisioner-loc`\n # `.*`: matches any string that has `provisioner-loc` followed by zero or more char\n livenessProbe:\n exec:\n command:\n - sh\n - -c\n - test `pgrep -c \"^provisioner-loc.*\"` = 1\n initialDelaySeconds: {{ .Values.localprovisioner.healthCheck.initialDelaySeconds }}\n periodSeconds: {{ .Values.localprovisioner.healthCheck.periodSeconds }}\n{{- if .Values.localprovisioner.nodeSelector }}\n nodeSelector:\n{{ toYaml 
.Values.localprovisioner.nodeSelector | indent 8 }}\n{{- end }}\n{{- if .Values.localprovisioner.tolerations }}\n tolerations:\n{{ toYaml .Values.localprovisioner.tolerations | indent 8 }}\n{{- end }}\n{{- if .Values.localprovisioner.affinity }}\n affinity:\n{{ toYaml .Values.localprovisioner.affinity | indent 8 }}\n{{- end }}\n{{- end }}\n",
"# deployment-maya-apiserver.yaml\n{{- if .Values.apiserver.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"openebs.fullname\" . }}-apiserver\n labels:\n app: {{ template \"openebs.name\" . }}\n chart: {{ template \"openebs.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: apiserver\n name: maya-apiserver\n openebs.io/component-name: maya-apiserver\n openebs.io/version: {{ .Values.release.version }}\nspec:\n replicas: {{ .Values.apiserver.replicas }}\n strategy:\n type: \"Recreate\"\n rollingUpdate: null\n selector:\n matchLabels:\n app: {{ template \"openebs.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"openebs.name\" . }}\n release: {{ .Release.Name }}\n component: apiserver\n name: maya-apiserver\n openebs.io/component-name: maya-apiserver\n openebs.io/version: {{ .Values.release.version }}\n spec:\n serviceAccountName: {{ template \"openebs.serviceAccountName\" . }}\n containers:\n - name: {{ template \"openebs.name\" . }}-apiserver\n image: \"{{ .Values.image.repository }}{{ .Values.apiserver.image }}:{{ .Values.apiserver.imageTag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - containerPort: {{ .Values.apiserver.ports.internalPort }}\n env:\n # OPENEBS_IO_KUBE_CONFIG enables maya api service to connect to K8s\n # based on this config. This is ignored if empty.\n # This is supported for maya api server version 0.5.2 onwards\n #- name: OPENEBS_IO_KUBE_CONFIG\n # value: \"/home/ubuntu/.kube/config\"\n # OPENEBS_IO_K8S_MASTER enables maya api service to connect to K8s\n # based on this address. This is ignored if empty.\n # This is supported for maya api server version 0.5.2 onwards\n #- name: OPENEBS_IO_K8S_MASTER\n # value: \"http://172.28.128.3:8080\"\n # OPENEBS_NAMESPACE provides the namespace of this deployment as an\n # environment variable\n - name: OPENEBS_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n # OPENEBS_SERVICE_ACCOUNT provides the service account of this pod as\n # environment variable\n - name: OPENEBS_SERVICE_ACCOUNT\n valueFrom:\n fieldRef:\n fieldPath: spec.serviceAccountName\n # OPENEBS_MAYA_POD_NAME provides the name of this pod as\n # environment variable\n - name: OPENEBS_MAYA_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n # If OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG is false then OpenEBS default\n # storageclass and storagepool will not be created.\n - name: OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG\n value: \"{{ .Values.defaultStorageConfig.enabled }}\"\n # OPENEBS_IO_INSTALL_DEFAULT_CSTOR_SPARSE_POOL decides whether default cstor sparse pool should be\n # configured as a part of openebs installation.\n # If \"true\" a default cstor sparse pool will be configured, if \"false\" it will not be configured.\n # This value takes effect only if OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG\n # is set to true\n - name: OPENEBS_IO_INSTALL_DEFAULT_CSTOR_SPARSE_POOL\n value: \"{{ .Values.apiserver.sparse.enabled }}\"\n # OPENEBS_IO_CSTOR_TARGET_DIR can be used to specify the hostpath\n # to be used for saving the shared content between the side cars\n # of cstor volume pod.\n # The default path used is /var/openebs/sparse\n - name: OPENEBS_IO_CSTOR_TARGET_DIR\n value: \"{{ .Values.ndm.sparse.path }}\"\n # OPENEBS_IO_CSTOR_POOL_SPARSE_DIR can be used to specify the hostpath\n # to be used for saving the shared content between the side cars\n # of cstor pool pod. 
This ENV is also used to indicate the location\n # of the sparse devices.\n # The default path used is /var/openebs/sparse\n - name: OPENEBS_IO_CSTOR_POOL_SPARSE_DIR\n value: \"{{ .Values.ndm.sparse.path }}\"\n # OPENEBS_IO_JIVA_POOL_DIR can be used to specify the hostpath\n # to be used for default Jiva StoragePool loaded by OpenEBS\n # The default path used is /var/openebs\n # This value takes effect only if OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG\n # is set to true\n - name: OPENEBS_IO_JIVA_POOL_DIR\n value: \"{{ .Values.jiva.defaultStoragePath }}\"\n # OPENEBS_IO_LOCALPV_HOSTPATH_DIR can be used to specify the hostpath\n # to be used for default openebs-hostpath storageclass loaded by OpenEBS\n # The default path used is /var/openebs/local\n # This value takes effect only if OPENEBS_IO_CREATE_DEFAULT_STORAGE_CONFIG\n # is set to true\n - name: OPENEBS_IO_LOCALPV_HOSTPATH_DIR\n value: \"{{ .Values.localprovisioner.basePath }}\"\n # OPENEBS_IO_BASE_DIR used by the OpenEBS to store debug information and\n # so forth that are generated in the course of running OpenEBS containers.\n - name: OPENEBS_IO_BASE_DIR\n value: \"{{ .Values.varDirectoryPath.baseDir }}\"\n - name: OPENEBS_IO_JIVA_CONTROLLER_IMAGE\n value: \"{{ .Values.image.repository }}{{ .Values.jiva.image }}:{{ .Values.jiva.imageTag }}\"\n - name: OPENEBS_IO_JIVA_REPLICA_IMAGE\n value: \"{{ .Values.image.repository }}{{ .Values.jiva.image }}:{{ .Values.jiva.imageTag }}\"\n - name: OPENEBS_IO_JIVA_REPLICA_COUNT\n value: \"{{ .Values.jiva.replicas }}\"\n - name: OPENEBS_IO_CSTOR_TARGET_IMAGE\n value: \"{{ .Values.image.repository }}{{ .Values.cstor.target.image }}:{{ .Values.cstor.target.imageTag }}\"\n - name: OPENEBS_IO_CSTOR_POOL_IMAGE\n value: \"{{ .Values.image.repository }}{{ .Values.cstor.pool.image }}:{{ .Values.cstor.pool.imageTag }}\"\n - name: OPENEBS_IO_CSTOR_POOL_MGMT_IMAGE\n value: \"{{ .Values.image.repository }}{{ .Values.cstor.poolMgmt.image }}:{{ .Values.cstor.poolMgmt.imageTag }}\"\n - name: OPENEBS_IO_CSTOR_VOLUME_MGMT_IMAGE\n value: \"{{ .Values.image.repository }}{{ .Values.cstor.volumeMgmt.image }}:{{ .Values.cstor.volumeMgmt.imageTag }}\"\n - name: OPENEBS_IO_VOLUME_MONITOR_IMAGE\n value: \"{{ .Values.image.repository }}{{ .Values.policies.monitoring.image }}:{{ .Values.policies.monitoring.imageTag }}\"\n - name: OPENEBS_IO_CSTOR_POOL_EXPORTER_IMAGE\n value: \"{{ .Values.image.repository }}{{ .Values.policies.monitoring.image }}:{{ .Values.policies.monitoring.imageTag }}\"\n - name: OPENEBS_IO_HELPER_IMAGE\n value: \"{{ .Values.image.repository }}{{ .Values.helper.image }}:{{ .Values.helper.imageTag }}\"\n # OPENEBS_IO_ENABLE_ANALYTICS if set to true sends anonymous usage\n # events to Google Analytics\n - name: OPENEBS_IO_ENABLE_ANALYTICS\n value: \"{{ .Values.analytics.enabled }}\"\n # OPENEBS_IO_ANALYTICS_PING_INTERVAL can be used to specify the duration (in hours)\n # for periodic ping events sent to Google Analytics. Default is 24 hours.\n - name: OPENEBS_IO_ANALYTICS_PING_INTERVAL\n value: \"{{ .Values.analytics.pingInterval }}\"\n - name: OPENEBS_IO_INSTALLER_TYPE\n value: \"charts-helm\"\n # OPENEBS_IO_INSTALL_CRD environment variable is used to enable/disable CRD installation\n # from Maya API server. 
By default the CRDs will be installed\n - name: OPENEBS_IO_INSTALL_CRD\n value: \"{{ .Values.crd.enableInstall }}\"\n livenessProbe:\n exec:\n command:\n - /usr/local/bin/mayactl\n - version\n initialDelaySeconds: {{ .Values.apiserver.healthCheck.initialDelaySeconds }}\n periodSeconds: {{ .Values.apiserver.healthCheck.periodSeconds }}\n{{- if .Values.apiserver.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.apiserver.nodeSelector | indent 8 }}\n{{- end }}\n{{- if .Values.apiserver.tolerations }}\n tolerations:\n{{ toYaml .Values.apiserver.tolerations | indent 8 }}\n{{- end }}\n{{- if .Values.apiserver.affinity }}\n affinity:\n{{ toYaml .Values.apiserver.affinity | indent 8 }}\n{{- end }}\n{{- end }}\n",
"# deployment-maya-provisioner.yaml\n{{- if .Values.provisioner.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"openebs.fullname\" . }}-provisioner\n labels:\n app: {{ template \"openebs.name\" . }}\n chart: {{ template \"openebs.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: provisioner\n name: openebs-provisioner\n openebs.io/component-name: openebs-provisioner\n openebs.io/version: {{ .Values.release.version }}\nspec:\n replicas: {{ .Values.provisioner.replicas }}\n strategy:\n type: \"Recreate\"\n rollingUpdate: null\n selector:\n matchLabels:\n app: {{ template \"openebs.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"openebs.name\" . }}\n release: {{ .Release.Name }}\n component: provisioner\n name: openebs-provisioner\n openebs.io/component-name: openebs-provisioner\n openebs.io/version: {{ .Values.release.version }}\n spec:\n serviceAccountName: {{ template \"openebs.serviceAccountName\" . }}\n containers:\n - name: {{ template \"openebs.name\" . }}-provisioner\n image: \"{{ .Values.image.repository }}{{ .Values.provisioner.image }}:{{ .Values.provisioner.imageTag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n # OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s\n # based on this address. This is ignored if empty.\n # This is supported for openebs provisioner version 0.5.2 onwards\n #- name: OPENEBS_IO_K8S_MASTER\n # value: \"http://10.128.0.12:8080\"\n # OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s\n # based on this config. This is ignored if empty.\n # This is supported for openebs provisioner version 0.5.2 onwards\n #- name: OPENEBS_IO_KUBE_CONFIG\n # value: \"/home/ubuntu/.kube/config\"\n # OPENEBS_NAMESPACE is the namespace that this provisioner will\n # lookup to find maya api service\n - name: OPENEBS_NAMESPACE\n value: \"{{ .Release.Namespace }}\"\n - name: NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # OPENEBS_MAYA_SERVICE_NAME provides the maya-apiserver K8s service name,\n # that provisioner should forward the volume create/delete requests.\n # If not present, \"maya-apiserver-service\" will be used for lookup.\n # This is supported for openebs provisioner version 0.5.3-RC1 onwards\n - name: OPENEBS_MAYA_SERVICE_NAME\n value: \"{{ template \"openebs.fullname\" . 
}}-apiservice\"\n # The following values will be set as annotations to the PV object.\n # Refer : https://github.com/openebs/external-storage/pull/15\n #- name: OPENEBS_MONITOR_URL\n # value: \"{{ .Values.provisioner.monitorUrl }}\"\n #- name: OPENEBS_MONITOR_VOLKEY\n # value: \"{{ .Values.provisioner.monitorVolumeKey }}\"\n #- name: MAYA_PORTAL_URL\n # value: \"{{ .Values.provisioner.mayaPortalUrl }}\"\n # Process name used for matching is limited to the 15 characters\n # present in the pgrep output.\n # So fullname can't be used here with pgrep (>15 chars).A regular expression\n # that matches the entire command name has to specified.\n # Anchor `^` : matches any string that starts with `openebs-provis`\n # `.*`: matches any string that has `openebs-provis` followed by zero or more char\n livenessProbe:\n exec:\n command:\n - sh\n - -c\n - test `pgrep -c \"^openebs-provisi.*\"` = 1\n initialDelaySeconds: {{ .Values.provisioner.healthCheck.initialDelaySeconds }}\n periodSeconds: {{ .Values.provisioner.healthCheck.periodSeconds }}\n{{- if .Values.provisioner.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.provisioner.nodeSelector | indent 8 }}\n{{- end }}\n{{- if .Values.provisioner.tolerations }}\n tolerations:\n{{ toYaml .Values.provisioner.tolerations | indent 8 }}\n{{- end }}\n{{- if .Values.provisioner.affinity }}\n affinity:\n{{ toYaml .Values.provisioner.affinity | indent 8 }}\n{{- end }}\n{{- end }}\n",
"# deployment-maya-snapshot-operator.yaml\n{{- if .Values.snapshotOperator.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"openebs.fullname\" . }}-snapshot-operator\n labels:\n app: {{ template \"openebs.name\" . }}\n chart: {{ template \"openebs.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: snapshot-operator\n openebs.io/component-name: openebs-snapshot-operator\n openebs.io/version: {{ .Values.release.version }}\nspec:\n replicas: {{ .Values.snapshotOperator.replicas }}\n selector:\n matchLabels:\n app: {{ template \"openebs.name\" . }}\n release: {{ .Release.Name }}\n strategy:\n type: \"Recreate\"\n rollingUpdate: null\n template:\n metadata:\n labels:\n app: {{ template \"openebs.name\" . }}\n release: {{ .Release.Name }}\n component: snapshot-operator\n name: openebs-snapshot-operator\n openebs.io/version: {{ .Values.release.version }}\n openebs.io/component-name: openebs-snapshot-operator\n spec:\n serviceAccountName: {{ template \"openebs.serviceAccountName\" . }}\n containers:\n - name: {{ template \"openebs.name\" . }}-snapshot-controller\n image: \"{{ .Values.image.repository }}{{ .Values.snapshotOperator.controller.image }}:{{ .Values.snapshotOperator.controller.imageTag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n # OPENEBS_IO_K8S_MASTER enables openebs snapshot controller to connect to K8s\n # based on this address. This is ignored if empty.\n # This is supported for openebs snapshot controller version 0.6-RC1 onwards\n #- name: OPENEBS_IO_K8S_MASTER\n # value: \"http://10.128.0.12:8080\"\n # OPENEBS_IO_KUBE_CONFIG enables openebs snapshot controller to connect to K8s\n # based on this config. This is ignored if empty.\n # This is supported for openebs snapshot controller version 0.6-RC1 onwards\n #- name: OPENEBS_IO_KUBE_CONFIG\n # value: \"/home/ubuntu/.kube/config\"\n # OPENEBS_NAMESPACE is the namespace that this snapshot controller will\n # lookup to find maya api service\n - name: OPENEBS_NAMESPACE\n value: \"{{ .Release.Namespace }}\"\n - name: NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # OPENEBS_MAYA_SERVICE_NAME provides the maya-apiserver K8s service name,\n # that snapshot controller should forward the volume snapshot requests.\n # If not present, \"maya-apiserver-service\" will be used for lookup.\n # This is supported for openebs snapshot controller version 0.6-RC1 onwards\n - name: OPENEBS_MAYA_SERVICE_NAME\n value: \"{{ template \"openebs.fullname\" . }}-apiservice\"\n # Process name used for matching is limited to the 15 characters\n # present in the pgrep output.\n # So fullname can't be used here with pgrep (>15 chars).A regular expression\n # that matches the entire command name has to specified.\n # Anchor `^` : matches any string that starts with `snapshot-contro`\n # `.*`: matches any string that has `snapshot-contro` followed by zero or more char\n livenessProbe:\n exec:\n command:\n - sh\n - -c\n - test `pgrep -c \"^snapshot-contro.*\"` = 1\n initialDelaySeconds: {{ .Values.snapshotOperator.healthCheck.initialDelaySeconds }}\n periodSeconds: {{ .Values.snapshotOperator.healthCheck.periodSeconds }}\n - name: {{ template \"openebs.name\" . 
}}-snapshot-provisioner\n image: \"{{ .Values.image.repository }}{{ .Values.snapshotOperator.provisioner.image }}:{{ .Values.snapshotOperator.provisioner.imageTag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n # OPENEBS_IO_K8S_MASTER enables openebs snapshot provisioner to connect to K8s\n # based on this address. This is ignored if empty.\n # This is supported for openebs snapshot provisioner version 0.6-RC1 onwards\n #- name: OPENEBS_IO_K8S_MASTER\n # value: \"http://10.128.0.12:8080\"\n # OPENEBS_IO_KUBE_CONFIG enables openebs snapshot provisioner to connect to K8s\n # based on this config. This is ignored if empty.\n # This is supported for openebs snapshot provisioner version 0.6-RC1 onwards\n #- name: OPENEBS_IO_KUBE_CONFIG\n # value: \"/home/ubuntu/.kube/config\"\n # OPENEBS_NAMESPACE is the namespace that this snapshot provisioner will\n # lookup to find maya api service\n - name: OPENEBS_NAMESPACE\n value: \"{{ .Release.Namespace }}\"\n - name: NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n # OPENEBS_MAYA_SERVICE_NAME provides the maya-apiserver K8s service name,\n # that snapshot provisioner should forward the volume snapshot PV requests.\n # If not present, \"maya-apiserver-service\" will be used for lookup.\n # This is supported for openebs snapshot provisioner version 0.6-RC1 onwards\n - name: OPENEBS_MAYA_SERVICE_NAME\n value: \"{{ template \"openebs.fullname\" . }}-apiservice\"\n # Process name used for matching is limited to the 15 characters\n # present in the pgrep output.\n # So fullname can't be used here with pgrep (>15 chars). A regular expression\n # that matches the entire command name has to be specified.\n # Anchor `^` : matches any string that starts with `snapshot-provis`\n # `.*`: matches any string that has `snapshot-provis` followed by zero or more char\n livenessProbe:\n exec:\n command:\n - sh\n - -c\n - test `pgrep -c \"^snapshot-provis.*\"` = 1\n initialDelaySeconds: {{ .Values.snapshotOperator.healthCheck.initialDelaySeconds }}\n periodSeconds: {{ .Values.snapshotOperator.healthCheck.periodSeconds }}\n{{- if .Values.snapshotOperator.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.snapshotOperator.nodeSelector | indent 8 }}\n{{- end }}\n{{- if .Values.snapshotOperator.tolerations }}\n tolerations:\n{{ toYaml .Values.snapshotOperator.tolerations | indent 8 }}\n{{- end }}\n{{- if .Values.snapshotOperator.affinity }}\n affinity:\n{{ toYaml .Values.snapshotOperator.affinity | indent 8 }}\n{{- end }}\n{{- end }}\n",
"# deployment-ndm-operator.yaml\n{{- if .Values.ndmOperator.enabled }}\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"openebs.fullname\" . }}-ndm-operator\n labels:\n app: {{ template \"openebs.name\" . }}\n chart: {{ template \"openebs.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n component: ndm-operator\n openebs.io/component-name: ndm-operator\n openebs.io/version: {{ .Values.release.version }}\n name: ndm-operator\nspec:\n replicas: {{ .Values.ndmOperator.replicas }}\n strategy:\n type: \"Recreate\"\n rollingUpdate: null\n selector:\n matchLabels:\n app: {{ template \"openebs.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"openebs.name\" . }}\n release: {{ .Release.Name }}\n component: ndm-operator\n name: ndm-operator\n openebs.io/component-name: ndm-operator\n openebs.io/version: {{ .Values.release.version }}\n spec:\n serviceAccountName: {{ template \"openebs.serviceAccountName\" . }}\n containers:\n - name: {{ template \"openebs.fullname\" . }}-ndm-operator\n image: \"{{ .Values.image.repository }}{{ .Values.ndmOperator.image }}:{{ .Values.ndmOperator.imageTag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n readinessProbe:\n exec:\n command:\n - stat\n - /tmp/operator-sdk-ready\n initialDelaySeconds: {{ .Values.ndmOperator.readinessCheck.initialDelaySeconds }}\n periodSeconds: {{ .Values.ndmOperator.readinessCheck.periodSeconds }}\n failureThreshold: {{ .Values.ndmOperator.readinessCheck.failureThreshold }}\n env:\n - name: WATCH_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: SERVICE_ACCOUNT\n valueFrom:\n fieldRef:\n fieldPath: spec.serviceAccountName\n - name: OPERATOR_NAME\n value: \"node-disk-operator\"\n - name: CLEANUP_JOB_IMAGE\n value: \"{{ .Values.image.repository }}{{ .Values.helper.image }}:{{ .Values.helper.imageTag }}\"\n # OPENEBS_IO_INSTALL_CRD environment variable is used to enable/disable CRD installation\n # from NDM Operator. By default the CRDs will be installed\n - name: OPENEBS_IO_INSTALL_CRD\n value: \"{{ .Values.crd.enableInstall }}\"\n # Process name used for matching is limited to the 15 characters\n # present in the pgrep output.\n # So fullname can be used here with pgrep (cmd is < 15 chars).\n livenessProbe:\n exec:\n command:\n - pgrep\n - \"ndo\"\n initialDelaySeconds: {{ .Values.ndmOperator.healthCheck.initialDelaySeconds }}\n periodSeconds: {{ .Values.ndmOperator.healthCheck.periodSeconds }}\n{{- if .Values.ndmOperator.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.ndmOperator.nodeSelector | indent 8 }}\n{{- end }}\n{{- if .Values.ndmOperator.tolerations }}\n tolerations:\n{{ toYaml .Values.ndmOperator.tolerations | indent 8 }}\n{{- end }}\n{{- end }}\n",
"# psp-clusterrole.yaml\n{{- if and .Values.rbac.create .Values.rbac.pspEnabled }}\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"openebs.fullname\" . }}-psp\n labels:\n app: {{ template \"openebs.name\" . }}\nrules:\n- apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames:\n - {{ template \"openebs.fullname\" . }}-psp\n{{- end }}\n",
"# psp-clusterrolebinding.yaml\n{{- if and .Values.rbac.create .Values.rbac.pspEnabled }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"openebs.fullname\" . }}-psp\n labels:\n app: {{ template \"openebs.name\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"openebs.fullname\" . }}-psp\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"openebs.serviceAccountName\" . }}\n namespace: {{ $.Release.Namespace }}\n{{- end }}\n\n",
"# psp.yaml\n{{- if and .Values.rbac.create .Values.rbac.pspEnabled }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"openebs.fullname\" . }}-psp\n namespace: {{ $.Release.Namespace }}\n labels:\n app: {{ template \"openebs.name\" . }}\nspec:\n privileged: true\n allowPrivilegeEscalation: true\n allowedCapabilities: ['*']\n volumes: ['*']\n hostNetwork: true\n hostPorts:\n - min: 0\n max: 65535\n hostIPC: true\n hostPID: true\n runAsUser:\n rule: 'RunAsAny'\n seLinux:\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'RunAsAny'\n fsGroup:\n rule: 'RunAsAny'\n{{- end }}\n",
"# service-maya-apiserver.yaml\n{{- if .Values.apiserver.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"openebs.fullname\" . }}-apiservice\n labels:\n app: {{ template \"openebs.name\" . }}\n chart: {{ template \"openebs.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n openebs.io/component-name: maya-apiserver-svc\nspec:\n ports:\n - name: api\n port: {{ .Values.apiserver.ports.externalPort }}\n targetPort: {{ .Values.apiserver.ports.internalPort }}\n protocol: TCP\n selector:\n app: {{ template \"openebs.name\" . }}\n release: {{ .Release.Name }}\n component: apiserver\n sessionAffinity: None\n{{- end }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"openebs.serviceAccountName\" . }}\n labels:\n app: {{ template \"openebs.name\" . }}\n chart: {{ template \"openebs.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- end }}\n"
] | # Default values for openebs.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
rbac:
# Specifies whether RBAC resources should be created
create: true
pspEnabled: false
serviceAccount:
create: true
name:
release:
# "openebs.io/version" label for control plane components
version: "1.11.0"
image:
pullPolicy: IfNotPresent
repository: ""
apiserver:
enabled: true
image: "openebs/m-apiserver"
imageTag: "1.11.0"
replicas: 1
ports:
externalPort: 5656
internalPort: 5656
sparse:
enabled: "false"
nodeSelector: {}
tolerations: []
affinity: {}
healthCheck:
initialDelaySeconds: 30
periodSeconds: 60
defaultStorageConfig:
enabled: "true"
# Directory used by OpenEBS to store debug information and other data
# generated in the course of running OpenEBS containers.
varDirectoryPath:
baseDir: "/var/openebs"
provisioner:
enabled: true
image: "openebs/openebs-k8s-provisioner"
imageTag: "1.11.0"
replicas: 1
nodeSelector: {}
tolerations: []
affinity: {}
healthCheck:
initialDelaySeconds: 30
periodSeconds: 60
localprovisioner:
enabled: true
image: "openebs/provisioner-localpv"
imageTag: "1.11.0"
replicas: 1
basePath: "/var/openebs/local"
nodeSelector: {}
tolerations: []
affinity: {}
healthCheck:
initialDelaySeconds: 30
periodSeconds: 60
snapshotOperator:
enabled: true
controller:
image: "openebs/snapshot-controller"
imageTag: "1.11.0"
provisioner:
image: "openebs/snapshot-provisioner"
imageTag: "1.11.0"
replicas: 1
upgradeStrategy: "Recreate"
nodeSelector: {}
tolerations: []
affinity: {}
healthCheck:
initialDelaySeconds: 30
periodSeconds: 60
ndm:
enabled: true
image: "openebs/node-disk-manager-amd64"
imageTag: "0.6.0"
sparse:
path: "/var/openebs/sparse"
size: "10737418240"
count: "0"
filters:
enableOsDiskExcludeFilter: true
enableVendorFilter: true
excludeVendors: "CLOUDBYT,OpenEBS"
enablePathFilter: true
includePaths: ""
excludePaths: "loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md,/dev/rbd"
probes:
enableSeachest: false
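  # Rendered into the node-disk-manager.config ConfigMap
  # (cm-node-disk-manager.yaml): the udev and smart probes are hardcoded to
  # enabled there, so only the seachest probe is configurable from values.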
nodeSelector: {}
tolerations: []
healthCheck:
initialDelaySeconds: 30
periodSeconds: 60
ndmOperator:
enabled: true
image: "openebs/node-disk-operator-amd64"
imageTag: "0.6.0"
replicas: 1
upgradeStrategy: Recreate
nodeSelector: {}
tolerations: []
healthCheck:
initialDelaySeconds: 30
periodSeconds: 60
readinessCheck:
initialDelaySeconds: 4
periodSeconds: 10
failureThreshold: 1
webhook:
enabled: true
image: "openebs/admission-server"
imageTag: "1.11.0"
failurePolicy: Ignore
replicas: 1
healthCheck:
initialDelaySeconds: 30
periodSeconds: 60
nodeSelector: {}
tolerations: []
affinity: {}
jiva:
image: "openebs/jiva"
imageTag: "1.11.0"
replicas: 3
defaultStoragePath: "/var/openebs"
cstor:
pool:
image: "openebs/cstor-pool"
imageTag: "1.11.0"
poolMgmt:
image: "openebs/cstor-pool-mgmt"
imageTag: "1.11.0"
target:
image: "openebs/cstor-istgt"
imageTag: "1.11.0"
volumeMgmt:
image: "openebs/cstor-volume-mgmt"
imageTag: "1.11.0"
helper:
image: "openebs/linux-utils"
imageTag: "1.11.0"
featureGates:
enabled: false
GPTBasedUUID:
enabled: false
featureGateFlag: "GPTBasedUUID"
crd:
enableInstall: true
policies:
monitoring:
enabled: true
image: "openebs/m-exporter"
imageTag: "1.11.0"
analytics:
enabled: true
# Specify the duration (e.g. "24h") after which a ping event is sent.
pingInterval: "24h"
|
sensu | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"sensu.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"sensu.redis.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"redis\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"sensu.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"sensu.fullname\" . }}\n labels:\n heritage: {{ .Release.Service | quote }}\n release: {{ .Release.Name | quote }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n replicas: {{ .Values.replicaCount }}\n template:\n metadata:\n labels:\n app: {{ template \"sensu.fullname\" . }}\n release: {{ .Release.Name | quote }}\n spec:\n containers:\n - name: server\n image: \"{{ .Values.image }}:{{ .Values.imageTag }}\"\n imagePullPolicy: {{ .Values.pullPolicy }}\n args:\n - server\n resources:\n{{ toYaml .Values.server.resources | indent 10 }}\n env:\n - name: API_HOST\n value: localhost\n - name: API_PORT\n value: '4567'\n - name: REDIS_HOST\n value: {{ template \"sensu.redis.fullname\" . }}\n - name: REDIS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"sensu.redis.fullname\" . }}\n key: redis-password\n - name: REDIS_DB\n value: {{ .Values.REDIS_DB | quote }}\n - name: REDIS_AUTO_RECONNECT\n value: {{ .Values.REDIS_AUTO_RECONNECT | quote }}\n - name: REDIS_RECONNECT_ON_ERROR\n value: {{ .Values.REDIS_RECONNECT_ON_ERROR | quote }}\n - name: REDIS_PORT\n value: {{ .Values.REDIS_PORT | quote }}\n - name: api\n image: \"{{ .Values.image }}:{{ .Values.imageTag }}\"\n imagePullPolicy: {{ .Values.pullPolicy }}\n args:\n - api\n resources:\n{{ toYaml .Values.api.resources | indent 10 }}\n env:\n - name: REDIS_HOST\n value: {{ template \"sensu.redis.fullname\" . }}\n - name: REDIS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"sensu.redis.fullname\" . }}\n key: redis-password\n - name: REDIS_DB\n value: {{ .Values.REDIS_DB | quote }}\n - name: REDIS_AUTO_RECONNECT\n value: {{ .Values.REDIS_AUTO_RECONNECT | quote }}\n - name: REDIS_RECONNECT_ON_ERROR\n value: {{ .Values.REDIS_RECONNECT_ON_ERROR | quote }}\n - name: REDIS_PORT\n value: {{ .Values.REDIS_PORT | quote }}\n ports:\n - containerPort: 4567\n readinessProbe:\n httpGet:\n path: /info\n port: 4567\n initialDelaySeconds: 30\n timeoutSeconds: 1\n livenessProbe:\n httpGet:\n path: /info\n port: 4567\n initialDelaySeconds: 30\n timeoutSeconds: 1\n\n\n\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"sensu.fullname\" . }}\n labels:\n heritage: {{ .Release.Service | quote }}\n release: {{ .Release.Name | quote }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n {{ if .Values.deis.routable }}\n router.deis.io/routable: \"true\"\n annotations:\n router.deis.io/domains: {{ .Values.deis.domains | quote }}\n {{ end }}\nspec:\n type: {{ .Values.serviceType }}\n ports:\n - port: {{ .Values.httpPort }}\n targetPort: 4567\n selector:\n app: {{ template \"sensu.fullname\" . }}\n"
] | # Docker image name
image: "sstarcher/sensu"
# Docker image tag
imageTag: "0.28"
# Image pull policy for the container
pullPolicy: "IfNotPresent"
# How many Sensu pods (each running the server and API containers) to run
replicaCount: 1
# How to publish the service http://kubernetes.io/docs/user-guide/services/#publishing-services---service-types
serviceType: ClusterIP
# Service port to expose Sensu on
httpPort: 4567
# If set to true, the service will be exposed via the Deis Router, if one is set up (https://github.com/deis/router)
deis:
routable: false
domains: sensu
# CPU and Memory limit and request for Sensu Server
server:
resources:
requests:
cpu: 100m
memory: 100Mi
# CPU and Memory limit and request for Sensu API
api:
resources:
requests:
cpu: 50m
memory: 100Mi
# Redis configuration
REDIS_PORT: 6379
REDIS_DB: 0
REDIS_AUTO_RECONNECT: true
REDIS_RECONNECT_ON_ERROR: true
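# The four REDIS_* values above are injected verbatim (quoted) into both the
# server and api containers in deployment.yaml. A hedged override sketch
# (hypothetical release name, assumes the Helm 2 CLI):
# helm install --name my-sensu stable/sensu --set REDIS_DB=1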
# Redis chart configuration
redis:
persistence:
enabled: false
|
osclass | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"osclass.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"osclass.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"osclass.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"osclass.mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nGet the user defined LoadBalancerIP for this release.\nNote, returns 127.0.0.1 if using ClusterIP.\n*/}}\n{{- define \"osclass.serviceIP\" -}}\n{{- if eq .Values.service.type \"ClusterIP\" -}}\n127.0.0.1\n{{- else -}}\n{{- .Values.service.loadBalancerIP | default \"\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGets the host to be used for this application.\nIf not using ClusterIP, or if a host or LoadBalancerIP is not defined, the value will be empty.\n*/}}\n{{- define \"osclass.host\" -}}\n{{- $host := index .Values (printf \"%sHost\" .Chart.Name) | default \"\" -}}\n{{- default (include \"osclass.serviceIP\" .) 
$host -}}\n{{- end -}}\n\n{{/*\nReturn the proper Osclass image name\n*/}}\n{{- define \"osclass.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the metrics image)\n*/}}\n{{- define \"osclass.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"osclass.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . 
}}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"osclass.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.osclass.torageClass -}}\n {{- if (eq \"-\" .Values.persistence.osclass.torageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.osclass.torageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.osclass.torageClass -}}\n {{- if (eq \"-\" .Values.persistence.osclass.torageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.osclass.torageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"osclass.deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\n{{- if include \"osclass.host\" . -}}\napiVersion: {{ template \"osclass.deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"osclass.fullname\" . }}\n labels:\n app: {{ template \"osclass.fullname\" . }}\n chart: {{ template \"osclass.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"osclass.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"osclass.fullname\" . }}\n chart: {{ template \"osclass.chart\" . }}\n release: \"{{ .Release.Name }}\"\n{{- if or .Values.podAnnotations .Values.metrics.enabled }}\n annotations:\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n {{- if .Values.metrics.podAnnotations }}\n{{ toYaml .Values.metrics.podAnnotations | indent 8 }}\n {{- end }}\n{{- end }}\n spec:\n{{- include \"osclass.imagePullSecrets\" . | indent 6 }}\n hostAliases:\n - ip: \"127.0.0.1\"\n hostnames:\n - \"status.localhost\"\n containers:\n - name: {{ template \"osclass.fullname\" . }}\n image: {{ template \"osclass.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n - name: ALLOW_EMPTY_PASSWORD\n value: {{ .Values.allowEmptyPassword | quote }}\n {{- if .Values.mariadb.enabled }}\n - name: MARIADB_HOST\n value: {{ template \"osclass.mariadb.fullname\" . }}\n - name: MARIADB_PORT_NUMBER\n value: \"3306\"\n - name: OSCLASS_DATABASE_NAME\n value: {{ .Values.mariadb.db.name | quote }}\n - name: OSCLASS_DATABASE_USER\n value: {{ .Values.mariadb.db.user | quote }}\n - name: OSCLASS_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"osclass.mariadb.fullname\" . }}\n key: mariadb-password\n {{- else }}\n - name: MARIADB_HOST\n value: {{ .Values.externalDatabase.host | quote }}\n - name: MARIADB_PORT_NUMBER\n value: {{ .Values.externalDatabase.port | quote }}\n - name: OSCLASS_DATABASE_NAME\n value: {{ .Values.externalDatabase.database | quote }}\n - name: OSCLASS_DATABASE_USER\n value: {{ .Values.externalDatabase.user | quote }}\n - name: OSCLASS_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n key: db-password\n {{- end }}\n {{- $port:=.Values.service.port | toString }}\n - name: OSCLASS_HOST\n value: \"{{ include \"osclass.host\" . }}{{- if ne $port \"80\" }}:{{ .Values.service.port }}{{ end }}\"\n - name: OSCLASS_USERNAME\n value: {{ default \"\" .Values.osclassUsername | quote }}\n - name: OSCLASS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"osclass.fullname\" . }}\n key: osclass-password\n - name: OSCLASS_EMAIL\n value: {{ default \"\" .Values.osclassEmail | quote }}\n - name: OSCLASS_WEB_TITLE\n value: {{ default \"\" .Values.osclassWebTitle | quote }}\n - name: OSCLASS_PING_ENGINES\n value: {{ default \"\" .Values.osclassPingEngines | quote }}\n - name: OSCLASS_SAVE_STATS\n value: {{ default \"\" .Values.osclassSaveStats | quote }}\n - name: SMTP_HOST\n value: {{ default \"\" .Values.smtpHost | quote }}\n - name: SMTP_PORT\n value: {{ default \"\" .Values.smtpPort | quote }}\n - name: SMTP_USER\n value: {{ default \"\" .Values.smtpUser | quote }}\n - name: SMTP_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"osclass.fullname\" . 
}}\n key: smtp-password\n - name: SMTP_PROTOCOL\n value: {{ default \"\" .Values.smtpProtocol | quote }}\n ports:\n - name: http\n containerPort: 80\n - name: https\n containerPort: 443\n livenessProbe:\n httpGet:\n path: /index.php\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"osclass.host\" . | quote }}\n initialDelaySeconds: 120\n timeoutSeconds: 120\n failureThreshold: 6\n readinessProbe:\n httpGet:\n path: /index.php\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"osclass.host\" . | quote }}\n initialDelaySeconds: 30\n timeoutSeconds: 120\n periodSeconds: 5\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n - name: osclass-data\n mountPath: /bitnami/osclass\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"osclass.metrics.image\" . }}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n command: [ '/bin/apache_exporter', '-scrape_uri', 'http://status.localhost:80/server-status/?auto']\n ports:\n - name: metrics\n containerPort: 9117\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 15\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 5\n timeoutSeconds: 1\n resources:\n {{ toYaml .Values.metrics.resources | indent 10 }}\n{{- end }}\n volumes:\n - name: osclass-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"osclass.fullname\" . }}-osclass\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n{{- end -}}\n",
"# externaldb-secrets.yaml\n{{- if not .Values.mariadb.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n labels:\n app: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n chart: {{ template \"osclass.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n db-password: {{ default \"\" .Values.externalDatabase.password | b64enc | quote }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"osclass.fullname\" . }}\n labels:\n app: \"{{ template \"osclass.fullname\" . }}\"\n chart: \"{{ template \"osclass.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n annotations:\n {{- if .Values.ingress.certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: {{ template \"osclass.fullname\" $ }}\n servicePort: http\n {{- end }}\n tls:\n {{- range .Values.ingress.hosts }}\n {{- if .tls }}\n - hosts:\n {{- if .tlsHosts }}\n {{- range $host := .tlsHosts }}\n - {{ $host }}\n {{- end }}\n {{- else }}\n - {{ .name }}\n {{- end }}\n secretName: {{ .tlsSecret }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# osclass-pvc.yaml\n{{- if .Values.persistence.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"osclass.fullname\" . }}-osclass\n labels:\n app: {{ template \"osclass.fullname\" . }}\n chart: {{ template \"osclass.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.osclass.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.osclass.size | quote }}\n {{ include \"osclass.storageClass\" . }}\n{{- end -}}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"osclass.fullname\" . }}\n labels:\n app: {{ template \"osclass.fullname\" . }}\n chart: {{ template \"osclass.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n {{ if .Values.osclassPassword }}\n osclass-password: {{ default \"\" .Values.osclassPassword | b64enc | quote }}\n {{ else }}\n osclass-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n smtp-password: {{ default \"\" .Values.smtpPassword | b64enc | quote }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"osclass.fullname\" . }}\n labels:\n app: {{ template \"osclass.fullname\" . }}\n chart: {{ template \"osclass.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.service.type }}\n {{- if (or (eq .Values.service.type \"LoadBalancer\") (eq .Values.service.type \"NodePort\")) }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}\n {{- end }}\n {{- if eq .Values.service.type \"LoadBalancer\" }}\n loadBalancerIP: {{ default \"\" .Values.service.loadBalancerIP }}\n {{- end }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n targetPort: http\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n - name: https\n port: {{ .Values.service.httpsPort }}\n targetPort: https\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.https)))}}\n nodePort: {{ .Values.service.nodePorts.https }}\n {{- end }}\n selector:\n app: {{ template \"osclass.fullname\" . }}\n"
] | ## Global Docker image parameters
## Please note that this will override the image parameters, including those of dependencies, that are configured to use the global value
## Currently available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami Osclass image version
## ref: https://hub.docker.com/r/bitnami/osclass/tags/
##
image:
registry: docker.io
repository: bitnami/osclass
tag: 3.7.4-debian-10-r24
## Specify an imagePullPolicy
## Defaults to 'Always' if the image tag is 'latest'; otherwise set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override osclass.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override osclass.fullname template
##
# fullnameOverride:
## Osclass host to create application URLs
## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration
##
# osclassHost:
## User of the application
## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration
##
osclassUsername: user
## Application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration
##
# osclassPassword:
## Admin email
## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration
##
osclassEmail: [email protected]
## Application title
## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration
osclassWebTitle: 'Sample Web Page'
## Allow site to appear in search engines
## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration
osclassPingEngines: 1
## Automatically send usage statistics and crash reports to Osclass
## ref: https://github.com/bitnami/bitnami-docker-osclass#configuration
##
osclassSaveStats: 1
## Set to `yes` to allow the container to be started with blank passwords
## ref: https://github.com/bitnami/bitnami-docker-osclass#environment-variables
allowEmptyPassword: "yes"
##
## External database configuration
##
externalDatabase:
## Database host
host:
## Database port
port: 3306
## Database user
user: bn_osclass
## Database password
password:
## Database name
database: bitnami_osclass
## SMTP mail delivery configuration
## ref: https://github.com/bitnami/bitnami-docker-osclass/#smtp-configuration
##
# smtpHost:
# smtpPort:
# smtpUser:
# smtpPassword:
# smtpProtocol:
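## Example SMTP block (illustrative values only; substitute your provider's
## settings, kept commented out so nothing changes by default):
# smtpHost: smtp.example.com
# smtpPort: 587
# smtpUser: [email protected]
# smtpPassword: my-smtp-password
# smtpProtocol: tls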
##
## MariaDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml
##
mariadb:
## Whether to deploy a MariaDB server to satisfy the application's database requirements. To use an external database, set this to false and configure the externalDatabase parameters
enabled: true
## Disable MariaDB replication
replication:
enabled: false
## Create a database and a database user
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run
##
db:
name: bitnami_osclass
user: bn_osclass
## If the password is not specified, MariaDB will generate a random password
##
# password:
## MariaDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run
##
# rootUser:
# password:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
master:
persistence:
enabled: true
## mariadb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Kubernetes configuration
## For minikube, set this to NodePort, elsewhere use LoadBalancer
##
service:
type: LoadBalancer
# HTTP Port
port: 80
# HTTPS Port
httpsPort: 443
## loadBalancerIP:
##
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
## https: <to set explicitly, choose port between 30000-32767>
nodePorts:
http: ""
https: ""
## Enable client source IP preservation
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
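## Example: expose the service on fixed node ports instead (illustrative
## values only, assuming service.type is switched to NodePort):
# service:
#   type: NodePort
#   nodePorts:
#     http: "30080"
#     https: "30443"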
## Configure the ingress resource that allows you to access the
## osclass installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- name: osclass.local
path: /
## Set this to true in order to enable TLS on the ingress record
tls: false
## Optionally specify the TLS hosts for the ingress record
## Useful when the Ingress controller supports www-redirection
## If not specified, the above host name will be used
# tlsHosts:
# - www.osclass.local
# - osclass.local
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: osclass.local-tls
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: osclass.local-tls
# key:
# certificate:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
osclass:
## osclass data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 512Mi
cpu: 300m
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.7.0-debian-10-r33
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter pod Annotation and Labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9117"
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
|
sealed-secrets | [
"# _helpers.tpl\n{{/*\nExpand to the namespace sealed-secrets installs into.\n*/}}\n{{- define \"sealed-secrets.namespace\" -}}\n{{- default .Release.Namespace .Values.namespace -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"sealed-secrets.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"sealed-secrets.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"sealed-secrets.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"sealed-secrets.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"sealed-secrets.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n",
"# cluster-role-binding.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"sealed-secrets.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: secrets-unsealer\nsubjects:\n - apiGroup: \"\"\n kind: ServiceAccount\n name: {{ template \"sealed-secrets.serviceAccountName\" . }}\n namespace: {{ template \"sealed-secrets.namespace\" . }}\n{{ end }}\n",
"# cluster-role.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: secrets-unsealer\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\nrules:\n - apiGroups:\n - bitnami.com\n resources:\n - sealedsecrets\n verbs:\n - get\n - list\n - watch\n - apiGroups:\n - bitnami.com\n resources:\n - sealedsecrets/status\n verbs:\n - update\n - apiGroups:\n - \"\"\n resources:\n - secrets\n verbs:\n - get\n - create\n - update\n - delete\n - apiGroups:\n - \"\"\n resources:\n - events\n verbs:\n - create\n - patch\n{{ end }}\n",
"# configmap-dashboards.yaml\n{{- if .Values.dashboards.create }}\n{{- $namespace := .Values.dashboards.namespace | default $.Release.Namespace }}\n{{- range $path, $_ := .Files.Glob \"dashboards/*.json\" }}\n{{- $filename := trimSuffix (ext $path) (base $path) }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"sealed-secrets.fullname\" $ }}-{{ $filename }}\n namespace: {{ $namespace }}\n labels:\n grafana_dashboard: \"1\"\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" $ }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" $ }}\n app.kubernetes.io/managed-by: {{ $.Release.Service }}\n app.kubernetes.io/instance: {{ $.Release.Name }}\n app.kubernetes.io/version: {{ $.Chart.AppVersion }}\n {{- if $.Values.dashboards.labels }}\n {{- toYaml $.Values.dashboards.labels | nindent 4 }}\n {{- end }}\ndata:\n {{ base $path }}: |-\n{{ $.Files.Get $path | indent 4 }}\n---\n{{- end }}\n{{- end }}\n",
"# deployment.yaml\n{{- if .Values.controller.create -}}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"sealed-secrets.fullname\" . }}\n namespace: {{ template \"sealed-secrets.namespace\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n annotations:\n {{- with .Values.podAnnotations }}\n {{- toYaml . | nindent 8 }}\n {{- end }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n {{- if .Values.podLabels }}\n{{ toYaml .Values.podLabels | indent 8 }}\n {{- end }}\n spec:\n serviceAccountName: {{ template \"sealed-secrets.serviceAccountName\" . }}\n {{- if .Values.priorityClassName }}\n priorityClassName: \"{{ .Values.priorityClassName }}\"\n {{- end }}\n containers:\n - name: {{ template \"sealed-secrets.fullname\" . }}\n command:\n - controller\n args:\n - \"--key-prefix\"\n - \"{{ .Values.secretName }}\"\n {{- range $value := .Values.commandArgs }}\n - {{ $value | quote }}\n {{- end }}\n image: {{ .Values.image.repository }}:{{ .Values.image.tag }}\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - containerPort: 8080\n name: http\n volumeMounts:\n - mountPath: /tmp\n name: tmp\n livenessProbe:\n httpGet:\n path: /healthz\n port: 8080\n readinessProbe:\n httpGet:\n path: /healthz\n port: 8080\n securityContext:\n readOnlyRootFilesystem: true\n {{- if .Values.securityContext.runAsUser }}\n runAsNonRoot: true\n runAsUser: {{ .Values.securityContext.runAsUser }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- if .Values.securityContext.fsGroup }}\n securityContext:\n fsGroup: {{ .Values.securityContext.fsGroup }}\n {{- end }}\n volumes:\n - name: tmp\n emptyDir: {}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := include \"sealed-secrets.fullname\" . -}}\n{{- $ingressPath := .Values.ingress.path -}}\n{{- if semverCompare \">=1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\napiVersion: networking.k8s.io/v1beta1\n{{- else -}}\napiVersion: extensions/v1beta1\n{{- end }}\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n namespace: {{ template \"sealed-secrets.namespace\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\n {{- with .Values.ingress.annotations }}\n annotations:\n {{- toYaml . | nindent 4 }}\n {{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . | quote }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: 8080\n {{- end }}\n{{- end }}\n",
"# networkpolicy.yaml\n{{- if .Values.networkPolicy -}}\napiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n name: {{ template \"sealed-secrets.fullname\" . }}\n namespace: {{ template \"sealed-secrets.namespace\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\nspec:\n podSelector:\n matchLabels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n ingress:\n - ports:\n - port: 8080\n{{- end -}}\n",
"# psp-clusterrole.yaml\n{{- if .Values.rbac.pspEnabled }}\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n name: {{ template \"sealed-secrets.fullname\" . }}-psp\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\nrules:\n- apiGroups: ['extensions']\n resources: ['podsecuritypolicies']\n verbs: ['use']\n resourceNames:\n - {{ template \"sealed-secrets.fullname\" . }}\n{{- end }}\n",
"# psp-clusterrolebinding.yaml\n{{- if .Values.rbac.pspEnabled }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"sealed-secrets.fullname\" . }}-psp\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"sealed-secrets.fullname\" . }}-psp\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"sealed-secrets.serviceAccountName\" . }}\n namespace: {{ template \"sealed-secrets.namespace\" . }}\n{{- end }}\n",
"# psp.yaml\n{{- if .Values.rbac.pspEnabled }}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"sealed-secrets.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\nspec:\n privileged: false\n allowPrivilegeEscalation: false\n allowedCapabilities: []\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n rule: 'RunAsAny'\n seLinux:\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'RunAsAny'\n fsGroup:\n rule: 'RunAsAny'\n{{- end }}\n",
"# role-binding.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: {{ template \"sealed-secrets.fullname\" . }}-key-admin\n namespace: {{ template \"sealed-secrets.namespace\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"sealed-secrets.fullname\" . }}-key-admin\nsubjects:\n - apiGroup: \"\"\n kind: ServiceAccount\n name: {{ template \"sealed-secrets.serviceAccountName\" . }}\n namespace: {{ template \"sealed-secrets.namespace\" . }}\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n name: {{ template \"sealed-secrets.fullname\" . }}-service-proxier\n namespace: {{ template \"sealed-secrets.namespace\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: Role\n name: {{ template \"sealed-secrets.fullname\" . }}-service-proxier\nsubjects:\n- apiGroup: rbac.authorization.k8s.io\n kind: Group\n name: system:authenticated\n{{ end }}\n",
"# role.yaml\n{{ if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: {{ template \"sealed-secrets.fullname\" . }}-key-admin\n namespace: {{ template \"sealed-secrets.namespace\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\nrules:\n - apiGroups:\n - \"\"\n resourceNames:\n - {{ .Values.secretName }}\n resources:\n - secrets\n verbs:\n - get\n - apiGroups:\n - \"\"\n resources:\n - secrets\n verbs:\n - create\n - list\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: {{ template \"sealed-secrets.fullname\" . }}-service-proxier\n namespace: {{ template \"sealed-secrets.namespace\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\nrules:\n- apiGroups:\n - \"\"\n resourceNames:\n - 'http:{{ template \"sealed-secrets.fullname\" . }}:'\n - {{ template \"sealed-secrets.fullname\" . }}\n resources:\n - services/proxy\n verbs:\n - create\n - get\n{{ end }}\n",
"# sealedsecret-crd.yaml\n{{ if .Values.crd.create }}\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n name: sealedsecrets.bitnami.com\n {{ if .Values.crd.keep }}\n annotations:\n \"helm.sh/resource-policy\": keep\n {{ end }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\nspec:\n group: bitnami.com\n names:\n kind: SealedSecret\n listKind: SealedSecretList\n plural: sealedsecrets\n singular: sealedsecret\n scope: Namespaced\n subresources:\n status: {}\n version: v1alpha1\n{{ end }}\n",
"# service-account.yaml\n{{ if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"sealed-secrets.serviceAccountName\" . }}\n namespace: {{ template \"sealed-secrets.namespace\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\n{{ end }}\n",
"# service.yaml\n{{- if .Values.controller.create -}}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"sealed-secrets.fullname\" . }}\n namespace: {{ template \"sealed-secrets.namespace\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\nspec:\n ports:\n - port: 8080\n targetPort: 8080\n selector:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n type: ClusterIP\n{{- end }}\n",
"# servicemonitor.yaml\n{{ if .Values.serviceMonitor.create }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ template \"sealed-secrets.fullname\" . }}\n {{- if .Values.serviceMonitor.namespace }}\n namespace: {{ .Values.serviceMonitor.namespace }}\n {{- end }}\n labels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\n {{- if .Values.serviceMonitor.labels }}\n {{- toYaml .Values.serviceMonitor.labels | nindent 4 }}\n {{- end }}\nspec:\n endpoints:\n - honorLabels: true\n targetPort: 8080\n {{- with .Values.serviceMonitor.interval }}\n interval: {{ . }}\n {{- end }}\n {{- with .Values.serviceMonitor.scrapeTimeout }}\n scrapeTimeout: {{ . }}\n {{- end }}\n namespaceSelector:\n matchNames:\n - {{ .Release.Namespace }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ template \"sealed-secrets.name\" . }}\n helm.sh/chart: {{ template \"sealed-secrets.chart\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/version: {{ .Chart.AppVersion }}\n{{- end }}\n"
] | image:
repository: quay.io/bitnami/sealed-secrets-controller
tag: v0.13.1
pullPolicy: IfNotPresent
resources: {}
nodeSelector: {}
tolerations: []
affinity: {}
controller:
# controller.create: `true` if Sealed Secrets controller should be created
create: true
# namespace: Namespace to deploy the controller.
namespace: ""
serviceAccount:
# serviceAccount.create: Whether to create a service account or not
create: true
# serviceAccount.name: The name of the service account to create or use
name: ""
rbac:
# rbac.create: `true` if rbac resources should be created
create: true
pspEnabled: false
# secretName: The name of the TLS secret containing the key used to encrypt secrets
secretName: "sealed-secrets-key"
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /v1/cert.pem
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
crd:
# crd.create: `true` if the crd resources should be created
create: true
# crd.keep: `true` if the sealed secret CRD should be kept when the chart is deleted
keep: true
networkPolicy: false
securityContext:
# securityContext.runAsUser defines under which user the operator Pod and its containers/processes run.
runAsUser: 1001
# securityContext.fsGroup defines the filesystem group
fsGroup: 65534
podAnnotations: {}
podLabels: {}
priorityClassName: ""
serviceMonitor:
# Enables ServiceMonitor creation for the Prometheus Operator
create: false
# How frequently Prometheus should scrape the ServiceMonitor
interval:
# Extra labels to apply to the sealed-secrets ServiceMonitor
labels:
# The namespace where the ServiceMonitor is deployed, defaults to the installation namespace
namespace:
# The timeout after which the scrape is ended
scrapeTimeout:
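# Example settings (illustrative values only; the 'release' label assumes a
# Prometheus Operator install that selects ServiceMonitors by that label):
# create: true
# interval: 30s
# scrapeTimeout: 10s
# labels:
#   release: prometheus-operator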
dashboards:
# If enabled, sealed-secrets will create a ConfigMap containing a JSON dashboard that Grafana's dashboard sidecar picks up
# See https://github.com/helm/charts/tree/master/stable/grafana#configuration - `sidecar.dashboards.enabled`
create: false
# Extra labels to apply to the dashboard configmaps
labels:
# The namespace where the dashboards are deployed, defaults to the installation namespace
namespace:
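# Example (illustrative values only):
# create: true
# labels:
#   team: platform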
|
gitlab-ee | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"gitlab-ee.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"gitlab-ee.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified postgresql name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"gitlab-ee.postgresql.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"postgresql\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified redis name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"gitlab-ee.redis.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"redis\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"gitlab-ee.fullname\" . }}\n labels:\n app: {{ template \"gitlab-ee.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ndata:\n ## This is used by GitLab Omnibus as the primary means of configuration.\n ## ref: https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/files/gitlab-config-template/gitlab.rb.template\n ##\n gitlab_omnibus_config: |\n external_url ENV['EXTERNAL_URL'];\n root_pass = ENV['GITLAB_ROOT_PASSWORD'];\n gitlab_rails['initial_root_password'] = root_pass unless root_pass.to_s == '';\n postgresql['enable'] = false;\n gitlab_rails['db_host'] = ENV['DB_HOST'];\n gitlab_rails['db_password'] = ENV['DB_PASSWORD'];\n gitlab_rails['db_username'] = ENV['DB_USER'];\n gitlab_rails['db_database'] = ENV['DB_DATABASE'];\n redis['enable'] = false;\n gitlab_rails['redis_host'] = ENV['REDIS_HOST'];\n gitlab_rails['redis_password'] = ENV['REDIS_PASSWORD'];\n unicorn['worker_processes'] = 2;\n manage_accounts['enable'] = true;\n manage_storage_directories['manage_etc'] = false;\n gitlab_shell['auth_file'] = '/gitlab-data/ssh/authorized_keys';\n git_data_dir '/gitlab-data/git-data';\n gitlab_rails['shared_path'] = '/gitlab-data/shared';\n gitlab_rails['uploads_directory'] = '/gitlab-data/uploads';\n gitlab_ci['builds_directory'] = '/gitlab-data/builds';\n",
"# data-pvc.yaml\n{{- if .Values.persistence.gitlabData.enabled }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"gitlab-ee.fullname\" . }}-data\n annotations:\n {{- if .Values.persistence.gitlabData.storageClass }}\n volume.beta.kubernetes.io/storage-class: {{ .Values.persistence.gitlabData.storageClass | quote }}\n {{- else }}\n volume.alpha.kubernetes.io/storage-class: default\n {{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.gitlabData.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.gitlabData.size | quote }}\n{{- end }}\n",
"# deployment.yaml\n{{- if default \"\" .Values.externalUrl }}\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n name: {{ template \"gitlab-ee.fullname\" . }}\n labels:\n app: {{ template \"gitlab-ee.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n replicas: 1\n template:\n metadata:\n labels:\n app: {{ template \"gitlab-ee.fullname\" . }}\n spec:\n containers:\n - name: {{ template \"gitlab-ee.fullname\" . }}\n image: {{ .Values.image }}\n imagePullPolicy: {{ default \"\" .Values.imagePullPolicy | quote }}\n env:\n ## General GitLab Configs\n ##\n # This is a free-form env var that GitLab Omnibus uses to configure\n # everything. We're passing this in from a configmap and pulling some\n # of the values from the env vars defined below. This is done to\n # avoid leaving secrets visible in kubectl.\n - name: GITLAB_OMNIBUS_CONFIG\n valueFrom:\n configMapKeyRef:\n name: {{ template \"gitlab-ee.fullname\" . }}\n key: gitlab_omnibus_config\n - name: GITLAB_ROOT_PASSWORD\n {{- if default \"\" .Values.gitlabRootPassword }}\n valueFrom:\n secretKeyRef:\n name: {{ template \"gitlab-ee.fullname\" . }}\n key: gitlab-root-password\n {{ end }}\n - name: EXTERNAL_URL\n value: {{ default \"\" .Values.externalUrl | quote }}\n ## DB configuration\n ##\n - name: DB_HOST\n value: {{ template \"gitlab-ee.postgresql.fullname\" . }}\n - name: DB_USER\n valueFrom:\n secretKeyRef:\n name: {{ template \"gitlab-ee.fullname\" . }}\n key: db-user\n - name: DB_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"gitlab-ee.fullname\" . }}\n key: db-password\n - name: DB_DATABASE\n value: {{ .Values.postgresql.postgresDatabase | quote }}\n ## Redis configuration\n ##\n - name: REDIS_HOST\n value: {{ template \"gitlab-ee.redis.fullname\" . }}\n - name: REDIS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"gitlab-ee.fullname\" . }}\n key: redis-password\n ports:\n - name: ssh\n containerPort: 22\n - name: http\n containerPort: 80\n - name: https\n containerPort: 443\n livenessProbe:\n httpGet:\n path: /help\n port: http\n # This pod takes a very long time to start up. Be cautious when\n # lowering this value to avoid Pod death during startup.\n initialDelaySeconds: 200\n timeoutSeconds: 1\n periodSeconds: 10\n successThreshold: 1\n failureThreshold: 10\n readinessProbe:\n httpGet:\n path: /help\n port: http\n initialDelaySeconds: 30\n timeoutSeconds: 1\n periodSeconds: 10\n successThreshold: 1\n failureThreshold: 3\n volumeMounts:\n - name: gitlab-etc\n mountPath: /etc/gitlab\n - name: gitlab-data\n mountPath: /gitlab-data\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumes:\n - name: gitlab-etc\n {{- if .Values.persistence.gitlabEtc.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"gitlab-ee.fullname\" . }}-etc\n {{- else }}\n emptyDir: {}\n {{- end }}\n - name: gitlab-data\n {{- if .Values.persistence.gitlabData.enabled }}\n persistentVolumeClaim:\n claimName: {{ template \"gitlab-ee.fullname\" . }}-data\n {{- else }}\n emptyDir: {}\n {{- end }}\n{{ else }}\n{{ end }}\n",
"# etc-pvc.yaml\n{{- if .Values.persistence.gitlabEtc.enabled }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"gitlab-ee.fullname\" . }}-etc\n annotations:\n {{- if .Values.persistence.gitlabEtc.storageClass }}\n volume.beta.kubernetes.io/storage-class: {{ .Values.persistence.gitlabEtc.storageClass | quote }}\n {{- else }}\n volume.alpha.kubernetes.io/storage-class: default\n {{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.gitlabEtc.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.gitlabEtc.size | quote }}\n{{- end }}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"gitlab-ee.fullname\" . }}\n labels:\n app: {{ template \"gitlab-ee.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n {{- if default \"\" .Values.gitlabRootPassword }}\n # Defaulting to a non-sensical value to silence b64enc warning. We'll never\n # actually use this default due to the if statement.\n gitlab-root-password: {{ default \"ignore\" .Values.gitlabRootPassword | b64enc | quote }}\n {{ end }}\n db-user: {{ .Values.postgresql.postgresUser | b64enc | quote }}\n db-password: {{ .Values.postgresql.postgresPassword | b64enc | quote }}\n redis-password: {{ .Values.redis.redisPassword | b64enc | quote }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"gitlab-ee.fullname\" . }}\n labels:\n app: {{ template \"gitlab-ee.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.serviceType }}\n ports:\n - name: ssh\n port: {{ .Values.sshPort | int }}\n targetPort: ssh\n - name: http\n port: {{ .Values.httpPort | int }}\n targetPort: http\n - name: https\n port: {{ .Values.httpsPort | int }}\n targetPort: https\n selector:\n app: {{ template \"gitlab-ee.fullname\" . }}\n"
] | ## GitLab EE image
## ref: https://hub.docker.com/r/gitlab/gitlab-ee/tags/
##
image: gitlab/gitlab-ee:9.4.1-ee.0
## Specify an imagePullPolicy
## Defaults to 'Always' if imageTag is 'latest'; otherwise set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
# imagePullPolicy:
## The URL (with protocol) that your users will use to reach the install.
## ref: https://docs.gitlab.com/omnibus/settings/configuration.html#configuring-the-external-url-for-gitlab
##
# externalUrl: http://your-domain.com/
## Sets the initial root (admin) password. If not set, you'll be
## prompted to choose one when you first visit your install.
##
# gitlabRootPassword: ""
## For minikube, set this to NodePort, elsewhere use LoadBalancer
## ref: http://kubernetes.io/docs/user-guide/services/#publishing-services---service-types
##
serviceType: LoadBalancer
## Configure external service ports
## ref: http://kubernetes.io/docs/user-guide/services/
sshPort: 22
httpPort: 80
httpsPort: 443
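## Example for minikube (illustrative; switches the service to NodePort):
# serviceType: NodePort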
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
## GitLab requires a good deal of resources. We have split out Postgres and
## Redis, which helps somewhat. Refer to the guidelines for larger installs.
## ref: https://docs.gitlab.com/ee/install/requirements.html#hardware-requirements
requests:
memory: 1Gi
cpu: 500m
limits:
memory: 2Gi
cpu: 1
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
## ref: https://docs.gitlab.com/ee/install/requirements.html#storage
##
persistence:
## This volume persists generated configuration files, keys, and certs.
##
gitlabEtc:
enabled: true
size: 1Gi
## If defined, volume.beta.kubernetes.io/storage-class: <storageClass>
## Default: volume.alpha.kubernetes.io/storage-class: default
##
# storageClass:
accessMode: ReadWriteOnce
## This volume is used to store git data and other project files.
## ref: https://docs.gitlab.com/omnibus/settings/configuration.html#storing-git-data-in-an-alternative-directory
##
gitlabData:
enabled: true
size: 10Gi
## If defined, volume.beta.kubernetes.io/storage-class: <storageClass>
## Default: volume.alpha.kubernetes.io/storage-class: default
##
# storageClass:
accessMode: ReadWriteOnce
## Configuration values for the postgresql dependency.
## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md
##
postgresql:
# 9.6 is the newest supported version for the GitLab container
imageTag: "9.6"
cpu: 1000m
memory: 1Gi
postgresUser: gitlab
postgresPassword: gitlab
postgresDatabase: gitlab
persistence:
size: 10Gi
## Configuration values for the redis dependency.
## ref: https://github.com/kubernetes/charts/blob/master/stable/redis/README.md
##
redis:
redisPassword: "gitlab"
resources:
requests:
memory: 1Gi
persistence:
size: 10Gi
|
weave-scope | [
"# _helpers.tpl\n{{/* Helm standard labels */}}\n{{- define \"weave-scope.helm_std_labels\" }}\nchart: {{ .Chart.Name }}-{{ .Chart.Version }}\nheritage: {{ .Release.Service }}\nrelease: {{ .Release.Name }}\napp: {{ template \"toplevel.name\" . }}\n{{- end }}\n\n{{/* Weave Scope default annotations */}}\n{{- define \"weave-scope.annotations\" }}\ncloud.weave.works/launcher-info: |-\n {\n \"server-version\": \"master-4fe8efe\",\n \"original-request\": {\n \"url\": \"/k8s/v1.7/scope.yaml\"\n },\n \"email-address\": \"[email protected]\",\n \"source-app\": \"weave-scope\",\n \"weave-cloud-component\": \"scope\"\n }\n{{- end }}\n\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"weave-scope.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nExpand the name of the top-level chart.\n*/}}\n{{- define \"toplevel.name\" -}}\n{{- default (.Template.BasePath | split \"/\" )._0 .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name. We truncate at 63 chars.\n*/}}\n{{- define \"weave-scope.fullname\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a fully qualified name that always uses the name of the top-level chart.\n*/}}\n{{- define \"toplevel.fullname\" -}}\n{{- $name := default (.Template.BasePath | split \"/\" )._0 .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# clusterrole.yaml\n{{- if .Values.enabled -}}\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n labels:\n {{- include \"weave-scope.helm_std_labels\" . | indent 4 }}\n component: agent\n name: {{ template \"weave-scope-agent.serviceAccountName\" . }} \n annotations:\n {{- include \"weave-scope.annotations\" . | indent 4 }}\nrules:\n - apiGroups:\n - '*'\n resources:\n - '*'\n verbs:\n - '*'\n - nonResourceURLs:\n - '*'\n verbs:\n - '*'\n{{- end }}\n{{- end -}}\n",
"# clusterrolebinding.yaml\n{{- if .Values.enabled -}}\n{{- if .Values.rbac.create }}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n {{- include \"weave-scope.helm_std_labels\" . | indent 4 }}\n component: agent\n name: {{ include \"toplevel.fullname\" . }}\n annotations:\n {{- include \"weave-scope.annotations\" . | indent 4 }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"weave-scope-agent.serviceAccountName\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"weave-scope-agent.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}\n{{- end -}}\n",
"# daemonset.yaml\n{{- if .Values.enabled -}}\napiVersion: {{ template \"daemonset.apiVersion\" . }}\nkind: DaemonSet\nmetadata:\n labels:\n {{- include \"weave-scope-agent.helm_std_labels\" . | indent 4 }}\n component: agent\n name: {{ template \"weave-scope-agent.fullname\" . }}\n annotations:\n {{- include \"weave-scope-agent.annotations\" . | indent 4 }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"toplevel.name\" . }}\n release: {{ .Release.Name }}\n component: agent\n updateStrategy:\n type: RollingUpdate\n template:\n metadata:\n labels:\n {{- include \"weave-scope-agent.helm_std_labels\" . | indent 8 }}\n component: agent\n spec:\n tolerations:\n - effect: NoSchedule\n operator: Exists\n {{- if .Values.priorityClassName }}\n priorityClassName: {{ .Values.priorityClassName }}\n {{- end }}\n containers:\n - name: {{ template \"weave-scope-agent.name\" . }}\n image: \"{{ .Values.global.image.repository }}:{{ .Values.global.image.tag }}\"\n imagePullPolicy: \"{{ .Values.global.image.pullPolicy }}\"\n args:\n - '--mode=probe'\n - '--probe-only'\n - '--probe.kubernetes.role=host'\n - '--probe.docker.bridge={{ .Values.dockerBridge }}'\n - '--probe.docker=true'\n - '--probe.kubernetes=true'\n {{- range $arg := .Values.flags }}\n - {{ $arg | quote }}\n {{- end }}\n {{if .Values.readOnly}}\n - \"--probe.no-controls\"\n {{end}}\n {{- if .Values.global.probeToken }}\n - '--probe-token={{ .Values.global.probeToken }}'\n {{- else if .Values.global.scopeFrontendAddr }}\n - {{ .Values.global.scopeFrontendAddr }}\n {{- else }}\n - {{ .Values.global.service.name | default (include \"toplevel.fullname\" .) }}.{{ .Release.Namespace }}.svc:{{ .Values.global.service.port }}\n {{- end }}\n securityContext:\n privileged: true\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumeMounts:\n - name: docker-socket\n mountPath: /var/run/docker.sock\n - name: scope-plugins\n mountPath: /var/run/scope/plugins\n - name: sys-kernel-debug\n mountPath: /sys/kernel/debug\n volumes:\n - name: docker-socket\n hostPath:\n path: /var/run/docker.sock\n - name: scope-plugins\n hostPath:\n path: /var/run/scope/plugins\n - name: sys-kernel-debug\n hostPath:\n path: /sys/kernel/debug\n hostPID: true\n hostNetwork: true\n dnsPolicy: ClusterFirstWithHostNet\n{{- end -}}\n",
"# deployment.yaml\n{{- if .Values.enabled -}}\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n labels:\n {{- include \"weave-scope-cluster-agent.helm_std_labels\" . | indent 4 }}\n component: cluster-agent\n name: {{ template \"weave-scope-cluster-agent.fullname\" . }}\n annotations:\n {{- include \"weave-scope-cluster-agent.annotations\" . | indent 4 }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"toplevel.name\" . }}\n release: {{ .Release.Name }}\n component: cluster-agent\n strategy:\n type: RollingUpdate\n template:\n metadata:\n labels:\n {{- include \"weave-scope-cluster-agent.helm_std_labels\" . | indent 8 }}\n component: cluster-agent\n spec:\n containers:\n - name: {{ template \"weave-scope-cluster-agent.name\" . }}\n image: \"{{ .Values.global.image.repository }}:{{ .Values.global.image.tag }}\"\n imagePullPolicy: \"{{ .Values.global.image.pullPolicy }}\"\n args:\n - '--mode=probe'\n - '--probe-only'\n - '--probe.kubernetes.role=cluster'\n {{- range $arg := .Values.flags }}\n - {{ $arg | quote }}\n {{- end }}\n {{if .Values.readOnly}}\n - \"--probe.no-controls\"\n {{end}}\n {{- if .Values.global.scopeFrontendAddr }}\n - {{ .Values.global.scopeFrontendAddr }}\n {{- else }}\n - {{ .Values.global.service.name | default (include \"toplevel.fullname\" .) }}.{{ .Release.Namespace }}.svc:{{ .Values.global.service.port }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n serviceAccountName: {{ template \"weave-scope-cluster-agent.serviceAccountName\" . }}\n{{- end -}}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $fullName := .Values.global.service.name | default (include \"toplevel.fullname\" .) -}}\n{{- $ingressPaths := .Values.ingress.paths -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n labels:\n {{- include \"weave-scope-frontend.helm_std_labels\" . | indent 4 }}\n component: frontend\n name: {{ template \"weave-scope-frontend.fullname\" . }}\n annotations:\n {{- include \"weave-scope.annotations\" . | indent 4 }}\n {{- with .Values.ingress.annotations }}\n {{- toYaml . | nindent 4 }}\n {{- end }}\nspec:\n{{- if .Values.ingress.tls }}\n tls:\n {{- range .Values.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . | quote }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ . | quote }}\n http:\n paths:\n {{- range $ingressPaths }}\n - path: {{ . }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: http\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# service.yaml\n{{- if .Values.enabled -}}\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n {{- include \"weave-scope-frontend.helm_std_labels\" . | indent 4 }}\n component: frontend\n name: {{ .Values.global.service.name | default (include \"toplevel.fullname\" .) }}\n annotations:\n {{- include \"weave-scope-frontend.annotations\" . | indent 4 }}\nspec:\n ports:\n - name: http\n port: {{ .Values.global.service.port }}\n targetPort: http\n protocol: TCP\n selector:\n app: {{ template \"toplevel.name\" . }}\n release: {{ .Release.Name }}\n component: frontend\n type: {{ .Values.global.service.type }}\n{{- end -}}\n",
"# serviceaccount.yaml\n{{- if .Values.enabled -}}\n{{- if .Values.serviceAccount.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n {{- include \"weave-scope.helm_std_labels\" . | indent 4 }}\n component: agent\n name: {{ template \"weave-scope-agent.serviceAccountName\" . }}\n annotations:\n {{- include \"weave-scope.annotations\" . | indent 4 }}\n{{- end }}\n{{- end -}}\n",
"# test-config.yaml\n{{- $frontend := index .Values \"weave-scope-frontend\" -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"weave-scope.fullname\" . }}-tests\n labels:\n {{- include \"weave-scope.helm_std_labels\" . | indent 4 }}\ndata:\n run.sh: |-\n{{ if $frontend.enabled }}\n @test \"Testing Weave Scope UI is accessible\" {\n curl --retry 12 --retry-delay 10 http://{{ .Values.global.service.name | default (include \"toplevel.fullname\" .) }}.{{ .Release.Namespace }}.svc:{{ .Values.global.service.port }}\n }\n{{- else }}\n @test \"Null test if the frontend is not installed\" {\n true\n }\n{{- end }}\n",
"# weave-scope-tests.yaml\napiVersion: v1\nkind: Pod\nmetadata:\n name: \"{{ .Release.Name }}-ui-test-{{ randAlphaNum 5 | lower }}\"\n annotations:\n \"helm.sh/hook\": test-success\n labels:\n {{- include \"weave-scope.helm_std_labels\" . | indent 4 }}\nspec:\n initContainers:\n - name: \"test-framework\"\n image: \"dduportal/bats:0.4.0\"\n command:\n - \"bash\"\n - \"-c\"\n - |\n set -ex\n # copy bats to tools dir\n cp -R /usr/local/libexec/ /tools/bats/\n volumeMounts:\n - mountPath: /tools\n name: tools\n containers:\n - name: {{ .Release.Name }}-ui-test\n image: dduportal/bats:0.4.0\n command: [\"/tools/bats/bats\", \"-t\", \"/tests/run.sh\"]\n volumeMounts:\n - mountPath: /tests\n name: tests\n readOnly: true\n - mountPath: /tools\n name: tools\n volumes:\n - name: tests\n configMap:\n name: {{ template \"weave-scope.fullname\" . }}-tests\n - name: tools\n emptyDir: {}\n restartPolicy: Never\n"
] | # Where defaults exist, the values are set to them here.
# Values with no preferred or common defaults are set to empty strings.
global:
# global.image: the image that will be used for this release
image:
repository: weaveworks/scope
tag: 1.12.0
# global.image.pullPolicy: must be Always, IfNotPresent, or Never
pullPolicy: "IfNotPresent"
# global.service.*: the configuration of the service used to access the frontend
service:
# global.service.name: the short name desired for the frontend service
# global.service.name may be specified if you need to use a specific service name, but will be generated if not specified
# global.service.name is a global so we can access its value easily from the agent subchart
# name: "weave-scope-app"
# global.service.port: (required if frontend.enabled == true) the port exposed by the Scope frontend service
# global.service.port is a global so we can access its value easily from the agent subchart
port: 80
# global.service.type: (required if frontend.enabled == true) the type of the frontend service -- must be ClusterIP, NodePort or LoadBalancer
# global.service.type is a global to keep it with the other values for configuring the frontend service
type: "ClusterIP"
# weave-scope-frontend.* controls how the Scope frontend is installed
weave-scope-frontend:
enabled: true
# weave-scope-frontend.resources.*: controls requests/limits for the frontend
# weave-scope-frontend.resources.* values are all optional but should not be set to empty values
# resources:
# requests:
# weave-scope-frontend.resources.requests.cpu: CPU req. in millicores (m)
# cpu: ""
# weave-scope-frontend.resources.requests.memory: memory req. in MiB (Mi)
# memory: ""
# limits:
# weave-scope-frontend.resources.limits.cpu: CPU limit in millicores (m)
# cpu: ""
# weave-scope-frontend.resources.limits.memory: memory limit in MiB (Mi)
# memory: ""
flags: []
# weave-scope-frontend Ingress
ingress:
# If true, weave-scope-frontend ingress will be created
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# weave-scope-frontend path(s) must be provided if Ingress is enabled
paths: []
# weave-scope-frontend hostname(s) must be provided if Ingress is enabled
hosts:
- weave-scope.example.test
# Ingress TLS secret
# Must be created manually in the namespace
tls: []
# - secretName: weave-scope-example-tls
# hosts:
# - weave-scope.example.test
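# Example: a minimal enabled Ingress for the frontend (illustrative values only):
# ingress:
#   enabled: true
#   paths:
#     - /
#   hosts:
#     - weave-scope.example.test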
# weave-scope-agent.* controls how the Weave Scope node agent pods are installed
weave-scope-agent:
enabled: true
flags: []
# priorityClassName:
# weave-scope-agent.dockerBridge: (required if agent.enabled == true) the name of the Docker bridge interface
dockerBridge: "docker0"
# weave-scope-agent.scopeFrontendAddr: the host:port of a Scope frontend to send data to
# weave-scope-agent.scopeFrontendAddr is only needed for some cases where the frontend is deployed separately from the agent
scopeFrontendAddr: ""
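# Example (illustrative address only): scopeFrontendAddr: "scope.example.com:80"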
# weave-scope-agent.probeToken: the token used to connect to Weave Cloud
# weave-scope-agent.probeToken is not needed for connecting to non-cloud Scope frontends
probeToken: ""
# weave-scope-agent.readOnly: enabling this adds --probe.no-controls to the args list.
readOnly: false
# weave-scope-agent.resources.*: controls requests/limits for the agent
# weave-scope-agent.resources.* values are all optional but should not be set to empty values
# resources:
# requests:
# weave-scope-agent.resources.requests.cpu: CPU req. in millicores (m)
# cpu: ""
# weave-scope-agent.resources.requests.memory: memory req. in MiB (Mi)
# memory: ""
# limits:
# weave-scope-agent.resources.limits.cpu: CPU limit in millicores (m)
# cpu: ""
# weave-scope-agent.resources.limits.memory: memory limit in MiB (Mi)
# memory: ""
# weave-scope-cluster-agent.* controls how the Weave Scope cluster agent pod is installed
weave-scope-cluster-agent:
enabled: true
flags: []
# weave-scope-cluster-agent.scopeFrontendAddr: the host:port of a Scope frontend to send data to
# weave-scope-cluster-agent.scopeFrontendAddr is only needed for some cases where the frontend is deployed separately from the agent
scopeFrontendAddr: ""
# weave-scope-cluster-agent.probeToken: the token used to connect to Weave Cloud
# weave-scope-cluster-agent.probeToken is not needed for connecting to non-cloud Scope frontends
probeToken: ""
# weave-scope-cluster-agent.rbac.*: controls RBAC resource creation/use
rbac:
# weave-scope-cluster-agent.rbac.create: whether RBAC resources should be created
# weave-scope-cluster-agent.rbac.create *must* be set to false if RBAC is not enabled in the cluster
# weave-scope-cluster-agent.rbac.create *may* be set to false in an RBAC-enabled cluster to allow for external management of RBAC
create: true
# weave-scope-cluster-agent.readOnly: enabling this adds --probe.no-controls to the args list.
readOnly: false
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
# name: "weave-scope"
# weave-scope-cluster-agent.resources.*: controls requests/limits for the agent
# weave-scope-cluster-agent.resources.* values are all optional but should not be set to empty values
# resources:
# requests:
# weave-scope-cluster-agent.resources.requests.cpu: CPU req. in millicores (m)
# cpu: ""
# weave-scope-cluster-agent.resources.requests.memory: memory req. in MiB (Mi)
# memory: ""
# limits:
# weave-scope-cluster-agent.resources.limits.cpu: CPU limit in millicores (m)
# cpu: ""
# weave-scope-cluster-agent.resources.limits.memory: memory limit in MiB (Mi)
# memory: ""
|
eventrouter | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"eventrouter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"eventrouter.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"eventrouter.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/* Generate basic labels */}}\n{{- define \"eventrouter.labels\" }}\napp: {{ template \"eventrouter.name\" . }}\nheritage: {{.Release.Service }}\nrelease: {{.Release.Name }}\nchart: {{ template \"eventrouter.chart\" . }}\n{{- if .Values.podLabels}}\n{{ toYaml .Values.podLabels }}\n{{- end }}\n{{- end }}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"eventrouter.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"eventrouter.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}",
"# clusterrole.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n labels: {{ include \"eventrouter.labels\" . | indent 4 }}\n name: {{ template \"eventrouter.fullname\" . }}\nrules:\n - apiGroups:\n - \"\"\n resources:\n - events\n verbs:\n - get\n - list\n - watch\n{{- end -}}",
"# clusterrolebinding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n labels: {{ include \"eventrouter.labels\" . | indent 4 }}\n name: {{ template \"eventrouter.fullname\" . }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"eventrouter.fullname\" . }}\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"eventrouter.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"eventrouter.fullname\" . }}\n labels: {{ include \"eventrouter.labels\" . | indent 4 }}\n namespace: {{ .Release.Namespace }}\ndata:\n config.json: |-\n {\n \"sink\": \"{{ .Values.sink }}\",\n \"enable-prometheus\": \"{{ .Values.enablePrometheus }}\"\n }\n",
"# deployment.yaml\n{{- if semverCompare \"^1.9-0\" .Capabilities.KubeVersion.GitVersion }}\napiVersion: apps/v1\n{{- else }}\napiVersion: apps/v1beta1\n{{- end }}\nkind: Deployment\nmetadata:\n labels: {{ include \"eventrouter.labels\" . | indent 4 }}\n name: {{ template \"eventrouter.fullname\" . }}\n namespace: {{ .Release.Namespace }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"eventrouter.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"eventrouter.name\" . }}\n release: {{ .Release.Name }}\n {{- if .Values.podAnnotations }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n volumeMounts:\n - name: config-volume\n mountPath: /etc/eventrouter\n {{- if .Values.containerPorts }}\n ports:\n{{ toYaml .Values.containerPorts | indent 10 }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n {{- if .Values.securityContext }}\n securityContext:\n{{ toYaml .Values.securityContext | indent 8 }}\n {{- end }}\n serviceAccountName: {{ template \"eventrouter.serviceAccountName\" . }}\n volumes:\n - name: config-volume\n configMap:\n name: {{ template \"eventrouter.fullname\" . }}\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels: {{ include \"eventrouter.labels\" . | indent 4 }}\n name: {{ template \"eventrouter.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end }}\n"
] | # Default values for eventrouter.
image:
repository: gcr.io/heptio-images/eventrouter
tag: v0.3
pullPolicy: IfNotPresent
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
rbac:
# Specifies whether RBAC resources should be created
create: true
tolerations: []
nodeSelector: {}
sink: glog
podAnnotations: {}
containerPorts: []
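# Example (a minimal sketch; the port number below is an assumption, check your
# eventrouter image's actual listen address before relying on it):
# containerPorts:
#   - name: metrics
#     containerPort: 8080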
securityContext: {}
# runAsUser: 1000
enablePrometheus: true
|
neo4j | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"neo4j.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"neo4j.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name for core servers.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"neo4j.core.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s-core\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name for read replica servers.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"neo4j.replica.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s-replica\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name for secrets.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"neo4j.secrets.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s-secrets\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name for core config.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"neo4j.coreConfig.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s-core-config\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name for RR config.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"neo4j.replicaConfig.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s-replica-config\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}",
"# core-configmap.yaml\n# This ConfigMap gets passed to all core cluster members to configure them.\n# Take note that some networking settings like internal hostname still get configured\n# when the pod starts, but most non-networking specific configs can be tailored here.\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"neo4j.coreConfig.fullname\" . }}\ndata:\n NEO4J_ACCEPT_LICENSE_AGREEMENT: \"{{ .Values.acceptLicenseAgreement }}\"\n NEO4J_dbms_mode: CORE\n NUMBER_OF_CORES: \"{{ .Values.core.numberOfServers }}\"\n AUTH_ENABLED: \"{{ .Values.authEnabled }}\"\n NEO4J_dbms_default__database: \"{{ .Values.defaultDatabase }}\"\n NEO4J_causal__clustering_discovery__type: LIST\n NEO4J_dbms_connector_bolt_listen__address: 0.0.0.0:7687\n NEO4J_dbms_connector_http_listen__address: 0.0.0.0:7474\n NEO4J_dbms_connector_https_listen__address: 0.0.0.0:7473\n NEO4J_causal__clustering_initial__discovery__members: \"{{ template \"neo4j.fullname\" . }}-core-0.{{ template \"neo4j.fullname\" . }}.{{ .Release.Namespace }}.svc.cluster.local:5000,{{ template \"neo4j.fullname\" . }}-core-1.{{ template \"neo4j.fullname\" . }}.{{ .Release.Namespace }}.svc.cluster.local:5000,{{ template \"neo4j.fullname\" . }}-core-2.{{ template \"neo4j.fullname\" . }}.{{ .Release.Namespace }}.svc.cluster.local:5000\"\n NEO4J_causal__clustering_minimum__core__cluster__size__at__formation: \"3\"\n NEO4J_causal__clustering_minimum__core__cluster__size__at__runtime: \"2\"\n NEO4J_dbms_jvm_additional: \"-XX:+ExitOnOutOfMemoryError\"\n {{- if .Values.useAPOC }}\n NEO4JLABS_PLUGINS: \"[\\\"apoc\\\"]\"\n NEO4J_apoc_import_file_use__neo4j__config: \"true\"\n NEO4J_dbms_security_procedures_unrestricted: \"apoc.*\"\n {{- end }}\n",
"# core-dns.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"neo4j.fullname\" . }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app.kubernetes.io/name: {{ template \"neo4j.name\" . }}\n app.kubernetes.io/component: core\nspec:\n clusterIP: None\n # This next line is critical: cluster members cannot discover each other without published\n # addresses, but without this, they can't get addresses unless they're ready (Catch-22)\n publishNotReadyAddresses: true \n ports:\n - name: http\n port: 7474\n targetPort: 7474\n - name: bolt\n port: 7687\n targetPort: 7687\n - name: https\n port: 7473\n targetPort: 7473\n selector:\n app.kubernetes.io/name: {{ template \"neo4j.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: core\n",
"# core-statefulset.yaml\napiVersion: \"apps/v1\"\nkind: StatefulSet\nmetadata:\n name: \"{{ template \"neo4j.core.fullname\" . }}\"\nspec:\n podManagementPolicy: Parallel\n serviceName: {{ template \"neo4j.fullname\" . }}\n replicas: {{ .Values.core.numberOfServers }}\n selector:\n matchLabels:\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/name: {{ template \"neo4j.name\" . }}\n app.kubernetes.io/component: core\n template:\n metadata:\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app.kubernetes.io/name: {{ template \"neo4j.name\" . }}\n app.kubernetes.io/component: core\n spec:\n containers:\n - name: {{ template \"neo4j.fullname\" . }}\n image: \"{{ .Values.image }}:{{ .Values.imageTag }}\"\n imagePullPolicy: \"{{ .Values.imagePullPolicy }}\"\n # Most pod config is factored into a different configMap, which is user overrideable.\n envFrom:\n - configMapRef:\n {{- if .Values.core.configMap }}\n name: \"{{ .Values.core.configMap }}\"\n {{- else }}\n name: {{ template \"neo4j.coreConfig.fullname\" . }}\n {{- end }} \n env:\n {{- if .Values.authEnabled }}\n - name: NEO4J_SECRETS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"neo4j.secrets.fullname\" . }}\n key: neo4j-password\n {{- end }} \n command:\n - \"/bin/bash\"\n - \"-c\"\n - |\n # Making this host the default address, so that advertised addresses are over-rideable\n # by custom configmaps if specified.\n export HOST=$(hostname -f)\n export NEO4J_causal__clustering_discovery__advertised__address=${NEO4J_causal__clustering_discovery__advertised__address:-$HOST}\n export NEO4J_dbms_default__advertised__address=${NEO4J_dbms_default__advertised__address:-$HOST}\n export NEO4J_dbms_connector_bolt_advertised__address=${NEO4J_dbms_connector_bolt_advertised__address:-$HOST}\n export NEO4J_dbms_connector_http_advertised__address=${NEO4J_dbms_connector_http_advertised__address:-$HOST}\n export NEO4J_dbms_connector_https_advertised__address=${NEO4J_dbms_connector_https_advertised__address:-$HOST}\n \n # These settings are *not* overrideable, because they must match the initial_discovery_members\n # In the Neo4j node config, otherwise Neo4j's akka layer will reject connections.\n export NEO4J_causal__clustering_discovery__advertised__address=$(hostname -f):5000\n export NEO4J_causal__clustering_transaction__advertised__address=$(hostname -f):6000\n export NEO4J_causal__clustering_raft__advertised__address=$(hostname -f):7000\n\n if [ \"${AUTH_ENABLED:-}\" == \"true\" ]; then\n export NEO4J_AUTH=\"neo4j/${NEO4J_SECRETS_PASSWORD}\"\n else\n export NEO4J_AUTH=\"none\"\n fi\n\n echo \"Starting Neo4j CORE on $HOST\"\n exec /docker-entrypoint.sh \"neo4j\"\n ports:\n - containerPort: 5000\n name: discovery\n - containerPort: 7000\n name: raft\n - containerPort: 6000\n name: tx\n - containerPort: 7474\n name: browser\n - containerPort: 7687\n name: bolt\n volumeMounts:\n - name: datadir\n mountPath: \"{{ .Values.core.persistentVolume.mountPath }}\"\n {{- if .Values.core.persistentVolume.subPath }}\n subPath: {{ .Values.core.persistentVolume.subPath }}\n {{- end }}\n - name: plugins\n mountPath: /plugins\n readinessProbe:\n tcpSocket:\n port: 7687\n initialDelaySeconds: 30\n periodSeconds: 3\n livenessProbe:\n initialDelaySeconds: 60\n tcpSocket:\n port: 7687\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n{{- if .Values.core.sidecarContainers }}\n{{ 
toYaml .Values.core.sidecarContainers | indent 6 }}\n{{- end }}\n{{- if .Values.imagePullSecret }}\n imagePullSecrets:\n - name: {{ .Values.imagePullSecret }}\n{{- end -}}\n{{- if .Values.core.initContainers }}\n initContainers:\n{{ toYaml .Values.core.initContainers | indent 6 }}\n{{- end }}\n volumes:\n {{- if not .Values.core.persistentVolume.enabled }}\n - name: datadir\n emptyDir: {}\n {{- end }}\n - name: plugins\n emptyDir: {}\n{{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n {{- if .Values.core.persistentVolume.enabled }}\n volumeClaimTemplates:\n - metadata:\n name: datadir\n annotations:\n {{- if .Values.core.persistentVolume.annotations }}\n{{ toYaml .Values.core.persistentVolume.annotations | indent 12 }}\n {{- end }}\n spec:\n accessModes:\n - ReadWriteOnce\n {{- if .Values.core.persistentVolume.storageClass }}\n {{- if (eq \"-\" .Values.core.persistentVolume.storageClass) }}\n storageClassName: \"\"\n {{- else }}\n storageClassName: \"{{ .Values.core.persistentVolume.storageClass }}\"\n {{- end }}\n {{- end }}\n resources:\n requests:\n storage: \"{{ .Values.core.persistentVolume.size }}\"\n {{- end }}\n",
"# poddisruptionbudget.yaml\n{{- if .Values.podDisruptionBudget -}}\napiVersion: policy/v1beta1\nkind: PodDisruptionBudget\nmetadata:\n labels:\n app.kubernetes.io/name: {{ template \"neo4j.name\" . }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n name: {{ template \"neo4j.fullname\" . }}\nspec:\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ template \"neo4j.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n{{ toYaml .Values.podDisruptionBudget | indent 2 }}\n{{- end -}}\n",
"# readreplicas-configmap.yaml\n# This ConfigMap gets passed to all core cluster members to configure them.\n# Take note that some networking settings like internal hostname still get configured\n# when the pod starts, but most non-networking specific configs can be tailored here.\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"neo4j.replicaConfig.fullname\" . }}\ndata:\n NEO4J_ACCEPT_LICENSE_AGREEMENT: \"{{ .Values.acceptLicenseAgreement }}\"\n NEO4J_dbms_mode: READ_REPLICA\n NUMBER_OF_CORES: \"{{ .Values.core.numberOfServers }}\"\n AUTH_ENABLED: \"{{ .Values.authEnabled }}\"\n NEO4J_dbms_default__database: \"{{ .Values.defaultDatabase }}\"\n NEO4J_dbms_connector_bolt_listen__address: 0.0.0.0:7687\n NEO4J_dbms_connector_http_listen__address: 0.0.0.0:7474\n NEO4J_dbms_connector_https_listen__address: 0.0.0.0:7473\n NEO4J_causal__clustering_discovery__type: LIST\n NEO4J_causal__clustering_initial__discovery__members: \"{{ template \"neo4j.fullname\" . }}-core-0.{{ template \"neo4j.fullname\" . }}.{{ .Release.Namespace }}.svc.cluster.local:5000,{{ template \"neo4j.fullname\" . }}-core-1.{{ template \"neo4j.fullname\" . }}.{{ .Release.Namespace }}.svc.cluster.local:5000,{{ template \"neo4j.fullname\" . }}-core-2.{{ template \"neo4j.fullname\" . }}.{{ .Release.Namespace }}.svc.cluster.local:5000\"\n NEO4J_causal__clustering_minimum__core__cluster__size__at__formation: \"3\"\n NEO4J_causal__clustering_minimum__core__cluster__size__at__runtime: \"2\"\n NEO4J_dbms_jvm_additional: \"-XX:+ExitOnOutOfMemoryError\"\n {{- if .Values.useAPOC }}\n NEO4JLABS_PLUGINS: \"[\\\"apoc\\\"]\"\n NEO4J_apoc_import_file_use__neo4j__config: \"true\"\n NEO4J_dbms_security_procedures_unrestricted: \"apoc.*\"\n {{- end }}\n",
"# readreplicas-deployment.yaml\napiVersion: \"apps/v1\"\nkind: Deployment\nmetadata:\n name: \"{{ template \"neo4j.replica.fullname\" . }}\"\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n app.kubernetes.io/name: {{ template \"neo4j.name\" . }}\n app.kubernetes.io/component: replica\nspec:\n{{- if not .Values.readReplica.autoscaling.enabled }}\n replicas: {{ .Values.readReplica.numberOfServers }}\n{{- end }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ template \"neo4j.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: replica\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ template \"neo4j.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: replica\n spec:\n containers:\n - name: neo4j\n image: \"{{ .Values.image }}:{{ .Values.imageTag }}\"\n imagePullPolicy: \"{{ .Values.imagePullPolicy }}\"\n # Most pod config is factored into a different configMap, which is user overrideable.\n envFrom:\n - configMapRef:\n {{- if .Values.readReplica.configMap }}\n name: \"{{ .Values.readReplica.configMap }}\"\n {{- else }}\n name: {{ template \"neo4j.replicaConfig.fullname\" . }}\n {{- end }}\n env:\n {{- if .Values.authEnabled }}\n - name: NEO4J_SECRETS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"neo4j.secrets.fullname\" . }}\n key: neo4j-password\n {{- end }}\n command:\n - \"/bin/bash\"\n - \"-c\"\n - |\n # Replicas advertise by bare IP, not hostname. This is because deployments in kubernetes\n # don't provide good FQDNs. (https://github.com/kubernetes/kubernetes/issues/60789)\n # The FQDN advertisement address is necessary for the akka cluster formation in Neo4j to work,\n # so if you advertise with a bare local hostname or something invalid, the read replica will be\n # unable to join the raft group.\n export HOST=$(hostname -i)\n export NEO4J_causal__clustering_discovery__advertised__address=${NEO4J_causal__clustering_discovery__advertised__address:-$HOST}\n export NEO4J_dbms_default__advertised__address=${NEO4J_dbms_default__advertised__address:-$HOST}\n export NEO4J_dbms_connector_bolt_advertised__address=${NEO4J_dbms_connector_bolt_advertised__address:-$HOST}\n export NEO4J_dbms_connector_http_advertised__address=${NEO4J_dbms_connector_http_advertised__address:-$HOST}\n export NEO4J_dbms_connector_https_advertised__address=${NEO4J_dbms_connector_https_advertised__address:-$HOST}\n\n # These settings are *not* overrideable, because they must match the addresses the\n # core members see to avoid akka rejections.\n export NEO4J_causal__clustering_discovery__advertised__address=$HOST:5000\n export NEO4J_causal__clustering_transaction__advertised__address=$HOST:6000\n export NEO4J_causal__clustering_raft__advertised__address=$HOST:7000\n\n if [ \"${AUTH_ENABLED:-}\" == \"true\" ]; then\n export NEO4J_AUTH=\"neo4j/${NEO4J_SECRETS_PASSWORD}\"\n else\n export NEO4J_AUTH=\"none\"\n fi\n\n echo \"Starting Neo4j READ_REPLICA on $HOST\"\n exec /docker-entrypoint.sh \"neo4j\"\n ports:\n - containerPort: 5000\n name: discovery\n - containerPort: 7000\n name: raft\n - containerPort: 6000\n name: tx\n - containerPort: 7474\n name: browser\n - containerPort: 7687\n name: bolt\n volumeMounts:\n - name: plugins\n mountPath: /plugins\n resources:\n{{ toYaml .Values.readReplica.resources | indent 10 }}\n{{- if 
.Values.core.sidecarContainers }}\n{{ toYaml .Values.core.sidecarContainers | indent 6 }}\n{{- end }}\n{{- if .Values.readReplica.initContainers }}\n initContainers:\n{{ toYaml .Values.readReplica.initContainers | indent 6 }}\n{{- end }}\n{{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n{{- end }}\n{{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n{{- end }}\n volumes:\n - name: plugins\n emptyDir: {}\n{{- if .Values.imagePullSecret }}\n imagePullSecrets:\n - name: {{ .Values.imagePullSecret }}\n{{- end -}}\n",
"# readreplicas-dns.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"neo4j.replica.fullname\" . }}\n labels:\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app.kubernetes.io/name: {{ template \"neo4j.replica.fullname\" . }}\n app.kubernetes.io/component: core\nspec:\n clusterIP: None\n # This next line is critical: cluster members cannot discover each other without published\n # addresses, but without this, they can't get addresses unless they're ready (Catch-22)\n publishNotReadyAddresses: true \n ports:\n - name: http\n port: 7474\n targetPort: 7474\n - name: bolt\n port: 7687\n targetPort: 7687\n - name: https\n port: 7473\n targetPort: 7473\n selector:\n app.kubernetes.io/name: {{ template \"neo4j.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\n app.kubernetes.io/component: replica\n",
"# readreplicas-hpa.yaml\n{{- if .Values.readReplica.autoscaling.enabled }}\napiVersion: autoscaling/v2beta1\nkind: HorizontalPodAutoscaler\nmetadata:\n name: {{ template \"neo4j.name\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"neo4j.name\" . }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/instance: {{ .Release.Name }}\nspec:\n scaleTargetRef:\n apiVersion: apps/v1\n kind: Deployment\n name: {{ template \"neo4j.name\" . }}\n minReplicas: {{ .Values.readReplica.autoscaling.minReplicas }}\n maxReplicas: {{ .Values.readReplica.autoscaling.maxReplicas }}\n metrics:\n - type: Resource\n resource:\n name: cpu\n targetAverageUtilization: {{ .Values.readReplica.autoscaling.targetAverageUtilization }}\n{{- end }}\n",
"# secret.yaml\n{{- if .Values.authEnabled -}}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"neo4j.secrets.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"neo4j.name\" . }}\n helm.sh/chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app.kubernetes.io/managed-by: {{ .Release.Service | quote }}\n app.kubernetes.io/instance: {{ .Release.Name | quote }}\ntype: Opaque\ndata:\n {{- if .Values.neo4jPassword }}\n neo4j-password: {{ .Values.neo4jPassword | b64enc | quote }}\n {{- else }}\n neo4j-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{- end }}\n{{- end -}}\n",
"# test-config.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"neo4j.fullname\" . }}-tests\n labels:\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n app: {{ template \"neo4j.name\" . }}\ndata:\n run.sh: |-\n @test \"Testing Neo4j cluster has quorum\" {\n echo \"checking if the cluster is up\"\n # path=\"data\"\n for id in $(seq 0 $((CORE_REPLICAS - 1))); do\n host=\"${STATEFULSET_NAME}-core-$id.${STATEFULSET_NAME}.${NAMESPACE}.svc.cluster.local\"\n auth=\"neo4j:${NEO4J_SECRETS_PASSWORD}\"\n url=\"http://${auth}@${host}:7474/\"\n attempts=10\n attempt=0\n while true; do\n attempt=$[$attempt + 1]\n echo \"Try $attempt: Connecting to $host:7474/\"\n run wget ${url} -qO- 2>&1\n echo \"Exit code: $status\"\n [ $status -eq 0 ] && break\n [ \"${attempt}\" -ge \"${attempts}\" ] && exit 1\n sleep 5\n done\n done\n }\n\n @test \"Testing Neo4j core servers are available\" {\n echo \"checking if nodes are available\"\n path=\"manage/server/core/available\"\n for id in $(seq 0 $((CORE_REPLICAS - 1))); do\n host=\"${STATEFULSET_NAME}-core-$id.${STATEFULSET_NAME}.${NAMESPACE}.svc.cluster.local\"\n auth=\"neo4j:${NEO4J_SECRETS_PASSWORD}\"\n url=\"http://${auth}@${host}:7474/db/system/tx/commit\"\n attempts=10\n attempt=0\n while true; do\n attempt=$[$attempt + 1]\n echo \"Try $attempt: Connecting to $url\"\n \n # Attempt post via netcat since old busybox doesn't support wget --post-data\n POST_DATA='{\"statements\":[{\"statement\":\"SHOW DATABASES\"}]}'\n POST_PATH=/db/system/tx/commit\n HERE_HOST=$(hostname -f)\n AUTH_TOK=$(echo -n $auth | base64)\n BODY_LEN=$( echo -n \"${POST_DATA}\" | wc -c )\n SHOW_DATABASES_RESPONSE=$(echo -ne \"POST ${POST_PATH} HTTP/1.0\\r\\nHost: ${HERE_HOST}\\r\\nAuthorization: Basic $AUTH_TOK\\r\\nContent-Type: application/json\\r\\nContent-Length: ${BODY_LEN}\\r\\n\\r\\n${POST_DATA}\" | \\\n nc -i 3 ${host} 7474)\n echo \"SHOW DATABASES response:\"\n echo $SHOW_DATABASES_RESPONSE\n didItWork=$(echo \"${SHOW_DATABASES_RESPONSE}\" | grep -i online)\n [ \"$?\" = 0 ] && break\n [ \"${attempt}\" -ge \"${attempts}\" ] && exit 1\n sleep 5\n done\n done\n }\n",
"# test-neo4j-cluster.yaml\napiVersion: v1\nkind: Pod\nmetadata:\n name: \"{{ template \"neo4j.fullname\" . }}-service-test-{{ randAlphaNum 5 | lower }}\"\n labels:\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version }}\n app: {{ template \"neo4j.name\" . }}\n annotations:\n \"helm.sh/hook\": test-success\n \"helm.sh/hook-delete-policy\": \"before-hook-creation,hook-succeeded\"\nspec:\n containers:\n - name: {{ .Release.Name }}-ui-test\n image: {{ .Values.testImage }}:{{ .Values.testImageTag }}\n env:\n - name: \"STATEFULSET_NAME\"\n value: \"{{ template \"neo4j.fullname\" . }}\"\n - name: \"CORE_REPLICAS\"\n value: \"{{ .Values.core.numberOfServers }}\"\n - name: \"NAMESPACE\"\n value: {{ .Release.Namespace }}\n - name: NEO4J_SECRETS_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"neo4j.secrets.fullname\" . }}\n key: neo4j-password\n command: [\"/tools/bats/bats\", \"-t\", \"/tests/run.sh\"]\n volumeMounts:\n - mountPath: /tests\n name: tests\n readOnly: true\n - mountPath: /tools\n name: tools\n initContainers:\n - name: test-framework\n image: \"dduportal/bats:0.4.0\"\n command: [\"bash\", \"-c\", \"set -ex\\ncp -R /usr/local/libexec/ /tools/bats/\"]\n volumeMounts:\n - mountPath: \"/tools\"\n name: tools\n volumes:\n - name: tests\n configMap:\n name: {{ template \"neo4j.fullname\" . }}-tests\n - name: tools\n emptyDir: {}\n restartPolicy: Never\n"
] | # Default values for Neo4j.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
name: "neo4j"
# Specs for the Neo4j docker image
image: "neo4j"
imageTag: "4.0.3-enterprise"
imagePullPolicy: "IfNotPresent"
# imagePullSecret: registry-secret
acceptLicenseAgreement: "no"
podDisruptionBudget: {}
# minAvailable: 2
# maxUnavailable: 1
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# Use password authentication
authEnabled: true
## Specify password for neo4j user
## Defaults to a random 10-character alphanumeric string if not set and authEnabled is true
# neo4jPassword:
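# Example (hypothetical value): neo4jPassword: "my-initial-password"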
# Specify the cluster domain (used, e.g., as the suffix in the NEO4J_causal__clustering_initial__discovery__members environment variable)
clusterDomain: "cluster.local"
# Specs for the images used for running tests against the Helm package
# https://github.com/mneedham/k8s-kubectl (a general-purpose kubectl docker image)
testImage: "markhneedham/k8s-kubectl"
testImageTag: "master"
# Whether or not to use APOC: https://neo4j.com/labs/apoc/
# Comment out if you do not want to use it.
useAPOC: "true"
# The default name of the Neo4j database to use.
# See https://neo4j.com/docs/operations-manual/current/manage-databases/introduction/#manage-databases-default
defaultDatabase: "neo4j"
# Cores
core:
# configMap: "my-custom-configmap"
numberOfServers: 3
persistentVolume:
## whether or not persistence is enabled
##
enabled: true
## core server data Persistent Volume mount root path
##
mountPath: /data
## core server data Persistent Volume size
##
size: 10Gi
## core server data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
## storageClass: "-"
## Subdirectory of core server data Persistent Volume to mount
## Useful if the volume's root directory is not empty
##
## subPath: ""
sidecarContainers: []
## Additional containers to be added to the Neo4j core pod.
# - name: my-sidecar
# image: nginx:latest
initContainers: []
  ## init containers to run before the Neo4j core pod starts, e.g. to install plugins.
  ## They can also be used to restore from the last available backup, to ensure that newly
  ## joining core members have less TX history to catch up on before joining the cluster.
## Note that this is specifically *not* needed for APOC, which is included by default.
# - name: init-plugins
# image: "appropriate/curl:latest"
# imagePullPolicy: "IfNotPresent"
# volumeMounts:
# - name: plugins
# mountPath: /plugins
# command:
# - "/bin/sh"
# - "-c"
# - |
# curl -L https://somesite.com/path/to/plugin.jar -O
# cp plugin.jar /plugins/
# Read Replicas
readReplica:
# configMap: "my-custom-configmap"
resources: {}
# limits:
# cpu: 100m
# memory: 512Mi
# requests:
# cpu: 100m
# memory: 512Mi
autoscaling:
enabled: false
targetAverageUtilization: 70
minReplicas: 1
maxReplicas: 3
numberOfServers: 0
sidecarContainers: []
## Additional containers to be added to the Neo4j replica pod.
# - name: my-sidecar
# image: nginx:latest
initContainers: []
  ## init containers to run before the Neo4j replica pod starts, e.g. to install custom plugins.
  ## They can also be used to restore from the last available backup, to ensure that newly
  ## joining replicas have less TX history to catch up on before joining the cluster.
## Note that this is specifically *not* needed for APOC, which is included by default.
# - name: init-plugins
# image: "appropriate/curl:latest"
# imagePullPolicy: "IfNotPresent"
# volumeMounts:
# - name: plugins
# mountPath: /plugins
# command:
# - "/bin/sh"
# - "-c"
# - |
# curl -L https://somesite.com/path/to/plugin.jar -O
# cp plugin.jar /plugins/
resources: {}
# limits:
# cpu: 100m
# memory: 512Mi
# requests:
# cpu: 100m
# memory: 512Mi
|
metabase | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"metabase.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"metabase.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- printf .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the apiVersion of deployment.\n*/}}\n{{- define \"deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else if semverCompare \">=1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# config.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"metabase.fullname\" . }}-config\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"metabase.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ndata:\n {{- if .Values.log4jProperties }}\n log4j.properties:\n{{ toYaml .Values.log4jProperties | indent 4}}\n {{- end}}\n",
"# database-secret.yaml\n{{- if and (ne (.Values.database.type | lower) \"h2\") (not .Values.database.existingSecret) }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"metabase.fullname\" . }}-database\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"metabase.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n {{- if .Values.database.encryptionKey }}\n encryptionKey: {{ .Values.database.encryptionKey | b64enc | quote }}\n {{- end }}\n {{- if .Values.database.connectionURI }}\n connectionURI: {{ .Values.database.connectionURI | b64enc | quote }}\n {{- else }}\n username: {{ .Values.database.username | b64enc | quote }}\n password: {{ .Values.database.password | b64enc | quote }}\n {{- end }}\n{{- end }}\n",
"# deployment.yaml\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"metabase.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"metabase.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"metabase.name\" . }}\n replicas: {{ .Values.replicaCount }}\n template:\n metadata:\n annotations:\n checksum/config: {{ include (print $.Template.BasePath \"/config.yaml\") . | sha256sum }}\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | trim | indent 8 }}\n {{- end }}\n labels:\n app: {{ template \"metabase.name\" . }}\n release: {{ .Release.Name }}\n {{- if .Values.podLabels }}\n{{ toYaml .Values.podLabels | trim | indent 8 }}\n {{- end }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n env:\n - name: MB_JETTY_HOST\n value: {{ .Values.listen.host | quote }}\n - name: MB_JETTY_PORT\n value: {{ .Values.listen.port | quote }}\n {{- if .Values.ssl.enabled }}\n - name: MB_JETTY_SSL\n value: true\n - name: MB_JETTY_SSL_Port\n value: {{ .Values.ssl.port | quote }}\n - name: MB_JETTY_SSL_Keystore\n valueFrom:\n secretKeyRef:\n name: {{ template \"metabase.fullname\" . }}-ssl\n key: keystore\n - name: MB_JETTY_SSL_Keystore_Password\n valueFrom:\n secretKeyRef:\n name: {{ template \"metabase.fullname\" . }}-ssl\n key: password\n {{- end }}\n {{- if .Values.jetty }}\n {{- range $key, $value := .Values.jetty }}\n - name: MB_JETTY_{{ $key | upper }}\n value: {{ $value | quote }}\n {{- end }}\n {{- end }}\n - name: MB_DB_TYPE\n value: {{ .Values.database.type | lower }}\n {{- if .Values.database.encryptionKey }}\n - name: MB_ENCRYPTION_SECRET_KEY\n valueFrom:\n secretKeyRef:\n name: {{ template \"metabase.fullname\" . 
}}-database\n key: encryptionKey\n {{- end }}\n {{- if ne (.Values.database.type | lower) \"h2\" }}\n {{- if or .Values.database.existingSecretConnectionURIKey .Values.database.connectionURI }}\n - name: MB_DB_CONNECTION_URI\n valueFrom:\n secretKeyRef:\n name: {{ or .Values.database.existingSecret (printf \"%s-database\" (include \"metabase.fullname\" .)) }}\n key: {{ or .Values.database.existingSecretConnectionURIKey \"connectionURI\" }}\n {{- else }}\n - name: MB_DB_HOST\n value: {{ .Values.database.host | quote }}\n - name: MB_DB_PORT\n value: {{ .Values.database.port | quote }}\n - name: MB_DB_DBNAME\n value: {{ .Values.database.dbname | quote }}\n - name: MB_DB_USER\n valueFrom:\n secretKeyRef:\n name: {{ or .Values.database.existingSecret (printf \"%s-database\" (include \"metabase.fullname\" .)) }}\n key: {{ or .Values.database.existingSecretUsernameKey \"username\" }}\n - name: MB_DB_PASS\n valueFrom:\n secretKeyRef:\n name: {{ or .Values.database.existingSecret (printf \"%s-database\" (include \"metabase.fullname\" .)) }}\n key: {{ or .Values.database.existingSecretPasswordKey \"password\" }}\n {{- end }}\n {{- end }}\n - name: MB_PASSWORD_COMPLEXITY\n value: {{ .Values.password.complexity }}\n - name: MB_PASSWORD_LENGTH\n value: {{ .Values.password.length | quote }}\n - name: JAVA_TIMEZONE\n value: {{ .Values.timeZone }}\n {{- if .Values.javaOpts }}\n - name: JAVA_OPTS\n value: {{ .Values.javaOpts | quote }}\n {{- else }}\n {{- if .Values.log4jProperties }}\n - name: JAVA_OPTS\n value: \"-Dlog4j.configuration=file:/tmp/conf/log4j.properties\"\n {{- end }}\n {{- end }}\n {{- if .Values.pluginsDirectory }}\n - name: MB_PLUGINS_DIR\n value: {{ .Values.pluginsDirectory | quote }}\n {{- end }}\n - name: MB_EMOJI_IN_LOGS\n value: {{ .Values.emojiLogging | quote }}\n {{- if .Values.siteUrl }}\n - name: MB_SITE_URL\n value: {{ .Values.siteUrl | quote }}\n {{- end }}\n {{- if .Values.session.maxSessionAge }}\n - name: MAX_SESSION_AGE\n value: {{ .Values.session.maxSessionAge | quote }}\n {{- end }}\n {{- if .Values.session.sessionCookies }}\n - name: MB_SESSION_COOKIES\n value: {{ .Values.session.sessionCookies | quote }}\n {{- end }}\n ports:\n - containerPort: {{ .Values.service.internalPort }}\n livenessProbe:\n httpGet:\n path: /\n port: {{ .Values.service.internalPort }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n readinessProbe:\n httpGet:\n path: /\n port: {{ .Values.service.internalPort }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n {{- if .Values.log4jProperties }}\n volumeMounts:\n - name: config\n mountPath: /tmp/conf/\n {{- end}}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n volumes:\n {{- if .Values.log4jProperties}}\n - name: config\n configMap:\n name: {{ template \"metabase.fullname\" . }}-config\n items:\n - key: log4j.properties\n path: log4j.properties\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $serviceName := include \"metabase.fullname\" . -}}\n{{- $servicePort := .Values.service.externalPort -}}\n{{- $ingressPath := .Values.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"metabase.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"metabase.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- range $key, $value := .Values.ingress.labels }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n annotations:\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range $host := .Values.ingress.hosts }}\n - host: {{ $host }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n {{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"metabase.fullname\" . }}\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"metabase.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n{{- if .Values.service.loadBalancerSourceRanges}}\n loadBalancerSourceRanges:\n{{toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}\n{{- end}}\n ports:\n - port: {{ .Values.service.externalPort }}\n targetPort: {{ .Values.service.internalPort }}\n{{- if .Values.service.nodePort }}\n nodePort: {{ .Values.service.nodePort }}\n{{- end}}\n protocol: TCP\n name: {{ .Values.service.name }}\n selector:\n app: {{ template \"metabase.name\" . }}\n release: {{ .Release.Name }}\n",
"# ssl-secret.yaml\n{{- if .Values.ssl.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"metabase.fullname\" . }}-ssl\n namespace: {{ .Release.Namespace }}\n labels:\n app: {{ template \"metabase.name\" . }}\n chart: {{ .Chart.Name }}-{{ .Chart.Version | replace \"+\" \"_\" }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n keystore: {{ .Values.ssl.keystore | b64enc | quote }}\n password: {{ .Values.ssl.keyStorePassword | b64enc | quote }}\n{{- end }}\n"
] | # Currently Metabase is not horizontally scalable. See
# https://github.com/metabase/metabase/issues/1446 and
# https://github.com/metabase/metabase/issues/2754
# NOTE: Should remain 1
replicaCount: 1
podAnnotations: {}
podLabels: {}
image:
repository: metabase/metabase
tag: v0.36.3
pullPolicy: IfNotPresent
## String to fully override metabase.fullname template
##
# fullnameOverride:
# Configure the Jetty web server
listen:
host: "0.0.0.0"
port: 3000
ssl:
  # If you have an SSL certificate and would prefer to have Metabase run over HTTPS
enabled: false
# port: 8443
  # keystore: |-
# << JKS KEY STORE >>
# keyStorePassword: storepass
jetty:
# maxThreads: 254
# minThreads: 8
# maxQueued: -1
# maxIdleTime: 60000
# Backend database
database:
# Database type (h2 / mysql / postgres), default: h2
type: h2
# encryptionKey: << YOUR ENCRYPTION KEY >>
  ## Only needed when you use mysql / postgres
# host:
# port:
# dbname:
# username:
# password:
  ## Alternatively, use a connection URI for full configurability. Example for SSL-enabled Postgres:
  # connectionURI: postgres://user:password@host:port/database?ssl=true&sslmode=require&sslfactory=org.postgresql.ssl.NonValidatingFactory
## If a secret with the database credentials already exists, use the following values:
# existingSecret:
# existingSecretUsernameKey:
# existingSecretPasswordKey:
# existingSecretConnectionURIKey:
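  ## Example (hypothetical values; a minimal sketch of pointing Metabase at an external Postgres):
  # type: postgres
  # host: my-postgres.example.com
  # port: 5432
  # dbname: metabase
  # username: metabase
  # password: changeme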
password:
# Changing Metabase password complexity:
# weak: no character constraints
# normal: at least 1 digit (default)
# strong: minimum 8 characters w/ 2 lowercase, 2 uppercase, 1 digit, and 1 special character
complexity: normal
length: 6
timeZone: UTC
emojiLogging: true
# javaOpts:
# pluginsDirectory:
# siteUrl:
session: {}
# maxSessionAge:
# sessionCookies:
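  # Example (hypothetical values; per the Metabase docs, MAX_SESSION_AGE is in minutes):
  # maxSessionAge: "20160"
  # sessionCookies: "true"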
livenessProbe:
initialDelaySeconds: 120
timeoutSeconds: 30
failureThreshold: 6
readinessProbe:
initialDelaySeconds: 30
timeoutSeconds: 3
periodSeconds: 5
service:
name: metabase
type: ClusterIP
externalPort: 80
internalPort: 3000
  # Used to set a fixed NodePort when service.type: NodePort.
nodePort:
annotations: {}
# Used to add custom annotations to the Service.
# service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0"
ingress:
enabled: false
  # Used to create an Ingress record (should be used with service.type: ClusterIP).
hosts:
# - metabase.domain.com
# The ingress path. Useful to host metabase on a subpath, such as `/metabase`.
path: /
labels:
# Used to add custom labels to the Ingress
  # Useful if, for example, you have multiple Ingress controllers and want them to bind to specific Ingresses
# traffic: internal
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
tls:
# Secrets must be manually created in the namespace.
# - secretName: metabase-tls
# hosts:
# - metabase.domain.com
# A custom log4j.properties file can be provided using a multiline YAML string.
# See https://github.com/metabase/metabase/blob/master/resources/log4j.properties
#
# log4jProperties:
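# Example (a minimal sketch; appender names are illustrative, syntax is standard log4j 1.x):
# log4jProperties: |-
#   log4j.rootLogger=INFO, console
#   log4j.appender.console=org.apache.log4j.ConsoleAppender
#   log4j.appender.console.layout=org.apache.log4j.PatternLayout
#   log4j.appender.console.layout.ConversionPattern=%d [%t] %-5p %c - %m%n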
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases the chances the chart runs in environments with
# few resources, such as Minikube. If you do want to specify resources, uncomment the
# following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Node labels for pod assignment
## ref: https://kubernetes.io/docs/user-guide/node-selection/
#
nodeSelector: {}
## Tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Affinity for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
|
janusgraph | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"janusgraph.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"janusgraph.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"janusgraph.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n",
"# configmap.yaml\n{{- if (empty .Values.configMapOverrideName) -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ .Release.Name }}-config\ndata:\n janusgraph.properties: |-\n gremlin.graph=org.janusgraph.core.JanusGraphFactory\n\n {{- $eshostname := (index .Values.properties \"index.search.hostname\") | default (printf \"%s-elasticsearch-client\" .Release.Name | trunc 63 | trimSuffix \"-\")}}\n {{- range $key, $val := .Values.properties }}\n {{- if ne $key \"index.search.hostname\" }}\n {{ $key }}={{ $val }}\n {{- end -}}\n {{ end }}\n\n {{- if eq (index .Values.properties \"index.search.backend\") \"elasticsearch\" \"es\"}}\n index.search.hostname={{ $eshostname }}\n {{- end }}\n gremlin-server.yaml: |-\n host: 0.0.0.0\n port: 8182\n scriptEvaluationTimeout: 30000\n channelizer: org.apache.tinkerpop.gremlin.server.channel.WsAndHttpChannelizer\n graphs: {\n graph: conf/gremlin-server/janusgraph.properties\n }\n plugins:\n - janusgraph.imports\n scriptEngines: {\n gremlin-groovy: {\n imports: [java.lang.Math],\n staticImports: [java.lang.Math.PI],\n scripts: [scripts/empty-sample.groovy]}}\n serializers:\n - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }}\n - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoLiteMessageSerializerV1d0, config: {ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }}\n - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { serializeResultToString: true }}\n - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerGremlinV1d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistryV1d0] }}\n - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerGremlinV2d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }}\n - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0, config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistryV1d0] }}\n processors:\n - { className: org.apache.tinkerpop.gremlin.server.op.session.SessionOpProcessor, config: { sessionTimeout: 28800000 }}\n - { className: org.apache.tinkerpop.gremlin.server.op.traversal.TraversalOpProcessor, config: { cacheExpirationTime: 600000, cacheMaxSize: 1000 }}\n metrics: {\n consoleReporter: {enabled: true, interval: 180000},\n csvReporter: {enabled: true, interval: 180000, fileName: /tmp/gremlin-server-metrics.csv},\n jmxReporter: {enabled: true},\n slf4jReporter: {enabled: true, interval: 180000},\n gangliaReporter: {enabled: false, interval: 180000, addressingMode: MULTICAST},\n graphiteReporter: {enabled: false, interval: 180000}}\n maxInitialLineLength: 4096\n maxHeaderSize: 8192\n maxChunkSize: 8192\n maxContentLength: 65536\n maxAccumulationBufferComponents: 1024\n resultIterationBatchSize: 64\n writeBufferLowWaterMark: 32768\n writeBufferHighWaterMark: 65536\n {{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"janusgraph.fullname\" . }}\n labels:\n app: {{ template \"janusgraph.name\" . }}\n chart: {{ template \"janusgraph.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"janusgraph.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"janusgraph.name\" . }}\n release: {{ .Release.Name }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n {{- if .Values.extraEnvs }}\n env:\n {{- with .Values.extraEnvs }}\n {{- toYaml . | trim | nindent 12 -}}\n {{- end }}\n {{- end }}\n ports:\n - containerPort: 8182\n protocol: TCP\n volumeMounts:\n - name: janusgraph-config\n mountPath: /janusgraph-config\n - name: local-db\n mountPath: {{ .Values.persistence.path }}\n livenessProbe:\n exec:\n command:\n - /tmp/healthcheck.py\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumes:\n - name: janusgraph-config\n configMap:\n name: {{ .Values.configMapOverrideName | default ( printf \"%s-config\" .Release.Name ) }}\n - name: local-db\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.existingClaim | default (include \"janusgraph.fullname\" .) }}\n {{- else }}\n emptyDir: {}\n {{- end -}}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n",
"# pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"janusgraph.fullname\" . }}\n labels:\n app: {{ template \"janusgraph.name\" . }}\n chart: {{ template \"janusgraph.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"janusgraph.fullname\" . }}\n labels:\n app: {{ template \"janusgraph.name\" . }}\n chart: {{ template \"janusgraph.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.service.serviceAnnotations }}\n annotations:\n{{ toYaml .Values.service.serviceAnnotations | indent 4 }}\n{{- end }}\n\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: 8182\n protocol: TCP\n selector:\n app: {{ template \"janusgraph.name\" . }}\n release: {{ .Release.Name }}\n"
] | # Default values for JanusGraph chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: gcr.io/cloud-solutions-images/janusgraph
tag: v2
pullPolicy: IfNotPresent
## The default configuration provided here uses attached storage for db and indexing.
## For a distributed deployment, increase the number of replicas and choose
## a distributed backend for storage and indexing below (e.g. hbase and elasticsearch)
replicaCount: 1
## set any pod specific resource requests here
resources: {}
extraEnvs: {}
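# Example (hypothetical variable; items are rendered verbatim under the container's env):
# extraEnvs:
#   - name: JAVA_OPTIONS
#     value: "-Xms512m -Xmx512m"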
service:
type: ClusterIP # Change to LoadBalancer if you plan to access JanusGraph outside k8s cluster
port: 8182
serviceAnnotations:
    # the following line is ignored unless using a LoadBalancer on GCP
# cloud.google.com/load-balancer-type: "Internal"
## This chart can deploy the Elasticsearch as a dependency.
## Use this section to provide elasticsearch chart specific values
elasticsearch:
deploy: false # change to true if you want to deploy Elasticsearch as a requirement along with this chart
rbac:
create: true # required for kubernetes >1.7
properties:
## use this section to add or adjust JanusGraph properties as needed
## all uncommented values in this section will be placed in the janusgraph.properties file
## see http://docs.janusgraph.org/0.2.0/storage-backends.html, choose the desired storage backend
  ## (e.g. berkeleyje, cassandra, cassandrathrift, cql, embeddedcassandra, hbase, inmemory)
## for Cloud Bigtable choose hbase
storage.backend: berkeleyje
storage.directory: /db/berkeley
## Google Cloud Bigtable specific configuration
## To use Cloud Bigtable, uncomment the following three lines and replace values
# storage.hbase.ext.google.bigtable.instance.id: <your-cbt-instance> # replace with your Cloud Bigtable Instance ID
# storage.hbase.ext.google.bigtable.project.id: <your-cbt-project> # replace with your Cloud Bigtable Project ID
# storage.hbase.ext.hbase.client.connection.impl: com.google.cloud.bigtable.hbase1_x.BigtableConnection # required for using Cloud Bigtable
## Indexing/Search backend configuration (see http://docs.janusgraph.org/latest/index-backends.html)
index.search.backend: lucene
index.search.directory: /db/searchindex
  ## choose the index backend you want to use: elasticsearch, es, solr or lucene (default "lucene")
  ## if you plan to use Elasticsearch, set index.search.backend above to "elasticsearch"
## Elasticsearch configuration (see http://docs.janusgraph.org/latest/elasticsearch.html)
## This property is only relevant if you are using Elasticsearch as your index backend.
# index.search.hostname: <your-es-hostname>
  ## Only set this if you plan to use an Elasticsearch deployment created outside of this chart.
  ## If you plan to deploy Elasticsearch as a requirement of this helm chart, leave this
  ## commented out or empty; it will be filled in automatically.
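  ## Example (a minimal sketch): to use the Elasticsearch dependency deployed by this chart,
  ## set elasticsearch.deploy to true above and switch the backend:
  # index.search.backend: elasticsearch
  ## index.search.hostname is then filled in automatically by the chart's ConfigMap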
## other common properties
# cache.db-cache: true
# cache.db-cache-clean-wait: 20
# cache.db-cache-time: 180000
# cache.db-cache-size: 0.5
## when using local storage and indexing, choose whether to persist data
persistence:
enabled: true # set to false if you are testing and do not want to persist data
path: /db
accessMode: ReadWriteOnce
size: 4Gi # adjust size as needed depending on the size of local storage and indexing required
existingClaim: # to reattach to previously used storage, provide an existing claim (or use --set)
## To make adjustments to janusgraph.properties and gremlin-server.yaml, provide a
## custom ConfigMap in your k8s cluster (using the Helm-created ConfigMap as a base).
configMapOverrideName: ""
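# Example (hypothetical name): configMapOverrideName: "my-janusgraph-config"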
# nodeSelector:
# beta.kubernetes.io/os: linux
# beta.kubernetes.io/arch: amd64
|
drupal | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"drupal.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"drupal.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"drupal.mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"drupal.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the proper Drupal image name\n*/}}\n{{- define \"drupal.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the metrics image)\n*/}}\n{{- define \"drupal.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"drupal.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range 
.Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"drupal.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.drupal.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.drupal.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.drupal.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.drupal.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.drupal.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.drupal.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"drupal.deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: {{ template \"drupal.deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"drupal.fullname\" . }}\n labels:\n app: {{ template \"drupal.fullname\" . }}\n chart: {{ template \"drupal.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"drupal.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n replicas: 1\n template:\n metadata:\n labels:\n app: {{ template \"drupal.fullname\" . }}\n chart: {{ template \"drupal.chart\" . }}\n release: \"{{ .Release.Name }}\"\n{{- if or .Values.podAnnotations .Values.metrics.enabled }}\n annotations:\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n {{- if .Values.metrics.podAnnotations }}\n{{ toYaml .Values.metrics.podAnnotations | indent 8 }}\n {{- end }}\n{{- end }}\n spec:\n{{- include \"drupal.imagePullSecrets\" . | indent 6 }}\n hostAliases:\n - ip: \"127.0.0.1\"\n hostnames:\n - \"status.localhost\"\n containers:\n - name: drupal\n image: {{ template \"drupal.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n - name: ALLOW_EMPTY_PASSWORD\n {{- if .Values.allowEmptyPassword }}\n value: \"yes\"\n {{- else }}\n value: \"no\"\n {{- end }}\n - name: MARIADB_HOST\n {{- if .Values.mariadb.enabled }}\n value: {{ template \"drupal.mariadb.fullname\" . }}\n {{- else }}\n value: {{ default \"\" .Values.externalDatabase.host | quote }}\n {{- end }}\n - name: MARIADB_PORT_NUMBER\n value: \"3306\"\n - name: DRUPAL_DATABASE_NAME\n {{- if .Values.mariadb.enabled }}\n value: {{ default \"\" .Values.mariadb.db.name | quote }}\n {{- else }}\n value: {{ default \"\" .Values.externalDatabase.database | quote }}\n {{- end }}\n - name: DRUPAL_DATABASE_USER\n {{- if .Values.mariadb.enabled }}\n value: {{ default \"\" .Values.mariadb.db.user | quote }}\n {{- else }}\n value: {{ default \"\" .Values.externalDatabase.user | quote }}\n {{- end }}\n - name: DRUPAL_DATABASE_PASSWORD\n {{- if .Values.mariadb.enabled }}\n valueFrom:\n secretKeyRef:\n name: {{ template \"drupal.mariadb.fullname\" . }}\n key: mariadb-password\n {{- else }}\n value: {{ default \"\" .Values.externalDatabase.password | quote }}\n {{- end }}\n {{- if .Values.drupalProfile }}\n - name: DRUPAL_PROFILE\n value: {{ .Values.drupalProfile | quote }}\n {{- end }}\n {{- if .Values.drupalUsername }}\n - name: DRUPAL_USERNAME\n value: {{ .Values.drupalUsername | quote }}\n {{- end }}\n - name: DRUPAL_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"drupal.fullname\" . }}\n key: drupal-password\n {{- if .Values.drupalEmail }}\n - name: DRUPAL_EMAIL\n value: {{ .Values.drupalEmail | quote }}\n {{- end }}\n{{- if .Values.extraVars }}\n{{ toYaml .Values.extraVars | indent 8 }}\n{{- end }}\n ports:\n - name: http\n containerPort: 80\n - name: https\n containerPort: 443\n livenessProbe:\n{{ toYaml .Values.livenessProbe | indent 10 }}\n readinessProbe:\n{{ toYaml .Values.readinessProbe | indent 10 }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n - name: drupal-data\n mountPath: {{ .Values.volumeMounts.drupal.mountPath }}\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"drupal.metrics.image\" . 
}}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n command: [ '/bin/apache_exporter', '-scrape_uri', 'http://status.localhost:80/server-status/?auto']\n ports:\n - name: metrics\n containerPort: 9117\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 15\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 5\n timeoutSeconds: 1\n resources:\n {{ toYaml .Values.metrics.resources | indent 10 }}\n{{- end }}\n volumes:\n - name: drupal-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ .Values.persistence.drupal.existingClaim | default (printf \"%s-drupal\" (include \"drupal.fullname\" .)) }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# drupal-pv.yaml\n{{- if and .Values.persistence.enabled .Values.persistence.drupal.hostPath (not .Values.persistence.drupal.existingClaim) -}}\napiVersion: v1\nkind: PersistentVolume\nmetadata:\n name: {{ template \"drupal.fullname\" . }}-drupal\nspec:\n accessModes:\n - {{ .Values.persistence.drupal.accessMode | quote }}\n capacity:\n storage: {{ .Values.persistence.drupal.size | quote }}\n hostPath:\n path: {{ .Values.persistence.drupal.hostPath | quote }}\n{{- end -}}\n",
"# drupal-pvc.yaml\n{{- if and .Values.persistence.enabled (not .Values.persistence.drupal.existingClaim) -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"drupal.fullname\" . }}-drupal\n labels:\n app: {{ template \"drupal.fullname\" . }}\n chart: {{ template \"drupal.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n {{- if .Values.persistence.drupal.hostPath }}\n storageClassName: \"\"\n {{- end }}\n accessModes:\n - {{ .Values.persistence.drupal.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.drupal.size | quote }}\n {{ include \"drupal.storageClass\" . }}\n{{- end -}}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"drupal.fullname\" . }}\n labels:\n app: {{ template \"drupal.fullname\" . }}\n chart: {{ template \"drupal.chart\" . }}\n release: {{ $.Release.Name }}\n heritage: {{ $.Release.Service }}\n annotations:\n {{- if .Values.ingress.certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- if .Values.ingress.hostname }}\n - host: {{ .Values.ingress.hostname }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ template \"drupal.fullname\" $ }}\n servicePort: http\n {{- end }}\n {{- range .Values.ingress.hosts }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: \"{{ template \"drupal.fullname\" $ }}\"\n servicePort: http\n {{- end }}\n {{- if .Values.ingress.tls }}\n tls: {{- toYaml .Values.ingress.tls | nindent 4 }}\n {{- end }}\n{{- end }}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"drupal.fullname\" . }}\n labels:\n app: {{ template \"drupal.fullname\" . }}\n chart: {{ template \"drupal.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\ntype: Opaque\ndata:\n {{ if .Values.drupalPassword }}\n drupal-password: {{ .Values.drupalPassword | b64enc | quote }}\n {{ else }}\n drupal-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"drupal.fullname\" . }}\n labels:\n app: {{ template \"drupal.fullname\" . }}\n chart: {{ template \"drupal.chart\" . }}\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.service.type }}\n {{- if (or (eq .Values.service.type \"LoadBalancer\") (eq .Values.service.type \"NodePort\")) }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}\n {{- end }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n targetPort: http\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n - name: https\n port: {{ .Values.service.httpsPort }}\n targetPort: https\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.https)))}}\n nodePort: {{ .Values.service.nodePorts.https }}\n {{- end }}\n selector:\n app: {{ template \"drupal.fullname\" . }}\n",
"# tls-secrets.yaml\n{{- if .Values.ingress.enabled }}\n{{- range .Values.ingress.secrets }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ .name }}\n labels:\n app: {{ template \"drupal.fullname\" $ }}\n chart: {{ template \"drupal.chart\" $ }}\n release: {{ $.Release.Name }}\n heritage: {{ $.Release.Service }}\ntype: kubernetes.io/tls\ndata:\n tls.crt: {{ .certificate | b64enc }}\n tls.key: {{ .key | b64enc }}\n{{- end }}\n{{- end }}\n"
] | ## Global Docker image parameters
## Please note that this will override the image parameters (including those of chart dependencies) configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami Drupal image version
## ref: https://hub.docker.com/r/bitnami/drupal/tags/
##
image:
registry: docker.io
repository: bitnami/drupal
tag: 8.8.3-debian-10-r1
  ## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override drupal.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override drupal.fullname template
##
# fullnameOverride:
## Installation Profile
## ref: https://github.com/bitnami/bitnami-docker-drupal#configuration
##
drupalProfile: standard
## User of the application
## ref: https://github.com/bitnami/bitnami-docker-drupal#configuration
##
drupalUsername: user
## Application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/bitnami-docker-drupal#configuration
##
# drupalPassword:
## Admin email
## ref: https://github.com/bitnami/bitnami-docker-drupal#configuration
##
drupalEmail: user@example.com
## Set to `yes` to allow the container to be started with blank passwords
## ref: https://github.com/bitnami/bitnami-docker-drupal#environment-variables
allowEmptyPassword: "yes"
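## Note that the deployment template only tests this value for truthiness, so the
## string "no" would still render ALLOW_EMPTY_PASSWORD=yes; use a boolean false
## (or an empty value) to disable the bypass:
# allowEmptyPassword: false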
## External database configuration
##
externalDatabase:
## Database host
host: localhost
  ## Database port
port: 3306
## Database user
user: bn_drupal
## Database password
password: ""
## Database name
database: bitnami_drupal
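## For example (illustrative hostname and password), the bundled MariaDB can be
## disabled in favour of the external database above at install time:
## helm install --name my-release \
##   --set mariadb.enabled=false,externalDatabase.host=mysql.example.com,externalDatabase.password=secret \
##   stable/drupal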
##
## MariaDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml
##
mariadb:
  ## Whether to deploy a MariaDB server to satisfy the application's database
  ## requirements. To use an external database, set this to false and configure
  ## the externalDatabase parameters above
enabled: true
## Disable MariaDB replication
replication:
enabled: false
## Create a database and a database user
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run
##
db:
name: bitnami_drupal
user: bn_drupal
    ## If the password is not specified, MariaDB will generate a random password
##
# password:
## MariaDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run
##
# rootUser:
# password:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
master:
persistence:
enabled: true
## mariadb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Kubernetes configuration
## For minikube, set this to NodePort, elsewhere use LoadBalancer
## Use ClusterIP if your setup includes an ingress controller
##
service:
type: LoadBalancer
# HTTP Port
port: 80
# HTTPS Port
httpsPort: 443
##
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
## https: <to set explicitly, choose port between 30000-32767>
nodePorts:
http: ""
https: ""
## Enable client source IP preservation
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
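  ## For example, set this to Local to preserve the client source IP on
  ## NodePort/LoadBalancer services (traffic may then be spread unevenly across nodes):
  # externalTrafficPolicy: Local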
## Configure the ingress resource that allows you to access the
## Drupal installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
##
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
##
certManager: false
## When the ingress is enabled, a host pointing to this will be created
##
hostname: drupal.local
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
##
annotations: {}
# kubernetes.io/ingress.class: nginx
## The list of additional hostnames to be covered with this ingress record.
## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
## hosts:
## - name: drupal.local
## path: /
## The tls configuration for the ingress
## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
## tls:
## - hosts:
## - drupal.local
## secretName: drupal.local-tls
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
  ## The certificate should start with -----BEGIN CERTIFICATE----- and the key
  ## with -----BEGIN RSA PRIVATE KEY-----
  ##
  ## The name should match a secretName referenced in the tls section above
  ## If you're using cert-manager, this is not needed, as it will create the secret for you if one does not already exist
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: drupal.local-tls
# key:
# certificate:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
drupal:
## drupal data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## A manually managed Persistent Volume Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
##
# existingClaim:
    ## If defined, a hostPath-backed PersistentVolume will be created and used
    ## for the drupal-data volume.
    ## Requires persistence.enabled: true
    ## Requires persistence.drupal.existingClaim: nil|false
    ## Default: nil.
##
# hostPath:
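    ## e.g. (illustrative path; suitable for single-node or development clusters
    ## only, since hostPath ties the data to one node):
    # hostPath: /data/drupal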
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 512Mi
cpu: 300m
## Configure volume mounts. This is useful for images whose data mount paths
## differ from the default.
##
volumeMounts:
drupal:
mountPath: /bitnami/drupal
## Pass extra environment variables to the Drupal container.
##
# extraVars:
# - name: EXTRA_VAR_1
# value: extra-var-value-1
# - name: EXTRA_VAR_2
# value: extra-var-value-2
## Configure liveness and readiness probes.
## Drupal core exposes /user/login to unauthenticated requests, making it a good
## default liveness and readiness path. However, that may not hold if the image
## value is overridden to an image containing a module that alters that route, or
## to an image that does not auto-install Drupal.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
##
livenessProbe:
httpGet:
path: /user/login
port: http
initialDelaySeconds: 120
readinessProbe:
httpGet:
path: /user/login
port: http
initialDelaySeconds: 30
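## Both probe blocks are rendered verbatim with toYaml, so any standard probe
## field can be overridden here, e.g. (illustrative values):
# livenessProbe:
#   httpGet:
#     path: /user/login
#     port: http
#   initialDelaySeconds: 120
#   timeoutSeconds: 5
#   failureThreshold: 6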
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.7.0-debian-10-r39
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
  ## Metrics exporter pod annotations and labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9117"
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
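  ## e.g. (illustrative values):
  # resources:
  #   requests:
  #     cpu: 100m
  #     memory: 128Mi
  #   limits:
  #     cpu: 200m
  #     memory: 256Mi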
|
risk-advisor | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"risk-advisor.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"risk-advisor.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"risk-advisor.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"risk-advisor.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: \"{{ template \"risk-advisor.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: \"{{ template \"risk-advisor.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n ports:\n - containerPort: {{ .Values.service.targetPort }}\n livenessProbe:\n httpGet:\n path: /healthz\n port: {{ .Values.service.targetPort }}\n initialDelaySeconds: 3\n periodSeconds: 3\n readinessProbe:\n httpGet:\n path: /healthz\n port: {{ .Values.service.targetPort }}\n initialDelaySeconds: 3\n periodSeconds: 3\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"risk-advisor.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n app: \"{{ template \"risk-advisor.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.service.type }}\n ports:\n - name: http\n protocol: TCP\n port: {{ .Values.service.port }}\n targetPort: {{ .Values.service.targetPort }}\n nodePort: {{ .Values.service.nodePort }}\n selector:\n app: \"{{ template \"risk-advisor.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n"
] | # Default values for risk-advisor.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: pposkrobko/risk-advisor
tag: v1.0.0
pullPolicy: IfNotPresent
service:
type: NodePort
port: 9997
targetPort: 9997
nodePort: 31111
# resources:
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
|
nextcloud | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"nextcloud.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"nextcloud.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"nextcloud.mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n\n{{/*\nCreate a default fully qualified redis app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"nextcloud.redis.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"redis\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"nextcloud.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# config.yaml\n{{- if .Values.nextcloud.configs -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"nextcloud.fullname\" . }}-config\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n helm.sh/chart: {{ include \"nextcloud.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ndata:\n{{- range $key, $value := .Values.nextcloud.configs }}\n {{ $key }}: |-\n{{ $value | indent 4 }}\n{{- end }}\n{{- if .Values.nextcloud.defaultConfigs }}\n{{- if index .Values.nextcloud.defaultConfigs \".htaccess\" }}\n .htaccess: |-\n # line below if for Apache 2.4\n <ifModule mod_authz_core.c>\n Require all denied\n </ifModule>\n # line below if for Apache 2.2\n <ifModule !mod_authz_core.c>\n deny from all\n </ifModule>\n # section for Apache 2.2 and 2.4\n <ifModule mod_autoindex.c>\n IndexIgnore *\n </ifModule>\n{{- end }}\n{{- if index .Values.nextcloud.defaultConfigs \"redis.config.php\" }}\n redis.config.php: |-\n <?php\n if (getenv('REDIS_HOST')) {\n $CONFIG = array (\n 'memcache.distributed' => '\\OC\\Memcache\\Redis',\n 'memcache.locking' => '\\OC\\Memcache\\Redis',\n 'redis' => array(\n 'host' => getenv('REDIS_HOST'),\n 'port' => getenv('REDIS_HOST_PORT') ?: 6379,\n ),\n );\n }\n{{- end }}\n{{- if index .Values.nextcloud.defaultConfigs \"apache-pretty-urls.config.php\" }}\n apache-pretty-urls.config.php: |-\n <?php\n $CONFIG = array (\n 'htaccess.RewriteBase' => '/',\n );\n{{- end }}\n{{- if index .Values.nextcloud.defaultConfigs \"apcu.config.php\" }}\n apcu.config.php: |-\n <?php\n $CONFIG = array (\n 'memcache.local' => '\\OC\\Memcache\\APCu',\n );\n{{- end }}\n{{- if index .Values.nextcloud.defaultConfigs \"apps.config.php\" }}\n apps.config.php: |-\n <?php\n $CONFIG = array (\n \"apps_paths\" => array (\n 0 => array (\n \"path\" => OC::$SERVERROOT.\"/apps\",\n \"url\" => \"/apps\",\n \"writable\" => false,\n ),\n 1 => array (\n \"path\" => OC::$SERVERROOT.\"/custom_apps\",\n \"url\" => \"/custom_apps\",\n \"writable\" => true,\n ),\n ),\n );\n{{- end }}\n{{- if index .Values.nextcloud.defaultConfigs \"autoconfig.php\" }}\n autoconfig.php: |-\n <?php\n $autoconfig_enabled = false;\n if (getenv('SQLITE_DATABASE')) {\n $AUTOCONFIG[\"dbtype\"] = \"sqlite\";\n $AUTOCONFIG[\"dbname\"] = getenv('SQLITE_DATABASE');\n $autoconfig_enabled = true;\n } elseif (getenv('MYSQL_DATABASE') && getenv('MYSQL_USER') && getenv('MYSQL_PASSWORD') && getenv('MYSQL_HOST')) {\n $AUTOCONFIG[\"dbtype\"] = \"mysql\";\n $AUTOCONFIG[\"dbname\"] = getenv('MYSQL_DATABASE');\n $AUTOCONFIG[\"dbuser\"] = getenv('MYSQL_USER');\n $AUTOCONFIG[\"dbpass\"] = getenv('MYSQL_PASSWORD');\n $AUTOCONFIG[\"dbhost\"] = getenv('MYSQL_HOST');\n $autoconfig_enabled = true;\n } elseif (getenv('POSTGRES_DB') && getenv('POSTGRES_USER') && getenv('POSTGRES_PASSWORD') && getenv('POSTGRES_HOST')) {\n $AUTOCONFIG[\"dbtype\"] = \"pgsql\";\n $AUTOCONFIG[\"dbname\"] = getenv('POSTGRES_DB');\n $AUTOCONFIG[\"dbuser\"] = getenv('POSTGRES_USER');\n $AUTOCONFIG[\"dbpass\"] = getenv('POSTGRES_PASSWORD');\n $AUTOCONFIG[\"dbhost\"] = getenv('POSTGRES_HOST');\n $autoconfig_enabled = true;\n }\n if ($autoconfig_enabled) {\n if (getenv('NEXTCLOUD_TABLE_PREFIX')) {\n $AUTOCONFIG[\"dbtableprefix\"] = getenv('NEXTCLOUD_TABLE_PREFIX');\n }\n $AUTOCONFIG[\"directory\"] = getenv('NEXTCLOUD_DATA_DIR') ?: \"/var/www/html/data\";\n }\n{{- end }}\n{{- if index .Values.nextcloud.defaultConfigs \"smtp.config.php\" }}\n smtp.config.php: |-\n <?php\n if (getenv('SMTP_HOST') && 
getenv('MAIL_FROM_ADDRESS') && getenv('MAIL_DOMAIN')) {\n $CONFIG = array (\n 'mail_smtpmode' => 'smtp',\n 'mail_smtphost' => getenv('SMTP_HOST'),\n 'mail_smtpport' => getenv('SMTP_PORT') ?: (getenv('SMTP_SECURE') ? 465 : 25),\n 'mail_smtpsecure' => getenv('SMTP_SECURE') ?: '',\n 'mail_smtpauth' => getenv('SMTP_NAME') && getenv('SMTP_PASSWORD'),\n 'mail_smtpauthtype' => getenv('SMTP_AUTHTYPE') ?: 'LOGIN',\n 'mail_smtpname' => getenv('SMTP_NAME') ?: '',\n 'mail_smtppassword' => getenv('SMTP_PASSWORD') ?: '',\n 'mail_from_address' => getenv('MAIL_FROM_ADDRESS'),\n 'mail_domain' => getenv('MAIL_DOMAIN'),\n );\n }\n{{- end }}\n{{- end }}\n{{- end }}\n",
"# cronjob.yaml\n{{- if .Values.cronjob.enabled }}\napiVersion: batch/v1beta1\nkind: CronJob\nmetadata:\n name: {{ template \"nextcloud.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n helm.sh/chart: {{ include \"nextcloud.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n annotations:\n{{ toYaml .Values.cronjob.annotations | indent 4 }}\nspec:\n schedule: \"{{ .Values.cronjob.schedule }}\"\n concurrencyPolicy: Forbid\n {{- with .Values.cronjob.failedJobsHistoryLimit }}\n failedJobsHistoryLimit: {{ . }}\n {{- end }}\n {{- with .Values.cronjob.successfulJobsHistoryLimit }}\n successfulJobsHistoryLimit: {{ . }}\n {{- end }}\n jobTemplate:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n spec:\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n spec:\n restartPolicy: Never\n {{- if (default .Values.image.pullSecrets .Values.cronjob.image.pullSecrets) }}\n imagePullSecrets:\n {{- range (default .Values.image.pullSecrets .Values.cronjob.image.pullSecrets) }}\n - name: {{ . }}\n {{- end }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ default .Values.image.repository .Values.cronjob.image.repository }}:{{ default .Values.image.tag .Values.cronjob.image.tag }}\"\n imagePullPolicy: {{ default .Values.image.pullPolicy .Values.cronjob.image.pullPolicy }}\n command: [ \"curl\" ]\n args:\n {{- if .Values.cronjob.curlInsecure }}\n - \"-k\"\n {{- end }}\n - \"--fail\"\n - \"-L\"\n {{- if .Values.ingress.tls }}\n - \"https://{{ .Values.nextcloud.host }}/cron.php\"\n {{- else }}\n - \"http://{{ .Values.nextcloud.host }}/cron.php\"\n {{- end }}\n resources:\n{{ toYaml (default .Values.resources .Values.cronjob.resources) | indent 16 }}\n {{- with (default .Values.nodeSelector .Values.cronjob.nodeSelector) }}\n nodeSelector:\n{{ toYaml . | indent 12 }}\n {{- end }}\n {{- with (default .Values.affinity .Values.cronjob.affinity) }}\n affinity:\n{{ toYaml . | indent 12 }}\n {{- end }}\n {{- with (default .Values.tolerations .Values.cronjob.tolerations) }}\n tolerations:\n{{ toYaml . | indent 12 }}:\n {{- end }}\n{{- end }}\n",
"# db-secret.yaml\n{{- if or .Values.mariadb.enabled .Values.externalDatabase.enabled }}\n{{- if not .Values.externalDatabase.existingSecret.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ printf \"%s-%s\" .Release.Name \"db\" }}\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n helm.sh/chart: {{ include \"nextcloud.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ntype: Opaque\ndata:\n {{- if .Values.mariadb.enabled }}\n db-password: {{ default \"\" .Values.mariadb.db.password | b64enc | quote }}\n db-username: {{ default \"\" .Values.mariadb.db.user | b64enc | quote }}\n {{- else }}\n db-password: {{ default \"\" .Values.externalDatabase.password | b64enc | quote }}\n db-username: {{ default \"\" .Values.externalDatabase.user | b64enc | quote }}\n {{- end }}\n{{- end }}\n{{- end }}",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"nextcloud.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n helm.sh/chart: {{ include \"nextcloud.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n {{- if .Values.deploymentAnnotations }}\n annotations:\n{{ toYaml .Values.deploymentAnnotations | indent 4 }}\n {{- end }}\nspec:\n replicas: {{ .Values.replicaCount }}\n strategy:\n{{ toYaml .Values.nextcloud.strategy | indent 4 }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n {{- if .Values.redis.enabled }}\n {{ template \"nextcloud.redis.fullname\" . }}-client: \"true\"\n {{- end }}\n {{- if .Values.podAnnotations }}\n annotations:\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n spec:\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n {{- end}}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n {{- if .Values.lifecycle }}\n lifecycle:\n {{- if .Values.lifecycle.postStartCommand }}\n postStart:\n exec:\n command:\n {{- toYaml .Values.lifecycle.postStartCommand | nindent 16 -}}\n {{- end }}\n {{- if .Values.lifecycle.preStopCommand }}\n preStop:\n exec:\n command:\n {{- toYaml .Values.lifecycle.preStopCommand | nindent 16 -}}\n {{- end }}\n {{- end }}\n env:\n {{- if .Values.internalDatabase.enabled }}\n - name: SQLITE_DATABASE\n value: {{ .Values.internalDatabase.name | quote }}\n {{- else if .Values.mariadb.enabled }}\n - name: MYSQL_HOST\n value: {{ template \"nextcloud.mariadb.fullname\" . 
}}\n - name: MYSQL_DATABASE\n value: {{ .Values.mariadb.db.name | quote }}\n - name: MYSQL_USER\n valueFrom:\n secretKeyRef:\n name: {{ .Values.externalDatabase.existingSecret.secretName | default (printf \"%s-%s\" .Release.Name \"db\") }}\n key: {{ .Values.externalDatabase.existingSecret.usernameKey | default \"db-username\" }}\n - name: MYSQL_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ .Values.externalDatabase.existingSecret.secretName | default (printf \"%s-%s\" .Release.Name \"db\") }}\n key: {{ .Values.externalDatabase.existingSecret.passwordKey | default \"db-password\" }}\n {{- else }}\n {{- if eq .Values.externalDatabase.type \"postgresql\" }}\n - name: POSTGRES_HOST\n value: {{ .Values.externalDatabase.host | quote }}\n - name: POSTGRES_DB\n value: {{ .Values.externalDatabase.database | quote }}\n - name: POSTGRES_USER\n valueFrom:\n secretKeyRef:\n name: {{ .Values.externalDatabase.existingSecret.secretName | default (printf \"%s-%s\" .Release.Name \"db\") }}\n key: {{ .Values.externalDatabase.existingSecret.usernameKey | default \"db-username\" }}\n - name: POSTGRES_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ .Values.externalDatabase.existingSecret.secretName | default (printf \"%s-%s\" .Release.Name \"db\") }}\n key: {{ .Values.externalDatabase.existingSecret.passwordKey | default \"db-password\" }}\n {{- else }}\n - name: MYSQL_HOST\n value: {{ .Values.externalDatabase.host | quote }}\n - name: MYSQL_DATABASE\n value: {{ .Values.externalDatabase.database | quote }}\n - name: MYSQL_USER\n valueFrom:\n secretKeyRef:\n name: {{ .Values.externalDatabase.existingSecret.secretName | default (printf \"%s-%s\" .Release.Name \"db\") }}\n key: {{ .Values.externalDatabase.existingSecret.usernameKey | default \"db-username\" }}\n - name: MYSQL_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ .Values.externalDatabase.existingSecret.secretName | default (printf \"%s-%s\" .Release.Name \"db\") }}\n key: {{ .Values.externalDatabase.existingSecret.passwordKey | default \"db-password\" }}\n {{- end }}\n {{- end }}\n - name: NEXTCLOUD_ADMIN_USER\n valueFrom:\n secretKeyRef:\n name: {{ template \"nextcloud.fullname\" . }}\n key: nextcloud-username\n - name: NEXTCLOUD_ADMIN_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"nextcloud.fullname\" . }}\n key: nextcloud-password\n - name: NEXTCLOUD_TRUSTED_DOMAINS\n value: {{ .Values.nextcloud.host }}\n {{- if ne (int .Values.nextcloud.update) 0 }}\n - name: NEXTCLOUD_UPDATE\n value: {{ .Values.nextcloud.update | quote }}\n {{- end }}\n - name: NEXTCLOUD_DATA_DIR\n value: {{ .Values.nextcloud.datadir | quote }}\n {{- if .Values.nextcloud.tableprefix }}\n - name: NEXTCLOUD_TABLE_PREFIX\n value: {{ .Values.nextcloud.tableprefix | quote }}\n {{- end }}\n {{- if .Values.nextcloud.mail.enabled }}\n - name: MAIL_FROM_ADDRESS\n value: {{ .Values.nextcloud.mail.fromAddress | quote }}\n - name: MAIL_DOMAIN\n value: {{ .Values.nextcloud.mail.domain | quote }}\n - name: SMTP_HOST\n value: {{ .Values.nextcloud.mail.smtp.host | quote }}\n - name: SMTP_SECURE\n value: {{ .Values.nextcloud.mail.smtp.secure | quote }}\n - name: SMTP_PORT\n value: {{ .Values.nextcloud.mail.smtp.port | quote }}\n - name: SMTP_AUTHTYPE\n value: {{ .Values.nextcloud.mail.smtp.authtype | quote }}\n - name: SMTP_NAME\n valueFrom:\n secretKeyRef:\n name: {{ template \"nextcloud.fullname\" . }}\n key: smtp-username\n - name: SMTP_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"nextcloud.fullname\" . 
}}\n key: smtp-password\n {{- end }}\n {{- if .Values.redis.enabled }}\n - name: REDIS_HOST\n value: {{ template \"nextcloud.redis.fullname\" . }}-master\n - name: REDIS_HOST_PORT\n value: {{ .Values.redis.redisPort | quote }}\n {{- end }}\n {{- if .Values.nextcloud.extraEnv }}\n{{ toYaml .Values.nextcloud.extraEnv | indent 8 }}\n {{- end }}\n {{- if not .Values.nginx.enabled }}\n ports:\n - name: http\n containerPort: 80\n protocol: TCP\n {{- end }}\n {{- if and .Values.livenessProbe.enabled (not .Values.nginx.enabled) }}\n livenessProbe:\n httpGet:\n path: /status.php\n port: http\n httpHeaders:\n - name: Host\n value: {{ .Values.nextcloud.host | quote }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if and .Values.readinessProbe.enabled (not .Values.nginx.enabled) }}\n readinessProbe:\n httpGet:\n path: /status.php\n port: http\n httpHeaders:\n - name: Host\n value: {{ .Values.nextcloud.host | quote }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n - name: nextcloud-data\n mountPath: /var/www/\n subPath: {{ ternary \"root\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"root\") (empty .Values.nextcloud.persistence.subPath) }}\n - name: nextcloud-data\n mountPath: /var/www/html\n subPath: {{ ternary \"html\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"html\") (empty .Values.nextcloud.persistence.subPath) }}\n - name: nextcloud-data\n mountPath: {{ .Values.nextcloud.datadir }}\n subPath: {{ ternary \"data\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"data\") (empty .Values.nextcloud.persistence.subPath) }}\n - name: nextcloud-data\n mountPath: /var/www/html/config\n subPath: {{ ternary \"config\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"config\") (empty .Values.nextcloud.persistence.subPath) }}\n - name: nextcloud-data\n mountPath: /var/www/html/custom_apps\n subPath: {{ ternary \"custom_apps\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"custom_apps\") (empty .Values.nextcloud.persistence.subPath) }}\n - name: nextcloud-data\n mountPath: /var/www/tmp\n subPath: {{ ternary \"tmp\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"tmp\") (empty .Values.nextcloud.persistence.subPath) }}\n - name: nextcloud-data\n mountPath: /var/www/html/themes\n subPath: {{ ternary \"themes\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"themes\") (empty .Values.nextcloud.persistence.subPath) }}\n {{- range $key, $value := .Values.nextcloud.configs }}\n - name: nextcloud-config\n mountPath: /var/www/html/config/{{ $key }}\n subPath: {{ $key }}\n {{- end }}\n {{- if .Values.nextcloud.configs }}\n {{- range $key, $value := .Values.nextcloud.defaultConfigs }}\n {{- if $value }}\n - name: nextcloud-config\n mountPath: /var/www/html/config/{{ $key }}\n subPath: {{ $key }}\n {{- end }}\n {{- end }}\n {{- end }}\n {{- range $key, $value := .Values.nextcloud.phpConfigs }}\n - 
name: nextcloud-phpconfig\n mountPath: /usr/local/etc/php/conf.d/{{ $key }}\n subPath: {{ $key }}\n {{- end }}\n {{- if .Values.nextcloud.extraVolumeMounts }}\n{{ toYaml .Values.nextcloud.extraVolumeMounts | indent 8 }}\n {{- end }}\n {{- if .Values.nginx.enabled }}\n - name: {{ .Chart.Name }}-nginx\n image: \"{{ .Values.nginx.image.repository }}:{{ .Values.nginx.image.tag }}\"\n imagePullPolicy: {{ .Values.nginx.image.pullPolicy }}\n ports:\n - name: http\n containerPort: 80\n protocol: TCP\n {{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /status.php\n port: http\n httpHeaders:\n - name: Host\n value: {{ .Values.nextcloud.host | quote }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /status.php\n port: http\n httpHeaders:\n - name: Host\n value: {{ .Values.nextcloud.host | quote }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n {{- end }}\n resources:\n{{ toYaml .Values.nginx.resources | indent 10 }}\n volumeMounts:\n - name: nextcloud-data\n mountPath: /var/www/\n subPath: {{ ternary \"root\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"root\") (empty .Values.nextcloud.persistence.subPath) }}\n - name: nextcloud-data\n mountPath: /var/www/html\n subPath: {{ ternary \"html\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"html\") (empty .Values.nextcloud.persistence.subPath) }}\n - name: nextcloud-data\n mountPath: {{ .Values.nextcloud.datadir }}\n subPath: {{ ternary \"data\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"data\") (empty .Values.nextcloud.persistence.subPath) }}\n - name: nextcloud-data\n mountPath: /var/www/html/config\n subPath: {{ ternary \"config\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"config\") (empty .Values.nextcloud.persistence.subPath) }}\n - name: nextcloud-data\n mountPath: /var/www/html/custom_apps\n subPath: {{ ternary \"custom_apps\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"custom_apps\") (empty .Values.nextcloud.persistence.subPath) }}\n - name: nextcloud-data\n mountPath: /var/www/tmp\n subPath: {{ ternary \"tmp\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"tmp\") (empty .Values.nextcloud.persistence.subPath) }}\n - name: nextcloud-data\n mountPath: /var/www/html/themes\n subPath: {{ ternary \"themes\" (printf \"%s/%s\" .Values.nextcloud.persistence.subPath \"themes\") (empty .Values.nextcloud.persistence.subPath) }}\n - name: nextcloud-nginx-config\n mountPath: /etc/nginx/nginx.conf\n subPath: nginx.conf\n {{- end }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n {{- toYaml . | nindent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n {{- toYaml . 
| nindent 8 }}\n {{- end }}\n volumes:\n - name: nextcloud-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template \"nextcloud.fullname\" . }}-nextcloud{{- end }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- if .Values.nextcloud.configs }}\n - name: nextcloud-config\n configMap:\n name: {{ template \"nextcloud.fullname\" . }}-config\n {{- end }}\n {{- if .Values.nextcloud.phpConfigs }}\n - name: nextcloud-phpconfig\n configMap:\n name: {{ template \"nextcloud.fullname\" . }}-phpconfig\n {{- end }}\n {{- if .Values.nginx.enabled }}\n - name: nextcloud-nginx-config\n configMap:\n name: {{ template \"nextcloud.fullname\" . }}-nginxconfig\n {{- end }}\n {{- if .Values.nextcloud.extraVolumes }}\n{{ toYaml .Values.nextcloud.extraVolumes | indent 6 }}\n {{- end }}\n {{- if .Values.nginx.enabled }}\n # Will mount configuration files as www-data (id: 82) for nextcloud\n securityContext:\n fsGroup: 82\n {{- else }}\n # Will mount configuration files as www-data (id: 33) for nextcloud\n securityContext:\n fsGroup: 33\n {{- end }}\n",
"# hpa.yaml\n{{- if .Values.hpa.enabled -}}\napiVersion: autoscaling/v1\nkind: HorizontalPodAutoscaler\nmetadata:\n name: {{ template \"nextcloud.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n helm.sh/chart: {{ include \"nextcloud.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n scaleTargetRef:\n kind: Deployment\n apiVersion: apps/v1\n name: {{ template \"nextcloud.fullname\" . }}\n minReplicas: {{ .Values.hpa.minPods }}\n maxReplicas: {{ .Values.hpa.maxPods }}\n targetCPUUtilizationPercentage: {{ .Values.hpa.cputhreshold }}\n{{- end }}",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: networking.k8s.io/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"nextcloud.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n helm.sh/chart: {{ include \"nextcloud.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- if .Values.ingress.labels }}\n{{ toYaml .Values.ingress.labels | indent 4 }}\n{{- end }}\n{{- if .Values.ingress.annotations }}\n annotations:\n{{ toYaml .Values.ingress.annotations | indent 4 }}\n{{- end }}\nspec:\n rules:\n - host: {{ .Values.nextcloud.host }}\n http:\n paths:\n - backend:\n serviceName: {{ template \"nextcloud.fullname\" . }}\n servicePort: {{ .Values.service.port }}\n{{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n{{- end -}}\n{{- end }}\n",
"# metrics-deployment.yaml\n{{- if .Values.metrics.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"nextcloud.fullname\" . }}-metrics\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n helm.sh/chart: {{ include \"nextcloud.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n app.kubernetes.io/component: metrics\nspec:\n replicas: {{ .Values.metrics.replicaCount }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n template:\n metadata:\n annotations: {{- toYaml .Values.metrics.podAnnotations | nindent 8 }}\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/component: metrics\n {{- if .Values.metrics.podLabels }}\n{{ toYaml .Values.metrics.podLabels | indent 8 }}\n {{- end }}\n spec:\n containers:\n - name: metrics-exporter\n image: \"{{ .Values.metrics.image.repository }}:{{ .Values.metrics.image.tag }}\"\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy }}\n env:\n - name: NEXTCLOUD_USERNAME\n valueFrom:\n secretKeyRef:\n name: {{ template \"nextcloud.fullname\" . }}\n key: nextcloud-username\n - name: NEXTCLOUD_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"nextcloud.fullname\" . }}\n key: nextcloud-password\n - name: NEXTCLOUD_SERVER\n value: http{{ if .Values.metrics.https }}s{{ end }}://{{ .Values.nextcloud.host }}\n - name: NEXTCLOUD_TIMEOUT\n value: {{ .Values.metrics.timeout }}\n ports:\n - name: metrics\n containerPort: 9205\n {{- if .Values.metrics.resources }}\n resources: {{- toYaml .Values.metrics.resources | nindent 10 }}\n {{- end }}\n{{- end }}\n",
"# metrics-service.yaml\n{{- if .Values.metrics.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"nextcloud.fullname\" . }}-metrics\n labels:\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n helm.sh/chart: {{ include \"nextcloud.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n {{- if .Values.metrics.service.labels -}}\n {{ toYaml .Values.metrics.service.labels | nindent 4 }}\n {{- end -}}\n {{- if .Values.metrics.service.annotations }}\n annotations: {{ toYaml .Values.metrics.service.annotations | nindent 4 }}\n {{- end }}\nspec:\n type: {{ .Values.metrics.service.type }}\n {{ if eq .Values.metrics.service.type \"LoadBalancer\" -}} {{ if .Values.metrics.service.loadBalancerIP }}\n loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }}\n {{ end -}}\n {{- end -}}\n ports:\n - name: metrics\n port: 9205\n targetPort: metrics\n selector:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n{{- end }}\n",
"# nextcloud-pvc.yaml\n{{- if .Values.persistence.enabled -}}\n{{- if not .Values.persistence.existingClaim -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"nextcloud.fullname\" . }}-nextcloud\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n helm.sh/chart: {{ include \"nextcloud.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- if .Values.persistence.annotations }}\n annotations:\n{{ toYaml .Values.persistence.annotations | indent 4 }}\n{{- end }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n{{- if .Values.persistence.storageClass }}\n{{- if (eq \"-\" .Values.persistence.storageClass) }}\n storageClassName: \"\"\n{{- else }}\n storageClassName: \"{{ .Values.persistence.storageClass }}\"\n{{- end }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n",
"# nginx-config.yaml\n{{- if .Values.nginx.enabled -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"nextcloud.fullname\" . }}-nginxconfig\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n helm.sh/chart: {{ include \"nextcloud.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ndata:\n nginx.conf: |-\n{{- if .Values.nginx.config.default }}\n worker_processes auto;\n\n error_log /var/log/nginx/error.log warn;\n pid /var/run/nginx.pid;\n\n\n events {\n worker_connections 1024;\n }\n\n\n http {\n include /etc/nginx/mime.types;\n default_type application/octet-stream;\n\n log_format main '$remote_addr - $remote_user [$time_local] \"$request\" '\n '$status $body_bytes_sent \"$http_referer\" '\n '\"$http_user_agent\" \"$http_x_forwarded_for\"';\n\n access_log /var/log/nginx/access.log main;\n\n sendfile on;\n #tcp_nopush on;\n\n keepalive_timeout 65;\n\n #gzip on;\n\n upstream php-handler {\n server 127.0.0.1:9000;\n }\n\n server {\n listen 80;\n\n # Add headers to serve security related headers\n # Before enabling Strict-Transport-Security headers please read into this\n # topic first.\n #add_header Strict-Transport-Security \"max-age=15768000; includeSubDomains; preload;\" always;\n #\n # WARNING: Only add the preload option once you read about\n # the consequences in https://hstspreload.org/. This option\n # will add the domain to a hardcoded list that is shipped\n # in all major browsers and getting removed from this list\n # could take several months.\n add_header Referrer-Policy \"no-referrer\" always;\n add_header X-Content-Type-Options \"nosniff\" always;\n add_header X-Download-Options \"noopen\" always;\n add_header X-Frame-Options \"SAMEORIGIN\" always;\n add_header X-Permitted-Cross-Domain-Policies \"none\" always;\n add_header X-Robots-Tag \"none\" always;\n add_header X-XSS-Protection \"1; mode=block\" always;\n\n # Remove X-Powered-By, which is an information leak\n fastcgi_hide_header X-Powered-By;\n\n # Path to the root of your installation\n root /var/www/html;\n\n location = /robots.txt {\n allow all;\n log_not_found off;\n access_log off;\n }\n\n # The following 2 rules are only needed for the user_webfinger app.\n # Uncomment it if you're planning to use this app.\n #rewrite ^/.well-known/host-meta /public.php?service=host-meta last;\n #rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json last;\n\n # The following rule is only needed for the Social app.\n # Uncomment it if you're planning to use this app.\n #rewrite ^/.well-known/webfinger /public.php?service=webfinger last;\n\n location = /.well-known/carddav {\n return 301 $scheme://$host:$server_port/remote.php/dav;\n }\n\n location = /.well-known/caldav {\n return 301 $scheme://$host:$server_port/remote.php/dav;\n }\n\n # set max upload size\n client_max_body_size 10G;\n fastcgi_buffers 64 4K;\n\n # Enable gzip but do not remove ETag headers\n gzip on;\n gzip_vary on;\n gzip_comp_level 4;\n gzip_min_length 256;\n gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;\n gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard 
text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;\n\n # Uncomment if your server is build with the ngx_pagespeed module\n # This module is currently not supported.\n #pagespeed off;\n\n location / {\n rewrite ^ /index.php;\n }\n\n location ~ ^\\/(?:build|tests|config|lib|3rdparty|templates|data)\\/ {\n deny all;\n }\n location ~ ^\\/(?:\\.|autotest|occ|issue|indie|db_|console) {\n deny all;\n }\n\n location ~ ^\\/(?:index|remote|public|cron|core\\/ajax\\/update|status|ocs\\/v[12]|updater\\/.+|oc[ms]-provider\\/.+)\\.php(?:$|\\/) {\n fastcgi_split_path_info ^(.+?\\.php)(\\/.*|)$;\n set $path_info $fastcgi_path_info;\n try_files $fastcgi_script_name =404;\n include fastcgi_params;\n fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;\n fastcgi_param PATH_INFO $path_info;\n # fastcgi_param HTTPS on;\n\n # Avoid sending the security headers twice\n fastcgi_param modHeadersAvailable true;\n\n # Enable pretty urls\n fastcgi_param front_controller_active true;\n fastcgi_pass php-handler;\n fastcgi_intercept_errors on;\n fastcgi_request_buffering off;\n }\n\n location ~ ^\\/(?:updater|oc[ms]-provider)(?:$|\\/) {\n try_files $uri/ =404;\n index index.php;\n }\n\n # Adding the cache control header for js, css and map files\n # Make sure it is BELOW the PHP block\n location ~ \\.(?:css|js|woff2?|svg|gif|map)$ {\n try_files $uri /index.php$request_uri;\n add_header Cache-Control \"public, max-age=15778463\";\n # Add headers to serve security related headers (It is intended to\n # have those duplicated to the ones above)\n # Before enabling Strict-Transport-Security headers please read into\n # this topic first.\n #add_header Strict-Transport-Security \"max-age=15768000; includeSubDomains; preload;\" always;\n #\n # WARNING: Only add the preload option once you read about\n # the consequences in https://hstspreload.org/. This option\n # will add the domain to a hardcoded list that is shipped\n # in all major browsers and getting removed from this list\n # could take several months.\n add_header Referrer-Policy \"no-referrer\" always;\n add_header X-Content-Type-Options \"nosniff\" always;\n add_header X-Download-Options \"noopen\" always;\n add_header X-Frame-Options \"SAMEORIGIN\" always;\n add_header X-Permitted-Cross-Domain-Policies \"none\" always;\n add_header X-Robots-Tag \"none\" always;\n add_header X-XSS-Protection \"1; mode=block\" always;\n\n # Optional: Don't log access to assets\n access_log off;\n }\n\n location ~ \\.(?:png|html|ttf|ico|jpg|jpeg|bcmap)$ {\n try_files $uri /index.php$request_uri;\n # Optional: Don't log access to other assets\n access_log off;\n }\n }\n }\n{{- else }}\n{{ .Values.nginx.config.custom | indent 4 }}\n{{- end }}\n{{- end }}\n",
"# php-config.yaml\n{{- if .Values.nextcloud.phpConfigs -}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"nextcloud.fullname\" . }}-phpconfig\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n helm.sh/chart: {{ include \"nextcloud.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ndata:\n{{- range $key, $value := .Values.nextcloud.phpConfigs }}\n {{ $key }}: |-\n{{ $value | indent 4 }}\n{{- end }}\n{{- end }}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"nextcloud.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n helm.sh/chart: {{ include \"nextcloud.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\ntype: Opaque\ndata:\n nextcloud-username: {{ .Values.nextcloud.username | b64enc | quote }}\n {{ if .Values.nextcloud.password }}\n nextcloud-password: {{ .Values.nextcloud.password | b64enc | quote }}\n {{ else }}\n nextcloud-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n {{- if .Values.nextcloud.mail.enabled }}\n smtp-username: {{ default \"\" .Values.nextcloud.mail.smtp.name | b64enc | quote }}\n smtp-password: {{ default \"\" .Values.nextcloud.mail.smtp.password | b64enc | quote }}\n {{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"nextcloud.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n helm.sh/chart: {{ include \"nextcloud.chart\" . }}\n app.kubernetes.io/instance: {{ .Release.Name }}\n app.kubernetes.io/managed-by: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n {{- if eq .Values.service.type \"LoadBalancer\" }}\n loadBalancerIP: {{ default \"\" .Values.service.loadBalancerIP }}\n {{- end }}\n ports:\n - port: {{ .Values.service.port }}\n targetPort: http\n protocol: TCP\n name: http\n {{- if eq .Values.service.type \"NodePort\" }}\n nodePort: {{ default \"\" .Values.service.nodePort}}\n {{- end }}\n selector:\n app.kubernetes.io/name: {{ include \"nextcloud.name\" . }}\n"
] | ## Official nextcloud image version
## ref: https://hub.docker.com/r/library/nextcloud/tags/
##
image:
repository: nextcloud
tag: 17.0.0-apache
pullPolicy: IfNotPresent
# pullSecrets:
  #   - myRegistryKeySecretName
nameOverride: ""
fullnameOverride: ""
# Number of replicas to be deployed
replicaCount: 1
## Allowing use of ingress controllers
## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/
##
ingress:
enabled: false
annotations: {}
# nginx.ingress.kubernetes.io/proxy-body-size: 4G
# kubernetes.io/tls-acme: "true"
# certmanager.k8s.io/cluster-issuer: letsencrypt-prod
# nginx.ingress.kubernetes.io/server-snippet: |-
# server_tokens off;
# proxy_hide_header X-Powered-By;
# rewrite ^/.well-known/webfinger /public.php?service=webfinger last;
# rewrite ^/.well-known/host-meta /public.php?service=host-meta last;
# rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json;
# location = /.well-known/carddav {
# return 301 $scheme://$host/remote.php/dav;
# }
# location = /.well-known/caldav {
# return 301 $scheme://$host/remote.php/dav;
# }
# location = /robots.txt {
# allow all;
# log_not_found off;
# access_log off;
# }
# location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/ {
# deny all;
# }
# location ~ ^/(?:autotest|occ|issue|indie|db_|console) {
# deny all;
# }
# tls:
# - secretName: nextcloud-tls
# hosts:
# - nextcloud.kube.home
labels: {}
# Allow configuration of lifecycle hooks
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/
lifecycle: {}
# postStartCommand: []
# preStopCommand: []
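  # For example (a hypothetical hook, not a shipped default; each list item
  # is one argv element of the exec command run after the container starts):
  # postStartCommand:
  #   - "sh"
  #   - "-c"
  #   - "echo 'Nextcloud container started'"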
nextcloud:
host: nextcloud.kube.home
username: admin
password: changeme
update: 0
datadir: /var/www/html/data
tableprefix:
persistence:
subPath:
mail:
enabled: false
fromAddress: user
domain: domain.com
smtp:
host: domain.com
secure: ssl
port: 465
authtype: LOGIN
name: user
password: pass
# PHP Configuration files
# Will be injected in /usr/local/etc/php/conf.d
phpConfigs: {}
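  # For example (an illustrative sketch; the filename and limits are
  # arbitrary - each key becomes a file injected into conf.d):
  # phpConfigs:
  #   uploads.ini: |-
  #     upload_max_filesize = 16G
  #     post_max_size = 16G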
  # Default config files
  # IMPORTANT: These take effect only if you supply extra configs; otherwise the defaults come from the nextcloud image itself
  # Default configurations can be found here: https://github.com/nextcloud/docker/tree/master/16.0/apache/config
defaultConfigs:
# To protect /var/www/html/config
.htaccess: true
# Redis default configuration
redis.config.php: true
# Apache configuration for rewrite urls
apache-pretty-urls.config.php: true
# Define APCu as local cache
apcu.config.php: true
# Apps directory configs
apps.config.php: true
# Used for auto configure database
autoconfig.php: true
# SMTP default configuration
smtp.config.php: true
# Extra config files created in /var/www/html/config/
# ref: https://docs.nextcloud.com/server/15/admin_manual/configuration_server/config_sample_php_parameters.html#multiple-config-php-file
configs: {}
# For example, to use S3 as primary storage
# ref: https://docs.nextcloud.com/server/13/admin_manual/configuration_files/primary_storage.html#simple-storage-service-s3
#
# configs:
# s3.config.php: |-
# <?php
# $CONFIG = array (
# 'objectstore' => array(
# 'class' => '\\OC\\Files\\ObjectStore\\S3',
# 'arguments' => array(
# 'bucket' => 'my-bucket',
# 'autocreate' => true,
# 'key' => 'xxx',
# 'secret' => 'xxx',
# 'region' => 'us-east-1',
# 'use_ssl' => true
# )
# )
# );
## Strategy used to replace old pods
  ## IMPORTANT: change with care; it is suggested to leave this as-is for upgrade purposes
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
strategy:
type: Recreate
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 1
# maxUnavailable: 0
##
## Extra environment variables
extraEnv:
# - name: SOME_SECRET_ENV
# valueFrom:
# secretKeyRef:
# name: nextcloud
# key: secret_key
  # Extra mounts for the pods. The example shown connects a legacy NFS volume
  # to Nextcloud pods in Kubernetes. The mount can then be configured as External Storage in Nextcloud
extraVolumes:
# - name: nfs
# nfs:
# server: "10.0.0.1"
# path: "/nextcloud_data"
# readOnly: false
extraVolumeMounts:
# - name: nfs
# mountPath: "/legacy_data"
nginx:
  ## You need to use an -fpm variant of the nextcloud image if you want to use nginx!
enabled: false
image:
repository: nginx
tag: alpine
pullPolicy: IfNotPresent
config:
# This generates the default nginx config as per the nextcloud documentation
default: true
  # custom: |-
  #   worker_processes 1;
  #   ...
resources: {}
internalDatabase:
enabled: true
name: nextcloud
##
## External database configuration
##
externalDatabase:
enabled: false
## Supported database engines: mysql or postgresql
type: mysql
## Database host
host:
## Database user
user: nextcloud
## Database password
password:
## Database name
database: nextcloud
  ## Use an existing secret
existingSecret:
enabled: false
# secretName: nameofsecret
# usernameKey: username
# passwordKey: password
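    # For example (illustrative names; enabled must also be set to true for
    # the pre-created secret to be consumed):
    # enabled: true
    # secretName: nextcloud-db-credentials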
##
## MariaDB chart configuration
##
mariadb:
  ## Whether to deploy a MariaDB server to satisfy the application's database requirements. To use an external database, set this to false and configure the externalDatabase parameters
enabled: false
db:
name: nextcloud
user: nextcloud
password: changeme
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: false
accessMode: ReadWriteOnce
size: 8Gi
redis:
enabled: false
usePassword: false
## Cronjob to execute Nextcloud background tasks
## ref: https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/background_jobs_configuration.html#cron-jobs
##
cronjob:
enabled: false
  # The Nextcloud image is used by default, but only curl is needed
image: {}
# repository: nextcloud
# tag: 16.0.3-apache
# pullPolicy: IfNotPresent
# pullSecrets:
  #   - myRegistryKeySecretName
# Every 15 minutes
  # Note: Setting this to any value other than 15 minutes might
  # cause issues with how nextcloud background jobs are executed
schedule: "*/15 * * * *"
annotations: {}
# Set curl's insecure option if you use e.g. self-signed certificates
curlInsecure: false
failedJobsHistoryLimit: 5
successfulJobsHistoryLimit: 2
  # If not set, the resources from the nextcloud deployment will be used
# resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases the chances that charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
  # If not set, the nodeSelector from the nextcloud deployment will be used
  # nodeSelector: {}
  # If not set, the tolerations from the nextcloud deployment will be used
  # tolerations: []
  # If not set, the affinity from the nextcloud deployment will be used
  # affinity: {}
service:
type: ClusterIP
port: 8080
  loadBalancerIP:
  nodePort:
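  # For example, to expose Nextcloud on a fixed port of every node
  # (illustrative port; it must fall inside the cluster's NodePort range):
  # type: NodePort
  # nodePort: 30080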
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
# Nextcloud Data (/var/www/html)
enabled: false
annotations: {}
## nextcloud data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
accessMode: ReadWriteOnce
size: 8Gi
resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases the chances that charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 15
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 15
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
## Enable pod autoscaling using HorizontalPodAutoscaler
## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
##
hpa:
enabled: false
cputhreshold: 60
minPods: 1
maxPods: 10
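  # For example: with cputhreshold 60 the autoscaler targets roughly 60%
  # average CPU utilization across pods, scaling between minPods and maxPods
  # (assuming the chart wires cputhreshold to targetCPUUtilizationPercentage).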
nodeSelector: {}
tolerations: []
affinity: {}
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
replicaCount: 1
  # The metrics exporter needs to know whether you serve Nextcloud over http or https
https: false
timeout: 5s
image:
repository: xperimental/nextcloud-exporter
tag: v0.3.0
pullPolicy: IfNotPresent
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
## Metrics exporter pod Annotation and Labels
# podAnnotations: {}
# podLabels: {}
service:
type: ClusterIP
    ## Use loadBalancerIP to request a specific static IP,
    ## otherwise leave blank
# loadBalancerIP:
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9205"
labels: {}
|
tomcat | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"tomcat.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"tomcat.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"tomcat.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# appsrv-svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"tomcat.fullname\" . }}\n labels:\n app: {{ template \"tomcat.name\" . }}\n chart: {{ template \"tomcat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.externalPort }}\n targetPort: {{ .Values.service.internalPort }}\n protocol: TCP\n name: {{ .Values.service.name }}\n selector:\n app: {{ template \"tomcat.name\" . }}\n release: {{ .Release.Name }}\n",
"# appsrv.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"tomcat.fullname\" . }}\n labels:\n app: {{ template \"tomcat.name\" . }}\n chart: {{ template \"tomcat.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"tomcat.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"tomcat.name\" . }}\n release: {{ .Release.Name }}\n spec:\n volumes:\n - name: app-volume\n emptyDir: {}\n {{- with .Values.extraVolumes }}\n{{ toYaml . | indent 8 }}\n {{- end }}\n initContainers:\n - name: war\n image: {{ .Values.image.webarchive.repository }}:{{ .Values.image.webarchive.tag }}\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n command:\n - \"sh\"\n - \"-c\"\n - \"cp /*.war /app\"\n volumeMounts:\n - name: app-volume\n mountPath: /app\n {{- with .Values.extraInitContainers }}\n{{ toYaml . | indent 8 }}\n {{- end }}\n\n containers:\n - name: {{ .Chart.Name }}\n image: {{ .Values.image.tomcat.repository }}:{{ .Values.image.tomcat.tag }}\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n {{- with .Values.env }}\n env:\n{{ toYaml . | indent 12 }}\n {{- end }}\n volumeMounts:\n - name: app-volume\n mountPath: {{ .Values.deploy.directory }}\n {{- with .Values.extraVolumeMounts }}\n{{ toYaml . | indent 12 }}\n {{- end }}\n ports:\n - containerPort: {{ .Values.service.internalPort }}\n {{- with .Values.hostPort }}\n hostPort: {{ . }}\n {{- end }}\n livenessProbe:\n httpGet:\n path: {{ .Values.livenessProbe.path }}\n port: {{ .Values.service.internalPort }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n readinessProbe:\n httpGet:\n path: {{ .Values.readinessProbe.path }}\n port: {{ .Values.service.internalPort }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n{{ toYaml .Values.image.pullSecrets | indent 8 }}\n {{- end }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n"
] | # Default values for the chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
webarchive:
repository: ananwaresystems/webarchive
tag: "1.0"
tomcat:
repository: tomcat
tag: "7.0"
pullPolicy: IfNotPresent
pullSecrets: []
deploy:
directory: /usr/local/tomcat/webapps
service:
name: http
type: LoadBalancer
externalPort: 80
internalPort: 8080
hostPort: 8009
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
env: []
# - name: env
# value: test
extraVolumes: []
# - name: extra
# emptyDir: {}
extraVolumeMounts: []
# - name: extra
# mountPath: /usr/local/tomcat/webapps/app
# readOnly: true
extraInitContainers: []
# - name: do-something
# image: busybox
# command: ['do', 'something']
readinessProbe:
path: "/sample"
initialDelaySeconds: 60
periodSeconds: 30
failureThreshold: 6
timeoutSeconds: 5
livenessProbe:
path: "/sample"
initialDelaySeconds: 60
periodSeconds: 30
failureThreshold: 6
timeoutSeconds: 5
resources: {}
# limits:
# cpu: 100m
# memory: 256Mi
# requests:
# cpu: 100m
# memory: 256Mi
nodeSelector: {}
tolerations: []
affinity: {}
|
selenium | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"selenium.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"selenium.fullname\" -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name, for hub.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"selenium.hub.fullname\" -}}\n{{- printf \"%s-selenium-hub\" .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name, for chrome.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"selenium.chrome.fullname\" -}}\n{{- printf \"%s-selenium-chrome\" .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name, for chromeDebug.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"selenium.chromeDebug.fullname\" -}}\n{{- printf \"%s-selenium-chrome-debug\" .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name, for firefox.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"selenium.firefox.fullname\" -}}\n{{- printf \"%s-selenium-firefox\" .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name, for firefoxDebug.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"selenium.firefoxDebug.fullname\" -}}\n{{- printf \"%s-selenium-firefox-debug\" .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nReturn the apiVersion of deployment.\n*/}}\n{{- define \"deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else if semverCompare \">=1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# chrome-daemonset.yaml\n{{- if and (eq true .Values.chrome.enabled) (eq true .Values.chrome.runAsDaemonSet) -}}\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: DaemonSet\nmetadata:\n name: {{ template \"selenium.chrome.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"selenium.chrome.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"selenium.chrome.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n {{- with .Values.chrome.podLabels }}\n {{ toYaml .| indent 2 }}\n {{- end }}\n {{- if .Values.chrome.podAnnotations }}\n annotations:\n{{ toYaml .Values.chrome.podAnnotations | indent 8 }}\n {{- end}}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.chrome.image }}:{{ .Values.chrome.tag }}\"\n imagePullPolicy: {{ .Values.chrome.pullPolicy }}\n ports:\n {{- if .Values.hub.jmxPort }}\n - containerPort: {{ .Values.hub.jmxPort }}\n name: jmx\n protocol: TCP\n {{- end }}\n {{- if .Values.chrome.enableLivenessProbe }}\n livenessProbe:\n httpGet:\n path: /wd/hub/status\n port: {{ default \"5555\" .Values.chrome.nodePort }}\n initialDelaySeconds: 30\n periodSeconds: 30\n failureThreshold: 1\n {{- end }}\n {{- if .Values.chrome.waitForRunningSessions }}\n lifecycle:\n preStop:\n exec:\n command:\n - /bin/bash\n - -c\n - \"while [ $(wget -q -O - http://localhost:{{ default \"5555\" .Values.chrome.nodePort }}/wd/hub/sessions | grep -c capabilities) -gt 0 ]; do sleep 1; done\"\n {{- end }}\n env:\n - name: HUB_PORT_4444_TCP_ADDR\n value: {{ template \"selenium.hub.fullname\" . }}\n - name: HUB_PORT_4444_TCP_PORT\n value: {{ .Values.hub.servicePort | quote }}\n - name: JAVA_TOOL_OPTIONS\n value: {{ default \"\" .Values.chrome.javaOpts | quote }}\n - name: SE_OPTS\n value: {{ default \"\" .Values.chrome.seOpts | quote }}\n {{- if .Values.chrome.chromeVersion }}\n - name: CHROME_VERSION\n value: {{ .Values.chrome.chromeVersion | quote }}\n {{- end }}\n {{- if .Values.chrome.nodeMaxInstances }}\n - name: NODE_MAX_INSTANCES\n value: {{ .Values.chrome.nodeMaxInstances | quote }}\n {{- end }}\n {{- if .Values.chrome.nodeMaxSession }}\n - name: NODE_MAX_SESSION\n value: {{ .Values.chrome.nodeMaxSession | quote }}\n {{- end }}\n {{- if .Values.chrome.nodeRegisterCycle }}\n - name: NODE_REGISTER_CYCLE\n value: {{ .Values.chrome.nodeRegisterCycle | quote }}\n {{- end }}\n {{- if .Values.chrome.nodePort }}\n - name: NODE_PORT\n value: {{ .Values.chrome.nodePort | quote }}\n {{- end }}\n {{- if .Values.chrome.screenWidth }}\n - name: SCREEN_WIDTH\n value: {{ .Values.chrome.screenWidth | quote }}\n {{- end }}\n {{- if .Values.chrome.screenHeight }}\n - name: SCREEN_HEIGHT\n value: {{ .Values.chrome.screenHeight | quote }}\n {{- end }}\n {{- if .Values.chrome.screenDepth }}\n - name: SCREEN_DEPTH\n value: {{ .Values.chrome.screenDepth | quote }}\n {{- end }}\n {{- if .Values.chrome.display }}\n - name: DISPLAY\n value: {{ .Values.chrome.display | quote }}\n {{- end }}\n {{- if .Values.chrome.timeZone }}\n - name: TZ\n value: {{ .Values.chrome.timeZone | quote }}\n {{- end }}\n {{- if .Values.chrome.extraEnvs }}\n{{ toYaml .Values.chrome.extraEnvs | indent 12 }}\n {{- end }}\n volumeMounts:\n{{ if .Values.chrome.volumeMounts -}}\n{{ toYaml .Values.chrome.volumeMounts | trim | indent 12 }}\n{{- end }}\n resources:\n{{ toYaml .Values.chrome.resources | trim 
| indent 12 }}\n{{- if or .Values.global.imagePullSecrets .Values.chrome.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.chrome.imagePullSecrets | default .Values.global.imagePullSecrets | quote }}\n{{- end }}\n volumes:\n{{ if .Values.chrome.volumes -}}\n{{ toYaml .Values.chrome.volumes | trim | indent 8 }}\n{{- end }}\n hostAliases:\n{{ toYaml .Values.global.hostAliases | trim | indent 8 }}\n nodeSelector:\n{{- if .Values.chrome.nodeSelector }}\n{{ toYaml .Values.chrome.nodeSelector | trim | indent 8 }}\n{{- else if .Values.global.nodeSelector }}\n{{ toYaml .Values.global.nodeSelector | trim | indent 8 }}\n{{- end }}\n affinity:\n{{- if .Values.chrome.affinity }}\n{{ toYaml .Values.chrome.affinity | indent 8 }}\n{{- else if .Values.global.affinity }}\n{{ toYaml .Values.global.affinity | indent 8 }}\n{{- end }}\n tolerations:\n{{- if .Values.chrome.tolerations }}\n{{ toYaml .Values.chrome.tolerations | indent 8 }}\n{{- else if .Values.global.tolerations }}\n{{ toYaml .Values.global.tolerations | indent 8 }}\n{{- end }}\n{{- end -}}\n",
"# chrome-deployment.yaml\n{{- if and (eq true .Values.chrome.enabled) (eq false .Values.chrome.runAsDaemonSet) -}}\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"selenium.chrome.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n replicas: {{ .Values.chrome.replicas }}\n selector:\n matchLabels:\n app: {{ template \"selenium.chrome.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"selenium.chrome.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n {{- with .Values.chrome.podLabels }}\n {{ toYaml .| indent 2 }}\n {{- end }}\n {{- if .Values.chrome.podAnnotations }}\n annotations:\n{{ toYaml .Values.chrome.podAnnotations | indent 8 }}\n {{- end}}\n spec:\n {{- if .Values.chrome.securityContext }}\n securityContext:\n{{ toYaml .Values.chrome.securityContext | indent 8 }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.chrome.image }}:{{ .Values.chrome.tag }}\"\n imagePullPolicy: {{ .Values.chrome.pullPolicy }}\n ports:\n {{- if .Values.hub.jmxPort }}\n - containerPort: {{ .Values.hub.jmxPort }}\n name: jmx\n protocol: TCP\n {{- end }}\n {{- if .Values.chrome.enableLivenessProbe }}\n livenessProbe:\n httpGet:\n path: /wd/hub/status\n port: {{ default \"5555\" .Values.chrome.nodePort }}\n initialDelaySeconds: 30\n periodSeconds: 30\n failureThreshold: 1\n {{- end }}\n {{- if .Values.chrome.waitForRunningSessions }}\n lifecycle:\n preStop:\n exec:\n command:\n - /bin/bash\n - -c\n - \"while [ $(wget -q -O - http://localhost:{{ default \"5555\" .Values.chrome.nodePort }}/wd/hub/sessions | grep -c capabilities) -gt 0 ]; do sleep 1; done\"\n {{- end }}\n env:\n - name: HUB_PORT_4444_TCP_ADDR\n value: {{ template \"selenium.hub.fullname\" . 
}}\n - name: HUB_PORT_4444_TCP_PORT\n value: {{ .Values.hub.servicePort | quote }}\n - name: JAVA_TOOL_OPTIONS\n value: {{ default \"\" .Values.chrome.javaOpts | quote }}\n - name: SE_OPTS\n value: {{ default \"\" .Values.chrome.seOpts | quote }}\n {{- if .Values.chrome.chromeVersion }}\n - name: CHROME_VERSION\n value: {{ .Values.chrome.chromeVersion | quote }}\n {{- end }}\n {{- if .Values.chrome.nodeMaxInstances }}\n - name: NODE_MAX_INSTANCES\n value: {{ .Values.chrome.nodeMaxInstances | quote }}\n {{- end }}\n {{- if .Values.chrome.nodeMaxSession }}\n - name: NODE_MAX_SESSION\n value: {{ .Values.chrome.nodeMaxSession | quote }}\n {{- end }}\n {{- if .Values.chrome.nodeRegisterCycle }}\n - name: NODE_REGISTER_CYCLE\n value: {{ .Values.chrome.nodeRegisterCycle | quote }}\n {{- end }}\n {{- if .Values.chrome.nodePort }}\n - name: NODE_PORT\n value: {{ .Values.chrome.nodePort | quote }}\n {{- end }}\n {{- if .Values.chrome.screenWidth }}\n - name: SCREEN_WIDTH\n value: {{ .Values.chrome.screenWidth | quote }}\n {{- end }}\n {{- if .Values.chrome.screenHeight }}\n - name: SCREEN_HEIGHT\n value: {{ .Values.chrome.screenHeight | quote }}\n {{- end }}\n {{- if .Values.chrome.screenDepth }}\n - name: SCREEN_DEPTH\n value: {{ .Values.chrome.screenDepth | quote }}\n {{- end }}\n {{- if .Values.chrome.display }}\n - name: DISPLAY\n value: {{ .Values.chrome.display | quote }}\n {{- end }}\n {{- if .Values.chrome.timeZone }}\n - name: TZ\n value: {{ .Values.chrome.timeZone | quote }}\n {{- end }}\n {{- if .Values.chrome.extraEnvs }}\n{{ toYaml .Values.chrome.extraEnvs | indent 12 }}\n {{- end }}\n volumeMounts:\n{{ if .Values.chrome.volumeMounts -}}\n{{ toYaml .Values.chrome.volumeMounts | trim | indent 12 }}\n{{- end }}\n resources:\n{{ toYaml .Values.chrome.resources | trim | indent 12 }}\n{{- if or .Values.global.imagePullSecrets .Values.chrome.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.chrome.imagePullSecrets | default .Values.global.imagePullSecrets | quote }}\n{{- end }}\n volumes:\n{{ if .Values.chrome.volumes -}}\n{{ toYaml .Values.chrome.volumes | trim | indent 8 }}\n{{- end }}\n hostAliases:\n{{ toYaml .Values.global.hostAliases | trim | indent 8 }}\n nodeSelector:\n{{- if .Values.chrome.nodeSelector }}\n{{ toYaml .Values.chrome.nodeSelector | trim | indent 8 }}\n{{- else if .Values.global.nodeSelector }}\n{{ toYaml .Values.global.nodeSelector | trim | indent 8 }}\n{{- end }}\n affinity:\n{{- if .Values.chrome.affinity }}\n{{ toYaml .Values.chrome.affinity | indent 8 }}\n{{- else if .Values.global.affinity }}\n{{ toYaml .Values.global.affinity | indent 8 }}\n{{- end }}\n tolerations:\n{{- if .Values.chrome.tolerations }}\n{{ toYaml .Values.chrome.tolerations | indent 8 }}\n{{- else if .Values.global.tolerations }}\n{{ toYaml .Values.global.tolerations | indent 8 }}\n{{- end }}\n{{- end -}}\n",
"# chromeDebug-daemonset.yaml\n{{- if and (eq true .Values.chromeDebug.enabled) (eq true .Values.chromeDebug.runAsDaemonSet) -}}\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: DaemonSet\nmetadata:\n name: {{ template \"selenium.chromeDebug.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"selenium.chromeDebug.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"selenium.chromeDebug.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n {{- with .Values.chromeDebug.podLabels }}\n {{ toYaml .| indent 2 }}\n {{- end }} \n {{- if .Values.chromeDebug.podAnnotations }}\n annotations:\n{{ toYaml .Values.chromeDebug.podAnnotations | indent 8 }}\n {{- end}}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.chromeDebug.image }}:{{ .Values.chromeDebug.tag }}\"\n imagePullPolicy: {{ .Values.chromeDebug.pullPolicy }}\n ports:\n {{- if .Values.hub.jmxPort }}\n - containerPort: {{ .Values.hub.jmxPort }}\n name: jmx\n protocol: TCP\n {{- end }}\n - containerPort: 5900\n name: vnc\n {{- if .Values.chromeDebug.enableLivenessProbe }}\n livenessProbe:\n httpGet:\n path: /wd/hub/status\n port: {{ default \"5555\" .Values.chromeDebug.nodePort }}\n initialDelaySeconds: 30\n periodSeconds: 30\n failureThreshold: 1\n {{- end }}\n {{- if .Values.chromeDebug.waitForRunningSessions }}\n lifecycle:\n preStop:\n exec:\n command:\n - /bin/bash\n - -c\n - \"while [ $(wget -q -O - http://localhost:{{ default \"5555\" .Values.chromeDebug.nodePort }}/wd/hub/sessions | grep -c capabilities) -gt 0 ]; do sleep 1; done\"\n {{- end }}\n env:\n - name: HUB_PORT_4444_TCP_ADDR\n value: {{ template \"selenium.hub.fullname\" . 
}}\n - name: HUB_PORT_4444_TCP_PORT\n value: {{ .Values.hub.servicePort | quote }}\n - name: JAVA_TOOL_OPTIONS\n value: {{ default \"\" .Values.chromeDebug.javaOpts | quote }}\n - name: SE_OPTS\n value: {{ default \"\" .Values.chromeDebug.seOpts | quote }}\n {{- if .Values.chromeDebug.chromeVersion }}\n - name: CHROME_VERSION\n value: {{ .Values.chromeDebug.chromeVersion | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.nodeMaxInstances }}\n - name: NODE_MAX_INSTANCES\n value: {{ .Values.chromeDebug.nodeMaxInstances | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.nodeMaxSession }}\n - name: NODE_MAX_SESSION\n value: {{ .Values.chromeDebug.nodeMaxSession | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.nodeRegisterCycle }}\n - name: NODE_REGISTER_CYCLE\n value: {{ .Values.chromeDebug.nodeRegisterCycle | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.nodePort }}\n - name: NODE_PORT\n value: {{ .Values.chromeDebug.nodePort | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.screenWidth }}\n - name: SCREEN_WIDTH\n value: {{ .Values.chromeDebug.screenWidth | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.screenHeight }}\n - name: SCREEN_HEIGHT\n value: {{ .Values.chromeDebug.screenHeight | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.screenDepth }}\n - name: SCREEN_DEPTH\n value: {{ .Values.chromeDebug.screenDepth | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.display }}\n - name: DISPLAY\n value: {{ .Values.chromeDebug.display | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.timeZone }}\n - name: TZ\n value: {{ .Values.chromeDebug.timeZone | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.extraEnvs }}\n{{ toYaml .Values.chromeDebug.extraEnvs | indent 12 }}\n {{- end }}\n volumeMounts:\n{{ if .Values.chromeDebug.volumeMounts -}}\n{{ toYaml .Values.chromeDebug.volumeMounts | indent 12 }}\n{{- end }}\n resources:\n{{ toYaml .Values.chromeDebug.resources | indent 12 }}\n{{- if or .Values.global.imagePullSecrets .Values.chromeDebug.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.chromeDebug.imagePullSecrets | default .Values.global.imagePullSecrets | quote }}\n{{- end }}\n volumes:\n{{ if .Values.chromeDebug.volumes -}}\n{{ toYaml .Values.chromeDebug.volumes | indent 8 }}\n{{- end }}\n hostAliases:\n{{ toYaml .Values.global.hostAliases | indent 8 }}\n nodeSelector:\n{{- if .Values.chromeDebug.nodeSelector }}\n{{ toYaml .Values.chromeDebug.nodeSelector | indent 8 }}\n{{- else if .Values.global.nodeSelector }}\n{{ toYaml .Values.global.nodeSelector | indent 8 }}\n{{- end }}\n affinity:\n{{- if .Values.chromeDebug.affinity }}\n{{ toYaml .Values.chromeDebug.affinity | indent 8 }}\n{{- else if .Values.global.affinity }}\n{{ toYaml .Values.global.affinity | indent 8 }}\n{{- end }}\n tolerations:\n{{- if .Values.chromeDebug.tolerations }}\n{{ toYaml .Values.chromeDebug.tolerations | indent 8 }}\n{{- else if .Values.global.tolerations }}\n{{ toYaml .Values.global.tolerations | indent 8 }}\n{{- end }}\n{{- end -}}\n",
"# chromeDebug-deployment.yaml\n{{- if and (eq true .Values.chromeDebug.enabled) (eq false .Values.chromeDebug.runAsDaemonSet) -}}\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"selenium.chromeDebug.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n replicas: {{ .Values.chromeDebug.replicas }}\n selector:\n matchLabels:\n app: {{ template \"selenium.chromeDebug.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"selenium.chromeDebug.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n {{- with .Values.chromeDebug.podLabels }}\n {{ toYaml .| indent 2 }}\n {{- end }} \n {{- if .Values.chromeDebug.podAnnotations }}\n annotations:\n{{ toYaml .Values.chromeDebug.podAnnotations | indent 8 }}\n {{- end}}\n spec:\n {{- if .Values.chromeDebug.securityContext }}\n securityContext:\n{{ toYaml .Values.chromeDebug.securityContext | indent 8 }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.chromeDebug.image }}:{{ .Values.chromeDebug.tag }}\"\n imagePullPolicy: {{ .Values.chromeDebug.pullPolicy }}\n ports:\n {{- if .Values.hub.jmxPort }}\n - containerPort: {{ .Values.hub.jmxPort }}\n name: jmx\n protocol: TCP\n {{- end }}\n - containerPort: 5900\n name: vnc\n {{- if .Values.chromeDebug.enableLivenessProbe }}\n livenessProbe:\n httpGet:\n path: /wd/hub/status\n port: {{ default \"5555\" .Values.chromeDebug.nodePort }}\n initialDelaySeconds: 30\n periodSeconds: 30\n failureThreshold: 1\n {{- end }}\n {{- if .Values.chromeDebug.waitForRunningSessions }}\n lifecycle:\n preStop:\n exec:\n command:\n - /bin/bash\n - -c\n - \"while [ $(wget -q -O - http://localhost:{{ default \"5555\" .Values.chromeDebug.nodePort }}/wd/hub/sessions | grep -c capabilities) -gt 0 ]; do sleep 1; done\"\n {{- end }}\n env:\n - name: HUB_PORT_4444_TCP_ADDR\n value: {{ template \"selenium.hub.fullname\" . 
}}\n - name: HUB_PORT_4444_TCP_PORT\n value: {{ .Values.hub.servicePort | quote }}\n - name: JAVA_TOOL_OPTIONS\n value: {{ default \"\" .Values.chromeDebug.javaOpts | quote }}\n - name: SE_OPTS\n value: {{ default \"\" .Values.chromeDebug.seOpts | quote }}\n {{- if .Values.chromeDebug.chromeVersion }}\n - name: CHROME_VERSION\n value: {{ .Values.chromeDebug.chromeVersion | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.nodeMaxInstances }}\n - name: NODE_MAX_INSTANCES\n value: {{ .Values.chromeDebug.nodeMaxInstances | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.nodeMaxSession }}\n - name: NODE_MAX_SESSION\n value: {{ .Values.chromeDebug.nodeMaxSession | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.nodeRegisterCycle }}\n - name: NODE_REGISTER_CYCLE\n value: {{ .Values.chromeDebug.nodeRegisterCycle | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.nodePort }}\n - name: NODE_PORT\n value: {{ .Values.chromeDebug.nodePort | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.screenWidth }}\n - name: SCREEN_WIDTH\n value: {{ .Values.chromeDebug.screenWidth | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.screenHeight }}\n - name: SCREEN_HEIGHT\n value: {{ .Values.chromeDebug.screenHeight | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.screenDepth }}\n - name: SCREEN_DEPTH\n value: {{ .Values.chromeDebug.screenDepth | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.display }}\n - name: DISPLAY\n value: {{ .Values.chromeDebug.display | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.timeZone }}\n - name: TZ\n value: {{ .Values.chromeDebug.timeZone | quote }}\n {{- end }}\n {{- if .Values.chromeDebug.extraEnvs }}\n{{ toYaml .Values.chromeDebug.extraEnvs | indent 12 }}\n {{- end }}\n volumeMounts:\n{{ if .Values.chromeDebug.volumeMounts -}}\n{{ toYaml .Values.chromeDebug.volumeMounts | indent 12 }}\n{{- end }}\n resources:\n{{ toYaml .Values.chromeDebug.resources | indent 12 }}\n{{- if or .Values.global.imagePullSecrets .Values.chromeDebug.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.chromeDebug.imagePullSecrets | default .Values.global.imagePullSecrets | quote }}\n{{- end }}\n volumes:\n{{ if .Values.chromeDebug.volumes -}}\n{{ toYaml .Values.chromeDebug.volumes | indent 8 }}\n{{- end }}\n hostAliases:\n{{ toYaml .Values.global.hostAliases | indent 8 }}\n nodeSelector:\n{{- if .Values.chromeDebug.nodeSelector }}\n{{ toYaml .Values.chromeDebug.nodeSelector | indent 8 }}\n{{- else if .Values.global.nodeSelector }}\n{{ toYaml .Values.global.nodeSelector | indent 8 }}\n{{- end }}\n affinity:\n{{- if .Values.chromeDebug.affinity }}\n{{ toYaml .Values.chromeDebug.affinity | indent 8 }}\n{{- else if .Values.global.affinity }}\n{{ toYaml .Values.global.affinity | indent 8 }}\n{{- end }}\n tolerations:\n{{- if .Values.chromeDebug.tolerations }}\n{{ toYaml .Values.chromeDebug.tolerations | indent 8 }}\n{{- else if .Values.global.tolerations }}\n{{ toYaml .Values.global.tolerations | indent 8 }}\n{{- end }}\n{{- end -}}\n",
"# firefox-daemonset.yaml\n{{- if and (eq true .Values.firefox.enabled) (eq true .Values.firefox.runAsDaemonSet) -}}\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: DaemonSet\nmetadata:\n name: {{ template \"selenium.firefox.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n selector:\n matchLabels:\n app: {{ template \"selenium.firefox.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"selenium.firefox.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n {{- with .Values.firefox.podLabels }}\n {{ toYaml .| indent 2 }}\n {{- end }}\n {{- if .Values.firefox.podAnnotations }}\n annotations:\n{{ toYaml .Values.firefox.podAnnotations | indent 8 }}\n {{- end}}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.firefox.image }}:{{ .Values.firefox.tag }}\"\n imagePullPolicy: {{ .Values.firefox.pullPolicy }}\n ports:\n {{- if .Values.hub.jmxPort }}\n - containerPort: {{ .Values.hub.jmxPort }}\n name: jmx\n protocol: TCP\n {{- end }}\n {{- if .Values.firefox.enableLivenessProbe }}\n livenessProbe:\n httpGet:\n path: /wd/hub/status\n port: {{ default \"5555\" .Values.firefox.nodePort }}\n initialDelaySeconds: 30\n periodSeconds: 30\n failureThreshold: 1\n {{- end }}\n {{- if .Values.firefox.waitForRunningSessions }}\n lifecycle:\n preStop:\n exec:\n command:\n - /bin/bash\n - -c\n - \"while [ $(wget -q -O - http://localhost:{{ default \"5555\" .Values.firefox.nodePort }}/wd/hub/sessions | grep -c capabilities) -gt 0 ]; do sleep 1; done\"\n {{- end }}\n env:\n - name: HUB_PORT_4444_TCP_ADDR\n value: {{ template \"selenium.hub.fullname\" . }}\n - name: HUB_PORT_4444_TCP_PORT\n value: {{ .Values.hub.servicePort | quote }}\n - name: JAVA_TOOL_OPTIONS\n value: {{ default \"\" .Values.firefox.javaOpts | quote }}\n - name: SE_OPTS\n value: {{ default \"\" .Values.firefox.seOpts | quote }}\n {{- if .Values.firefox.firefoxVersion }}\n - name: FIREFOX_VERSION\n value: {{ .Values.firefox.firefoxVersion | quote }}\n {{- end }}\n {{- if .Values.firefox.nodeMaxInstances }}\n - name: NODE_MAX_INSTANCES\n value: {{ .Values.firefox.nodeMaxInstances | quote }}\n {{- end }}\n {{- if .Values.firefox.nodeMaxSession }}\n - name: NODE_MAX_SESSION\n value: {{ .Values.firefox.nodeMaxSession | quote }}\n {{- end }}\n {{- if .Values.firefox.nodeRegisterCycle }}\n - name: NODE_REGISTER_CYCLE\n value: {{ .Values.firefox.nodeRegisterCycle | quote }}\n {{- end }}\n {{- if .Values.firefox.nodePort }}\n - name: NODE_PORT\n value: {{ .Values.firefox.nodePort | quote }}\n {{- end }}\n {{- if .Values.firefox.screenWidth }}\n - name: SCREEN_WIDTH\n value: {{ .Values.firefox.screenWidth | quote }}\n {{- end }}\n {{- if .Values.firefox.screenHeight }}\n - name: SCREEN_HEIGHT\n value: {{ .Values.firefox.screenHeight | quote }}\n {{- end }}\n {{- if .Values.firefox.screenDepth }}\n - name: SCREEN_DEPTH\n value: {{ .Values.firefox.screenDepth | quote }}\n {{- end }}\n {{- if .Values.firefox.display }}\n - name: DISPLAY\n value: {{ .Values.firefox.display | quote }}\n {{- end }}\n {{- if .Values.firefox.timeZone }}\n - name: TZ\n value: {{ .Values.firefox.timeZone | quote }}\n {{- end }}\n {{- if .Values.firefox.extraEnvs }}\n{{ toYaml .Values.firefox.extraEnvs | indent 12 }}\n {{- end }}\n volumeMounts:\n{{ if .Values.firefox.volumeMounts -}}\n{{ toYaml .Values.firefox.volumeMounts | trim | indent 12 }}\n{{- end }}\n 
resources:\n{{ toYaml .Values.firefox.resources | indent 12 }}\n{{- if or .Values.global.imagePullSecrets .Values.firefox.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.firefox.imagePullSecrets | default .Values.global.imagePullSecrets | quote }}\n{{- end }}\n volumes:\n{{ if .Values.firefox.volumes -}}\n{{ toYaml .Values.firefox.volumes | trim | indent 8 }}\n{{- end }}\n hostAliases:\n{{ toYaml .Values.global.hostAliases | indent 8 }}\n nodeSelector:\n{{- if .Values.firefox.nodeSelector }}\n{{ toYaml .Values.firefox.nodeSelector | trim | indent 8 }}\n{{- else if .Values.global.nodeSelector }}\n{{ toYaml .Values.global.nodeSelector | trim | indent 8 }}\n{{- end }}\n affinity:\n{{- if .Values.firefox.affinity }}\n{{ toYaml .Values.firefox.affinity | trim | indent 8 }}\n{{- else if .Values.global.affinity }}\n{{ toYaml .Values.global.affinity | trim | indent 8 }}\n{{- end }}\n tolerations:\n{{- if .Values.firefox.tolerations }}\n{{ toYaml .Values.firefox.tolerations | trim | indent 8 }}\n{{- else if .Values.global.tolerations }}\n{{ toYaml .Values.global.tolerations | trim | indent 8 }}\n{{- end }}\n{{- end -}}\n",
"# firefox-deployment.yaml\n{{- if and (eq true .Values.firefox.enabled) (eq false .Values.firefox.runAsDaemonSet) -}}\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"selenium.firefox.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n replicas: {{ .Values.firefox.replicas }}\n selector:\n matchLabels:\n app: {{ template \"selenium.firefox.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"selenium.firefox.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n {{- with .Values.firefox.podLabels }}\n {{ toYaml .| indent 2 }}\n {{- end }}\n {{- if .Values.firefox.podAnnotations }}\n annotations:\n{{ toYaml .Values.firefox.podAnnotations | indent 8 }}\n {{- end}}\n spec:\n {{- if .Values.firefox.securityContext }}\n securityContext:\n{{ toYaml .Values.firefox.securityContext | indent 8 }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.firefox.image }}:{{ .Values.firefox.tag }}\"\n imagePullPolicy: {{ .Values.firefox.pullPolicy }}\n ports:\n {{- if .Values.hub.jmxPort }}\n - containerPort: {{ .Values.hub.jmxPort }}\n name: jmx\n protocol: TCP\n {{- end }}\n {{- if .Values.firefox.enableLivenessProbe }}\n livenessProbe:\n httpGet:\n path: /wd/hub/status\n port: {{ default \"5555\" .Values.firefox.nodePort }}\n initialDelaySeconds: 30\n periodSeconds: 30\n failureThreshold: 1\n {{- end }}\n {{- if .Values.firefox.waitForRunningSessions }}\n lifecycle:\n preStop:\n exec:\n command:\n - /bin/bash\n - -c\n - \"while [ $(wget -q -O - http://localhost:{{ default \"5555\" .Values.firefox.nodePort }}/wd/hub/sessions | grep -c capabilities) -gt 0 ]; do sleep 1; done\"\n {{- end }}\n env:\n - name: HUB_PORT_4444_TCP_ADDR\n value: {{ template \"selenium.hub.fullname\" . 
}}\n - name: HUB_PORT_4444_TCP_PORT\n value: {{ .Values.hub.servicePort | quote }}\n - name: JAVA_TOOL_OPTIONS\n value: {{ default \"\" .Values.firefox.javaOpts | quote }}\n - name: SE_OPTS\n value: {{ default \"\" .Values.firefox.seOpts | quote }}\n {{- if .Values.firefox.firefoxVersion }}\n - name: FIREFOX_VERSION\n value: {{ .Values.firefox.firefoxVersion | quote }}\n {{- end }}\n {{- if .Values.firefox.nodeMaxInstances }}\n - name: NODE_MAX_INSTANCES\n value: {{ .Values.firefox.nodeMaxInstances | quote }}\n {{- end }}\n {{- if .Values.firefox.nodeMaxSession }}\n - name: NODE_MAX_SESSION\n value: {{ .Values.firefox.nodeMaxSession | quote }}\n {{- end }}\n {{- if .Values.firefox.nodeRegisterCycle }}\n - name: NODE_REGISTER_CYCLE\n value: {{ .Values.firefox.nodeRegisterCycle | quote }}\n {{- end }}\n {{- if .Values.firefox.nodePort }}\n - name: NODE_PORT\n value: {{ .Values.firefox.nodePort | quote }}\n {{- end }}\n {{- if .Values.firefox.screenWidth }}\n - name: SCREEN_WIDTH\n value: {{ .Values.firefox.screenWidth | quote }}\n {{- end }}\n {{- if .Values.firefox.screenHeight }}\n - name: SCREEN_HEIGHT\n value: {{ .Values.firefox.screenHeight | quote }}\n {{- end }}\n {{- if .Values.firefox.screenDepth }}\n - name: SCREEN_DEPTH\n value: {{ .Values.firefox.screenDepth | quote }}\n {{- end }}\n {{- if .Values.firefox.display }}\n - name: DISPLAY\n value: {{ .Values.firefox.display | quote }}\n {{- end }}\n {{- if .Values.firefox.timeZone }}\n - name: TZ\n value: {{ .Values.firefox.timeZone | quote }}\n {{- end }}\n {{- if .Values.firefox.extraEnvs }}\n{{ toYaml .Values.firefox.extraEnvs | indent 12 }}\n {{- end }}\n volumeMounts:\n{{ if .Values.firefox.volumeMounts -}}\n{{ toYaml .Values.firefox.volumeMounts | trim | indent 12 }}\n{{- end }}\n resources:\n{{ toYaml .Values.firefox.resources | indent 12 }}\n{{- if or .Values.global.imagePullSecrets .Values.firefox.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.firefox.imagePullSecrets | default .Values.global.imagePullSecrets | quote }}\n{{- end }}\n volumes:\n{{ if .Values.firefox.volumes -}}\n{{ toYaml .Values.firefox.volumes | trim | indent 8 }}\n{{- end }}\n hostAliases:\n{{ toYaml .Values.global.hostAliases | indent 8 }}\n nodeSelector:\n{{- if .Values.firefox.nodeSelector }}\n{{ toYaml .Values.firefox.nodeSelector | trim | indent 8 }}\n{{- else if .Values.global.nodeSelector }}\n{{ toYaml .Values.global.nodeSelector | trim | indent 8 }}\n{{- end }}\n affinity:\n{{- if .Values.firefox.affinity }}\n{{ toYaml .Values.firefox.affinity | trim | indent 8 }}\n{{- else if .Values.global.affinity }}\n{{ toYaml .Values.global.affinity | trim | indent 8 }}\n{{- end }}\n tolerations:\n{{- if .Values.firefox.tolerations }}\n{{ toYaml .Values.firefox.tolerations | trim | indent 8 }}\n{{- else if .Values.global.tolerations }}\n{{ toYaml .Values.global.tolerations | trim | indent 8 }}\n{{- end }}\n{{- end -}}\n",
"# firefoxDebug-daemonset.yaml\n{{- if and (eq true .Values.firefoxDebug.enabled) (eq true .Values.firefoxDebug.runAsDaemonSet) -}}\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"selenium.firefoxDebug.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n replicas: {{ .Values.firefoxDebug.replicas }}\n selector:\n matchLabels:\n app: {{ template \"selenium.firefoxDebug.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"selenium.firefoxDebug.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n {{- with .Values.firefoxDebug.podLabels }}\n {{ toYaml .| indent 2 }}\n {{- end }}\n {{- if .Values.firefoxDebug.podAnnotations }}\n annotations:\n{{ toYaml .Values.firefoxDebug.podAnnotations | indent 8 }}\n {{- end}}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.firefoxDebug.image }}:{{ .Values.firefoxDebug.tag }}\"\n imagePullPolicy: {{ .Values.firefoxDebug.pullPolicy }}\n ports:\n {{- if .Values.firefoxDebug.jmxPort }}\n - containerPort: {{ .Values.firefoxDebug.jmxPort }}\n name: jmx\n protocol: TCP\n {{- end }}\n - containerPort: 5900\n name: vnc\n {{- if .Values.firefoxDebug.enableLivenessProbe }}\n livenessProbe:\n httpGet:\n path: /wd/hub/status\n port: {{ default \"5555\" .Values.firefoxDebug.nodePort }}\n initialDelaySeconds: 30\n periodSeconds: 30\n failureThreshold: 1\n {{- end }}\n {{- if .Values.firefoxDebug.waitForRunningSessions }}\n lifecycle:\n preStop:\n exec:\n command:\n - /bin/bash\n - -c\n - \"while [ $(wget -q -O - http://localhost:{{ default \"5555\" .Values.firefoxDebug.nodePort }}/wd/hub/sessions | grep -c capabilities) -gt 0 ]; do sleep 1; done\"\n {{- end }}\n env:\n - name: HUB_PORT_4444_TCP_ADDR\n value: {{ template \"selenium.hub.fullname\" . 
}}\n - name: HUB_PORT_4444_TCP_PORT\n value: {{ .Values.hub.servicePort | quote }}\n - name: JAVA_TOOL_OPTIONS\n value: {{ default \"\" .Values.firefoxDebug.javaOpts | quote }}\n - name: SE_OPTS\n value: {{ default \"\" .Values.firefoxDebug.seOpts | quote }}\n {{- if .Values.firefoxDebug.firefoxVersion }}\n - name: FIREFOX_VERSION\n value: {{ .Values.firefoxDebug.firefoxVersion | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.nodeMaxInstances }}\n - name: NODE_MAX_INSTANCES\n value: {{ .Values.firefoxDebug.nodeMaxInstances | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.nodeMaxSession }}\n - name: NODE_MAX_SESSION\n value: {{ .Values.firefoxDebug.nodeMaxSession | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.nodeRegisterCycle }}\n - name: NODE_REGISTER_CYCLE\n value: {{ .Values.firefoxDebug.nodeRegisterCycle | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.nodePort }}\n - name: NODE_PORT\n value: {{ .Values.firefoxDebug.nodePort | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.screenWidth }}\n - name: SCREEN_WIDTH\n value: {{ .Values.firefoxDebug.screenWidth | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.screenHeight }}\n - name: SCREEN_HEIGHT\n value: {{ .Values.firefoxDebug.screenHeight | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.screenDepth }}\n - name: SCREEN_DEPTH\n value: {{ .Values.firefoxDebug.screenDepth | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.display }}\n - name: DISPLAY\n value: {{ .Values.firefoxDebug.display | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.timeZone }}\n - name: TZ\n value: {{ .Values.firefoxDebug.timeZone | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.extraEnvs }}\n{{ toYaml .Values.firefoxDebug.extraEnvs | indent 12 }}\n {{- end }}\n volumeMounts:\n{{ if .Values.firefoxDebug.volumeMounts -}}\n{{ toYaml .Values.firefoxDebug.volumeMounts | indent 12 }}\n{{- end }}\n resources:\n{{ toYaml .Values.firefoxDebug.resources | indent 12 }}\n{{- if or .Values.global.imagePullSecrets .Values.firefoxDebug.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.firefoxDebug.imagePullSecrets | default .Values.global.imagePullSecrets | quote }}\n{{- end }}\n volumes:\n{{ if .Values.firefoxDebug.volumes -}}\n{{ toYaml .Values.firefoxDebug.volumes | indent 8 }}\n{{- end }}\n hostAliases:\n{{ toYaml .Values.global.hostAliases | indent 8 }}\n nodeSelector:\n{{- if .Values.firefoxDebug.nodeSelector }}\n{{ toYaml .Values.firefoxDebug.nodeSelector | indent 8 }}\n{{- else if .Values.global.nodeSelector }}\n{{ toYaml .Values.global.nodeSelector | indent 8 }}\n{{- end }}\n affinity:\n{{- if .Values.firefoxDebug.affinity }}\n{{ toYaml .Values.firefoxDebug.affinity | indent 8 }}\n{{- else if .Values.global.affinity }}\n{{ toYaml .Values.global.affinity | indent 8 }}\n{{- end }}\n tolerations:\n{{- if .Values.firefoxDebug.tolerations }}\n{{ toYaml .Values.firefoxDebug.tolerations | indent 8 }}\n{{- else if .Values.global.tolerations }}\n{{ toYaml .Values.global.tolerations | indent 8 }}\n{{- end }}\n{{- end -}}\n",
"# firefoxDebug-deployment.yaml\n{{- if and (eq true .Values.firefoxDebug.enabled) (eq false .Values.firefoxDebug.runAsDaemonSet) -}}\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"selenium.firefoxDebug.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n replicas: {{ .Values.firefoxDebug.replicas }}\n selector:\n matchLabels:\n app: {{ template \"selenium.firefoxDebug.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"selenium.firefoxDebug.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n {{- with .Values.firefoxDebug.podLabels }}\n {{ toYaml .| indent 2 }}\n {{- end }}\n {{- if .Values.firefoxDebug.podAnnotations }}\n annotations:\n{{ toYaml .Values.firefoxDebug.podAnnotations | indent 8 }}\n {{- end}}\n spec:\n {{- if .Values.firefoxDebug.securityContext }}\n securityContext:\n{{ toYaml .Values.firefoxDebug.securityContext | indent 8 }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.firefoxDebug.image }}:{{ .Values.firefoxDebug.tag }}\"\n imagePullPolicy: {{ .Values.firefoxDebug.pullPolicy }}\n ports:\n {{- if .Values.firefoxDebug.jmxPort }}\n - containerPort: {{ .Values.firefoxDebug.jmxPort }}\n name: jmx\n protocol: TCP\n {{- end }}\n - containerPort: 5900\n name: vnc\n {{- if .Values.firefoxDebug.enableLivenessProbe }}\n livenessProbe:\n httpGet:\n path: /wd/hub/status\n port: {{ default \"5555\" .Values.firefoxDebug.nodePort }}\n initialDelaySeconds: 30\n periodSeconds: 30\n failureThreshold: 1\n {{- end }}\n {{- if .Values.firefoxDebug.waitForRunningSessions }}\n lifecycle:\n preStop:\n exec:\n command:\n - /bin/bash\n - -c\n - \"while [ $(wget -q -O - http://localhost:{{ default \"5555\" .Values.firefoxDebug.nodePort }}/wd/hub/sessions | grep -c capabilities) -gt 0 ]; do sleep 1; done\"\n {{- end }}\n env:\n - name: HUB_PORT_4444_TCP_ADDR\n value: {{ template \"selenium.hub.fullname\" . 
}}\n - name: HUB_PORT_4444_TCP_PORT\n value: {{ .Values.hub.servicePort | quote }}\n - name: JAVA_TOOL_OPTIONS\n value: {{ default \"\" .Values.firefoxDebug.javaOpts | quote }}\n - name: SE_OPTS\n value: {{ default \"\" .Values.firefoxDebug.seOpts | quote }}\n {{- if .Values.firefoxDebug.firefoxVersion }}\n - name: FIREFOX_VERSION\n value: {{ .Values.firefoxDebug.firefoxVersion | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.nodeMaxInstances }}\n - name: NODE_MAX_INSTANCES\n value: {{ .Values.firefoxDebug.nodeMaxInstances | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.nodeMaxSession }}\n - name: NODE_MAX_SESSION\n value: {{ .Values.firefoxDebug.nodeMaxSession | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.nodeRegisterCycle }}\n - name: NODE_REGISTER_CYCLE\n value: {{ .Values.firefoxDebug.nodeRegisterCycle | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.nodePort }}\n - name: NODE_PORT\n value: {{ .Values.firefoxDebug.nodePort | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.screenWidth }}\n - name: SCREEN_WIDTH\n value: {{ .Values.firefoxDebug.screenWidth | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.screenHeight }}\n - name: SCREEN_HEIGHT\n value: {{ .Values.firefoxDebug.screenHeight | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.screenDepth }}\n - name: SCREEN_DEPTH\n value: {{ .Values.firefoxDebug.screenDepth | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.display }}\n - name: DISPLAY\n value: {{ .Values.firefoxDebug.display | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.timeZone }}\n - name: TZ\n value: {{ .Values.firefoxDebug.timeZone | quote }}\n {{- end }}\n {{- if .Values.firefoxDebug.extraEnvs }}\n{{ toYaml .Values.firefoxDebug.extraEnvs | indent 12 }}\n {{- end }}\n volumeMounts:\n{{ if .Values.firefoxDebug.volumeMounts -}}\n{{ toYaml .Values.firefoxDebug.volumeMounts | indent 12 }}\n{{- end }}\n resources:\n{{ toYaml .Values.firefoxDebug.resources | indent 12 }}\n{{- if or .Values.global.imagePullSecrets .Values.firefoxDebug.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.firefoxDebug.imagePullSecrets | default .Values.global.imagePullSecrets | quote }}\n{{- end }}\n volumes:\n{{ if .Values.firefoxDebug.volumes -}}\n{{ toYaml .Values.firefoxDebug.volumes | indent 8 }}\n{{- end }}\n hostAliases:\n{{ toYaml .Values.global.hostAliases | indent 8 }}\n nodeSelector:\n{{- if .Values.firefoxDebug.nodeSelector }}\n{{ toYaml .Values.firefoxDebug.nodeSelector | indent 8 }}\n{{- else if .Values.global.nodeSelector }}\n{{ toYaml .Values.global.nodeSelector | indent 8 }}\n{{- end }}\n affinity:\n{{- if .Values.firefoxDebug.affinity }}\n{{ toYaml .Values.firefoxDebug.affinity | indent 8 }}\n{{- else if .Values.global.affinity }}\n{{ toYaml .Values.global.affinity | indent 8 }}\n{{- end }}\n tolerations:\n{{- if .Values.firefoxDebug.tolerations }}\n{{ toYaml .Values.firefoxDebug.tolerations | indent 8 }}\n{{- else if .Values.global.tolerations }}\n{{ toYaml .Values.global.tolerations | indent 8 }}\n{{- end }}\n{{- end -}}\n",
"# hub-deployment.yaml\napiVersion: {{ template \"deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"selenium.hub.fullname\" . }}\n labels:\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\nspec:\n replicas: 1\n selector:\n matchLabels:\n app: {{ template \"selenium.hub.fullname\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: {{ template \"selenium.hub.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n {{- with .Values.hub.podLabels }}\n {{ toYaml .| indent 2 }}\n {{- end }} \n {{- if .Values.hub.podAnnotations }}\n annotations:\n{{ toYaml .Values.hub.podAnnotations | indent 8 }}\n {{- end}}\n spec:\n {{- if .Values.hub.securityContext }}\n securityContext:\n{{ toYaml .Values.hub.securityContext | indent 8 }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.hub.image }}:{{ .Values.hub.tag }}\"\n imagePullPolicy: {{ .Values.hub.pullPolicy }}\n ports:\n {{- if .Values.hub.jmxPort }}\n - containerPort: {{ .Values.hub.jmxPort }}\n name: jmx\n protocol: TCP\n {{- end }}\n - containerPort: {{ .Values.hub.port }}\n name: http\n livenessProbe:\n httpGet:\n path: {{ .Values.hub.probePath }}\n port: {{ .Values.hub.port }}\n initialDelaySeconds: 30\n periodSeconds: 5\n timeoutSeconds: {{ .Values.hub.livenessTimeout }}\n readinessProbe:\n httpGet:\n path: {{ .Values.hub.probePath }}\n port: {{ .Values.hub.port }}\n initialDelaySeconds: {{ .Values.hub.readinessDelay }}\n timeoutSeconds: {{ .Values.hub.readinessTimeout }}\n env:\n - name: JAVA_TOOL_OPTIONS\n value: {{ default \"\" .Values.hub.javaOpts | quote }}\n - name: SE_OPTS\n value: {{ default \"\" .Values.hub.seOpts | quote }}\n {{- if .Values.hub.gridNewSessionWaitTimeout }}\n - name: GRID_NEW_SESSION_WAIT_TIMEOUT\n value: {{ .Values.hub.gridNewSessionWaitTimeout | quote }}\n {{- end }}\n {{- if .Values.hub.gridJettyMaxThreads }}\n - name: GRID_JETTY_MAX_THREADS\n value: {{ .Values.hub.gridJettyMaxThreads | quote }}\n {{- end }}\n {{- if .Values.hub.gridNodePolling }}\n - name: GRID_NODE_POLLING\n value: {{ .Values.hub.gridNodePolling | quote }}\n {{- end }}\n {{- if .Values.hub.gridCleanUpCycle }}\n - name: GRID_CLEAN_UP_CYCLE\n value: {{ .Values.hub.gridCleanUpCycle | quote }}\n {{- end }}\n {{- if .Values.hub.gridTimeout }}\n - name: GRID_TIMEOUT\n value: {{ .Values.hub.gridTimeout | quote }}\n {{- end }}\n {{- if .Values.hub.gridBrowserTimeout }}\n - name: GRID_BROWSER_TIMEOUT\n value: {{ .Values.hub.gridBrowserTimeout | quote }}\n {{- end }}\n {{- if .Values.hub.gridMaxSession }}\n - name: GRID_MAX_SESSION\n value: {{ .Values.hub.gridMaxSession | quote }}\n {{- end }}\n {{- if .Values.hub.gridUnregisterIfStillDownAfter }}\n - name: GRID_UNREGISTER_IF_STILL_DOWN_AFTER\n value: {{ .Values.hub.gridUnregisterIfStillDownAfter | quote }}\n {{- end }}\n {{- if .Values.hub.timeZone }}\n - name: TZ\n value: {{ .Values.hub.timeZone | quote }}\n {{- end }}\n {{- if .Values.hub.port }}\n - name: GRID_HUB_PORT\n value: {{ .Values.hub.port | quote }}\n {{- end }}\n {{- if .Values.hub.extraEnvs }}\n{{ toYaml .Values.hub.extraEnvs | indent 12 }}\n {{- end }}\n resources:\n{{ toYaml .Values.hub.resources | trim | indent 12 -}}\n{{- if or .Values.global.imagePullSecrets .Values.hub.imagePullSecrets }}\n imagePullSecrets:\n - name: {{ .Values.hub.imagePullSecrets | default .Values.global.imagePullSecrets | quote }}\n{{- end }}\n nodeSelector:\n{{- if .Values.hub.nodeSelector 
}}\n{{ toYaml .Values.hub.nodeSelector | trim | indent 8 }}\n{{- else if .Values.global.nodeSelector }}\n{{ toYaml .Values.global.nodeSelector | trim | indent 8 }}\n{{- end }}\n affinity:\n{{- if .Values.hub.affinity }}\n{{ toYaml .Values.hub.affinity | indent 8 }}\n{{- else if .Values.global.affinity }}\n{{ toYaml .Values.global.affinity | indent 8 }}\n{{- end }}\n tolerations:\n{{- if .Values.hub.tolerations }}\n{{ toYaml .Values.hub.tolerations | indent 8 }}\n{{- else if .Values.global.tolerations }}\n{{ toYaml .Values.global.tolerations | indent 8 }}\n{{- end }}\n",
"# hub-service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"selenium.hub.fullname\" . }}\n labels:\n app: {{ template \"selenium.hub.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- if .Values.hub.serviceAnnotations }}\n annotations:\n{{ toYaml .Values.hub.serviceAnnotations | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.hub.serviceType | quote }}\n {{- if .Values.hub.serviceLoadBalancerIP }}\n loadBalancerIP: {{ .Values.hub.serviceLoadBalancerIP | quote }}\n {{- end }}\n{{- if .Values.hub.loadBalancerSourceRanges }}\n loadBalancerSourceRanges:\n{{ toYaml .Values.hub.loadBalancerSourceRanges | indent 4 }}\n{{- end }}\n sessionAffinity: {{ .Values.hub.serviceSessionAffinity | quote }}\n ports:\n - name: hub\n port: {{ .Values.hub.servicePort }}\n targetPort: {{ .Values.hub.port }}\n {{- if and ( eq .Values.hub.serviceType \"NodePort\") .Values.hub.nodePort }}\n nodePort: {{ .Values.hub.nodePort }}\n {{- end }}\n selector:\n app: {{ template \"selenium.hub.fullname\" . }}\n",
"# ingress.yaml\n{{- if .Values.hub.ingress.enabled -}}\n{{- $fullName := include \"selenium.hub.fullname\" . -}}\n{{- $ingressPath := .Values.hub.ingress.path -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ $fullName }}\n labels:\n app: {{ template \"selenium.hub.fullname\" . }}\n chart: \"{{ .Chart.Name }}-{{ .Chart.Version }}\"\n release: \"{{ .Release.Name }}\"\n heritage: \"{{ .Release.Service }}\"\n{{- with .Values.hub.ingress.annotations }}\n annotations:\n{{ toYaml . | indent 4 }}\n{{- end }}\nspec:\n{{- if .Values.hub.ingress.tls }}\n tls:\n {{- range .Values.hub.ingress.tls }}\n - hosts:\n {{- range .hosts }}\n - {{ . | quote }}\n {{- end }}\n secretName: {{ .secretName }}\n {{- end }}\n{{- end }}\n rules:\n {{- range .Values.hub.ingress.hosts }}\n - host: {{ . | quote }}\n http:\n paths:\n - path: {{ $ingressPath }}\n backend:\n serviceName: {{ $fullName }}\n servicePort: hub\n {{- end }}\n{{- end }}"
] | global:
  ## NodeSelector applied to every deployment
  ## (hub, chrome, firefox, chromeDebug and firefoxDebug);
  ## it can also be overridden per component, see below
nodeSelector:
# label: value
## Configure HostAliases
hostAliases: []
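  ## For illustration only (hypothetical IP and hostname), hostAliases takes
  ## the standard Kubernetes list form, e.g.:
  # hostAliases:
  #   - ip: "10.0.0.10"
  #     hostnames:
  #       - "selenium-hub.internal"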
  affinity:
    # affinity rules (nodeAffinity / podAffinity / podAntiAffinity)
  tolerations:
    # list of tolerations
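  ## A minimal sketch (hypothetical taint key) of the list form tolerations take:
  # tolerations:
  #   - key: "dedicated"
  #     operator: "Equal"
  #     value: "selenium"
  #     effect: "NoSchedule"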
  ## imagePullSecrets is the name of the secret used to pull the image in case of a private flavour image.
  ## It applies to every deployment (hub, chrome, firefox, chromeDebug and firefoxDebug)
  ## and can also be specified per component below;
  ## a secret specified at the component level overrides the global one.
imagePullSecrets:
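  ## For illustration (hypothetical secret name), the global form mirrors the
  ## per-component one, e.g.:
  # imagePullSecrets: "regcred"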
hub:
## The repository and image
## ref: https://hub.docker.com/r/selenium/hub/
image: "selenium/hub"
## The tag for the image
## ref: https://hub.docker.com/r/selenium/hub/tags/
tag: "3.141.59"
  ## imagePullSecrets is the name of the secret used to pull the image in case of a private flavour image
# imagePullSecrets: "regcred"
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
pullPolicy: "IfNotPresent"
## The port which the hub listens on
port: 4444
## The port the service listens on
servicePort: 4444
  ## nodePort - The node port the service is exposed on
# nodePort: 30044
  ## Timeout for probing hub readiness via an HTTP request to hub.probePath
readinessTimeout: 1
## Initial delay before performing the first readinessProbe
readinessDelay: 15
  ## Timeout for probing hub liveness via an HTTP request to hub.probePath
livenessTimeout: 1
## Path for checking readiness and liveness via HTTP Request
probePath: "/wd/hub/status"
# Configure security context on the hub pod
# securityContext:
# fsGroup: 1000
# runAsUser: 1000
## Additional environment variables to set
extraEnvs: []
# extraEnvs:
# - name: FOO
# valueFrom:
# secretKeyRef:
# key: FOO
# name: secret-resource
## Set the JAVA_TOOL_OPTIONS environment variable
## If you find your selenium hub is OOMKilled, try adding -XX:+UseSerialGC
javaOpts: "-Xmx400m"
## Set the SE_OPTS environment variable
## ref: http://www.seleniumhq.org/docs/07_selenium_grid.jsp#node-configuration
# seOpts:
  ## Defining a JMX port will open the port on the container; however, it
  ## requires additional javaOpts, e.g.
## javaOpts: >
## -Dcom.sun.management.jmxremote.port=4000
## -Dcom.sun.management.jmxremote.authenticate=false
## -Dcom.sun.management.jmxremote.ssl=false
## ref: http://openjdk.java.net/groups/jmx/
# jmxPort: 4000
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources:
limits:
cpu: ".5"
memory: "512Mi"
## Configure annotations on the hub pod
podAnnotations: {}
## Configure labels on the hub pod
podLabels: {}
## The type of service to create
## Values: ClusterIP, NodePort, LoadBalancer, or ExternalName
## ref: https://kubernetes.io/docs/user-guide/services/
serviceType: "LoadBalancer"
## The LoadBalancer IP Address
## ref: https://kubernetes.io/docs/user-guide/services/
## serviceLoadBalancerIP: "40.121.183.52"
loadBalancerSourceRanges: []
## Control where client requests go, to the same pod or round-robin
## Values: ClientIP or None
## ref: https://kubernetes.io/docs/user-guide/services/
serviceSessionAffinity: "None"
## Define various attributes of the service
# serviceAnnotations:
# # internal AWS ELB
# service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0"
## ref: https://github.com/SeleniumHQ/selenium/wiki/Grid2#configuring-the-nodes
## In milliseconds
# gridNewSessionWaitTimeout: -1
# gridJettyMaxThreads: -1
## In milliseconds
# gridNodePolling: 5000
## In milliseconds
# gridCleanUpCycle: 5000
## In seconds
# gridTimeout: 30
## In seconds
# gridBrowserTimeout: 0
# gridMaxSession: 5
## In milliseconds
# gridUnregisterIfStillDownAfter: 30000
# timeZone: UTC
## NodeSelector to be used for the hub
nodeSelector:
# label: value
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- selenium-hub.local
tls: []
# - secretName: selenium-hub-tls
# hosts:
# - selenium-hub.local
chrome:
## Enable the creation of a node-chrome pod
enabled: false
## DaemonSet instead of Deployment
runAsDaemonSet: false
## The repository and image
## ref: https://hub.docker.com/r/selenium/node-chrome/
image: "selenium/node-chrome"
## The tag for the image
## ref: https://hub.docker.com/r/selenium/node-chrome/tags/
tag: "3.141.59"
  ## imagePullSecrets is the name of the secret used to pull the image in case of a private flavour image
# imagePullSecrets: "regcred"
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
pullPolicy: "IfNotPresent"
## The number of pods in the deployment. This is ignored if runAsDaemonSet is enabled.
replicas: 1
  ## When true, adds a liveness check to the pod
  enableLivenessProbe: false
  ## When true, waits for currently running sessions to finish before terminating the pod
waitForRunningSessions: false
## Configure annotations on the chrome pods
podAnnotations: {}
## Configure Labels on the chrome pods
podLabels: {}
# Configure security context on the chrome pods
# securityContext:
# fsGroup: 1000
# runAsUser: 1000
## Additional environment variables to set
extraEnvs: []
# extraEnvs:
# - name: FOO
# valueFrom:
# secretKeyRef:
# key: FOO
# name: secret-resource
## Set the JAVA_TOOL_OPTIONS environment variable
## If you find your selenium node is OOMKilled, try adding -XX:+UseSerialGC
javaOpts: "-Xmx900m"
## Set the SE_OPTS environment variable
## ref: http://www.seleniumhq.org/docs/07_selenium_grid.jsp#node-configuration
# seOpts:
  ## Defining a JMX port will open the port on the container; however, it
  ## requires additional javaOpts, e.g.
## javaOpts: >
## -Dcom.sun.management.jmxremote.port=4000
## -Dcom.sun.management.jmxremote.authenticate=false
## -Dcom.sun.management.jmxremote.ssl=false
## ref: http://openjdk.java.net/groups/jmx/
# jmxPort: 4000
## User defined volumes
## ref: https://kubernetes.io/docs/user-guide/volumes/
volumes:
## https://github.com/kubernetes/kubernetes/pull/34928#issuecomment-277952723
## http://stackoverflow.com/questions/39852716/chrome-driver-throwing-org-openqa-selenium-remote-sessionnotfoundexception-whe
  ## Chrome wants more than 64 MB of shared memory, but Docker/k8s default to 64 MB.
- name: dshm
emptyDir:
medium: Memory
volumeMounts:
- mountPath: /dev/shm
name: dshm
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources:
limits:
cpu: ".5"
memory: "1000Mi"
## Characteristics of the browser window
# screenWidth: 1280
# screenHeight: 1024
# screenDepth: 24
# display: :10
## Selenium node options
# chromeVersion:
# nodeMaxInstances: 1
# nodeMaxSession: 1
## In milliseconds
# nodeRegisterCycle: 5000
# nodePort: 5555
# timeZone: UTC
## NodeSelector to be used for chrome
nodeSelector:
# label: value
chromeDebug:
## Enable the creation of a node-chrome-debug pod
enabled: false
## DaemonSet instead of Deployment
runAsDaemonSet: false
## The repository and image
## ref: https://hub.docker.com/r/selenium/node-chrome-debug/
image: "selenium/node-chrome-debug"
## The tag for the image
## ref: https://hub.docker.com/r/selenium/node-chrome-debug/tags/
tag: "3.141.59"
  ## imagePullSecrets is the name of the secret used to pull the image in case of a private flavour image
# imagePullSecrets: "regcred"
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
pullPolicy: "IfNotPresent"
## The number of pods in the deployment. This is ignored if runAsDaemonSet is enabled.
replicas: 1
  ## When true, adds a liveness check to the pod
  enableLivenessProbe: false
  ## When true, waits for currently running sessions to finish before terminating the pod
waitForRunningSessions: false
## Configure annotations on the chrome debug pods
podAnnotations: {}
## Configure labels on the chrome debug pods
podLabels: {}
# Configure security context on the chrome debug pods
# securityContext:
# fsGroup: 1000
# runAsUser: 1000
## Additional environment variables to set
extraEnvs: []
# extraEnvs:
# - name: FOO
# valueFrom:
# secretKeyRef:
# key: FOO
# name: secret-resource
## Set the JAVA_TOOL_OPTIONS environment variable
  ## If you find your selenium node is OOMKilled, try adding -XX:+UseSerialGC
javaOpts: "-Xmx900m"
## Set the SE_OPTS environment variable
## ref: http://www.seleniumhq.org/docs/07_selenium_grid.jsp#node-configuration
# seOpts:
  ## Defining a JMX port will open the port on the container; however, it
  ## requires additional javaOpts, e.g.
## javaOpts: >
## -Dcom.sun.management.jmxremote.port=4000
## -Dcom.sun.management.jmxremote.authenticate=false
## -Dcom.sun.management.jmxremote.ssl=false
## ref: http://openjdk.java.net/groups/jmx/
# jmxPort: 4000
## User defined volumes
## ref: https://kubernetes.io/docs/user-guide/volumes/
volumes:
## https://github.com/kubernetes/kubernetes/pull/34928#issuecomment-277952723
## http://stackoverflow.com/questions/39852716/chrome-driver-throwing-org-openqa-selenium-remote-sessionnotfoundexception-whe
  ## Chrome wants more than 64 MB of shared memory, but Docker/k8s default to 64 MB.
- name: dshm
emptyDir:
medium: Memory
volumeMounts:
- mountPath: /dev/shm
name: dshm
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources:
limits:
cpu: ".5"
memory: "1500Mi"
## Characteristics of the browser window
# screenWidth: 1280
# screenHeight: 1024
# screenDepth: 24
# display: :10
## Selenium node options
# chromeVersion:
# nodeMaxInstances: 1
# nodeMaxSession: 1
## In milliseconds
# nodeRegisterCycle: 5000
# nodePort: 5555
# timeZone: UTC
## NodeSelector to be used for chromeDebug
nodeSelector:
# label: value
firefox:
## Enable the creation of a node-firefox pod
enabled: false
## DaemonSet instead of Deployment
runAsDaemonSet: false
## The repository and image
## ref: https://hub.docker.com/r/selenium/node-firefox/
image: "selenium/node-firefox"
## The tag for the image
## ref: https://hub.docker.com/r/selenium/node-firefox/tags/
tag: "3.141.59"
  ## imagePullSecrets is the name of the secret used to pull the image in case of a private flavour image
# imagePullSecrets: "regcred"
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
pullPolicy: "IfNotPresent"
## The number of pods in the deployment. This is ignored if runAsDaemonSet is enabled.
replicas: 1
  ## When true, adds a liveness check to the pod
  enableLivenessProbe: false
  ## When true, waits for currently running sessions to finish before terminating the pod
waitForRunningSessions: false
## Configure annotations on the firefox pods
podAnnotations: {}
## Configure labels on the firefox pods
podLabels: {}
# Configure security context on the firefox pods
# securityContext:
# fsGroup: 1000
# runAsUser: 1000
## Additional environment variables to set
extraEnvs: []
# extraEnvs:
# - name: FOO
# valueFrom:
# secretKeyRef:
# key: FOO
# name: secret-resource
## Set the JAVA_TOOL_OPTIONS environment variable
  ## If you find your selenium node is OOMKilled, try adding -XX:+UseSerialGC
javaOpts: "-Xmx900m"
## Set the SE_OPTS environment variable
## ref: http://www.seleniumhq.org/docs/07_selenium_grid.jsp#node-configuration
# seOpts:
  ## Defining a JMX port will open the port on the container; however, it
  ## requires additional javaOpts, e.g.
## javaOpts: >
## -Dcom.sun.management.jmxremote.port=4000
## -Dcom.sun.management.jmxremote.authenticate=false
## -Dcom.sun.management.jmxremote.ssl=false
## ref: http://openjdk.java.net/groups/jmx/
# jmxPort: 4000
volumes:
## https://docs.openshift.com/container-platform/3.6/dev_guide/shared_memory.html
- name: dshm
emptyDir:
medium: Memory
volumeMounts:
- mountPath: /dev/shm
name: dshm
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources:
limits:
cpu: ".5"
memory: "1000Mi"
## Characteristics of the browser window
# screenWidth: 1280
# screenHeight: 1024
# screenDepth: 24
# display: :10
## Selenium node options
# firefoxVersion:
# nodeMaxInstances: 1
# nodeMaxSession: 1
## In milliseconds
# nodeRegisterCycle: 5000
# nodePort: 5555
# timeZone: UTC
## NodeSelector to be used for firefox
nodeSelector:
# label: value
firefoxDebug:
## Enable the creation of a node-firefox-debug pod
enabled: false
## DaemonSet instead of Deployment
runAsDaemonSet: false
## The repository and image
## ref: https://hub.docker.com/r/selenium/node-firefox-debug/
image: "selenium/node-firefox-debug"
## The tag for the image
## ref: https://hub.docker.com/r/selenium/node-firefox-debug/tags/
tag: "3.141.59"
  ## imagePullSecrets is the name of the secret used to pull the image in case of a private flavour image
# imagePullSecrets: "regcred"
## Specify an imagePullPolicy
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
pullPolicy: "IfNotPresent"
## The number of pods in the deployment. This is ignored if runAsDaemonSet is enabled.
replicas: 1
  ## When true, adds a liveness check to the pod
  enableLivenessProbe: false
  ## When true, waits for currently running sessions to finish before terminating the pod
waitForRunningSessions: false
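  ## (Per firefoxDebug-deployment.yaml, waitForRunningSessions is implemented
  ## as a preStop hook that polls http://localhost:<nodePort>/wd/hub/sessions
  ## until no active sessions remain.)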
## Configure annotations on the firefox debug pods
podAnnotations: {}
## Configure labels on the firefox debug pods
podLabels: {}
# Configure security context on the firefox debug pods
# securityContext:
# fsGroup: 1000
# runAsUser: 1000
## Additional environment variables to set
extraEnvs: []
# extraEnvs:
# - name: FOO
# valueFrom:
# secretKeyRef:
# key: FOO
# name: secret-resource
## Set the JAVA_TOOL_OPTIONS environment variable
  ## If you find your selenium node is OOMKilled, try adding -XX:+UseSerialGC
javaOpts: "-Xmx900m"
## Set the SE_OPTS environment variable
## ref: http://www.seleniumhq.org/docs/07_selenium_grid.jsp#node-configuration
# seOpts:
  ## Defining a JMX port will open the port on the container; however, it
  ## requires additional javaOpts, e.g.
## javaOpts: >
## -Dcom.sun.management.jmxremote.port=4000
## -Dcom.sun.management.jmxremote.authenticate=false
## -Dcom.sun.management.jmxremote.ssl=false
## ref: http://openjdk.java.net/groups/jmx/
# jmxPort: 4000
volumes:
## https://docs.openshift.com/container-platform/3.6/dev_guide/shared_memory.html
- name: dshm
emptyDir:
medium: Memory
volumeMounts:
- mountPath: /dev/shm
name: dshm
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
resources:
limits:
cpu: ".5"
memory: "1500Mi"
## Characteristics of the browser window
# screenWidth: 1280
# screenHeight: 1024
# screenDepth: 24
# display: :10
## Selenium node options
# firefoxVersion:
# nodeMaxInstances: 1
# nodeMaxSession: 1
## In milliseconds
# nodeRegisterCycle: 5000
# nodePort: 5555
# timeZone: UTC
## NodeSelector to be used for firefoxDebug
nodeSelector:
# label: value
|
hazelcast-jet | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"hazelcast-jet.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"hazelcast-jet.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name as used by the chart label.\n*/}}\n{{- define \"hazelcast-jet.chart\" -}}\n{{- printf \"%s\" .Chart.Name | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the service account to use\n*/}}\n{{- define \"hazelcast-jet.serviceAccountName\" -}}\n{{- if .Values.serviceAccount.create -}}\n {{ default (include \"hazelcast-jet.fullname\" .) .Values.serviceAccount.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccount.name }}\n{{- end -}}\n{{- end -}}\n\n\n{{/*\nCreate a default fully qualified Hazelcast Jet Management Center app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"hazelcast-jet-management-center.fullname\" -}}\n{{ (include \"hazelcast-jet.fullname\" .) | trunc 45 | }}-management-center\n{{- end -}}\n",
"# config.yaml\n{{- if or .Values.jet.configurationFiles .Values.jet.yaml }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"hazelcast-jet.fullname\" . }}-configuration\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\ndata:\n{{- range $key, $val := .Values.jet.configurationFiles }}\n {{ $key }}: |-\n{{ $val | indent 4}}\n{{- end }}\n hazelcast.yaml: |-\n hazelcast:\n{{ toYaml .Values.jet.yaml.hazelcast | indent 5 }}\n{{ $jetYaml := index .Values \"jet\" \"yaml\" \"hazelcast-jet\"}}\n hazelcast-jet.yaml: |-\n hazelcast-jet:\n{{ toYaml $jetYaml | indent 5 }}\n{{- end -}}\n",
"# management-center-config.yaml\n{{- if or .Values.managementcenter.configurationFiles .Values.managementcenter.yaml}}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"hazelcast-jet-management-center.fullname\" . }}-configuration\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\ndata:\n{{- range $key, $val := .Values.managementcenter.configurationFiles }}\n {{ $key }}: |-\n{{ $val | indent 4}}\n{{- end }}\n hazelcast-client.yaml: |-\n{{ toYaml .Values.managementcenter.yaml | indent 4 }}\n{{- end -}}\n",
"# management-center-deployment.yaml\n{{- if .Values.managementcenter.enabled }}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"hazelcast-jet-management-center.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\nspec:\n replicas: 1\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\n role: hazelcast-jet-management-center\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\n role: hazelcast-jet-management-center\n spec:\n {{- if .Values.managementcenter.image.pullSecrets }}\n imagePullSecrets:\n {{- range .Values.managementcenter.image.pullSecrets }}\n - name: {{ . }}\n {{- end}}\n {{- end}}\n {{- if .Values.managementcenter.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.managementcenter.nodeSelector | indent 8 }}\n {{- end }}\n hostNetwork: false\n hostPID: false\n hostIPC: false\n {{- if .Values.securityContext.enabled }}\n securityContext:\n runAsNonRoot: {{ if eq (int .Values.securityContext.runAsUser) 0 }}false{{ else }}true{{ end }}\n runAsUser: {{ .Values.securityContext.runAsUser }}\n runAsGroup: {{ .Values.securityContext.runAsGroup }}\n fsGroup: {{ .Values.securityContext.fsGroup }}\n {{- end }}\n {{- if .Values.managementcenter.affinity }}\n affinity:\n{{ toYaml .Values.managementcenter.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.managementcenter.tolerations }}\n tolerations:\n{{ toYaml .Values.managementcenter.tolerations | indent 8 }}\n {{- end }}\n containers:\n - name: {{ template \"hazelcast-jet-management-center.fullname\" . 
}}\n image: \"{{ .Values.managementcenter.image.repository }}:{{ .Values.managementcenter.image.tag }}\"\n imagePullPolicy: {{ .Values.managementcenter.image.pullPolicy | quote }}\n resources:\n{{ toYaml .Values.managementcenter.resources | indent 10 }}\n ports:\n - name: mc-port\n containerPort: 8081\n - name: mc-https-port\n containerPort: 443\n {{- if .Values.managementcenter.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /\n port: 8081\n initialDelaySeconds: {{ .Values.managementcenter.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.managementcenter.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.managementcenter.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.managementcenter.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.managementcenter.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.managementcenter.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /\n port: 8081\n initialDelaySeconds: {{ .Values.managementcenter.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.managementcenter.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.managementcenter.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.managementcenter.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.managementcenter.readinessProbe.failureThreshold }}\n {{- end }}\n volumeMounts:\n - name: hazelcast-jet-management-center-storage\n mountPath: /data/hazelcast-jet-management-center\n env:\n - name: MC_LICENSE_KEY\n {{- if .Values.managementcenter.licenseKeySecretName }}\n valueFrom:\n secretKeyRef:\n name: {{ .Values.managementcenter.licenseKeySecretName }}\n key: key\n {{- else }}\n value: {{ .Values.managementcenter.licenseKey }}\n {{- end }}\n - name: JAVA_OPTS\n value: \" -Djet.clientConfig=/data/hazelcast-jet-management-center/hazelcast-client.yaml -DserviceName={{ template \"hazelcast-jet.fullname\" . }} -Dnamespace={{ .Release.Namespace }} {{ .Values.managementcenter.javaOpts }}\"\n {{- if .Values.securityContext.enabled }}\n securityContext:\n runAsNonRoot: {{ if eq (int .Values.securityContext.runAsUser) 0 }}false{{ else }}true{{ end }}\n runAsUser: {{ .Values.securityContext.runAsUser }}\n runAsGroup: {{ .Values.securityContext.runAsGroup }}\n privileged: false\n readOnlyRootFilesystem: false\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n {{- end }}\n serviceAccountName: {{ template \"hazelcast-jet.serviceAccountName\" . }}\n volumes:\n - name: hazelcast-jet-management-center-storage\n configMap:\n name: {{ template \"hazelcast-jet-management-center.fullname\" . }}-configuration\n{{- end -}}\n",
"# management-center-service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n {{- if .Values.managementcenter.service.annotations }}\n annotations:\n {{ toYaml .Values.managementcenter.service.annotations | indent 4 }}\n {{- end }}\n name: {{ template \"hazelcast-jet-management-center.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.managementcenter.service.type }}\n {{- if .Values.managementcenter.service.clusterIP }}\n clusterIP: {{ .Values.managementcenter.service.clusterIP }}\n {{- end }}\n selector:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n role: hazelcast-jet-management-center\n ports:\n - protocol: TCP\n port: {{ .Values.managementcenter.service.port }}\n targetPort: mc-port\n name: mc-port\n - protocol: TCP\n port: {{ .Values.managementcenter.service.httpsPort }}\n targetPort: mc-port\n name: mc-https-port\n",
"# metrics-service.yaml\n{{- if .Values.metrics.enabled }}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"hazelcast-jet.fullname\" . }}-metrics\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\n annotations:\n{{ toYaml .Values.metrics.service.annotations | indent 4 }}\nspec:\n type: {{ .Values.metrics.service.type }}\n selector:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n ports:\n - protocol: TCP\n port: {{ .Values.metrics.service.port }}\n targetPort: metrics\n name: metrics\n{{- end }}\n",
"# role.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n name: {{ template \"hazelcast-jet.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\nrules:\n- apiGroups:\n - \"\"\n resources:\n - endpoints\n - pods\n - nodes\n verbs:\n - get\n - list\n{{- end -}}\n",
"# rolebinding.yaml\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: {{ template \"hazelcast-jet.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"hazelcast-jet.fullname\" . }}\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"hazelcast-jet.serviceAccountName\" . }}\n namespace: {{ .Release.Namespace }}\n{{ end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"hazelcast-jet.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\nspec:\n type: {{ .Values.service.type }}\n {{- if .Values.service.clusterIP }}\n clusterIP: {{ .Values.service.clusterIP }}\n {{- end }}\n selector:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n ports:\n - protocol: TCP\n port: {{ .Values.service.port }}\n targetPort: hazelcast-jet\n name: hzport\n",
"# serviceaccount.yaml\n{{- if .Values.serviceAccount.create -}}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: {{ template \"hazelcast-jet.serviceAccountName\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\n{{- end -}}\n",
"# statefulset.yaml\napiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n name: {{ template \"hazelcast-jet.fullname\" . }}\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\nspec:\n serviceName: {{ template \"hazelcast-jet.fullname\" . }}\n replicas: {{ .Values.cluster.memberCount }}\n selector:\n matchLabels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\n role: hazelcast-jet\n template:\n metadata:\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\n role: hazelcast-jet\n spec:\n {{- if .Values.image.pullSecrets }}\n imagePullSecrets:\n {{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n {{- end}}\n {{- end}}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- if .Values.gracefulShutdown.enabled }}\n terminationGracePeriodSeconds: {{ .Values.gracefulShutdown.maxWaitSeconds }}\n {{- end }}\n hostNetwork: false\n hostPID: false\n hostIPC: false\n {{- if .Values.securityContext.enabled }}\n securityContext:\n runAsNonRoot: {{ if eq (int .Values.securityContext.runAsUser) 0 }}false{{ else }}true{{ end }}\n runAsUser: {{ .Values.securityContext.runAsUser }}\n runAsGroup: {{ .Values.securityContext.runAsGroup }}\n fsGroup: {{ .Values.securityContext.fsGroup }}\n {{- end }}\n {{- if .Values.affinity }}\n affinity:\n{{ toYaml .Values.affinity | indent 8 }}\n {{- end }}\n {{- if .Values.tolerations }}\n tolerations:\n{{ toYaml .Values.tolerations | indent 8 }}\n {{- end }}\n containers:\n - name: {{ template \"hazelcast-jet.fullname\" . 
}}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n ports:\n - name: hazelcast-jet\n containerPort: {{ if .Values.hostPort }}{{ .Values.hostPort }}{{ else }}5701{{ end }}\n hostPort: {{ .Values.hostPort }}\n {{- if .Values.metrics.enabled }}\n - name: metrics\n containerPort: {{ .Values.metrics.service.port }}\n {{- end }}\n {{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: {{ .Values.livenessProbe.path }}\n port: {{ if .Values.livenessProbe.port }}{{ .Values.livenessProbe.port }}{{ else if .Values.hostPort }}{{ .Values.hostPort }}{{ else }}5701{{ end }}\n scheme: {{ .Values.livenessProbe.scheme }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: {{ .Values.readinessProbe.path }}\n port: {{ if .Values.readinessProbe.port }}{{ .Values.readinessProbe.port }}{{ else if .Values.hostPort }}{{ .Values.hostPort }}{{ else }}5701{{ end }}\n scheme: {{ .Values.readinessProbe.scheme }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n {{- end }}\n volumeMounts:\n - name: hazelcast-jet-storage\n mountPath: /data/hazelcast-jet\n {{- if .Values.customVolume }}\n - name: hazelcast-jet-custom-volume\n mountPath: /data/custom\n {{- end }}\n env:\n {{- if .Values.customVolume }}\n - name: CLASSPATH\n value: \"/data/custom:/data/custom/*\"\n {{- end }}\n {{- if .Values.metrics.enabled }}\n - name: PROMETHEUS_PORT\n value: \"{{ .Values.metrics.service.port }}\"\n {{- end }}\n - name: JAVA_OPTS\n value: \"-Dhazelcast.config=/data/hazelcast-jet/hazelcast.yaml -Dhazelcast.jet.config=/data/hazelcast-jet/hazelcast-jet.yaml -DserviceName={{ template \"hazelcast-jet.fullname\" . }} -Dnamespace={{ .Release.Namespace }} {{ if .Values.gracefulShutdown.enabled }}-Dhazelcast.shutdownhook.policy=GRACEFUL -Dhazelcast.shutdownhook.enabled=true -Dhazelcast.graceful.shutdown.max.wait={{ .Values.gracefulShutdown.maxWaitSeconds }} {{ end }} {{ if .Values.metrics.enabled }}-Dhazelcast.jmx=true{{ end }} {{ .Values.jet.javaOpts }}\"\n {{- if .Values.jet.loggingLevel }}\n - name: LOGGING_LEVEL\n value: {{ .Values.jet.loggingLevel }}\n {{- end }}\n {{- if .Values.securityContext.enabled }}\n securityContext:\n runAsNonRoot: {{ if eq (int .Values.securityContext.runAsUser) 0 }}false{{ else }}true{{ end }}\n runAsUser: {{ .Values.securityContext.runAsUser }}\n runAsGroup: {{ .Values.securityContext.runAsGroup }}\n privileged: false\n readOnlyRootFilesystem: {{ .Values.securityContext.readOnlyRootFilesystem }}\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n {{- end }}\n serviceAccountName: {{ template \"hazelcast-jet.serviceAccountName\" . }}\n volumes:\n - name: hazelcast-jet-storage\n configMap:\n name: {{ template \"hazelcast-jet.fullname\" . 
}}-configuration\n {{- if .Values.customVolume }}\n - name: hazelcast-jet-custom-volume\n{{ toYaml .Values.customVolume | indent 8 }}\n {{- end }}\n",
"# test-hazelcast-jet.yaml\napiVersion: v1\nkind: Pod\nmetadata:\n name: \"{{ template \"hazelcast-jet.fullname\" . }}-test-{{ randAlphaNum 5 | lower }}\"\n annotations:\n \"helm.sh/hook\": test-success\n \"helm.sh/hook-delete-policy\": hook-succeeded, hook-failed\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\n app.kubernetes.io/component: \"test\"\n role: test\nspec:\n hostNetwork: false\n hostPID: false\n hostIPC: false\n securityContext:\n runAsNonRoot: true\n runAsUser: 1001\n runAsGroup: 1001\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 4 }}\n {{- end }}\n containers:\n - name: \"{{ template \"hazelcast-jet.fullname\" . }}-test\"\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n command:\n - \"bash\"\n - \"-c\"\n - |\n set -ex\n # Get the number of Hazelcast members in the cluster\n CLUSTER_SIZE=$(curl {{ template \"hazelcast-jet.fullname\" . }}:{{ .Values.service.port }}/hazelcast/health/cluster-size)\n # Test the currect number of Hazelcast members\n test ${CLUSTER_SIZE} -eq {{ .Values.cluster.memberCount }}\n securityContext:\n runAsNonRoot: true\n runAsUser: 1001\n runAsGroup: 1001\n privileged: false\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n restartPolicy: Never\n",
"# test-management-center.yaml\napiVersion: v1\nkind: Pod\nmetadata:\n name: \"{{ template \"hazelcast-jet-management-center.fullname\" . }}-test-{{ randAlphaNum 5 | lower }}\"\n annotations:\n \"helm.sh/hook\": test-success\n \"helm.sh/hook-delete-policy\": hook-succeeded, hook-failed\n labels:\n app.kubernetes.io/name: {{ template \"hazelcast-jet.name\" . }}\n helm.sh/chart: {{ template \"hazelcast-jet.chart\" . }}\n app.kubernetes.io/instance: \"{{ .Release.Name }}\"\n app.kubernetes.io/managed-by: \"{{ .Release.Service }}\"\n app.kubernetes.io/component: \"test\"\n role: test\nspec:\n hostNetwork: false\n hostPID: false\n hostIPC: false\n securityContext:\n runAsNonRoot: true\n runAsUser: 1001\n runAsGroup: 1001\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 4 }}\n {{- end }}\n containers:\n - name: \"{{ template \"hazelcast-jet-management-center.fullname\" . }}-test\"\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n command:\n - \"bash\"\n - \"-c\"\n - |\n set -ex\n # Get the HTTP Response Code of the Deployment\n HEALTH_CHECK_HTTP_RESPONSE_CODE=$(curl --write-out %{http_code} --silent --output /dev/null {{ template \"hazelcast-jet-management-center.fullname\" . }}:{{ .Values.managementcenter.service.port }}/login)\n test ${HEALTH_CHECK_HTTP_RESPONSE_CODE} -eq 200\n securityContext:\n runAsNonRoot: true\n runAsUser: 1001\n runAsGroup: 1001\n privileged: false\n readOnlyRootFilesystem: true\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n restartPolicy: Never"
] | ## Hazelcast Jet image version
## ref: https://hub.docker.com/r/hazelcast/hazelcast-jet/tags/
##
image:
# repository is the Hazelcast Jet image name
repository: "hazelcast/hazelcast-jet"
# tag is the Hazelcast Jet image tag
tag: "4.1"
# pullPolicy is the Docker image pull policy
# It's recommended to change this to 'Always' if the image tag is 'latest'
# ref: http://kubernetes.io/docs/user-guide/images/#updating-images
#
pullPolicy: IfNotPresent
# pullSecrets is an array of docker-registry secret names
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
# pullSecrets:
# - myRegistryKeySecretName
# Cluster settings
cluster:
  # memberCount is the number of Hazelcast Jet members
memberCount: 2
# Hazelcast Jet properties
jet:
# javaOpts are additional JAVA_OPTS properties for Hazelcast Jet member
javaOpts:
# loggingLevel is the level of Hazelcast logs (SEVERE, WARNING, INFO,
  # CONFIG, FINE, FINER, and FINEST). Note that changing this value
# requires setting securityContext.runAsUser to 0.
# loggingLevel:
# Jet and Hazelcast IMDG YAML configuration files
yaml:
hazelcast:
cluster-name: jet
network:
join:
multicast:
enabled: false
kubernetes:
enabled: true
service-name: ${serviceName}
namespace: ${namespace}
resolve-not-ready-addresses: true
rest-api:
enabled: true
endpoint-groups:
HEALTH_CHECK:
enabled: true
management-center:
enabled: ${hazelcast.mancenter.enabled}
url: ${hazelcast.mancenter.url}
hazelcast-jet:
instance:
# period between flow control packets in milliseconds
flow-control-period: 100
# number of backup copies to configure for Hazelcast IMaps used internally in a Jet job
backup-count: 1
# the delay after which auto-scaled jobs will restart if a new member is added to the
# cluster. The default is 10 seconds. Has no effect on jobs with auto scaling disabled
scale-up-delay-millis: 10000
# Sets whether lossless job restart is enabled for the node. With
# lossless restart you can restart the whole cluster without losing the
# jobs and their state. The feature is implemented on top of the Hot
# Restart feature of Hazelcast IMDG which persists the data to disk.
lossless-restart-enabled: false
edge-defaults:
# capacity of the concurrent SPSC queue between each two processors
queue-size: 1024
# network packet size limit in bytes, only applies to distributed edges
packet-size-limit: 16384
# receive window size multiplier, only applies to distributed edges
receive-window-multiplier: 3
metrics:
# whether metrics collection is enabled
enabled: true
# whether jmx mbean metrics collection is enabled
jmx-enabled: true
# the number of seconds the metrics will be retained on the instance
retention-seconds: 120
# the metrics collection interval in seconds
collection-interval-seconds: 5
# whether metrics should be collected for data structures. Metrics
# collection can have some overhead if there is a large number of data
# structures
metrics-for-data-structures: false
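  # For reference, the 'hazelcast' and 'hazelcast-jet' blocks above are rendered
  # by the chart's config.yaml template into a ConfigMap and mounted at
  # /data/hazelcast-jet/hazelcast.yaml and /data/hazelcast-jet/hazelcast-jet.yaml,
  # which JAVA_OPTS in the StatefulSet points to via
  # -Dhazelcast.config / -Dhazelcast.jet.config.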
# configurationFiles are any additional Hazelcast Jet configuration files
# configurationFiles:
# affinity specifies the affinity/anti-affinity of different pods. The commented out
# example below shows how you could ensure your hazelcast jet pods are scheduled on
# different Kubernetes nodes
# affinity:
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - hazelcast-jet
# - key: role
# operator: In
# values:
# - hazelcast-jet
# topologyKey: kubernetes.io/hostname
# tolerations enable Hazelcast Jet PODs to be able to run on nodes with taints
# tolerations:
# nodeSelector is a map of node labels for pod assignment
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
# hostPort is the port on which Hazelcast Jet pods are exposed on the host machines
# hostPort:
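# When hostPort is set, the StatefulSet uses it as both the container port
# (replacing the default 5701) and the hostPort on the node.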
gracefulShutdown:
enabled: true
maxWaitSeconds: 600
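  # When enabled, maxWaitSeconds is applied both as the pod's
  # terminationGracePeriodSeconds and as -Dhazelcast.graceful.shutdown.max.wait
  # in JAVA_OPTS (see statefulset.yaml).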
# Hazelcast Liveness probe
livenessProbe:
  # enabled is a flag used to enable the liveness probe
enabled: true
# initialDelaySeconds is a delay before liveness probe is initiated
initialDelaySeconds: 30
# periodSeconds decides how often to perform the probe
periodSeconds: 10
# timeoutSeconds decides when the probe times out
timeoutSeconds: 5
# successThreshold is the minimum consecutive successes for the probe to be considered successful after having failed
successThreshold: 1
# failureThreshold is the minimum consecutive failures for the probe to be considered failed after having succeeded
failureThreshold: 3
# url path that will be called to check liveness
path: /hazelcast/health/node-state
# port that will be used in liveness probe calls
# port:
# HTTPS or HTTP scheme
scheme: HTTP
# Hazelcast Readiness probe
readinessProbe:
  # enabled is a flag used to enable the readiness probe
enabled: true
# initialDelaySeconds is a delay before readiness probe is initiated
initialDelaySeconds: 30
# periodSeconds decides how often to perform the probe
periodSeconds: 10
# timeoutSeconds decides when the probe times out
timeoutSeconds: 1
# successThreshold is the minimum consecutive successes for the probe to be considered successful after having failed
successThreshold: 1
# failureThreshold is the minimum consecutive failures for the probe to be considered failed after having succeeded
failureThreshold: 3
# url path that will be called to check readiness
path: /hazelcast/health/ready
# port that will be used in readiness probe calls
# port:
# HTTPS or HTTP scheme
scheme: HTTP
# Configure resource requests and limits
# ref: http://kubernetes.io/docs/user-guide/compute-resources/
#
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
# Hazelcast Service properties
service:
# type defines the Kubernetes service type ('ClusterIP', 'LoadBalancer', or 'NodePort')
type: ClusterIP
# port is the Kubernetes service port
port: 5701
# clusterIP set to None makes the service headless
# It is required if DNS Lookup is used (https://github.com/hazelcast/hazelcast-kubernetes#dns-lookup)
clusterIP: "None"
# Role-based Access Control
rbac:
# Specifies whether RBAC resources should be created
# It is not required if DNS Lookup is used (https://github.com/hazelcast/hazelcast-kubernetes#dns-lookup)
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Security Context properties
securityContext:
# enabled is a flag to enable Security Context
enabled: true
# runAsUser is the user ID used to run the container
runAsUser: 65534
# runAsGroup is the primary group ID used to run all processes within any container of the pod
runAsGroup: 65534
# fsGroup is the group ID associated with the container
fsGroup: 65534
# readOnlyRootFilesystem is a flag to enable readOnlyRootFilesystem for the Hazelcast security context
readOnlyRootFilesystem: true
# Allows enabling Prometheus scraping of the pods
metrics:
enabled: false
service:
type: ClusterIP
port: 8080
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics"
prometheus.io/port: "8080"
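# When metrics are enabled, the StatefulSet exposes a 'metrics' container port
# and sets the PROMETHEUS_PORT environment variable to the same port.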
# customVolume is the configuration for a volume that will be mounted at '/data/custom/' (e.g. to mount a volume with custom JARs)
# customVolume:
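# A minimal sketch (hypothetical PVC name); the StatefulSet mounts this volume
# at /data/custom and adds it to the container's CLASSPATH:
# customVolume:
#   persistentVolumeClaim:
#     claimName: jet-custom-jars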
# Hazelcast Jet Management Center application properties
managementcenter:
# enabled is a flag to enable Hazelcast Jet Management Center application
enabled: true
## Hazelcast Jet Management Center image version
## ref: https://hub.docker.com/r/hazelcast/hazelcast-jet-management-center/tags/
##
image:
# repository is the Hazelcast Jet Management Center image name
repository: "hazelcast/hazelcast-jet-management-center"
# tag is the Hazelcast Jet Management Center image tag
tag: "4.1"
# pullPolicy is the Docker image pull policy
# It's recommended to change this to 'Always' if the image tag is 'latest'
# ref: http://kubernetes.io/docs/user-guide/images/#updating-images
#
pullPolicy: IfNotPresent
# pullSecrets is an array of docker-registry secret names
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
# pullSecrets:
# - myRegistryKeySecretName
# javaOpts are additional JAVA_OPTS properties for Hazelcast Jet Management Center
javaOpts:
# licenseKey is the license key for Hazelcast Jet Management Center
  # if not provided, it can be entered later in the Management Center web interface
licenseKey:
# licenseKeySecretName is the name of the secret where the Hazelcast Jet Management Center License Key is stored (can be used instead of licenseKey)
# licenseKeySecretName:
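  # For illustration (hypothetical secret name): the Secret must store the
  # license under the key 'key', e.g.
  # kubectl create secret generic jet-mc-license --from-literal=key=<LICENSE>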
# affinity specifies the Hazelcast Jet Management Center affinity/anti-affinity of different pods
# affinity:
# tolerations enable Hazelcast Jet Management Center POD to be able to run on nodes with taints
# tolerations:
  # nodeSelector is a map of node labels for Hazelcast Jet Management Center pod assignment
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
# Jet Client configuration YAML file which will be used by Hazelcast Jet Management Center
yaml:
hazelcast-client:
cluster-name: jet
network:
kubernetes:
enabled: true
namespace: ${namespace}
service-name: ${serviceName}
resolve-not-ready-addresses: true
# configurationFiles are any additional Hazelcast Jet Client configuration files
# configurationFiles:
# Configure resource requests and limits
# ref: http://kubernetes.io/docs/user-guide/compute-resources/
#
# resources:
# requests:
# memory: 256Mi
# cpu: 100m
# Hazelcast Jet Management Center Service properties
service:
# type defines the Kubernetes service type ('ClusterIP', 'LoadBalancer', or 'NodePort')
type: LoadBalancer
# port is the Kubernetes service port
port: 8081
# service https port
httpsPort: 443
# service annotations for management center
annotations: {}
# Hazelcast Jet Management Center Liveness probe
livenessProbe:
    # enabled is a flag used to enable the liveness probe
enabled: true
# initialDelaySeconds is a delay before liveness probe is initiated
initialDelaySeconds: 30
# periodSeconds decides how often to perform the probe
periodSeconds: 10
# timeoutSeconds decides when the probe times out
timeoutSeconds: 5
# successThreshold is the minimum consecutive successes for the probe to be considered successful after having failed
successThreshold: 1
# failureThreshold is the minimum consecutive failures for the probe to be considered failed after having succeeded
failureThreshold: 3
# Hazelcast Jet Management Center Readiness probe
readinessProbe:
    # enabled is a flag used to enable the readiness probe
enabled: true
# initialDelaySeconds is a delay before readiness probe is initiated
initialDelaySeconds: 30
# periodSeconds decides how often to perform the probe
periodSeconds: 10
# timeoutSeconds decides when the probe times out
timeoutSeconds: 1
# successThreshold is the minimum consecutive successes for the probe to be considered successful after having failed
successThreshold: 1
# failureThreshold is the minimum consecutive failures for the probe to be considered failed after having succeeded
failureThreshold: 3
|
prestashop | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prestashop.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"prestashop.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"prestashop.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"prestashop.mariadb.fullname\" -}}\n{{- printf \"%s-%s\" .Release.Name \"mariadb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nGet the user defined LoadBalancerIP for this release.\nNote, returns 127.0.0.1 if using ClusterIP.\n*/}}\n{{- define \"prestashop.serviceIP\" -}}\n{{- if eq .Values.service.type \"ClusterIP\" -}}\n127.0.0.1\n{{- else -}}\n{{- .Values.service.loadBalancerIP | default \"\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGets the host to be used for this application.\nIf not using ClusterIP, or if a host or LoadBalancerIP is not defined, the value will be empty.\nWhen using Ingress, it will be set to the Ingress hostname.\n*/}}\n{{- define \"prestashop.host\" -}}\n{{- if .Values.ingress.enabled }}\n{{- $host := (index .Values.ingress.hosts 0).name | default \"\" -}}\n{{- default (include \"prestashop.serviceIP\" .) $host -}}\n{{- else -}}\n{{- $host := index .Values (printf \"%sHost\" .Chart.Name) | default \"\" -}}\n{{- default (include \"prestashop.serviceIP\" .) 
$host -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Prestashop image name\n*/}}\n{{- define \"prestashop.image\" -}}\n{{- $registryName := .Values.image.registry -}}\n{{- $repositoryName := .Values.image.repository -}}\n{{- $tag := .Values.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the metrics image)\n*/}}\n{{- define \"prestashop.metrics.image\" -}}\n{{- $registryName := .Values.metrics.image.registry -}}\n{{- $repositoryName := .Values.metrics.image.repository -}}\n{{- $tag := .Values.metrics.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"prestashop.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.metrics.image.pullSecrets }}\n - name: {{ . 
}}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"prestashop.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the appropriate apiVersion for deployment.\n*/}}\n{{- define \"prestashop.deployment.apiVersion\" -}}\n{{- if semverCompare \"<1.14-0\" .Capabilities.KubeVersion.GitVersion -}}\n{{- print \"extensions/v1beta1\" -}}\n{{- else -}}\n{{- print \"apps/v1\" -}}\n{{- end -}}\n{{- end -}}\n",
"# deployment.yaml\n{{- if include \"prestashop.host\" . -}}\napiVersion: {{ template \"prestashop.deployment.apiVersion\" . }}\nkind: Deployment\nmetadata:\n name: {{ template \"prestashop.fullname\" . }}\n labels:\n app: \"{{ template \"prestashop.name\" . }}\"\n chart: \"{{ template \"prestashop.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"prestashop.name\" . }}\n release: \"{{ .Release.Name }}\"\n template:\n metadata:\n labels:\n app: \"{{ template \"prestashop.name\" . }}\"\n chart: \"{{ template \"prestashop.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n{{- if or .Values.podAnnotations .Values.metrics.enabled }}\n annotations:\n {{- if .Values.podAnnotations }}\n{{ toYaml .Values.podAnnotations | indent 8 }}\n {{- end }}\n {{- if .Values.metrics.podAnnotations }}\n{{ toYaml .Values.metrics.podAnnotations | indent 8 }}\n {{- end }}\n{{- end }}\n spec:\n{{- include \"prestashop.imagePullSecrets\" . | indent 6 }}\n hostAliases:\n - ip: \"127.0.0.1\"\n hostnames:\n - \"status.localhost\"\n containers:\n - name: {{ template \"prestashop.fullname\" . }}\n image: {{ template \"prestashop.image\" . }}\n imagePullPolicy: {{ .Values.image.pullPolicy | quote }}\n env:\n - name: ALLOW_EMPTY_PASSWORD\n value: {{ .Values.allowEmptyPassword | quote }}\n {{- if .Values.prestashopCookieCheckIP }}\n - name: PRESTASHOP_COOKIE_CHECK_IP\n value: {{ .Values.prestashopCookieCheckIP | quote }}\n {{- end }}\n {{- if .Values.prestashopCountry }}\n - name: PRESTASHOP_COUNTRY\n value: {{ .Values.prestashopCountry | quote }}\n {{- end }}\n {{- if .Values.prestashopLanguage }}\n - name: PRESTASHOP_LANGUAGE\n value: {{ .Values.prestashopLanguage | quote }}\n {{- end }}\n {{- if .Values.mariadb.enabled }}\n - name: MARIADB_HOST\n value: {{ template \"prestashop.mariadb.fullname\" . }}\n - name: MARIADB_PORT_NUMBER\n value: \"3306\"\n - name: PRESTASHOP_DATABASE_NAME\n value: {{ .Values.mariadb.db.name | quote }}\n - name: PRESTASHOP_DATABASE_USER\n value: {{ .Values.mariadb.db.user | quote }}\n - name: PRESTASHOP_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"prestashop.mariadb.fullname\" . }}\n key: mariadb-password\n {{- else }}\n - name: MARIADB_HOST\n value: {{ .Values.externalDatabase.host | quote }}\n - name: MARIADB_PORT_NUMBER\n value: {{ .Values.externalDatabase.port | quote }}\n - name: PRESTASHOP_DATABASE_NAME\n value: {{ .Values.externalDatabase.database | quote }}\n - name: PRESTASHOP_DATABASE_USER\n value: {{ .Values.externalDatabase.user | quote }}\n - name: PRESTASHOP_DATABASE_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ printf \"%s-%s\" .Release.Name \"externaldb\" }}\n key: db-password\n {{- end }}\n{{- $port:=.Values.service.port | toString }}\n - name: PRESTASHOP_HOST\n value: \"{{ include \"prestashop.host\" . }}{{- if ne $port \"80\" }}:{{ .Values.service.port }}{{ end }}\"\n - name: PRESTASHOP_USERNAME\n value: {{ .Values.prestashopUsername | quote }}\n - name: PRESTASHOP_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"prestashop.fullname\" . 
}}\n key: prestashop-password\n - name: PRESTASHOP_EMAIL\n value: {{ .Values.prestashopEmail | quote }}\n - name: PRESTASHOP_FIRST_NAME\n value: {{ .Values.prestashopFirstName | quote }}\n - name: PRESTASHOP_LAST_NAME\n value: {{ .Values.prestashopLastName | quote }}\n {{- if .Values.smtpHost }}\n - name: SMTP_HOST\n value: {{ .Values.smtpHost | quote }}\n {{- end }}\n {{- if .Values.smtpPort }}\n - name: SMTP_PORT\n value: {{ .Values.smtpPort | quote }}\n {{- end }}\n {{- if .Values.smtpUser }}\n - name: SMTP_USER\n value: {{ .Values.smtpUser | quote }}\n {{- end }}\n {{- if .Values.smtpPassword }}\n - name: SMTP_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"prestashop.fullname\" . }}\n key: smtp-password\n {{- end }}\n {{- if .Values.smtpProtocol }}\n - name: SMTP_PROTOCOL\n value: {{ .Values.smtpProtocol | quote }}\n {{- end }}\n ports:\n - name: http\n containerPort: 80\n - name: https\n containerPort: 443\n {{- if .Values.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /login\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"prestashop.host\" . | quote }}\n initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /login\n port: http\n httpHeaders:\n - name: Host\n value: {{ include \"prestashop.host\" . | quote }}\n initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 10 }}\n volumeMounts:\n - mountPath: /bitnami/prestashop\n name: prestashop-data\n subPath: prestashop\n{{- if .Values.metrics.enabled }}\n - name: metrics\n image: {{ template \"prestashop.metrics.image\" . }}\n imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }}\n command: [ '/bin/apache_exporter', '-scrape_uri', 'http://status.localhost:80/server-status/?auto']\n ports:\n - name: metrics\n containerPort: 9117\n livenessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 15\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n path: /metrics\n port: metrics\n initialDelaySeconds: 5\n timeoutSeconds: 1\n resources:\n{{ toYaml .Values.metrics.resources | indent 12 }}\n{{- end }}\n volumes:\n - name: prestashop-data\n {{- if .Values.persistence.enabled }}\n persistentVolumeClaim:\n claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ template \"prestashop.fullname\" . }}-prestashop{{- end }}\n {{- else }}\n emptyDir: {}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n{{- end -}}\n",
"# externaldb-secrets.yaml\n{{- if not .Values.mariadb.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: \"{{ template \"prestashop.fullname\" . }}-externaldb\"\n labels:\n app: \"{{ template \"prestashop.name\" . }}-externaldb\"\n chart: \"{{ template \"prestashop.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\ntype: Opaque\ndata:\n db-password: {{ default \"\" .Values.externalDatabase.password | b64enc | quote }}\n{{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"prestashop.fullname\" . }}\n labels:\n app: \"{{ template \"prestashop.name\" . }}\"\n chart: \"{{ template \"prestashop.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n annotations:\n {{- if .Values.ingress.certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range .Values.ingress.hosts }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: \"{{ template \"prestashop.fullname\" $ }}\"\n servicePort: http\n {{- end }}\n tls:\n {{- range .Values.ingress.hosts }}\n {{- if .tls }}\n - hosts:\n - {{ .name }}\n secretName: {{ .tlsSecret }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# prestashop-pvc.yaml\n{{- if .Values.persistence.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ template \"prestashop.fullname\" . }}-prestashop\n labels:\n app: \"{{ template \"prestashop.name\" . }}\"\n chart: \"{{ template \"prestashop.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{ include \"prestashop.storageClass\" . }}\n{{- end -}}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"prestashop.fullname\" . }}\n labels:\n app: \"{{ template \"prestashop.name\" . }}\"\n chart: \"{{ template \"prestashop.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\n annotations:\n \"helm.sh/hook\": pre-install \ntype: Opaque\ndata:\n {{- if .Values.prestashopPassword }}\n prestashop-password: {{ default \"\" .Values.prestashopPassword | b64enc | quote }}\n {{- else }}\n prestashop-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{- end }}\n {{- if .Values.smtpPassword }}\n smtp-password: {{ .Values.smtpPassword | b64enc | quote }}\n {{- end }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"prestashop.fullname\" . }}\n labels:\n app: \"{{ template \"prestashop.name\" . }}\"\n chart: \"{{ template \"prestashop.chart\" . }}\"\n release: {{ .Release.Name | quote }}\n heritage: {{ .Release.Service | quote }}\nspec:\n type: {{ .Values.service.type }}\n {{- if (and (eq .Values.service.type \"LoadBalancer\") (not (empty .Values.service.loadBalancerIP))) }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP }}\n {{- end }}\n {{- if (or (eq .Values.service.type \"LoadBalancer\") (eq .Values.service.type \"NodePort\")) }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}\n {{- end }}\n ports:\n - name: http\n port: {{ .Values.service.port }}\n targetPort: http\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n - name: https\n port: {{ .Values.service.httpsPort }}\n targetPort: https\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.https)))}}\n nodePort: {{ .Values.service.nodePorts.https }}\n {{- end }}\n selector:\n app: \"{{ template \"prestashop.name\" . }}\"\n release: \"{{ .Release.Name }}\"\n",
"# tls-secrets.yaml\n{{- if .Values.ingress.enabled }}\n{{- range .Values.ingress.secrets }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ .name }}\n labels:\n app: {{ template \"prestashop.name\" $ }}\n chart: {{ template \"prestashop.chart\" $ }}\n release: {{ $.Release.Name }}\n heritage: {{ $.Release.Service }}\ntype: kubernetes.io/tls\ndata:\n tls.crt: {{ .certificate | b64enc }}\n tls.key: {{ .key | b64enc }}\n{{- end }}\n{{- end }}\n"
] | ## Global Docker image parameters
## Please, note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry and imagePullSecrets
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## Bitnami PrestaShop image version
## ref: https://hub.docker.com/r/bitnami/prestashop/tags/
##
image:
registry: docker.io
repository: bitnami/prestashop
tag: 1.7.6-4-debian-10-r0
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## String to partially override prestashop.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override prestashop.fullname template
##
# fullnameOverride:
## PrestaShop host to create application URLs
## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration
##
# prestashopHost:
## User of the application
## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration
##
prestashopUsername: user@example.com
## Application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration
##
# prestashopPassword:
## Admin email
## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration
##
prestashopEmail: user@example.com
## First Name
## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration
##
prestashopFirstName: Bitnami
## Last Name
## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration
##
prestashopLastName: User
## Cookie Check IP
## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration
##
prestashopCookieCheckIP: "no"
## Country
## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration
##
prestashopCountry: "us"
## Language
## ref: https://github.com/bitnami/bitnami-docker-prestashop#configuration
##
prestashopLanguage: "en"
## Set to `yes` to allow the container to be started with blank passwords
## ref: https://github.com/bitnami/bitnami-docker-prestashop#environment-variables
allowEmptyPassword: "yes"
##
## External database configuration
##
externalDatabase:
## Database host
host:
## Database port
port: 3306
## Database user
user: bn_prestashop
## Database password
password:
## Database name
database: bitnami_prestashop
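## For illustration only (hostname and credentials are hypothetical): to use an
## external database, set mariadb.enabled=false and fill in this block, e.g.
# externalDatabase:
#   host: mariadb.example.com
#   port: 3306
#   user: bn_prestashop
#   password: my-db-password
#   database: bitnami_prestashop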
## SMTP mail delivery configuration
## ref: https://github.com/bitnami/bitnami-docker-prestashop/#smtp-configuration
##
# smtpHost:
# smtpPort:
# smtpUser:
# smtpPassword:
# smtpProtocol:
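## For illustration only (host and credentials are hypothetical), a populated
## SMTP block might look like:
# smtpHost: smtp.example.com
# smtpPort: 587
# smtpUser: my-smtp-user
# smtpPassword: my-smtp-password
# smtpProtocol: tls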
##
## MariaDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mariadb/values.yaml
##
mariadb:
## Whether to deploy a MariaDB server to satisfy the application's database requirements.
## To use an external database, set this to false and configure the externalDatabase parameters.
enabled: true
## Tag for the Bitnami MariaDB image to use
## ref: https://github.com/bitnami/bitnami-docker-mariadb
image:
registry: docker.io
repository: bitnami/mariadb
tag: 10.1.44-debian-10-r32
## Disable MariaDB replication
replication:
enabled: false
## Create a database and a database user
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#creating-a-database-user-on-first-run
##
db:
name: bitnami_prestashop
user: bn_prestashop
## If the password is not specified, MariaDB generates a random password
##
# password:
## MariaDB admin password
## ref: https://github.com/bitnami/bitnami-docker-mariadb/blob/master/README.md#setting-the-root-password-on-first-run
##
# rootUser:
# password:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
master:
persistence:
enabled: true
## mariadb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
## Kubernetes configuration
## For minikube, set this to NodePort, elsewhere use LoadBalancer
##
service:
type: LoadBalancer
# HTTP Port
port: 80
# HTTPS Port
httpsPort: 443
## loadBalancerIP for the PrestaShop Service (optional, cloud specific)
## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
# loadBalancerIP:
##
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
## https: <to set explicitly, choose port between 30000-32767>
nodePorts:
http: ""
https: ""
## Enable client source IP preservation
## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Local
## Configure the ingress resource that allows you to access the
## PrestaShop installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
certManager: false
## Ingress annotations done as key:value pairs
## For a full list of possible ingress annotations, please see
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
annotations:
# kubernetes.io/ingress.class: nginx
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
hosts:
- name: prestashop.local
path: /
# Set this to true in order to enable TLS on the ingress record
tls: false
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
tlsSecret: prestashop.local-tls
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: prestashop.local-tls
# key:
# certificate:
## Control where client requests go, to the same pod or round-robin
## Values: ClientIP or None
## ref: https://kubernetes.io/docs/user-guide/services/
sessionAffinity: "None"
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## Prestashop Data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
accessMode: ReadWriteOnce
size: 8Gi
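## For illustration (the claim name is hypothetical): to bind to a manually
## created PVC instead of letting the chart create one, set e.g.
# existingClaim: my-prestashop-pvc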
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 512Mi
cpu: 300m
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
initialDelaySeconds: 600
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 6
successThreshold: 1
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Affinity for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Prometheus Exporter / Metrics
##
metrics:
enabled: false
image:
registry: docker.io
repository: bitnami/apache-exporter
tag: 0.7.0-debian-10-r38
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Metrics exporter pod Annotation and Labels
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9117"
## Metrics exporter resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
# resources: {}
|
kiam | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"kiam.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"kiam.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a fully qualified agent name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"kiam.agent.fullname\" -}}\n{{- if .Values.agent.fullnameOverride -}}\n{{- .Values.agent.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- printf \"%s-%s\" .Release.Name .Values.agent.name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s-%s\" .Release.Name $name .Values.agent.name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a fully qualified server name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"kiam.server.fullname\" -}}\n{{- if .Values.server.fullnameOverride -}}\n{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- printf \"%s-%s\" .Release.Name .Values.server.name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s-%s\" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"kiam.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate the name of the agent service account to use.\n*/}}\n{{- define \"kiam.serviceAccountName.agent\" -}}\n{{- if .Values.serviceAccounts.agent.create -}}\n {{ default (include \"kiam.agent.fullname\" .) .Values.serviceAccounts.agent.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccounts.agent.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate the name of the server service account to use.\n*/}}\n{{- define \"kiam.serviceAccountName.server\" -}}\n{{- if .Values.serviceAccounts.server.create -}}\n {{ default (include \"kiam.server.fullname\" .) .Values.serviceAccounts.server.name }}\n{{- else -}}\n {{ default \"default\" .Values.serviceAccounts.server.name }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGenerate certificates for kiam server and agent\n*/}}\n{{- define \"kiam.agent.gen-certs\" -}}\n{{- $ca := .ca | default (genCA \"kiam-ca\" 365) -}}\n{{- $_ := set . 
\"ca\" $ca -}}\n{{- $cert := genSignedCert \"Kiam Agent\" nil nil 365 $ca -}}\n{{.Values.agent.tlsCerts.caFileName }}: {{ $ca.Cert | b64enc }}\n{{.Values.agent.tlsCerts.certFileName }}: {{ $cert.Cert | b64enc }}\n{{.Values.agent.tlsCerts.keyFileName }}: {{ $cert.Key | b64enc }}\n{{- end -}}\n{{- define \"kiam.server.gen-certs\" -}}\n{{- $altNames := list (include \"kiam.server.fullname\" .) (printf \"%s:%d\" (include \"kiam.server.fullname\" .) .Values.server.service.port) (printf \"127.0.0.1:%d\" .Values.server.service.targetPort) -}}\n{{- $ca := .ca | default (genCA \"kiam-ca\" 365) -}}\n{{- $_ := set . \"ca\" $ca -}}\n{{- $cert := genSignedCert \"Kiam Server\" (list \"127.0.0.1\") $altNames 365 $ca -}}\n{{.Values.server.tlsCerts.caFileName }}: {{ $ca.Cert | b64enc }}\n{{.Values.server.tlsCerts.certFileName }}: {{ $cert.Cert | b64enc }}\n{{.Values.server.tlsCerts.keyFileName }}: {{ $cert.Key | b64enc }}\n{{- end -}}\n",
"# agent-daemonset.yaml\n{{- if .Values.agent.enabled -}}\napiVersion: apps/v1beta2\nkind: DaemonSet\nmetadata:\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.agent.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kiam.fullname\" . }}-agent\nspec:\n selector:\n matchLabels:\n app: {{ template \"kiam.name\" . }}\n component: \"{{ .Values.agent.name }}\"\n release: {{ .Release.Name }}\n {{- if .Values.agent.podLabels }}\n{{ toYaml .Values.agent.podLabels | indent 6 }}\n {{- end }}\n template:\n metadata:\n {{- if .Values.agent.podAnnotations }}\n annotations:\n{{ toYaml .Values.agent.podAnnotations | indent 8 }}\n {{- end }}\n labels:\n app: {{ template \"kiam.name\" . }}\n component: \"{{ .Values.agent.name }}\"\n release: {{ .Release.Name }}\n {{- if .Values.agent.podLabels }}\n{{ toYaml .Values.agent.podLabels | indent 8 }}\n {{- end }}\n spec:\n hostNetwork: true\n dnsPolicy: {{ .Values.agent.dnsPolicy }}\n serviceAccountName: {{ template \"kiam.serviceAccountName.agent\" . }}\n {{- if .Values.agent.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.agent.nodeSelector | indent 8 }}\n {{- end }}\n tolerations:\n{{ toYaml .Values.agent.tolerations | indent 8 }}\n {{- if .Values.agent.affinity }}\n affinity:\n{{ toYaml .Values.agent.affinity | indent 10 }}\n {{- end }}\n volumes:\n - name: tls\n secret:\n {{- if .Values.agent.tlsSecret }}\n secretName: {{ .Values.agent.tlsSecret }}\n {{else}}\n secretName: {{ template \"kiam.fullname\" . }}-agent\n {{- end }}\n - name: xtables\n hostPath:\n path: /run/xtables.lock\n type: FileOrCreate\n {{- range .Values.agent.extraHostPathMounts }}\n - name: {{ .name }}\n hostPath:\n path: {{ .hostPath }}\n {{- end }}\n {{- if .Values.agent.priorityClassName }}\n priorityClassName: {{ .Values.agent.priorityClassName | quote }}\n {{- end }}\n containers:\n - name: {{ template \"kiam.name\" . }}-{{ .Values.agent.name }}\n {{- if .Values.agent.host.iptables }}\n securityContext:\n capabilities:\n add: [\"NET_ADMIN\"]\n {{- end }}\n image: \"{{ .Values.agent.image.repository }}:{{ .Values.agent.image.tag }}\"\n imagePullPolicy: {{ .Values.agent.image.pullPolicy }}\n command:\n - /kiam\n - agent\n args:\n {{- if .Values.agent.host.iptables }}\n - --iptables\n {{- end }}\n - --host-interface={{ .Values.agent.host.interface }}\n {{- if .Values.agent.log.jsonOutput }}\n - --json-log\n {{- end }}\n - --level={{ .Values.agent.log.level }}\n - --port={{ .Values.agent.host.port }}\n - --cert=/etc/kiam/tls/{{ .Values.agent.tlsCerts.certFileName }}\n - --key=/etc/kiam/tls/{{ .Values.agent.tlsCerts.keyFileName }}\n - --ca=/etc/kiam/tls/{{ .Values.agent.tlsCerts.caFileName }}\n - --server-address={{ template \"kiam.fullname\" . 
}}-server:{{ .Values.server.service.port }}\n {{- if .Values.agent.prometheus.scrape }}\n - --prometheus-listen-addr=0.0.0.0:{{ .Values.agent.prometheus.port }}\n - --prometheus-sync-interval={{ .Values.agent.prometheus.syncInterval }}\n {{- end }}\n {{- if .Values.agent.whiteListRouteRegexp }}\n - --whitelist-route-regexp={{ .Values.agent.whiteListRouteRegexp }}\n {{- end }}\n - --gateway-timeout-creation={{ .Values.agent.gatewayTimeoutCreation }}\n {{- range $key, $value := .Values.agent.extraArgs }}\n {{- if $value }}\n - --{{ $key }}={{ $value }}\n {{- else }}\n - --{{ $key }}\n {{- end }}\n {{- end }}\n env:\n - name: HOST_IP\n valueFrom:\n fieldRef:\n fieldPath: status.podIP\n {{- range $name, $value := .Values.agent.extraEnv }}\n - name: {{ $name }}\n value: {{ quote $value }}\n {{- end }}\n volumeMounts:\n - mountPath: /etc/kiam/tls\n name: tls\n - mountPath: /var/run/xtables.lock\n name: xtables\n {{- range .Values.agent.extraHostPathMounts }}\n - name: {{ .name }}\n mountPath: {{ .mountPath }}\n readOnly: {{ .readOnly }}\n {{- end }}\n livenessProbe:\n httpGet:\n path: /ping\n port: {{ .Values.agent.host.port }}\n initialDelaySeconds: 3\n periodSeconds: 3\n {{- if .Values.agent.resources }}\n resources:\n{{ toYaml .Values.agent.resources | indent 12 }}\n {{- end }}\n updateStrategy:\n type: {{ .Values.agent.updateStrategy }}\n{{- end }}\n",
"# agent-psp-clusterrole.yaml\n{{- if and .Values.agent.enabled .Values.psp.create -}}\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.agent.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n annotations:\n k8s-addon: podsecuritypolicy.addons.k8s.io\n name: {{ template \"kiam.fullname\" . }}-agent-psp-use\nrules:\n- apiGroups:\n - policy\n resources:\n - podsecuritypolicies\n resourceNames:\n - {{ template \"kiam.fullname\" . }}-agent\n verbs:\n - use\n{{- end -}}\n{{- end }}\n",
"# agent-psp-rolebinding.yaml\n{{- if and .Values.agent.enabled .Values.psp.create -}}\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.agent.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kiam.fullname\" . }}-agent-psp\n namespace: {{ .Release.Namespace }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"kiam.fullname\" . }}-agent-psp-use\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"kiam.serviceAccountName.agent\" . }}\n{{- end -}}\n{{- end }}\n",
"# agent-psp.yaml\n{{- if and .Values.agent.enabled .Values.psp.create -}}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"kiam.fullname\" . }}-agent\nspec:\n # Prevents running in privileged mode\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n{{- if .Values.agent.host.iptables }}\n allowedCapabilities:\n - \"NET_ADMIN\"\n{{ end }}\n volumes:\n - 'secret'\n - 'hostPath'\n allowedHostPaths:\n - pathPrefix: \"/run/xtables.lock\"\n{{- range .Values.agent.extraHostPathMounts }}\n - pathPrefix: {{ .hostPath }}\n readOnly: {{ .readOnly }}\n{{- end }}\n hostNetwork: true\n hostIPC: false\n hostPID: false\n runAsUser:\n rule: 'RunAsAny'\n seLinux:\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n{{- end }}\n",
"# agent-secret.yaml\n{{- if and (.Values.agent.enabled) (not .Values.agent.tlsSecret) -}}\nkind: Secret\napiVersion: v1\nmetadata:\n name: {{ template \"kiam.fullname\" . }}-agent\ntype: Opaque\ndata:\n{{- if .Values.agent.tlsFiles.ca }}\n{{ toYaml .Values.agent.tlsFiles | indent 2 }}\n{{- else }}\n{{ include \"kiam.agent.gen-certs\" . | indent 2 }}\n{{- end -}}\n{{- end }}\n",
"# agent-service.yaml\n{{- if .Values.agent.enabled -}}\n{{- if .Values.agent.prometheus.scrape -}}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"kiam.fullname\" . }}-agent\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.agent.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n {{- range $key, $value := .Values.agent.serviceLabels }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- if or .Values.agent.serviceAnnotations .Values.agent.prometheus.scrape }}\n annotations:\n {{- range $key, $value := .Values.agent.serviceAnnotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- if .Values.agent.prometheus.scrape }}\n prometheus.io/scrape: \"true\"\n prometheus.io/port: {{ .Values.agent.prometheus.port | quote }}\n {{- end }}\n {{- end }}\nspec:\n clusterIP: None\n selector:\n app: {{ template \"kiam.name\" . }}\n component: \"{{ .Values.agent.name }}\"\n release: {{ .Release.Name }}\n ports:\n - name: metrics\n port: {{ .Values.agent.prometheus.port }}\n targetPort: {{ .Values.agent.prometheus.port }}\n protocol: TCP\n{{- end -}}\n{{- end }}\n",
"# agent-serviceaccount.yaml\n{{- if .Values.agent.enabled -}}\n{{- if .Values.serviceAccounts.agent.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.agent.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kiam.serviceAccountName.agent\" . }}\n{{- end }}\n{{- end }}\n",
"# server-daemonset.yaml\n{{- if .Values.server.enabled -}}\napiVersion: apps/v1beta2\nkind: DaemonSet\nmetadata:\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.server.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kiam.fullname\" . }}-server\nspec:\n selector:\n matchLabels:\n app: {{ template \"kiam.name\" . }}\n component: \"{{ .Values.server.name }}\"\n release: {{ .Release.Name }}\n template:\n metadata:\n {{- if .Values.server.podAnnotations }}\n annotations:\n{{ toYaml .Values.server.podAnnotations | indent 8 }}\n {{- end }}\n labels:\n app: {{ template \"kiam.name\" . }}\n component: \"{{ .Values.server.name }}\"\n release: {{ .Release.Name }}\n {{- if .Values.server.podLabels }}\n{{ toYaml .Values.server.podLabels | indent 8 }}\n {{- end }}\n spec:\n serviceAccountName: {{ template \"kiam.serviceAccountName.server\" . }}\n hostNetwork: {{ .Values.server.useHostNetwork }}\n {{- if .Values.server.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.server.nodeSelector | indent 8 }}\n {{- end }}\n tolerations:\n{{ toYaml .Values.server.tolerations | indent 8 }}\n {{- if .Values.server.affinity }}\n affinity:\n{{ toYaml .Values.server.affinity | indent 10 }}\n {{- end }}\n volumes:\n - name: tls\n secret:\n {{- if .Values.server.tlsSecret }}\n secretName: {{ .Values.server.tlsSecret }}\n {{else}}\n secretName: {{ template \"kiam.fullname\" . }}-server\n {{- end }}\n {{- range .Values.server.extraHostPathMounts }}\n - name: {{ .name }}\n hostPath:\n path: {{ .hostPath }}\n {{- end }}\n {{- if .Values.server.priorityClassName }}\n priorityClassName: {{ .Values.server.priorityClassName | quote }}\n {{- end }}\n containers:\n - name: {{ template \"kiam.name\" . 
}}-{{ .Values.server.name }}\n image: \"{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}\"\n imagePullPolicy: {{ .Values.server.image.pullPolicy }}\n command:\n - /kiam\n - server\n args:\n {{- if .Values.server.log.jsonOutput }}\n - --json-log\n {{- end }}\n - --level={{ .Values.server.log.level }}\n - --bind=0.0.0.0:{{ .Values.server.service.targetPort }}\n - --cert=/etc/kiam/tls/{{ .Values.server.tlsCerts.certFileName }}\n - --key=/etc/kiam/tls/{{ .Values.server.tlsCerts.keyFileName }}\n - --ca=/etc/kiam/tls/{{ .Values.server.tlsCerts.caFileName }}\n {{- if .Values.server.roleBaseArn }}\n - --role-base-arn={{ .Values.server.roleBaseArn }}\n {{- else }}\n - --role-base-arn-autodetect\n {{- end }}\n {{- if .Values.server.assumeRoleArn }}\n - --assume-role-arn={{ .Values.server.assumeRoleArn }}\n {{- end }}\n - --session-duration={{ .Values.server.sessionDuration }}\n - --sync={{ .Values.server.cache.syncInterval }}\n {{- if .Values.server.prometheus.scrape }}\n - --prometheus-listen-addr=0.0.0.0:{{ .Values.server.prometheus.port }}\n - --prometheus-sync-interval={{ .Values.server.prometheus.syncInterval }}\n {{- end }}\n {{- range $key, $value := .Values.server.extraArgs }}\n {{- if $value }}\n - --{{ $key }}={{ $value }}\n {{- else }}\n - --{{ $key }}\n {{- end }}\n {{- end }}\n {{- if .Values.server.extraEnv }}\n env:\n {{- range $name, $value := .Values.server.extraEnv }}\n - name: {{ $name }}\n value: {{ quote $value }}\n {{- end }}\n {{- end }}\n volumeMounts:\n - mountPath: /etc/kiam/tls\n name: tls\n {{- range .Values.server.extraHostPathMounts }}\n - name: {{ .name }}\n mountPath: {{ .mountPath }}\n readOnly: {{ .readOnly }}\n {{- end }}\n livenessProbe:\n exec:\n command:\n - /kiam\n - health\n - --cert=/etc/kiam/tls/{{ .Values.server.tlsCerts.certFileName }}\n - --key=/etc/kiam/tls/{{ .Values.server.tlsCerts.keyFileName }}\n - --ca=/etc/kiam/tls/{{ .Values.server.tlsCerts.caFileName }}\n - --server-address={{ .Values.server.probes.serverAddress }}:{{ .Values.server.service.targetPort }}\n - --server-address-refresh=2s\n - --timeout=5s\n - --gateway-timeout-creation={{ .Values.server.gatewayTimeoutCreation }}\n initialDelaySeconds: 10\n periodSeconds: 10\n timeoutSeconds: 10\n readinessProbe:\n exec:\n command:\n - /kiam\n - health\n - --cert=/etc/kiam/tls/{{ .Values.server.tlsCerts.certFileName }}\n - --key=/etc/kiam/tls/{{ .Values.server.tlsCerts.keyFileName }}\n - --ca=/etc/kiam/tls/{{ .Values.server.tlsCerts.caFileName }}\n - --server-address={{ .Values.server.probes.serverAddress }}:{{ .Values.server.service.targetPort }}\n - --server-address-refresh=2s\n - --timeout=5s\n - --gateway-timeout-creation={{ .Values.server.gatewayTimeoutCreation }}\n initialDelaySeconds: 3\n periodSeconds: 10\n timeoutSeconds: 10\n {{- if .Values.server.resources }}\n resources:\n{{ toYaml .Values.server.resources | indent 12 }}\n {{- end }}\n updateStrategy:\n type: {{ .Values.server.updateStrategy }}\n{{- end }}\n",
"# server-psp-clusterrole.yaml\n{{- if and .Values.server.enabled .Values.psp.create -}}\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.server.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n annotations:\n k8s-addon: podsecuritypolicy.addons.k8s.io\n name: {{ template \"kiam.fullname\" . }}-server-psp-use\nrules:\n- apiGroups:\n - policy\n resources:\n - podsecuritypolicies\n resourceNames:\n - {{ template \"kiam.fullname\" . }}-server\n verbs:\n - use\n{{- end -}}\n{{- end }}\n",
"# server-psp-rolebinding.yaml\n{{- if and .Values.server.enabled .Values.psp.create -}}\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.server.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kiam.fullname\" . }}-server-psp\n namespace: {{ .Release.Namespace }}\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"kiam.fullname\" . }}-server-psp-use\nsubjects:\n- kind: ServiceAccount\n name: {{ template \"kiam.serviceAccountName.server\" . }}\n{{- end -}}\n{{- end }}\n",
"# server-psp.yaml\n{{- if and .Values.server.enabled .Values.psp.create -}}\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: {{ template \"kiam.fullname\" . }}-server\nspec:\n # Prevents running in privileged mode\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n volumes:\n - 'secret'\n{{- if .Values.server.extraHostPathMounts }}\n - 'hostPath'\n {{- range .Values.server.extraHostPathMounts }}\n allowedHostPaths:\n - pathPrefix: {{ .hostPath }}\n readOnly: {{ .readOnly }}\n {{- end }}\n{{- end }}\n hostNetwork: {{ .Values.server.useHostNetwork }}\n hostIPC: false\n hostPID: false\n runAsUser:\n rule: 'RunAsAny'\n seLinux:\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n{{- end }}\n",
"# server-read-clusterrole.yaml\n{{- if .Values.server.enabled -}}\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.server.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kiam.fullname\" . }}-read\nrules:\n - apiGroups:\n - \"\"\n resources:\n - namespaces\n - pods\n verbs:\n - watch\n - get\n - list\n{{- end -}}\n{{- end -}}\n",
"# server-read-clusterrolebinding.yaml\n{{- if .Values.server.enabled -}}\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.server.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kiam.fullname\" . }}-read\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"kiam.fullname\" . }}-read\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"kiam.serviceAccountName.server\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n{{- end }}\n",
"# server-secret.yaml\n{{- if and (.Values.server.enabled) (not .Values.server.tlsSecret) -}}\nkind: Secret\napiVersion: v1\nmetadata:\n name: {{ template \"kiam.fullname\" . }}-server\ntype: Opaque\ndata:\n{{- if .Values.server.tlsFiles.ca }}\n{{ toYaml .Values.server.tlsFiles | indent 2 }}\n{{- else }}\n{{ include \"kiam.server.gen-certs\" . | indent 2 }}\n{{- end -}}\n{{- end }}\n",
"# server-service.yaml\n{{- if .Values.server.enabled -}}\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"kiam.fullname\" . }}-server\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.server.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n {{- range $key, $value := .Values.server.serviceLabels }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- if or .Values.server.serviceAnnotations .Values.server.prometheus.scrape }}\n annotations:\n {{- range $key, $value := .Values.server.serviceAnnotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\n {{- if .Values.server.prometheus.scrape }}\n prometheus.io/scrape: \"true\"\n prometheus.io/port: {{ .Values.server.prometheus.port | quote }}\n {{- end }}\n {{- end }}\nspec:\n clusterIP: None\n selector:\n app: {{ template \"kiam.name\" . }}\n component: \"{{ .Values.server.name }}\"\n release: {{ .Release.Name }}\n ports:\n {{- if .Values.server.prometheus.scrape }}\n - name: metrics\n port: {{ .Values.server.prometheus.port }}\n targetPort: {{ .Values.server.prometheus.port }}\n protocol: TCP\n {{- end }}\n - name: grpclb\n port: {{ .Values.server.service.port }}\n targetPort: {{ .Values.server.service.targetPort }}\n protocol: TCP\n{{- end }}\n",
"# server-serviceaccount.yaml\n{{- if .Values.server.enabled -}}\n{{- if .Values.serviceAccounts.server.create }}\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.server.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kiam.serviceAccountName.server\" . }}\n{{- end }}\n{{- end }}\n",
"# server-write-clusterrole.yaml\n{{- if .Values.server.enabled -}}\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.server.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kiam.fullname\" . }}-write\nrules:\n - apiGroups:\n - \"\"\n resources:\n - events\n verbs:\n - create\n - patch\n{{- end -}}\n{{- end -}}\n",
"# server-write-clusterrolebinding.yaml\n{{- if .Values.server.enabled -}}\n{{- if .Values.rbac.create -}}\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n labels:\n app: {{ template \"kiam.name\" . }}\n chart: {{ template \"kiam.chart\" . }}\n component: \"{{ .Values.server.name }}\"\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\n name: {{ template \"kiam.fullname\" . }}-write\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: {{ template \"kiam.fullname\" . }}-write\nsubjects:\n - kind: ServiceAccount\n name: {{ template \"kiam.serviceAccountName.server\" . }}\n namespace: {{ .Release.Namespace }}\n{{- end -}}\n{{- end }}\n"
] | extraArgs: {}
agent:
## If false, agent will not be installed
##
enabled: true
## agent container name
##
name: agent
image:
repository: quay.io/uswitch/kiam
tag: v3.3
pullPolicy: IfNotPresent
## agent whitelist of proxy routes matching this reg-ex
##
# whiteListRouteRegexp:
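## For illustration only (the pattern is hypothetical): for example, to let
## pods fetch the instance identity document directly from the metadata API:
# whiteListRouteRegexp: /latest/dynamic/instance-identity/document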
## Logging settings
##
log:
jsonOutput: true
level: info
## Host networking settings
##
host:
iptables: false
port: 8181
interface: cali+
## Prometheus metrics
##
prometheus:
scrape: true
port: 9620
syncInterval: 5s
## Annotations to be added to pods
##
podAnnotations: {}
## Labels to be added to pods
##
podLabels: {}
## Annotations to be added to service
##
serviceAnnotations: {}
## Labels to be added to service
##
serviceLabels: {}
## Used to assign priority to agent pods
## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
##
priorityClassName: ""
## Strategy for DaemonSet updates (requires Kubernetes 1.6+)
## Ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
##
updateStrategy: OnDelete
## Pod DNS policy
## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-policy
##
dnsPolicy: ClusterFirstWithHostNet
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
# kubernetes.io/role: node
## Pod tolerations
## Ref https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
# Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# Expects input structure as per specification for example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: foo.bar.com/role
# operator: In
# values:
# - master
affinity: {}
## Agent container resources
## Ref https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
##
resources: {}
## Additional container arguments
##
extraArgs: {}
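## For illustration (flag names are hypothetical): each map entry is rendered
## by the daemonset template as a CLI flag, "--key=value" when a value is set
## or a bare "--key" when the value is empty, e.g.
# extraArgs:
#   some-flag: some-value
#   some-boolean-flag: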
## Additional container environment variables
##
extraEnv: {}
## Additional container hostPath mounts
##
extraHostPathMounts: []
# - name: ssl-certs
# mountPath: /etc/ssl/certs
## Running on Amazon Linux or RHEL distros:
# hostPath: /etc/pki/ca-trust/extracted/pem
## else:
# hostPath: /usr/share/ca-certificates
# readOnly: true
## Timeout when creating the kiam gateway
##
gatewayTimeoutCreation: 50ms
## Base64-encoded PEM values for agent's CA certificate(s), certificate and private key
##
tlsFiles:
ca:
cert:
key:
## Secret name of agent's TLS certificates
##
tlsSecret:
## Agent TLS Certificate filenames
tlsCerts:
certFileName: cert
keyFileName: key
caFileName: ca
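## For illustration only (the secret name and encoded data are hypothetical):
## pre-created certificates can either be referenced through an existing secret,
# tlsSecret: my-kiam-agent-tls
## or embedded directly as base64-encoded PEM under the filenames given above:
# tlsFiles:
#   ca: <base64-encoded CA certificate>
#   cert: <base64-encoded certificate>
#   key: <base64-encoded private key>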
server:
## If false, server will not be installed
##
enabled: true
## server container name
##
name: server
image:
repository: quay.io/uswitch/kiam
tag: v3.3
pullPolicy: IfNotPresent
## Logging settings
##
log:
jsonOutput: true
level: info
## Prometheus metrics
##
prometheus:
scrape: true
port: 9620
syncInterval: 5s
## Annotations to be added to pods
##
podAnnotations: {}
## Labels to be added to pods
##
podLabels: {}
## Annotations to be added to service
##
serviceAnnotations: {}
## Labels to be added to service
##
serviceLabels: {}
## Used to assign priority to server pods
## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
##
priorityClassName: ""
## Strategy for DaemonSet updates (requires Kubernetes 1.6+)
## Ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/
##
updateStrategy: OnDelete
# Ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# Expects input structure as per specification for example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: foo.bar.com/role
# operator: In
# values:
# - master
affinity: {}
## Node labels for pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
# kubernetes.io/role: master
## Pod tolerations
## Ref https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
## Server container resources
## Ref https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
##
resources: {}
## Additional container arguments
##
extraArgs: {}
## Additional container environment variables
##
extraEnv: {}
## Additional container hostPath mounts
##
extraHostPathMounts: []
# - name: ssl-certs
# mountPath: /etc/ssl/certs
## Running on Amazon Linux or RHEL distros:
# hostPath: /etc/pki/ca-trust/extracted/pem
## else:
# hostPath: /usr/share/ca-certificates
# readOnly: true
service:
port: 443
targetPort: 443
## Timeout when creating the kiam gateway
##
gatewayTimeoutCreation: 50ms
## Server probe configuration
probes:
serverAddress: 127.0.0.1
## Base64-encoded PEM values for server's CA certificate(s), certificate and private key
##
tlsFiles:
ca:
cert:
key:
## Secret name of server's TLS certificates
##
tlsSecret:
## Base ARN for IAM roles
## If not specified, the EC2 metadata service is used to detect the ARN prefix
##
roleBaseArn: null
## Pod cache settings
##
cache:
syncInterval: 1m
## IAM role for the server to assume
##
assumeRoleArn: null
## Session duration for STS tokens
##
sessionDuration: 15m
## Use hostNetwork for server
## Set this to true when running the servers on the same nodes as the agents
useHostNetwork: false
## Server TLS Certificate filenames
tlsCerts:
certFileName: cert
keyFileName: key
caFileName: ca
rbac:
# Specifies whether RBAC resources should be created
create: true
psp:
# Specifies whether PodSecurityPolicies should be created
create: false
serviceAccounts:
agent:
create: true
name:
server:
create: true
name:
|
parse | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"parse.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"parse.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"parse.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\n*/}}\n{{- define \"parse.mongodb.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- printf \"%s-%s\" .Values.fullnameOverride \"mongodb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name \"mongodb\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCommon labels\n*/}}\n{{- define \"parse.labels\" -}}\napp.kubernetes.io/name: {{ include \"parse.name\" . }}\nhelm.sh/chart: {{ include \"parse.chart\" . }}\napp.kubernetes.io/instance: {{ .Release.Name }}\napp.kubernetes.io/managed-by: {{ .Release.Service }}\n{{- end -}}\n\n{{/*\nLabels to use on deploy.spec.selector.matchLabels and svc.spec.selector\n*/}}\n{{- define \"parse.matchLabels\" -}}\napp.kubernetes.io/name: {{ include \"parse.name\" . }}\napp.kubernetes.io/instance: {{ .Release.Name }}\n{{- end -}}\n\n{{/*\nGet the user defined LoadBalancerIP for this release.\nNote, returns 127.0.0.1 if using ClusterIP.\n*/}}\n{{- define \"parse.serviceIP\" -}}\n{{- if eq .Values.service.type \"ClusterIP\" -}}\n127.0.0.1\n{{- else -}}\n{{- default \"\" .Values.service.loadBalancerIP -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGets the host to be used for this application.\nIf not using ClusterIP, or if a host or LoadBalancerIP is not defined, the value will be empty.\n*/}}\n{{- define \"parse.host\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- $host := default \"\" .Values.server.host -}}\n{{- if .Values.ingress.enabled -}}\n{{- $ingressHost := first .Values.ingress.server.hosts -}}\n{{- $serverHost := default $ingressHost.name $host -}}\n{{- default (include \"parse.serviceIP\" .) $serverHost -}}\n{{- else -}}\n{{- default (include \"parse.serviceIP\" .) 
$host -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nGets the port to access Parse outside the cluster.\nWhen using ingress, we should use the port 80/443 instead of service.port\n*/}}\n{{- define \"parse.external-port\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.ingress.enabled -}}\n{{- $ingressHttpPort := \"80\" -}}\n{{- $ingressHttpsPort := \"443\" -}}\n{{- if eq .Values.dashboard.parseServerUrlProtocol \"https\" -}}\n{{- $ingressHttpsPort -}}\n{{- else -}}\n{{- $ingressHttpPort -}}\n{{- end -}}\n{{- else -}}\n{{ .Values.server.port }}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Parse dashboard image name\n*/}}\n{{- define \"parse.dashboard.image\" -}}\n{{- $registryName := .Values.dashboard.image.registry -}}\n{{- $repositoryName := .Values.dashboard.image.repository -}}\n{{- $tag := .Values.dashboard.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Parse server client image name\n*/}}\n{{- define \"parse.server.image\" -}}\n{{- $registryName := .Values.server.image.registry -}}\n{{- $repositoryName := .Values.server.image.repository -}}\n{{- $tag := .Values.server.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Docker Image Registry Secret Names\n*/}}\n{{- define \"parse.imagePullSecrets\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\nAlso, we can not use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n{{- if .Values.global.imagePullSecrets }}\nimagePullSecrets:\n{{- range .Values.global.imagePullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- else if or .Values.server.image.pullSecrets .Values.dashboard.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.server.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.dashboard.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.volumePermissions.image.pullSecrets }}\n - name: {{ . 
}}\n{{- end }}\n{{- end -}}\n{{- else if or .Values.server.image.pullSecrets .Values.dashboard.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}\nimagePullSecrets:\n{{- range .Values.server.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.dashboard.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- range .Values.volumePermissions.image.pullSecrets }}\n - name: {{ . }}\n{{- end }}\n{{- end -}}\n{{- end -}}\n\n{{/* Check if there are rolling tags in the images */}}\n{{- define \"parse.checkRollingTags\" -}}\n{{- if and (contains \"bitnami/\" .Values.server.image.repository) (not (.Values.server.image.tag | toString | regexFind \"-r\\\\d+$|sha256:\")) }}\nWARNING: Rolling tag detected ({{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.\n+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/\n{{- end }}\n{{- if and (contains \"bitnami/\" .Values.dashboard.image.repository) (not (.Values.dashboard.image.tag | toString | regexFind \"-r\\\\d+$|sha256:\")) }}\nWARNING: Rolling tag detected ({{ .Values.dashboard.image.repository }}:{{ .Values.dashboard.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.\n+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/\n{{- end }}\n{{- end -}}\n\n{{/*\nReturn the proper image name (for the init container volume-permissions image)\n*/}}\n{{- define \"parse.volumePermissions.image\" -}}\n{{- $registryName := .Values.volumePermissions.image.registry -}}\n{{- $repositoryName := .Values.volumePermissions.image.repository -}}\n{{- $tag := .Values.volumePermissions.image.tag | toString -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic.\nAlso, we can't use a single if because lazy evaluation is not an option\n*/}}\n{{- if .Values.global }}\n {{- if .Values.global.imageRegistry }}\n {{- printf \"%s/%s:%s\" .Values.global.imageRegistry $repositoryName $tag -}}\n {{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n {{- end -}}\n{{- else -}}\n {{- printf \"%s/%s:%s\" $registryName $repositoryName $tag -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the proper Storage Class\n*/}}\n{{- define \"parse.storageClass\" -}}\n{{/*\nHelm 2.11 supports the assignment of a value to a variable defined in a different scope,\nbut Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.\n*/}}\n{{- if .Values.global -}}\n {{- if .Values.global.storageClass -}}\n {{- if (eq \"-\" .Values.global.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.global.storageClass -}}\n {{- end -}}\n {{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end -}}\n {{- end -}}\n{{- else -}}\n {{- if .Values.persistence.storageClass -}}\n {{- if (eq \"-\" .Values.persistence.storageClass) -}}\n {{- printf \"storageClassName: \\\"\\\"\" -}}\n {{- else }}\n {{- printf \"storageClassName: %s\" .Values.persistence.storageClass -}}\n {{- end -}}\n {{- end 
-}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nReturn the Parse Cloud Code scripts ConfigMap name.\n*/}}\n{{- define \"parse.cloudCodeScriptsCMName\" -}}\n{{- if .Values.server.existingCloudCodeScriptsCM -}}\n    {{- printf \"%s\" (tpl .Values.server.existingCloudCodeScriptsCM $) -}}\n{{- else -}}\n    {{- printf \"%s-cloud-code-scripts\" (include \"parse.fullname\" .) -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nRenders a value that contains a template.\nUsage:\n{{ include \"parse.tplValue\" ( dict \"value\" .Values.path.to.the.Value \"context\" $) }}\n*/}}\n{{- define \"parse.tplValue\" -}}\n    {{- if typeIs \"string\" .value }}\n        {{- tpl .value .context }}\n    {{- else }}\n        {{- tpl (.value | toYaml) .context }}\n    {{- end }}\n{{- end -}}\n\n{{/*\nCompile all warnings into a single message, and call fail.\n*/}}\n{{- define \"parse.validateValues\" -}}\n{{- $messages := list -}}\n{{- $messages := append $messages (include \"parse.validateValues.dashboard.serverUrlProtocol\" .) -}}\n{{- $messages := without $messages \"\" -}}\n{{- $message := join \"\\n\" $messages -}}\n\n{{- if $message -}}\n{{- printf \"\\nVALUES VALIDATION:\\n%s\" $message | fail -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nValidate values of Parse Dashboard - if TLS is enabled on the server ingress, the protocol must be \"https\"\n*/}}\n{{- define \"parse.validateValues.dashboard.serverUrlProtocol\" -}}\n{{- if .Values.ingress.enabled -}}\n{{- range .Values.ingress.server.hosts -}}\n{{- if and (.tls) (ne $.Values.dashboard.parseServerUrlProtocol \"https\") -}}\nparse: dashboard.parseServerUrlProtocol\n    If Parse Server is using an ingress with TLS enabled, this value must be set to \"https\"\n    so that URLs are formed with that protocol; otherwise, Parse Dashboard will always redirect to \"http\".\n{{- end -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}",
"# cloud-code-configmap.yaml\n{{- if and .Values.server.enableCloudCode (or .Values.server.cloudCodeScripts (.Files.Glob \"files/cloud/*.js\")) (not .Values.server.existingCloudCodeCM) }}\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ include \"parse.fullname\" . }}-cloud-code-scripts\n labels: {{ include \"parse.labels\" . | nindent 4 }}\n app.kubernetes.io/component: server\ndata:\n{{- with .Files.Glob \"files/cloud/*.js\" }}\n{{ .AsConfig | indent 2 }}\n{{- end }}\n{{- if .Values.server.cloudCodeScripts }}\n{{- include \"parse.tplValue\" (dict \"value\" .Values.server.cloudCodeScripts \"context\" $) | nindent 2 }}\n{{- end }}\n{{- end }}\n",
"# dashboard-deployment.yaml\n{{- if and (include \"parse.host\" .) .Values.dashboard.enabled -}}\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"parse.fullname\" . }}-dashboard\n labels: {{ include \"parse.labels\" . | nindent 4 }}\n app.kubernetes.io/component: dashboard\nspec:\n selector:\n matchLabels: {{ include \"parse.matchLabels\" . | nindent 6 }}\n app.kubernetes.io/component: dashboard\n replicas: 1\n template:\n metadata:\n labels: {{ include \"parse.labels\" . | nindent 8 }}\n app.kubernetes.io/component: dashboard\n spec:\n {{- if .Values.dashboard.affinity }}\n affinity: {{- include \"parse.tplValue\" (dict \"value\" .Values.dashboard.affinity \"context\" $) | nindent 8 }}\n {{- end }}\n {{- if .Values.dashboard.nodeSelector }}\n nodeSelector: {{- include \"parse.tplValue\" (dict \"value\" .Values.dashboard.nodeSelector \"context\" $) | nindent 8 }}\n {{- end }}\n {{- if .Values.dashboard.tolerations }}\n tolerations: {{- include \"parse.tplValue\" (dict \"value\" .Values.dashboard.tolerations \"context\" $) | nindent 8 }}\n {{- end }}\n {{- if .Values.dashboard.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.dashboard.securityContext.fsGroup }}\n runAsUser: {{ .Values.dashboard.securityContext.runAsUser }}\n {{- end }}\n{{- include \"parse.imagePullSecrets\" . | indent 6 }}\n containers:\n - name: parse-dashboard\n image: {{ include \"parse.dashboard.image\" . }}\n imagePullPolicy: {{ .Values.dashboard.image.pullPolicy | quote }}\n env:\n - name: PARSE_DASHBOARD_USER\n value: {{ .Values.dashboard.username }}\n - name: PARSE_DASHBOARD_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ include \"parse.fullname\" . }}\n key: parse-dashboard-password\n - name: PARSE_HOST\n value: {{ include \"parse.host\" . | quote }}\n - name: PARSE_USE_HOSTNAME\n value: {{ ternary \"yes\" \"no\" .Values.ingress.enabled | quote }}\n - name: PARSE_PORT_NUMBER\n value: {{ include \"parse.external-port\" . | quote }}\n - name: PARSE_PROTOCOL\n value: {{ .Values.dashboard.parseServerUrlProtocol | quote }}\n - name: PARSE_APP_ID\n value: {{ .Values.server.appId | quote }}\n - name: PARSE_MASTER_KEY\n valueFrom:\n secretKeyRef:\n name: {{ include \"parse.fullname\" . 
}}\n key: master-key\n - name: PARSE_DASHBOARD_APP_NAME\n value: {{ .Values.dashboard.appName | quote }}\n {{- if .Values.dashboard.extraEnvVars }}\n {{- include \"parse.tplValue\" ( dict \"value\" .Values.dashboard.extraEnvVars \"context\" $ ) | nindent 12 }}\n {{- end }}\n {{- if or .Values.dashboard.extraEnvVarsCM .Values.dashboard.extraEnvVarsSecret }}\n envFrom:\n {{- if .Values.dashboard.extraEnvVarsCM }}\n - configMapRef:\n name: {{ include \"parse.tplValue\" ( dict \"value\" .Values.dashboard.extraEnvVarsCM \"context\" $ ) }}\n {{- end }}\n {{- if .Values.dashboard.extraEnvVarsSecret }}\n - secretRef:\n name: {{ include \"parse.tplValue\" ( dict \"value\" .Values.dashboard.extraEnvVarsSecret \"context\" $ ) }}\n {{- end }}\n {{- end }}\n ports:\n - name: dashboard-http\n containerPort: 4040\n {{- if and .Values.dashboard.livenessProbe.enabled }}\n livenessProbe:\n httpGet:\n path: /\n port: dashboard-http\n initialDelaySeconds: {{ .Values.dashboard.livenessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.dashboard.livenessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.dashboard.livenessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.dashboard.livenessProbe.successThreshold }}\n failureThreshold: {{ .Values.dashboard.livenessProbe.failureThreshold }}\n {{- end }}\n {{- if and .Values.dashboard.readinessProbe.enabled }}\n readinessProbe:\n httpGet:\n path: /\n port: dashboard-http\n initialDelaySeconds: {{ .Values.dashboard.readinessProbe.initialDelaySeconds }}\n periodSeconds: {{ .Values.dashboard.readinessProbe.periodSeconds }}\n timeoutSeconds: {{ .Values.dashboard.readinessProbe.timeoutSeconds }}\n successThreshold: {{ .Values.dashboard.readinessProbe.successThreshold }}\n failureThreshold: {{ .Values.dashboard.readinessProbe.failureThreshold }}\n {{- end }}\n {{- if .Values.dashboard.resources }}\n resources: {{- toYaml .Values.dashboard.resources | nindent 12 }}\n {{- end }}\n volumeMounts:\n - name: parse-dashboard-data\n mountPath: /bitnami/parse-dashboard\n volumes:\n - name: parse-dashboard-data\n emptyDir: {}\n{{- end -}}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled }}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ include \"parse.fullname\" . }}\n labels: {{ include \"parse.labels\" . | nindent 4 }}\n annotations:\n {{- if .Values.ingress.certManager }}\n kubernetes.io/tls-acme: \"true\"\n {{- end }}\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- if .Values.dashboard.enabled }}\n {{- range .Values.ingress.dashboard.hosts }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: {{ include \"parse.fullname\" $ }}\n servicePort: dashboard-http\n {{- end }}\n {{- end }}\n {{- range .Values.ingress.server.hosts }}\n - host: {{ .name }}\n http:\n paths:\n - path: {{ default \"/\" .path }}\n backend:\n serviceName: {{ include \"parse.fullname\" $ }}\n servicePort: server-http\n {{- end }}\n tls:\n {{- if .Values.dashboard.enabled }}\n {{- range .Values.ingress.dashboard.hosts }}\n {{- if .tls }}\n - hosts:\n {{- if .tlsHosts }}\n {{- range $host := .tlsHosts }}\n - {{ $host }}\n {{- end }}\n {{- else }}\n - {{ .name }}\n {{- end }}\n secretName: {{ .tlsSecret }}\n {{- end }}\n {{- end }}\n {{- end }}\n {{- range .Values.ingress.server.hosts }}\n {{- if .tls }}\n - hosts:\n {{- if .tlsHosts }}\n {{- range $host := .tlsHosts }}\n - {{ $host }}\n {{- end }}\n {{- else }}\n - {{ .name }}\n {{- end }}\n secretName: {{ .tlsSecret }}\n {{- end }}\n {{- end }}\n{{- end }}\n",
"# pvc.yaml\n{{- if .Values.persistence.enabled -}}\nkind: PersistentVolumeClaim\napiVersion: v1\nmetadata:\n name: {{ include \"parse.fullname\" . }}\n labels: {{ include \"parse.labels\" . | nindent 4 }}\nspec:\n accessModes:\n - {{ .Values.persistence.accessMode | quote }}\n resources:\n requests:\n storage: {{ .Values.persistence.size | quote }}\n {{ include \"parse.storageClass\" . }}\n{{- end -}}\n",
"# secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ include \"parse.fullname\" . }}\n labels: {{ include \"parse.labels\" . | nindent 4 }}\ntype: Opaque\ndata:\n {{ if .Values.server.masterKey }}\n master-key: {{ .Values.server.masterKey | b64enc | quote }}\n {{ else }}\n master-key: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n {{ if .Values.dashboard.enabled }}\n {{ if .Values.dashboard.password }}\n parse-dashboard-password: {{ .Values.dashboard.password | b64enc | quote }}\n {{ else }}\n parse-dashboard-password: {{ randAlphaNum 10 | b64enc | quote }}\n {{ end }}\n {{ end }}\n",
"# server-deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ include \"parse.fullname\" . }}-server\n labels: {{ include \"parse.labels\" . | nindent 4 }}\n app.kubernetes.io/component: server\nspec:\n selector:\n matchLabels: {{ include \"parse.matchLabels\" . | nindent 6 }}\n app.kubernetes.io/component: server\n replicas: 1\n template:\n metadata:\n labels: {{ include \"parse.labels\" . | nindent 8 }}\n app.kubernetes.io/component: server\n spec:\n {{- if .Values.server.affinity }}\n affinity: {{- include \"parse.tplValue\" (dict \"value\" .Values.server.affinity \"context\" $) | nindent 8 }}\n {{- end }}\n {{- if .Values.server.nodeSelector }}\n nodeSelector: {{- include \"parse.tplValue\" (dict \"value\" .Values.server.nodeSelector \"context\" $) | nindent 8 }}\n {{- end }}\n {{- if .Values.server.tolerations }}\n tolerations: {{- include \"parse.tplValue\" (dict \"value\" .Values.server.tolerations \"context\" $) | nindent 8 }}\n {{- end }}\n {{- if .Values.server.securityContext.enabled }}\n securityContext:\n fsGroup: {{ .Values.server.securityContext.fsGroup }}\n runAsUser: {{ .Values.server.securityContext.runAsUser }}\n {{- end }}\n{{- include \"parse.imagePullSecrets\" . | indent 6 }}\n {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }}\n initContainers:\n - name: volume-permissions\n image: {{ include \"parse.volumePermissions.image\" . }}\n imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}\n command: [\"chown\", \"-R\", \"{{ .Values.server.securityContext.runAsUser }}:{{ .Values.server.securityContext.fsGroup }}\", \"/bitnami/parse\"]\n securityContext:\n runAsUser: 0\n resources: {{ toYaml .Values.volumePermissions.resources | nindent 12 }}\n volumeMounts:\n - name: parse-data\n mountPath: /bitnami/parse\n {{- end }}\n containers:\n - name: parse\n image: {{ include \"parse.server.image\" . }}\n imagePullPolicy: {{ .Values.server.image.pullPolicy | quote }}\n env:\n - name: PARSE_HOST\n value: \"0.0.0.0\"\n - name: PARSE_PORT_NUMBER\n value: {{ .Values.server.port | quote }}\n - name: PARSE_MOUNT_PATH\n value: {{ .Values.server.mountPath | quote }}\n - name: PARSE_APP_ID\n value: {{ .Values.server.appId | quote }}\n - name: PARSE_MASTER_KEY\n valueFrom:\n secretKeyRef:\n name: {{ include \"parse.fullname\" . }}\n key: master-key\n - name: PARSE_ENABLE_CLOUD_CODE\n value: {{ ternary \"yes\" \"no\" .Values.server.enableCloudCode | quote }}\n - name: MONGODB_HOST\n value: {{ include \"parse.mongodb.fullname\" . }}\n - name: MONGODB_PORT\n value: \"27017\"\n {{- if .Values.mongodb.usePassword }}\n - name: MONGODB_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ include \"parse.mongodb.fullname\" . 
}}\n                  key: mongodb-root-password\n          {{- end }}\n          {{- if .Values.server.extraEnvVars }}\n          {{- include \"parse.tplValue\" ( dict \"value\" .Values.server.extraEnvVars \"context\" $ ) | nindent 12 }}\n          {{- end }}\n          {{- if or .Values.server.extraEnvVarsCM .Values.server.extraEnvVarsSecret }}\n          envFrom:\n            {{- if .Values.server.extraEnvVarsCM }}\n            - configMapRef:\n                name: {{ include \"parse.tplValue\" ( dict \"value\" .Values.server.extraEnvVarsCM \"context\" $ ) }}\n            {{- end }}\n            {{- if .Values.server.extraEnvVarsSecret }}\n            - secretRef:\n                name: {{ include \"parse.tplValue\" ( dict \"value\" .Values.server.extraEnvVarsSecret \"context\" $ ) }}\n            {{- end }}\n          {{- end }}\n          ports:\n            - name: server-http\n              containerPort: {{ .Values.server.port }}\n          {{- if and .Values.server.livenessProbe.enabled }}\n          livenessProbe:\n            httpGet:\n              path: {{ .Values.server.mountPath }}/users\n              port: server-http\n              httpHeaders:\n                - name: X-Parse-Application-Id\n                  value: {{ .Values.server.appId }}\n            initialDelaySeconds: {{ .Values.server.livenessProbe.initialDelaySeconds }}\n            periodSeconds: {{ .Values.server.livenessProbe.periodSeconds }}\n            timeoutSeconds: {{ .Values.server.livenessProbe.timeoutSeconds }}\n            successThreshold: {{ .Values.server.livenessProbe.successThreshold }}\n            failureThreshold: {{ .Values.server.livenessProbe.failureThreshold }}\n          {{- end }}\n          {{- if and .Values.server.readinessProbe.enabled }}\n          readinessProbe:\n            httpGet:\n              path: {{ .Values.server.mountPath }}/users\n              port: server-http\n              httpHeaders:\n                - name: X-Parse-Application-Id\n                  value: {{ .Values.server.appId }}\n            initialDelaySeconds: {{ .Values.server.readinessProbe.initialDelaySeconds }}\n            periodSeconds: {{ .Values.server.readinessProbe.periodSeconds }}\n            timeoutSeconds: {{ .Values.server.readinessProbe.timeoutSeconds }}\n            successThreshold: {{ .Values.server.readinessProbe.successThreshold }}\n            failureThreshold: {{ .Values.server.readinessProbe.failureThreshold }}\n          {{- end }}\n          {{- if .Values.server.resources }}\n          resources: {{- toYaml .Values.server.resources | nindent 12 }}\n          {{- end }}\n          volumeMounts:\n            - name: parse-data\n              mountPath: /bitnami/parse\n            {{- if and .Values.server.enableCloudCode (or (.Files.Glob \"files/cloud/*.js\") .Values.server.cloudCodeScripts .Values.server.existingCloudCodeScriptsCM) }}\n            - name: cloud-code-config\n              mountPath: /opt/bitnami/parse/cloud\n            {{- end }}\n      volumes:\n        {{- if and .Values.server.enableCloudCode (or (.Files.Glob \"files/cloud/*.js\") .Values.server.cloudCodeScripts .Values.server.existingCloudCodeScriptsCM) }}\n        - name: cloud-code-config\n          configMap:\n            name: {{ include \"parse.cloudCodeScriptsCMName\" . }}\n        {{- end }}\n        - name: parse-data\n        {{- if .Values.persistence.enabled }}\n          persistentVolumeClaim:\n            claimName: {{ include \"parse.fullname\" . }}\n        {{- else }}\n          emptyDir: {}\n        {{- end }}\n",
"# svc.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ include \"parse.fullname\" . }}\n labels: {{ include \"parse.labels\" . | nindent 4 }}\nspec:\n type: {{ .Values.service.type }}\n {{- if and (.Values.service.loadBalancerIP) (eq .Values.service.type \"LoadBalancer\") }}\n loadBalancerIP: {{ .Values.service.loadBalancerIP | quote }}\n {{- end }}\n {{- if (or (eq .Values.service.type \"LoadBalancer\") (eq .Values.service.type \"NodePort\")) }}\n externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }}\n {{- end }}\n ports:\n - name: server-http\n port: {{ .Values.server.port }}\n targetPort: server-http\n - name: dashboard-http\n port: {{ .Values.service.port }}\n targetPort: dashboard-http\n {{- if (and (eq .Values.service.type \"NodePort\") (not (empty .Values.service.nodePorts.http)))}}\n nodePort: {{ .Values.service.nodePorts.http }}\n {{- end }}\n selector: {{ include \"parse.matchLabels\" . | nindent 4 }}\n"
] | ## Global Docker image parameters
## Please note that this will override the image parameters, including dependencies, configured to use the global value
## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass
##
# global:
# imageRegistry: myRegistryName
# imagePullSecrets:
# - myRegistryKeySecretName
# storageClass: myStorageClass
## String to partially override parse.fullname template (will maintain the release name)
##
# nameOverride:
## String to fully override parse.fullname template
##
# fullnameOverride:
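## For example, with release name "my-release": nameOverride: "parse" renders
## resources as "my-release-parse", while fullnameOverride: "parse" renders
## them simply as "parse" (see the parse.fullname helper).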
## Init containers parameters:
## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
##
volumePermissions:
enabled: false
image:
registry: docker.io
repository: bitnami/minideb
tag: buster
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
resources: {}
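  ## For example (a sketch; release name is illustrative), enable the init
  ## container at install time when the provisioned volume is owned by root:
  ##   helm install --name my-release . --set volumePermissions.enabled=true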
## Kubernetes serviceType for Parse Deployment
## ref: http://kubernetes.io/docs/user-guide/services/#publishing-services---service-types
##
service:
type: LoadBalancer
# Parse dashboard HTTP Port
port: 80
## loadBalancerIP:
##
## nodePorts:
## http: <to set explicitly, choose port between 30000-32767>
nodePorts:
http: ""
## Enable client source IP preservation
  ## ref: http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
  ## loadBalancerIP for the Parse Service (optional, cloud specific)
  ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
  ##
  # loadBalancerIP:
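  ## For example (a sketch; the port is illustrative), to expose Parse on a
  ## fixed NodePort instead of a LoadBalancer:
  # type: NodePort
  # nodePorts:
  #   http: "30001"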
server:
## Bitnami Parse image version
## ref: https://hub.docker.com/r/bitnami/parse/tags/
##
image:
registry: docker.io
repository: bitnami/parse
tag: 3.10.0-debian-10-r30
    ## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Parse Server Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## Parse Server Port
## ref: https://github.com/bitnami/bitnami-docker-parse#configuration
##
port: 1337
## Parse API mount path
## ref: https://github.com/bitnami/bitnami-docker-parse#configuration
##
mountPath: /parse
## Parse Server App ID
## ref: https://github.com/bitnami/bitnami-docker-parse#configuration
##
appId: myappID
## Parse Server Master Key
## ref: https://github.com/bitnami/bitnami-docker-parse#configuration
##
# masterKey:
## An array to add extra env vars
## For example:
## extraEnvVars:
## - name: PARSE_SERVER_ALLOW_CLIENT_CLASS_CREATION
## value: "true"
##
extraEnvVars: []
## Name of a ConfigMap containing extra env vars
##
extraEnvVarsCM:
## Name of a Secret containing extra env vars
##
extraEnvVarsSecret:
  ## Enable Cloud Code
## ref: https://github.com/bitnami/bitnami-docker-parse#how-to-deploy-your-cloud-functions-with-parse-cloud-code
##
enableCloudCode: false
## Cloud Code scripts
## Specify dictionary of Cloud Code scripts and content
## Alternatively, you can put your scripts under the files/cloud directory
##
# cloudCodeScripts:
# main.js: |
  #     Parse.Cloud.define("sayHelloWorld", function(request) {
# return "Hello world!";
# });
## ConfigMap with Cloud Code scripts
## NOTE: This will override cloudCodeScripts
##
  # existingCloudCodeScriptsCM:
## Parse Server pods' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
## Parse Server pods' liveness and readiness probes
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe:
enabled: true
initialDelaySeconds: 120
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
## Affinity for pod assignment. Evaluated as a template
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for pod assignment. Evaluated as a template
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment. Evaluated as a template
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
  tolerations: []
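  ## For example (a sketch; assumes the chart's default pod labels), soft
  ## anti-affinity to spread server pods across nodes:
  # affinity:
  #   podAntiAffinity:
  #     preferredDuringSchedulingIgnoredDuringExecution:
  #       - weight: 100
  #         podAffinityTerm:
  #           topologyKey: kubernetes.io/hostname
  #           labelSelector:
  #             matchLabels:
  #               app.kubernetes.io/name: parse
  #               app.kubernetes.io/component: server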
dashboard:
## Enable deployment of Parse Dashboard
##
enabled: true
## Bitnami Parse Dashboard image version
## ref: https://hub.docker.com/r/bitnami/parse-dashboard/tags/
##
image:
registry: docker.io
repository: bitnami/parse-dashboard
tag: 2.0.5-debian-10-r27
    ## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## Parse Dashboard Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## Parse Dashboard application username
## ref: https://github.com/bitnami/bitnami-docker-parse-dashboard#configuration
##
username: user
## Parse Dashboard application password
## Defaults to a random 10-character alphanumeric string if not set
## ref: https://github.com/bitnami/bitnami-docker-parse-dashboard#configuration
##
# password:
## Parse Dashboard application name
## ref: https://github.com/bitnami/bitnami-docker-parse-dashboard#configuration
##
appName: MyDashboard
## Parse Dashboard pods' resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
## Parse Dashboard pods' liveness and readiness probes
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
##
livenessProbe:
enabled: true
initialDelaySeconds: 240
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
## Affinity for pod assignment. Evaluated as a template
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
## Node labels for pod assignment. Evaluated as a template
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Tolerations for pod assignment. Evaluated as a template
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
  tolerations: []
## Protocol to form URLs to Parse
##
parseServerUrlProtocol: "http"
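  ## NOTE: when any ingress.server host has tls enabled, this must be "https";
  ## the parse.validateValues helper fails the release otherwise.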
## An array to add extra env vars
## For example:
## extraEnvVars:
  ##    - name: MY_ENV_VAR
## value: test
##
extraEnvVars: []
## Name of a ConfigMap containing extra env vars
##
extraEnvVarsCM:
## Name of a Secret containing extra env vars
##
extraEnvVarsSecret:
## Configure the ingress resource that allows you to access the
## Parse installation.
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
## Set to true to enable ingress record generation
##
enabled: false
## Set this to true in order to add the corresponding annotations for cert-manager
##
certManager: false
## Ingress annotations done as key:value pairs. If certManager is set to true,
## the annotation 'kubernetes.io/tls-acme: "true"' will automatically be set
## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
##
annotations:
# kubernetes.io/ingress.class: nginx
dashboard:
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
##
hosts:
- name: parse.local
path: /
## Set this to true in order to enable TLS on the ingress record
##
tls: false
## Optionally specify the TLS hosts for the ingress record
## Useful when the Ingress controller supports www-redirection
## If not specified, the above host name will be used
# tlsHosts:
# - www.parse.local
# - parse.local
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
##
tlsSecret: parse.local-tls
server:
## The list of hostnames to be covered with this ingress record.
## Most likely this will be just one host, but in the event more hosts are needed, this is an array
##
hosts:
- name: parse-server.local
path: /
## Set this to true in order to enable TLS on the ingress record
##
tls: false
## Optionally specify the TLS hosts for the ingress record
## Useful when the Ingress controller supports www-redirection
## If not specified, the above host name will be used
# tlsHosts:
# - www.parse-server.local
# - parse-server.local
## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
##
tlsSecret: parse.local-tls
secrets:
## If you're providing your own certificates, please use this to add the certificates as secrets
## key and certificate should start with -----BEGIN CERTIFICATE----- or
## -----BEGIN RSA PRIVATE KEY-----
##
## name should line up with a tlsSecret set further up
  ## If you're using cert-manager, this is unneeded, as it will create the secret for you if one is not provided
##
## It is also possible to create and manage the certificates outside of this helm chart
## Please see README.md for more information
# - name: parse.local-tls
# key:
# certificate:
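  ## For example, the secret can also be created out-of-band before installing
  ## the chart (a sketch; names are illustrative):
  ##   kubectl create secret tls parse.local-tls --cert=tls.crt --key=tls.key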
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## parse data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
##
## MongoDB chart configuration
##
## https://github.com/helm/charts/blob/master/stable/mongodb/values.yaml
##
mongodb:
## MongoDB Password authentication
usePassword: true
## If the password is not specified, MongoDB will generate a random password
##
# mongodbRootPassword:
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## mongodb data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
accessMode: ReadWriteOnce
size: 8Gi
|
prometheus-mysql-exporter | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"prometheus-mysql-exporter.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"prometheus-mysql-exporter.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"prometheus-mysql-exporter.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nSecret name for cloudsql credentials\n*/}}\n{{- define \"prometheus-mysql-exporter.cloudsqlsecret\" -}}\n{{ template \"prometheus-mysql-exporter.fullname\" . }}-cloudsqlsecret\n{{- end -}}\n\n{{/*\nSecret name for DATA_SOURCE_NAME\n*/}}\n{{- define \"prometheus-mysql-exporter.secret\" -}}\n {{- if .Values.mysql.existingSecret -}}\n {{- printf \"%s\" .Values.mysql.existingSecret -}}\n {{- else -}}\n {{ template \"prometheus-mysql-exporter.fullname\" . }}\n {{- end -}}\n{{- end -}}",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"prometheus-mysql-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-mysql-exporter.name\" . }}\n chart: {{ template \"prometheus-mysql-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n replicas: {{ .Values.replicaCount }}\n selector:\n matchLabels:\n app: {{ template \"prometheus-mysql-exporter.name\" . }}\n release: {{ .Release.Name }}\n template:\n metadata:\n labels:\n app: {{ template \"prometheus-mysql-exporter.name\" . }}\n release: {{ .Release.Name }}\n{{- if .Values.podLabels }}\n{{ toYaml .Values.podLabels | trim | indent 8 }}\n{{- end }}\n annotations:\n {{- if .Values.cloudsqlproxy.enabled }}\n checksum/config: {{ include (print .Template.BasePath \"/secret.yaml\") . | sha256sum }}\n {{- if .Values.annotations }}\n{{ toYaml .Values.annotations | indent 8 }}\n {{- end }}\n {{- else }}\n{{ toYaml .Values.annotations | indent 8 }}\n {{- end }}\n spec:\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n{{- with .Values.collectors }}\n args: [\n{{- range $index, $element := . }}\n{{- if and (typeIs \"bool\" $element) $element }}\n{{ printf \"--collect.%s\" $index | quote | indent 12 }},\n{{- else if and (typeIs \"bool\" $element) (not $element) }}\n{{ printf \"--no-collect.%s\" $index | quote | indent 12 }},\n{{- else }}\n{{ printf \"--collect.%s\" $index | quote | indent 12 }}, {{ $element | quote }},\n{{- end }}\n{{- end }}\n ]\n{{- end }}\n envFrom:\n - secretRef:\n name: {{ template \"prometheus-mysql-exporter.secret\" . }}\n ports:\n - containerPort: {{ .Values.service.internalPort }}\n livenessProbe:\n httpGet:\n path: /\n port: {{ .Values.service.internalPort }}\n readinessProbe:\n httpGet:\n path: /\n port: {{ .Values.service.internalPort }}\n {{- if .Values.cloudsqlproxy.enabled }}\n - name: cloudsql-proxy\n image: \"{{ .Values.cloudsqlproxy.image.repo }}:{{ .Values.cloudsqlproxy.image.tag }}\"\n imagePullPolicy: \"{{ .Values.cloudsqlproxy.image.PullPolicy }}\"\n command: [\"/cloud_sql_proxy\",\n \"-instances={{ .Values.cloudsqlproxy.instanceConnectionName }}=tcp:{{ .Values.cloudsqlproxy.port }}\",\n \"-credential_file=/secrets/cloudsql/credentials.json\"]\n livenessProbe:\n exec:\n command: [\"nc\", \"-z\", \"127.0.0.1\", \"3306\"]\n volumeMounts:\n - name: cloudsql-proxy-sa-credentials\n mountPath: /secrets/cloudsql\n readOnly: true\n {{- end }}\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n {{- with .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- if .Values.cloudsqlproxy.enabled }}\n volumes:\n - name: cloudsql-proxy-sa-credentials\n secret:\n secretName: {{ template \"prometheus-mysql-exporter.cloudsqlsecret\" . }}\n {{- end }}\n",
"# secret-env.yaml\n{{- if or (not .Values.mysql.existingSecret) }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"prometheus-mysql-exporter.fullname\" . }}\ntype: Opaque\nstringData:\n DATA_SOURCE_NAME: \"{{ .Values.mysql.user }}:{{ .Values.mysql.pass }}@{{ if .Values.mysql.protocol }}{{ .Values.mysql.protocol }}{{ end }}({{ .Values.mysql.host }}:{{ .Values.mysql.port }})/{{ if .Values.mysql.db }}{{ .Values.mysql.db }}{{ end }}{{ if .Values.mysql.param }}?{{ .Values.mysql.param }}{{ end }}\"\n{{- end }}",
"# secret.yaml\n{{- if .Values.cloudsqlproxy.enabled }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"prometheus-mysql-exporter.cloudsqlsecret\" . }}\ntype: Opaque\ndata:\n credentials.json: {{ .Values.cloudsqlproxy.credentials | b64enc }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"prometheus-mysql-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-mysql-exporter.name\" . }}\n chart: {{ template \"prometheus-mysql-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n{{- if .Values.service.labels }}\n{{ toYaml .Values.service.labels | indent 4 }}\n{{- end }}\n{{- if .Values.service.annotations }}\n annotations:\n{{ toYaml .Values.service.annotations | indent 4 }}\n{{- end }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.externalPort }}\n targetPort: {{ .Values.service.internalPort }}\n protocol: TCP\n name: {{ .Values.service.name }}\n selector:\n app: {{ template \"prometheus-mysql-exporter.name\" . }}\n release: {{ .Release.Name }}\n",
"# servicemonitor.yaml\n{{- if .Values.serviceMonitor.enabled }}\napiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n name: {{ template \"prometheus-mysql-exporter.fullname\" . }}\n labels:\n app: {{ template \"prometheus-mysql-exporter.name\" . }}\n chart: {{ template \"prometheus-mysql-exporter.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n {{- if .Values.serviceMonitor.additionalLabels }}\n{{ toYaml .Values.serviceMonitor.additionalLabels | indent 4 }}\n {{- end }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"prometheus-mysql-exporter.name\" . }}\n release: {{ .Release.Name }}\n {{- with .Values.serviceMonitor.jobLabel }}\n jobLabel: {{ . | quote}}\n {{- end }}\n {{- with .Values.serviceMonitor.targetLabels }}\n targetLabels:\n{{ toYaml . | trim | indent 4 -}}\n {{- end }}\n {{- with .Values.serviceMonitor.podTargetLabels }}\n podTargetLabels:\n{{ toYaml . | trim | indent 4 -}}\n {{- end }}\n endpoints:\n - path: /metrics\n port: {{ .Values.service.name }}\n {{- if .Values.serviceMonitor.interval }}\n interval: {{ .Values.serviceMonitor.interval }}\n {{- end }}\n {{- if .Values.serviceMonitor.scrapeTimeout }}\n scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}\n {{- end }}\n {{- if .Values.serviceMonitor.metricRelabelings }}\n metricRelabelings: {{ toYaml .Values.serviceMonitor.metricRelabelings | nindent 8 }}\n {{- end }}\n{{- end }}"
] | # Default values for prometheus-mysql-exporter.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: "prom/mysqld-exporter"
tag: "v0.11.0"
pullPolicy: "IfNotPresent"
service:
labels: {}
annotations: {}
name: mysql-exporter
type: ClusterIP
externalPort: 9104
internalPort: 9104
serviceMonitor:
# enabled should be set to true to enable prometheus-operator discovery of this service
enabled: false
# interval is the interval at which metrics should be scraped
# interval: 30s
# scrapeTimeout is the timeout after which the scrape is ended
# scrapeTimeout: 10s
# additionalLabels is the set of additional labels to add to the ServiceMonitor
additionalLabels: {}
jobLabel: ""
targetLabels: []
podTargetLabels: []
metricRelabelings: []
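  # For example (a sketch; the metric name is illustrative), to drop a
  # high-cardinality series before ingestion:
  # metricRelabelings:
  #   - sourceLabels: [__name__]
  #     regex: mysql_global_status_commands_total
  #     action: drop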
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
podLabels: {}
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics"
prometheus.io/port: "9104"
collectors: {}
# auto_increment.columns: false
# binlog_size: false
# engine_innodb_status: false
# engine_tokudb_status: false
# global_status: true
# global_variables: true
# info_schema.clientstats: false
# info_schema.innodb_metrics: false
# info_schema.innodb_tablespaces: false
# info_schema.innodb_cmp: false
# info_schema.innodb_cmpmem: false
# info_schema.processlist: false
# info_schema.processlist.min_time: 0
# info_schema.query_response_time: false
# info_schema.tables: true
# info_schema.tables.databases: '*'
# info_schema.tablestats: false
# info_schema.schemastats: false
# info_schema.userstats: false
# perf_schema.eventsstatements: false
# perf_schema.eventsstatements.digest_text_limit: 120
# perf_schema.eventsstatements.limit: false
# perf_schema.eventsstatements.timelimit: 86400
# perf_schema.eventswaits: false
# perf_schema.file_events: false
# perf_schema.file_instances: false
# perf_schema.indexiowaits: false
# perf_schema.tableiowaits: false
# perf_schema.tablelocks: false
# perf_schema.replication_group_member_stats: false
# slave_status: true
# slave_hosts: false
# heartbeat: false
# heartbeat.database: heartbeat
# heartbeat.table: heartbeat
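# Each collector entry is rendered as a CLI flag on the exporter container:
# true becomes "--collect.<name>", false becomes "--no-collect.<name>", and any
# other value is passed as "--collect.<name>", "<value>". For example (a sketch):
# collectors:
#   global_status: true
#   slave_status: false
#   info_schema.processlist.min_time: 5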
# mysql connection params which build the DATA_SOURCE_NAME env var of the docker container
mysql:
db: ""
host: "localhost"
param: ""
pass: "password"
port: 3306
protocol: ""
user: "exporter"
existingSecret: false
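  # With the defaults above, the rendered DATA_SOURCE_NAME is
  # "exporter:password@(localhost:3306)/". Alternatively, set existingSecret to
  # the name of a pre-created Secret that carries a DATA_SOURCE_NAME key; the
  # chart then skips creating its own secret.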
# cloudsqlproxy https://cloud.google.com/sql/docs/mysql/sql-proxy
cloudsqlproxy:
enabled: false
image:
repo: "gcr.io/cloudsql-docker/gce-proxy"
tag: "1.14"
pullPolicy: "IfNotPresent"
instanceConnectionName: "project:us-central1:dbname"
port: "3306"
credentials:
'{
"type": "service_account",
"project_id": "project",
"private_key_id": "KEYID1",
"private_key": "-----BEGIN PRIVATE KEY-----\sdajsdnasd\n-----END PRIVATE KEY-----\n",
"client_email": "[email protected]",
"client_id": "111111111",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://accounts.google.com/o/oauth2/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/user%40project.iam.gserviceaccount.com"
}'
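# Instead of inlining the service-account JSON above, it can be supplied at
# install time, e.g. (a sketch; file name is illustrative):
#   --set-file cloudsqlproxy.credentials=sa.json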
|
nginx-ldapauth-proxy | [
"# _helpers.tpl\n{{/* vim: set filetype=mustache: */}}\n{{/*\nExpand the name of the chart.\n*/}}\n{{- define \"nginx-ldapauth-proxy.name\" -}}\n{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n\n{{/*\nCreate a default fully qualified app name.\nWe truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).\nIf release name contains chart name it will be used as a full name.\n*/}}\n{{- define \"nginx-ldapauth-proxy.fullname\" -}}\n{{- if .Values.fullnameOverride -}}\n{{- .Values.fullnameOverride | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- $name := default .Chart.Name .Values.nameOverride -}}\n{{- if contains $name .Release.Name -}}\n{{- .Release.Name | trunc 63 | trimSuffix \"-\" -}}\n{{- else -}}\n{{- printf \"%s-%s\" .Release.Name $name | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n{{- end -}}\n{{- end -}}\n\n{{/*\nCreate chart name and version as used by the chart label.\n*/}}\n{{- define \"nginx-ldapauth-proxy.chart\" -}}\n{{- printf \"%s-%s\" .Chart.Name .Chart.Version | replace \"+\" \"_\" | trunc 63 | trimSuffix \"-\" -}}\n{{- end -}}\n",
"# configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: {{ template \"nginx-ldapauth-proxy.fullname\" . }}\n labels:\n app: {{ template \"nginx-ldapauth-proxy.name\" . }}\n chart: {{ template \"nginx-ldapauth-proxy.chart\" . }}\n heritage: {{ .Release.Service }}\n release: {{ .Release.Name }}\ndata:\n nginx.conf: |-\n worker_processes 10;\n worker_rlimit_nofile 16384;\n\n events {\n worker_connections 1024;\n }\n\n http {\n\n upstream backend-server {\n server {{ .Values.proxy.host}}:{{ .Values.proxy.port }};\n }\n\n{{- if and .Values.proxy.ldapHost .Values.secrets.ldapBindPassword }}\n ldap_server ldapserver {\n url {{ .Values.proxy.protocol }}://{{ .Values.proxy.ldapHost }}:{{ .Values.proxy.ldapPort }}/{{ .Values.proxy.ldapDN }}?uid?sub?(&({{ .Values.proxy.ldapFilter}}));\n binddn \"{{ .Values.proxy.ldapBindDN }}\";\n binddn_passwd {{ .Values.secrets.ldapBindPassword }};\n group_attribute {{ .Values.proxy.ldapGroup }};\n group_attribute_is_dn on;\n {{- range $require := .Values.proxy.requires }}\n require group {{ $require.filter | quote }};\n {{- end }}\n require valid_user;\n satisfy all;\n }\n{{- end }}\n\n server {\n\n listen {{ .Values.service.internalPort }};\n server_name ldapauth-proxy;\n\n error_log /var/log/nginx/error.log debug;\n access_log /var/log/nginx/access.log;\n\n client_max_body_size 0;\n\n chunked_transfer_encoding on;\n\n location / {\n{{- if and .Values.proxy.ldapHost .Values.secrets.ldapBindPassword }}\n auth_ldap \"{{ .Values.proxy.authName }}\";\n auth_ldap_servers ldapserver;\n proxy_pass http://backend-server;\n proxy_set_header Host $http_host; # required for docker client's sake\n proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP\n proxy_set_header Authorization \"\"; # see https://github.com/dotcloud/docker-registry/issues/170\n proxy_read_timeout 900;\n{{- end }}\n }\n\n location /_ping {\n auth_basic off;\n root /usr/share/nginx/html;\n stub_status on;\n }\n }\n\n }\n",
"# deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: {{ template \"nginx-ldapauth-proxy.fullname\" . }}\n labels:\n app: {{ template \"nginx-ldapauth-proxy.name\" . }}\n chart: {{ template \"nginx-ldapauth-proxy.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n selector:\n matchLabels:\n app: {{ template \"nginx-ldapauth-proxy.name\" . }}\n release: {{ .Release.Name }}\n replicas: {{ .Values.replicaCount }}\n template:\n metadata:\n labels:\n app: {{ template \"nginx-ldapauth-proxy.name\" . }}\n release: {{ .Release.Name }}\n annotations:\n checksum/config: {{ include (print .Template.BasePath \"/configmap.yaml\") . | sha256sum }}\n spec:\n {{- if .Values.image.pullSecrets }}\n {{- range $pullSecret := .Values.image.pullSecrets }}\n imagePullSecrets:\n - name: {{ $pullSecret }}\n {{- end }}\n {{- end }}\n containers:\n - name: {{ .Chart.Name }}\n image: \"{{ .Values.image.repository }}:{{ .Values.image.tag }}\"\n imagePullPolicy: {{ .Values.image.pullPolicy }}\n{{- if and .Values.proxy.ldapHost .Values.secrets.ldapBindPassword }}\n env:\n - name: LDAP_BIND_PASSWORD\n valueFrom:\n secretKeyRef:\n name: {{ template \"nginx-ldapauth-proxy.fullname\" . }}\n key: ldapBindPassword\n{{- end }}\n ports:\n - containerPort: {{ .Values.service.internalPort }}\n livenessProbe:\n httpGet:\n path: /_ping\n port: {{ .Values.service.internalPort }}\n readinessProbe:\n httpGet:\n path: /_ping\n port: {{ .Values.service.internalPort }}\n volumeMounts:\n - mountPath: /etc/nginx/nginx.conf\n name: config\n subPath: nginx.conf\n resources:\n{{ toYaml .Values.resources | indent 12 }}\n volumes:\n - name: config\n configMap:\n name: {{ template \"nginx-ldapauth-proxy.fullname\" . }}\n {{- if .Values.nodeSelector }}\n nodeSelector:\n{{ toYaml .Values.nodeSelector | indent 8 }}\n {{- end }}\n {{- with .Values.affinity }}\n affinity:\n{{ toYaml . | indent 8 }}\n {{- end }}\n {{- with .Values.tolerations }}\n tolerations:\n{{ toYaml . | indent 8 }}\n {{- end }}\n",
"# ingress.yaml\n{{- if .Values.ingress.enabled -}}\n{{- $serviceName := include \"nginx-ldapauth-proxy.fullname\" . -}}\n{{- $servicePort := .Values.service.externalPort -}}\napiVersion: extensions/v1beta1\nkind: Ingress\nmetadata:\n name: {{ template \"nginx-ldapauth-proxy.fullname\" . }}\n labels:\n app: {{ template \"nginx-ldapauth-proxy.name\" . }}\n chart: {{ template \"nginx-ldapauth-proxy.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\n annotations:\n {{- range $key, $value := .Values.ingress.annotations }}\n {{ $key }}: {{ $value | quote }}\n {{- end }}\nspec:\n rules:\n {{- range $host := .Values.ingress.hosts }}\n - host: {{ $host }}\n http:\n paths:\n - path: /\n backend:\n serviceName: {{ $serviceName }}\n servicePort: {{ $servicePort }}\n {{- end -}}\n {{- if .Values.ingress.tls }}\n tls:\n{{ toYaml .Values.ingress.tls | indent 4 }}\n {{- end -}}\n{{- end -}}\n",
"# secrets.yaml\n{{- if .Values.secrets.ldapBindPassword }}\napiVersion: v1\nkind: Secret\nmetadata:\n name: {{ template \"nginx-ldapauth-proxy.fullname\" . }}\n labels:\n app: {{ template \"nginx-ldapauth-proxy.name\" . }}\n chart: {{ template \"nginx-ldapauth-proxy.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\ntype: Opaque\ndata:\n ldapBindPassword: {{ .Values.secrets.ldapBindPassword | b64enc | quote }}\n{{- end }}\n",
"# service.yaml\napiVersion: v1\nkind: Service\nmetadata:\n name: {{ template \"nginx-ldapauth-proxy.fullname\" . }}\n labels:\n app: {{ template \"nginx-ldapauth-proxy.name\" . }}\n chart: {{ template \"nginx-ldapauth-proxy.chart\" . }}\n release: {{ .Release.Name }}\n heritage: {{ .Release.Service }}\nspec:\n type: {{ .Values.service.type }}\n ports:\n - port: {{ .Values.service.externalPort }}\n targetPort: {{ .Values.service.internalPort }}\n protocol: TCP\n name: {{ .Values.service.name }}\n selector:\n app: {{ template \"nginx-ldapauth-proxy.name\" . }}\n release: {{ .Release.Name }}\n"
] | # Default values for nginx-ldapauth-proxy.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: dweomer/nginx-auth-ldap
tag: 1.13.5-on-alpine-3.5
pullPolicy: IfNotPresent
# pullSecrets:
# - docker-secret
service:
name: nginx-ldapauth
type: ClusterIP
externalPort: 443
internalPort: 80
proxy:
protocol: "ldap"
port: 443
host: "kubernetes.default.svc.cluster.local"
authName: "Auth Required"
ldapHost: ""
ldapPort: 389
ldapGroup: "memberUid"
ldapDN: "dc=example,dc=com"
ldapFilter: "objectClass=organizationalPerson"
ldapBindDN: "cn=auth,dc=example,dc=com"
requires:
- name: "authGroup"
filter: "cn=secret,ou=groups,dc=example,dc=com"
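  # Each entry above renders a 'require group "<filter>";' directive inside the
  # generated ldap_server block of nginx.conf.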
secrets:
ldapBindPassword: ""
ingress:
enabled: false
# Used to create an Ingress record.
hosts:
- ldapauth-service.local
annotations:
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
tls:
# Secrets must be manually created in the namespace.
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
|