Haster

Reputation: 165

checksum annotation doesn't work as expected

I have the following ConfigMap template, app-configmap-mdc.yaml:

apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "app.fullname" . }}-app-mdc
  labels:
    app.kubernetes.io/name: "app"
    {{- include "app.labels" . | nindent 4 }}
data:
    mdc.properties: |
      {{- range .Values.app.log.mdc.properties }}
      {{ . }}
      {{- end }}
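
For illustration: the range loop writes each entry of app.log.mdc.properties as one line of mdc.properties. A hypothetical override (the keys below are made up) that would populate the ConfigMap could look like:

app:
  log:
    mdc:
      properties:
        # hypothetical MDC entries, one rendered line each
        - "userId=%X{userId}"
        - "requestId=%X{requestId}"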

I want the pods to restart automatically whenever app.log.mdc.properties changes.

So I added a checksum annotation to the Deployment:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "app.fullname" . }}-app
  labels:
    app.kubernetes.io/name: "app"
spec:
  template:
    metadata:
      labels:
        app.kubernetes.io/name: "app"
      annotations:
        checksum/mdc: {{ include (print $.Template.BasePath "/app-configmap-mdc.yaml") . | sha256sum }}
    spec:
      containers:
        - name: app
          volumeMounts:
            - name: app-mdc
              mountPath: /app/config/mdc.properties
              subPath: mdc.properties
      volumes:
        - name: app-mdc
          configMap:
            name: "{{ include "app.fullname" . }}-app-mdc"
...

But when I run helm upgrade, the pods don't restart: the value of the checksum/mdc annotation doesn't change in the pod template metadata, even though the value of the app-app-mdc ConfigMap does change.

So it looks like the checksum is not recalculated during the helm upgrade. What am I doing wrong?
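
One way to check whether the checksum template itself is the problem is to render the chart locally with two different property sets and compare the annotation (release name, chart path, and the property value below are only examples):

# render with the current values
helm template myrelease ./mychart | grep checksum/mdc

# render again with a changed mdc property and compare the hashes
helm template myrelease ./mychart --set 'app.log.mdc.properties[0]=foo=bar' | grep checksum/mdc

If the two hashes differ, the annotation is being recalculated correctly and the rollout is stalling for another reason.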

Values:

global:
    # Docker registry parameters for the whole product installation
    image:
      productRepository: docker-dev-local.comp.com/ps
      externalRepository: docker.comp.com
      pullPolicy: IfNotPresent
    imagePullSecrets:
      - name: docker-dev-local
    serviceAccount:
      name: user
    extraLabels: {}
    priorityClassName: ""

# Parameters for product "APP"
app:
  monitoring:
    jolokia: {}
  log:
    scanPeriodInSec: 30
    mdc:
      properties: {}
  configuration:
    appConfigName: app_config
# Parameters for component "app"
  replicaCount: 2
  minAvailable: 1
  resources:
    limits:
      cpu: 1
      memory: 1536Mi
    requests:
      cpu: 1
      memory: 1024Mi
  securityContext:
    privileged: false
    runAsNonRoot: true
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true
    runAsUser: 1001
  service:
    type: LoadBalancer
    ports:
      http-api:
        port: 5235
        protocol: TCP
        appProtocol: http
        targetPort: 5235
    annotations:
       service.beta.kubernetes.io/do-loadbalancer-protocol: "http"
       service.beta.kubernetes.io/do-loadbalancer-sticky-sessions-type: "cookies"
       service.beta.kubernetes.io/do-loadbalancer-sticky-sessions-cookie-name: "JSESSIONID"
       service.beta.kubernetes.io/do-loadbalancer-sticky-sessions-cookie-ttl: "60"
  # Example ingress settings
  ingress:
    enabled: false
    annotations:
      kubernetes.io/ingress.class: nginx
      nginx.ingress.kubernetes.io/affinity: cookie
      nginx.ingress.kubernetes.io/affinity-mode: persistent
      nginx.ingress.kubernetes.io/affinity-canary-behavior: sticky
      nginx.ingress.kubernetes.io/session-cookie-name: EPMINGRESSCOOKIE
    hosts:
      - host: app-app.standname.mf.cloud.nexign.com
        paths:
          - /
    tls: []
  nodeSelector: {}
  affinity: {}
  tolerations: []
  # Configuration of Java environment
  java:
    maxMem: 1024M
    minMem: 512M
  # Application configuration

Template:

{{/*
Name of the product
*/}}
{{- define "app.productname" -}}
app
{{- end -}}

{{/*
Name of the product group
*/}}
{{- define "app.productgroup" -}}
bin
{{- end -}}

{{/*
Full name of the chart.
*/}}
{{- define "app.fullname" -}}
{{- if contains .Chart.Name .Release.Name -}}
    {{- .Release.Name | trunc 63 | trimSuffix "-" -}}
  {{- else -}}
    {{- printf "%s-%s" .Release.Name .Chart.Name | trunc 63 | trimSuffix "-" -}}
  {{- end -}}
{{- end -}}


{{/*
Chart name with version
*/}}
{{- define "app.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}


{{/*
Common labels
*/}}
{{- define "app.labels" -}}
helm.sh/chart: "{{ include "app.chart" . }}"
app.kubernetes.io/instance: {{ .Release.Name  | quote }}
app.kubernetes.io/part-of: "{{ include "app.productname" . }}"
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
{{- with .Values.global.extraLabels }}
{{ toYaml .  }}
{{- end }}
{{- end -}}


{{/*
Common selectors
*/}}
{{- define "app.selectorLabels" -}}
app.kubernetes.io/instance: {{ .Release.Name  | quote }}
app.kubernetes.io/part-of: "{{ include "app.productname" . }}"
{{- end -}}

{{/*
Common annotations
*/}}
{{- define "app.annotations" -}}
logging: json
{{- end -}}

{{- define "app.app.propertiesHash" -}}
{{- $env := include (print $.Template.BasePath "/app-configmap-env.yaml") . | sha256sum -}}
{{ print $env | sha256sum }}
{{- end -}}


{{/*
Service account name
*/}}
{{- define "app.serviceAccountName" -}}
{{ default "default" .Values.global.serviceAccount.name }}
{{- end -}}

Upvotes: 0

Views: 3338

Answers (1)

Haster

Reputation: 165

The problem was with resource quotas. After the ConfigMap changed, the new ReplicaSet tried to start, but there were not enough resources available, so its pod stayed in the Pending state and the old ReplicaSet with the old pod kept running.
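
For anyone hitting the same symptom: the helm upgrade does create a new ReplicaSet with the updated checksum, but its pod never becomes ready, so the old ReplicaSet keeps serving traffic. A rough way to confirm this kind of stalled rollout (namespace and resource names below are placeholders):

# the new ReplicaSet appears but shows 0 ready replicas
kubectl get replicasets -n <namespace>

# quota rejections show up as FailedCreate events on the ReplicaSet,
# scheduling problems as FailedScheduling events on the Pending pod
kubectl describe replicaset <new-replicaset> -n <namespace>
kubectl describe pod <pending-pod> -n <namespace>

# compare current usage against the quota limits
kubectl describe resourcequota -n <namespace>

Once the quota is raised or the resource requests are lowered, the new pods can start and the old ReplicaSet is scaled down.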

Upvotes: 0
