# arttor/helmify - test_data/sample-app.yaml
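#
# Multi-document sample manifest used as helmify test input. It covers a
# Deployment (init container, sidecar, probes, Secret/ConfigMap/PVC mounts),
# a PersistentVolumeClaim, Services, an Ingress, Secrets, ConfigMaps, a
# DaemonSet, a Job, a CronJob, a StatefulSet, and a PodDisruptionBudget.
#
# A usage sketch, assuming helmify reads manifests from stdin as its README
# describes:
#
#   cat test_data/sample-app.yaml | helmify mychart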
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: myapp
  name: myapp
  namespace: my-ns
spec:
  replicas: 3
  revisionHistoryLimit: 5
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      initContainers:
        - name: init-container
          image: bash:latest
          command: ["/bin/sh", "-c", "echo 'Initializing container...'"]
      containers:
        - name: app
          args:
            - --health-probe-bind-address=:8081
            - --metrics-bind-address=127.0.0.1:8080
            - --leader-elect
          command:
            - /manager
          volumeMounts:
            - name: manager-config
              mountPath: /my_config.properties
              subPath: my_config.properties
            - name: secret-volume
              mountPath: /my.ca
            - name: props
              mountPath: /etc/props
            - name: sample-pv-storage
              mountPath: /usr/share/nginx/html
          env:
            - name: VAR1
              valueFrom:
                secretKeyRef:
                  name: my-secret-vars
                  key: VAR1
            - name: VAR2
              valueFrom:
                secretKeyRef:
                  name: my-secret-vars
                  key: VAR2
          image: controller:latest
          livenessProbe:
            httpGet:
              path: /healthz
              port: 8081
            initialDelaySeconds: 15
            periodSeconds: 20
          readinessProbe:
            httpGet:
              path: /readyz
              port: 8081
            initialDelaySeconds: 5
            periodSeconds: 10
          resources:
            limits:
              cpu: 100m
              memory: 30Mi
            requests:
              cpu: 100m
              memory: 20Mi
          securityContext:
            allowPrivilegeEscalation: false
        - name: proxy-sidecar
          args:
            - --secure-listen-address=0.0.0.0:8443
            - --v=10
          image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
          ports:
            - containerPort: 8443
              name: https
      securityContext:
        runAsNonRoot: true
      nodeSelector:
        region: east
        type: user-node
      terminationGracePeriodSeconds: 10
      volumes:
        - name: manager-config
          configMap:
            name: my-config
        - name: props
          configMap:
            name: my-config-props
        - name: secret-volume
          secret:
            secretName: my-secret-ca
        - name: sample-pv-storage
          persistentVolumeClaim:
            claimName: my-sample-pv-claim
---
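# PersistentVolumeClaim backing the Deployment's "sample-pv-storage" volume.
# Kubernetes sizes claims from "requests" only; the "limits" block below is
# not enforced and is presumably here to exercise helmify's value mapping.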
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-sample-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
    limits:
      storage: 5Gi
---
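# Service exposing the proxy sidecar's named "https" container port (8443).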
apiVersion: v1
kind: Service
metadata:
  labels:
    app: myapp
  name: myapp-service
  namespace: my-ns
spec:
  ports:
    - name: https
      port: 8443
      targetPort: https
  selector:
    app: myapp
---
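# Ingress routing /testpath to myapp-service in the same namespace.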
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myapp-ingress
  namespace: my-ns
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
    - http:
        paths:
          - path: /testpath
            pathType: Prefix
            backend:
              service:
                name: myapp-service
                port:
                  number: 8443
---
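# CA certificate Secret mounted by the Deployment's "secret-volume". The long
# base64 value is wrapped across lines, presumably to exercise multi-line
# data handling.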
apiVersion: v1
kind: Secret
metadata:
  name: my-secret-ca
  namespace: my-ns
type: Opaque
data:
  ca.crt: |
    c3VwZXJsb25ndGVzdGNydC1zdXBlcmxvbmd0ZXN0Y3J0LXN1cGVybG9uZ3Rlc3RjcnQtc3
    VwZXJsb25ndGVzdGNydC1zdXBlcmxvbmd0ZXN0Y3J0LXN1cGVybG9uZ3Rlc3RjcnQtc3Vw
    ZXJsb25ndGVzdGNydC0Kc3VwZXJsb25ndGVzdGNydC1zdXBlcmxvbmd0ZXN0Y3J0LXN1cG
    VybG9uZ3Rlc3RjcnQtc3VwZXJsb25ndGVzdGNydC1zdXBlcmxvbmd0ZXN0Y3J0LXN1cGVy
    bG9uZ3Rlc3RjcnQKc3VwZXJsb25ndGVzdGNydC1zdXBlcmxvbmd0ZXN0Y3J0LXN1cGVybG
    9uZ3Rlc3RjcnQtc3VwZXJsb25ndGVzdGNydC1zdXBlcmxvbmd0ZXN0Y3J0LXN1cGVybG9u
    Z3Rlc3RjcnQ=
---
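# Secret consumed as env vars VAR1/VAR2 by the Deployment. For reference,
# "bXlfc2VjcmV0X3Zhcl8x" is base64 for "my_secret_var_1".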
apiVersion: v1
kind: Secret
metadata:
  name: my-secret-vars
  namespace: my-ns
type: Opaque
data:
  VAR1: bXlfc2VjcmV0X3Zhcl8x
  VAR2: bXlfc2VjcmV0X3Zhcl8y
  ELASTIC_FOOBAR_HUNTER123_MEOWTOWN_VERIFY: bXlfc2VjcmV0X3Zhcl8y
stringData:
  str: |
    some big not so secret string with
    multiple lines
---
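# Immutable ConfigMap whose my_config.properties key is mounted into the
# Deployment via subPath; its values mirror the manager's bind-address flags.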
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-config
  namespace: my-ns
immutable: true
data:
  dummyconfigmapkey: dummyconfigmapvalue
  my_config.properties: |
    health.healthProbeBindAddress=8081
    metrics.bindAddress=127.0.0.1:8080
---
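# ConfigMap mounted at /etc/props; mixes plain properties with an embedded
# YAML manifest (myval.yaml) carried as an opaque string value.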
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-config-props
  namespace: my-ns
data:
  my.prop1: "1"
  my.prop2: "val 1"
  my.prop3: "true"
  myval.yaml: |
    apiVersion: clickhouse.altinity.com/v1
    kind: ClickHouseInstallationTemplate
    metadata:
      name: default-oneperhost-pod-template
    spec:
      templates:
        podTemplates:
          - name: default-oneperhost-pod-template
            distribution: "OnePerHost"
---
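# DaemonSet running a fluentd log collector on every node, with host log
# directories mounted from hostPath volumes.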
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-elasticsearch
  namespace: kube-system
  labels:
    k8s-app: fluentd-logging
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      tolerations:
      # This toleration lets the DaemonSet run on master (control-plane) nodes.
      # Remove it if your masters cannot run pods.
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      containers:
      - name: fluentd-elasticsearch
        image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
---
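# One-shot Job computing pi to 2000 digits; retried up to 4 times on failure.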
apiVersion: batch/v1
kind: Job
metadata:
  name: batch-job
spec:
  template:
    spec:
      containers:
        - name: pi
          image: perl:5.34.0
          command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
  backoffLimit: 4
---
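# CronJob running every minute; prints the date and a greeting.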
apiVersion: batch/v1
kind: CronJob
metadata:
  name: cron-job
spec:
  schedule: "* * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: hello
              image: busybox:1.28
              imagePullPolicy: IfNotPresent
              command:
                - /bin/sh
                - -c
                - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure
---
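# Headless Service (clusterIP: None) giving the StatefulSet below stable
# per-pod DNS identities.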
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
    - port: 80
      name: web
  clusterIP: None
  selector:
    app: nginx
---
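# StatefulSet with per-pod storage: volumeClaimTemplates creates a 1Gi "www"
# claim for each replica.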
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx"
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: registry.k8s.io/nginx-slim:0.8
          ports:
            - containerPort: 80
              name: web
          volumeMounts:
            - name: www
              mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 1Gi
---
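# PodDisruptionBudget for the nginx pods. With minAvailable: 2 and only two
# StatefulSet replicas, no voluntary disruptions are permitted.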
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  labels:
    app: nginx
  name: myapp-pdb
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: nginx