Browse Source

feat: add loki yaml

2637309949@qq.com 5 years ago
parent
commit
8e84a98484
59 changed files with 2347 additions and 0 deletions
  1. 28 0
      loki/README.md
  2. 19 0
      loki/fluent-bit/clusterrole.yaml
  3. 20 0
      loki/fluent-bit/clusterrolebinding.yaml
  4. 62 0
      loki/fluent-bit/configmap.yaml
  5. 85 0
      loki/fluent-bit/daemonset.yaml
  6. 13 0
      loki/fluent-bit/serviceaccount.yaml
  7. 16 0
      loki/grafana/clusterrole.yaml
  8. 20 0
      loki/grafana/clusterrolebinding.yaml
  9. 25 0
      loki/grafana/configmap.yaml
  10. 120 0
      loki/grafana/deployment.yaml
  11. 54 0
      loki/grafana/podsecuritypolicy.yaml
  12. 17 0
      loki/grafana/role.yaml
  13. 20 0
      loki/grafana/rolebinding.yaml
  14. 18 0
      loki/grafana/secret.yaml
  15. 23 0
      loki/grafana/service.yaml
  16. 13 0
      loki/grafana/serviceaccount.yaml
  17. 42 0
      loki/loki/podsecuritypolicy.yaml
  18. 19 0
      loki/loki/role.yaml
  19. 21 0
      loki/loki/rolebinding.yaml
  20. 14 0
      loki/loki/secret.yaml
  21. 22 0
      loki/loki/service-headless.yaml
  22. 25 0
      loki/loki/service.yaml
  23. 14 0
      loki/loki/serviceaccount.yaml
  24. 96 0
      loki/loki/statefulset.yaml
  25. 14 0
      loki/prometheus/alertmanager-clusterrole.yaml
  26. 21 0
      loki/prometheus/alertmanager-clusterrolebinding.yaml
  27. 23 0
      loki/prometheus/alertmanager-configmap.yaml
  28. 89 0
      loki/prometheus/alertmanager-deployment.yaml
  29. 19 0
      loki/prometheus/alertmanager-pvc.yaml
  30. 23 0
      loki/prometheus/alertmanager-service.yaml
  31. 12 0
      loki/prometheus/alertmanager-serviceaccount.yaml
  32. 81 0
      loki/prometheus/kube-state-metrics-clusterrole.yaml
  33. 20 0
      loki/prometheus/kube-state-metrics-clusterrolebinding.yaml
  34. 43 0
      loki/prometheus/kube-state-metrics-deployment.yaml
  35. 12 0
      loki/prometheus/kube-state-metrics-serviceaccount.yaml
  36. 27 0
      loki/prometheus/kube-state-metrics-svc.yaml
  37. 61 0
      loki/prometheus/node-exporter-daemonset.yaml
  38. 27 0
      loki/prometheus/node-exporter-service.yaml
  39. 12 0
      loki/prometheus/node-exporter-serviceaccount.yaml
  40. 14 0
      loki/prometheus/pushgateway-clusterrole.yaml
  41. 21 0
      loki/prometheus/pushgateway-clusterrolebinding.yaml
  42. 49 0
      loki/prometheus/pushgateway-deployment.yaml
  43. 26 0
      loki/prometheus/pushgateway-service.yaml
  44. 12 0
      loki/prometheus/pushgateway-serviceaccount.yaml
  45. 42 0
      loki/prometheus/server-clusterrole.yaml
  46. 21 0
      loki/prometheus/server-clusterrolebinding.yaml
  47. 210 0
      loki/prometheus/server-configmap.yaml
  48. 91 0
      loki/prometheus/server-deployment.yaml
  49. 19 0
      loki/prometheus/server-pvc.yaml
  50. 23 0
      loki/prometheus/server-service.yaml
  51. 13 0
      loki/prometheus/server-serviceaccount.yaml
  52. 22 0
      loki/promtail/clusterrole.yaml
  53. 20 0
      loki/promtail/clusterrolebinding.yaml
  54. 282 0
      loki/promtail/configmap.yaml
  55. 105 0
      loki/promtail/daemonset.yaml
  56. 34 0
      loki/promtail/podsecuritypolicy.yaml
  57. 18 0
      loki/promtail/role.yaml
  58. 21 0
      loki/promtail/rolebinding.yaml
  59. 14 0
      loki/promtail/serviceaccount.yaml

+ 28 - 0
loki/README.md

@@ -0,0 +1,28 @@
+# Kubernetes Loki Server
+> log collection
+
+## deploy loki server
+```sh
+kubectl apply -f ./loki
+```
+
+## deploy a loki agent (either promtail or fluent-bit)
+```sh
+kubectl apply -f ./promtail
+```
+## deploy grafana ui
+```sh
+kubectl apply -f ./grafana
+```
+
+To get the admin password for the Grafana pod, run the following command:
+
+```sh
+kubectl get secret --namespace <YOUR-NAMESPACE> loki-grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
+```
+
+```sh
+kubectl port-forward --namespace <YOUR-NAMESPACE> service/loki-grafana 3000:80
+```
+
+Navigate to http://localhost:3000 and log in with admin and the password output above. Then follow the instructions for adding the Loki datasource, using the URL http://loki:3100/.

+ 19 - 0
loki/fluent-bit/clusterrole.yaml

@@ -0,0 +1,19 @@
+---
+# Source: loki-stack/charts/fluent-bit/templates/clusterrole.yaml
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  labels:
+    app: fluent-bit-loki
+    chart: fluent-bit-0.0.2
+    release: loki
+    heritage: Tiller
+  name: loki-fluent-bit-loki-clusterrole
+  namespace: loki
+rules:
+- apiGroups: [""] # "" indicates the core API group
+  resources:
+  - namespaces
+  - pods
+  verbs: ["get", "watch", "list"]

+ 20 - 0
loki/fluent-bit/clusterrolebinding.yaml

@@ -0,0 +1,20 @@
+---
+# Source: loki-stack/charts/fluent-bit/templates/clusterrolebinding.yaml
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: loki-fluent-bit-loki-clusterrolebinding
+  labels:
+    app: fluent-bit-loki
+    chart: fluent-bit-0.0.2
+    release: loki
+    heritage: Tiller
+subjects:
+  - kind: ServiceAccount
+    name: loki-fluent-bit-loki
+    namespace: loki
+roleRef:
+  kind: ClusterRole
+  name: loki-fluent-bit-loki-clusterrole
+  apiGroup: rbac.authorization.k8s.io

+ 62 - 0
loki/fluent-bit/configmap.yaml

@@ -0,0 +1,62 @@
+---
+# Source: loki-stack/charts/fluent-bit/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: loki-fluent-bit-loki
+  namespace: loki
+  labels:
+    app: fluent-bit-loki
+    chart: fluent-bit-0.0.2
+    release: loki
+    heritage: Tiller
+data:
+  fluent-bit.conf: |-
+    [SERVICE]
+        Flush          1
+        Daemon         Off
+        Log_Level      warn
+        Parsers_File   parsers.conf
+    [INPUT]
+        Name           tail
+        Tag            kube.*
+        Path           /var/log/containers/*.log
+        Parser         docker
+        DB             /run/fluent-bit/flb_kube.db
+        Mem_Buf_Limit  5MB
+    [FILTER]
+        Name           kubernetes
+        Match          kube.*
+        Kube_URL       https://kubernetes.default.svc:443
+        Merge_Log On
+    [Output]
+        Name loki
+        Match *
+        Url http://loki:3100/api/prom/push
+        Labels {job="fluent-bit"}
+        RemoveKeys kubernetes,stream
+        LabelMapPath /fluent-bit/etc/labelmap.json
+        LineFormat json
+        LogLevel warn
+
+  parsers.conf: |-
+    [PARSER]
+        Name        docker
+        Format      json
+        Time_Key    time
+        Time_Format %Y-%m-%dT%H:%M:%S.%L
+
+  labelmap.json: |-
+    {
+      "kubernetes": {
+        "container_name": "container",
+        "host": "node",
+        "labels": {
+          "app": "app",
+          "release": "release"
+        },
+        "namespace_name": "namespace",
+        "pod_name": "instance"
+      },
+      "stream": "stream"
+    }

+ 85 - 0
loki/fluent-bit/daemonset.yaml

@@ -0,0 +1,85 @@
+---
+# Source: loki-stack/charts/fluent-bit/templates/daemonset.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: loki-fluent-bit-loki
+  namespace: loki
+  labels:
+    app: fluent-bit-loki
+    chart: fluent-bit-0.0.2
+    release: loki
+    heritage: Tiller
+  annotations:
+    {}
+    
+spec:
+  selector:
+    matchLabels:
+      app: fluent-bit-loki
+      release: loki
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: fluent-bit-loki
+        release: loki
+      annotations:
+        checksum/config: b15de805dfc12b6609c7b03af98727686ba86afc16c9f8956324777b7e85138c
+        prometheus.io/path: /api/v1/metrics/prometheus
+        prometheus.io/port: "2020"
+        prometheus.io/scrape: "true"
+        
+    spec:
+      serviceAccountName: loki-fluent-bit-loki
+      containers:
+        - name: fluent-bit-loki
+          image: "grafana/fluent-bit-plugin-loki:0.1"
+          imagePullPolicy: IfNotPresent
+          volumeMounts:
+            - name: config
+              mountPath: /fluent-bit/etc
+            - name: run
+              mountPath: /run/fluent-bit
+            - mountPath: /var/log
+              name: varlog
+            - mountPath: /var/lib/docker/containers
+              name: varlibdockercontainers
+              readOnly: true
+            
+          ports:
+            - containerPort: 2020
+              name: http-metrics
+          resources:
+            limits:
+              memory: 100Mi
+            requests:
+              cpu: 100m
+              memory: 100Mi
+            
+      nodeSelector:
+        {}
+        
+      affinity:
+        {}
+        
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+        
+      terminationGracePeriodSeconds: 10
+      volumes:
+        - name: config
+          configMap:
+            name: loki-fluent-bit-loki
+        - name: run
+          hostPath:
+            path: /run/fluent-bit
+        - hostPath:
+            path: /var/log
+          name: varlog
+        - hostPath:
+            path: /var/lib/docker/containers
+          name: varlibdockercontainers
+        

+ 13 - 0
loki/fluent-bit/serviceaccount.yaml

@@ -0,0 +1,13 @@
+---
+# Source: loki-stack/charts/fluent-bit/templates/serviceaccount.yaml
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    app: fluent-bit-loki
+    chart: fluent-bit-0.0.2
+    heritage: Tiller
+    release: loki
+  name: loki-fluent-bit-loki
+  namespace: loki

+ 16 - 0
loki/grafana/clusterrole.yaml

@@ -0,0 +1,16 @@
+---
+# Source: loki-stack/charts/grafana/templates/clusterrole.yaml
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  labels:
+    app: grafana
+    chart: grafana-3.8.19
+    release: loki
+    heritage: Tiller
+  name: loki-grafana-clusterrole
+rules:
+- apiGroups: [""] # "" indicates the core API group
+  resources: ["configmaps", "secrets"]
+  verbs: ["get", "watch", "list"]

+ 20 - 0
loki/grafana/clusterrolebinding.yaml

@@ -0,0 +1,20 @@
+---
+# Source: loki-stack/charts/grafana/templates/clusterrolebinding.yaml
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: loki-grafana-clusterrolebinding
+  labels:
+    app: grafana
+    chart: grafana-3.8.19
+    release: loki
+    heritage: Tiller
+subjects:
+  - kind: ServiceAccount
+    name: loki-grafana
+    namespace: loki
+roleRef:
+  kind: ClusterRole
+  name: loki-grafana-clusterrole
+  apiGroup: rbac.authorization.k8s.io

+ 25 - 0
loki/grafana/configmap.yaml

@@ -0,0 +1,25 @@
+---
+# Source: loki-stack/charts/grafana/templates/configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: loki-grafana
+  namespace: loki
+  labels:
+    app: grafana
+    chart: grafana-3.8.19
+    release: loki
+    heritage: Tiller
+data:
+  grafana.ini: |
+    [analytics]
+    check_for_updates = true
+    [grafana_net]
+    url = https://grafana.net
+    [log]
+    mode = console
+    [paths]
+    data = /var/lib/grafana/data
+    logs = /var/log/grafana
+    plugins = /var/lib/grafana/plugins
+    provisioning = /etc/grafana/provisioning

+ 120 - 0
loki/grafana/deployment.yaml

@@ -0,0 +1,120 @@
+---
+# Source: loki-stack/charts/grafana/templates/deployment.yaml
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: loki-grafana
+  namespace: loki
+  labels:
+    app: grafana
+    chart: grafana-3.8.19
+    release: loki
+    heritage: Tiller
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: grafana
+      release: loki
+  strategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: grafana
+        release: loki
+      annotations:
+        checksum/config: 5718319f8bec4f87dd50e11caca9a5093df7131b8332e70c3f02091704c9381b
+        checksum/dashboards-json-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
+        checksum/sc-dashboard-provider-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
+        checksum/secret: 02a7e52c3461734cf0a0683b0d200a6d9b2a809bbfe55cff60109cacfbe28ba9
+    spec:
+      
+      serviceAccountName: loki-grafana
+      securityContext:
+        fsGroup: 472
+        runAsUser: 472
+        
+      initContainers:
+        - name: grafana-sc-datasources
+          image: "kiwigrid/k8s-sidecar:0.1.20"
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: METHOD
+              value: LIST
+            - name: LABEL
+              value: "grafana_datasource"
+            - name: FOLDER
+              value: "/etc/grafana/provisioning/datasources"
+            - name: RESOURCE
+              value: "both"
+          resources:
+            {}
+            
+          volumeMounts:
+            - name: sc-datasources-volume
+              mountPath: "/etc/grafana/provisioning/datasources"
+      containers:
+        - name: grafana
+          image: "grafana/grafana:6.4.1"
+          imagePullPolicy: IfNotPresent
+          volumeMounts:
+            - name: config
+              mountPath: "/etc/grafana/grafana.ini"
+              subPath: grafana.ini
+            - name: ldap
+              mountPath: "/etc/grafana/ldap.toml"
+              subPath: ldap.toml
+            - name: storage
+              mountPath: "/var/lib/grafana"
+            - name: sc-datasources-volume
+              mountPath: "/etc/grafana/provisioning/datasources"
+          ports:
+            - name: service
+              containerPort: 80
+              protocol: TCP
+            - name: grafana
+              containerPort: 3000
+              protocol: TCP
+          env:
+            - name: GF_SECURITY_ADMIN_USER
+              valueFrom:
+                secretKeyRef:
+                  name: loki-grafana
+                  key: admin-user
+            - name: GF_SECURITY_ADMIN_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: loki-grafana
+                  key: admin-password
+          livenessProbe:
+            failureThreshold: 10
+            httpGet:
+              path: /api/health
+              port: 3000
+            initialDelaySeconds: 60
+            timeoutSeconds: 30
+            
+          readinessProbe:
+            httpGet:
+              path: /api/health
+              port: 3000
+            
+          resources:
+            {}
+            
+      volumes:
+        - name: config
+          configMap:
+            name: loki-grafana
+        - name: ldap
+          secret:
+            secretName: loki-grafana
+            items:
+              - key: ldap-toml
+                path: ldap.toml
+        - name: storage
+          emptyDir: {}
+        - name: sc-datasources-volume
+          emptyDir: {}

+ 54 - 0
loki/grafana/podsecuritypolicy.yaml

@@ -0,0 +1,54 @@
+---
+# Source: loki-stack/charts/grafana/templates/podsecuritypolicy.yaml
+
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: loki-grafana
+  namespace: loki
+  labels:
+    app: grafana
+    chart: grafana-3.8.19
+    heritage: Tiller
+    release: loki
+  annotations:
+    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
+    seccomp.security.alpha.kubernetes.io/defaultProfileName:  'docker/default'
+    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+    apparmor.security.beta.kubernetes.io/defaultProfileName:  'runtime/default'
+spec:
+  privileged: false
+  allowPrivilegeEscalation: false
+  requiredDropCapabilities:
+    # Default set from Docker, without DAC_OVERRIDE or CHOWN
+    - FOWNER
+    - FSETID
+    - KILL
+    - SETGID
+    - SETUID
+    - SETPCAP
+    - NET_BIND_SERVICE
+    - NET_RAW
+    - SYS_CHROOT
+    - MKNOD
+    - AUDIT_WRITE
+    - SETFCAP
+  volumes:
+    - 'configMap'
+    - 'emptyDir'
+    - 'projected'
+    - 'secret'
+    - 'downwardAPI'
+    - 'persistentVolumeClaim'
+  hostNetwork: false
+  hostIPC: false
+  hostPID: false
+  runAsUser:
+    rule: 'RunAsAny'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'RunAsAny'
+  fsGroup:
+    rule: 'RunAsAny'
+  readOnlyRootFilesystem: false

+ 17 - 0
loki/grafana/role.yaml

@@ -0,0 +1,17 @@
+---
+# Source: loki-stack/charts/grafana/templates/role.yaml
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: loki-grafana
+  namespace: loki
+  labels:
+    app: grafana
+    chart: grafana-3.8.19
+    heritage: Tiller
+    release: loki
+rules:
+- apiGroups:      ['extensions']
+  resources:      ['podsecuritypolicies']
+  verbs:          ['use']
+  resourceNames:  [loki-grafana]

+ 20 - 0
loki/grafana/rolebinding.yaml

@@ -0,0 +1,20 @@
+---
+# Source: loki-stack/charts/grafana/templates/rolebinding.yaml
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: loki-grafana
+  namespace: loki
+  labels:
+    app: grafana
+    chart: grafana-3.8.19
+    heritage: Tiller
+    release: loki
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: loki-grafana
+subjects:
+- kind: ServiceAccount
+  name: loki-grafana
+  namespace: loki

+ 18 - 0
loki/grafana/secret.yaml

@@ -0,0 +1,18 @@
+---
+# Source: loki-stack/charts/grafana/templates/secret.yaml
+
+apiVersion: v1
+kind: Secret
+metadata:
+  name: loki-grafana
+  namespace: loki
+  labels:
+    app: grafana
+    chart: grafana-3.8.19
+    release: loki
+    heritage: Tiller
+type: Opaque
+data:
+  admin-user: "YWRtaW4="
+  admin-password: "cW9ESXZRU0J6NlBFYXVWSjB1UHFzTXZwWnY0dnprM2tVTzNFb2FneA=="
+  ldap-toml: ""

+ 23 - 0
loki/grafana/service.yaml

@@ -0,0 +1,23 @@
+---
+# Source: loki-stack/charts/grafana/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: loki-grafana
+  namespace: loki
+  labels:
+    app: grafana
+    chart: grafana-3.8.19
+    release: loki
+    heritage: Tiller
+spec:
+  type: ClusterIP
+  ports:
+    - name: service
+      port: 80
+      protocol: TCP
+      targetPort: 3000
+
+  selector:
+    app: grafana
+    release: loki

+ 13 - 0
loki/grafana/serviceaccount.yaml

@@ -0,0 +1,13 @@
+---
+# Source: loki-stack/charts/grafana/templates/serviceaccount.yaml
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    app: grafana
+    chart: grafana-3.8.19
+    heritage: Tiller
+    release: loki
+  name: loki-grafana
+  namespace: loki

+ 42 - 0
loki/loki/podsecuritypolicy.yaml

@@ -0,0 +1,42 @@
+---
+# Source: loki-stack/charts/loki/templates/podsecuritypolicy.yaml
+
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: loki
+  namespace: loki
+  labels:
+    app: loki
+    chart: loki-0.17.2
+    heritage: Tiller
+    release: loki
+spec:
+  privileged: false
+  allowPrivilegeEscalation: false
+  volumes:
+    - 'configMap'
+    - 'emptyDir'
+    - 'persistentVolumeClaim'
+    - 'secret'
+  hostNetwork: false
+  hostIPC: false
+  hostPID: false
+  runAsUser:
+    rule: 'MustRunAsNonRoot'
+  seLinux:
+    rule: 'RunAsAny'
+  supplementalGroups:
+    rule: 'MustRunAs'
+    ranges:
+    - min: 1
+      max: 65535
+  fsGroup:
+    rule: 'MustRunAs'
+    ranges:
+    - min: 1
+      max: 65535
+  readOnlyRootFilesystem: true
+  requiredDropCapabilities:
+    - ALL
+

+ 19 - 0
loki/loki/role.yaml

@@ -0,0 +1,19 @@
+---
+# Source: loki-stack/charts/loki/templates/role.yaml
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: loki
+  namespace: loki
+  labels:
+    app: loki
+    chart: loki-0.17.2
+    heritage: Tiller
+    release: loki
+rules:
+- apiGroups:      ['extensions']
+  resources:      ['podsecuritypolicies']
+  verbs:          ['use']
+  resourceNames:  [loki]
+

+ 21 - 0
loki/loki/rolebinding.yaml

@@ -0,0 +1,21 @@
+---
+# Source: loki-stack/charts/loki/templates/rolebinding.yaml
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: loki
+  namespace: loki
+  labels:
+    app: loki
+    chart: loki-0.17.2
+    heritage: Tiller
+    release: loki
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: loki
+subjects:
+- kind: ServiceAccount
+  name: loki
+

+ 14 - 0
loki/loki/secret.yaml

@@ -0,0 +1,14 @@
+---
+# Source: loki-stack/charts/loki/templates/secret.yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: loki
+  namespace: loki
+  labels:
+    app: loki
+    chart: loki-0.17.2
+    release: loki
+    heritage: Tiller
+data:
+  loki.yaml: YXV0aF9lbmFibGVkOiBmYWxzZQpjaHVua19zdG9yZV9jb25maWc6CiAgbWF4X2xvb2tfYmFja19wZXJpb2Q6IDAKaW5nZXN0ZXI6CiAgY2h1bmtfYmxvY2tfc2l6ZTogMjYyMTQ0CiAgY2h1bmtfaWRsZV9wZXJpb2Q6IDNtCiAgY2h1bmtfcmV0YWluX3BlcmlvZDogMW0KICBsaWZlY3ljbGVyOgogICAgcmluZzoKICAgICAga3ZzdG9yZToKICAgICAgICBzdG9yZTogaW5tZW1vcnkKICAgICAgcmVwbGljYXRpb25fZmFjdG9yOiAxCmxpbWl0c19jb25maWc6CiAgZW5mb3JjZV9tZXRyaWNfbmFtZTogZmFsc2UKICByZWplY3Rfb2xkX3NhbXBsZXM6IHRydWUKICByZWplY3Rfb2xkX3NhbXBsZXNfbWF4X2FnZTogMTY4aApzY2hlbWFfY29uZmlnOgogIGNvbmZpZ3M6CiAgLSBmcm9tOiAiMjAxOC0wNC0xNSIKICAgIGluZGV4OgogICAgICBwZXJpb2Q6IDE2OGgKICAgICAgcHJlZml4OiBpbmRleF8KICAgIG9iamVjdF9zdG9yZTogZmlsZXN5c3RlbQogICAgc2NoZW1hOiB2OQogICAgc3RvcmU6IGJvbHRkYgpzZXJ2ZXI6CiAgaHR0cF9saXN0ZW5fcG9ydDogMzEwMApzdG9yYWdlX2NvbmZpZzoKICBib2x0ZGI6CiAgICBkaXJlY3Rvcnk6IC9kYXRhL2xva2kvaW5kZXgKICBmaWxlc3lzdGVtOgogICAgZGlyZWN0b3J5OiAvZGF0YS9sb2tpL2NodW5rcwp0YWJsZV9tYW5hZ2VyOgogIHJldGVudGlvbl9kZWxldGVzX2VuYWJsZWQ6IGZhbHNlCiAgcmV0ZW50aW9uX3BlcmlvZDogMAo=

+ 22 - 0
loki/loki/service-headless.yaml

@@ -0,0 +1,22 @@
+---
+# Source: loki-stack/charts/loki/templates/service-headless.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: loki-headless
+  namespace: loki
+  labels:
+    app: loki
+    chart: loki-0.17.2
+    release: loki
+    heritage: Tiller
+spec:
+  clusterIP: None
+  ports:
+    - port: 3100
+      protocol: TCP
+      name: http-metrics
+      targetPort: http-metrics
+  selector:
+    app: loki
+    release: loki

+ 25 - 0
loki/loki/service.yaml

@@ -0,0 +1,25 @@
+---
+# Source: loki-stack/charts/loki/templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: loki
+  namespace: loki
+  labels:
+    app: loki
+    chart: loki-0.17.2
+    release: loki
+    heritage: Tiller
+  annotations:
+    {}
+    
+spec:
+  type: ClusterIP
+  ports:
+    - port: 3100
+      protocol: TCP
+      name: http-metrics
+      targetPort: http-metrics
+  selector:
+    app: loki
+    release: loki

+ 14 - 0
loki/loki/serviceaccount.yaml

@@ -0,0 +1,14 @@
+---
+# Source: loki-stack/charts/loki/templates/serviceaccount.yaml
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    app: loki
+    chart: loki-0.17.2
+    heritage: Tiller
+    release: loki
+  name: loki
+  namespace: loki
+

+ 96 - 0
loki/loki/statefulset.yaml

@@ -0,0 +1,96 @@
+---
+# Source: loki-stack/charts/loki/templates/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: loki
+  namespace: loki
+  labels:
+    app: loki
+    chart: loki-0.17.2
+    release: loki
+    heritage: Tiller
+  annotations:
+    {}
+    
+spec:
+  podManagementPolicy: OrderedReady
+  replicas: 1
+  selector:
+    matchLabels:
+      app: loki
+      release: loki
+  serviceName: loki-headless
+  updateStrategy:
+    type: RollingUpdate
+    
+  template:
+    metadata:
+      labels:
+        app: loki
+        name: loki
+        release: loki
+      annotations:
+        checksum/config: ab27fd3fb705f54260c7e22961f83331b8302a4d6d4551b0796d23245dce1cce
+        prometheus.io/port: http-metrics
+        prometheus.io/scrape: "true"
+        
+    spec:
+      serviceAccountName: loki
+      securityContext:
+        fsGroup: 10001
+        runAsGroup: 10001
+        runAsNonRoot: true
+        runAsUser: 10001
+        
+      containers:
+        - name: loki
+          image: "grafana/loki:v0.4.0"
+          imagePullPolicy: IfNotPresent
+          args:
+            - "-config.file=/etc/loki/loki.yaml"
+          volumeMounts:
+            - name: config
+              mountPath: /etc/loki
+            - name: storage
+              mountPath: "/data"
+              subPath: 
+          ports:
+            - name: http-metrics
+              containerPort: 3100
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              path: /ready
+              port: http-metrics
+            initialDelaySeconds: 45
+            
+          readinessProbe:
+            httpGet:
+              path: /ready
+              port: http-metrics
+            initialDelaySeconds: 45
+            
+          resources:
+            {}
+            
+          securityContext:
+            readOnlyRootFilesystem: true
+          env:
+      nodeSelector:
+        {}
+        
+      affinity:
+        {}
+        
+      tolerations:
+        []
+        
+      terminationGracePeriodSeconds: 4800
+      volumes:
+        - name: config
+          secret:
+            secretName: loki
+        - name: storage
+          emptyDir: {}
+

+ 14 - 0
loki/prometheus/alertmanager-clusterrole.yaml

@@ -0,0 +1,14 @@
+---
+# Source: loki-stack/charts/prometheus/templates/alertmanager-clusterrole.yaml
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  labels:
+    component: "alertmanager"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-alertmanager
+rules:

+ 21 - 0
loki/prometheus/alertmanager-clusterrolebinding.yaml

@@ -0,0 +1,21 @@
+---
+# Source: loki-stack/charts/prometheus/templates/alertmanager-clusterrolebinding.yaml
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    component: "alertmanager"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-alertmanager
+subjects:
+  - kind: ServiceAccount
+    name: loki-prometheus-alertmanager
+    namespace: loki
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: loki-prometheus-alertmanager

+ 23 - 0
loki/prometheus/alertmanager-configmap.yaml

@@ -0,0 +1,23 @@
+---
+# Source: loki-stack/charts/prometheus/templates/alertmanager-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    component: "alertmanager"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-alertmanager
+data:
+  alertmanager.yml: |
+    global: {}
+    receivers:
+    - name: default-receiver
+    route:
+      group_interval: 5m
+      group_wait: 10s
+      receiver: default-receiver
+      repeat_interval: 3h
+    

+ 89 - 0
loki/prometheus/alertmanager-deployment.yaml

@@ -0,0 +1,89 @@
+---
+# Source: loki-stack/charts/prometheus/templates/alertmanager-deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    component: "alertmanager"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-alertmanager
+spec:
+  selector:
+    matchLabels:
+      component: "alertmanager"
+      app: prometheus
+      release: loki
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        component: "alertmanager"
+        app: prometheus
+        release: loki
+        chart: prometheus-9.3.1
+        heritage: Tiller
+    spec:
+      serviceAccountName: loki-prometheus-alertmanager
+      containers:
+        - name: prometheus-alertmanager
+          image: "prom/alertmanager:v0.18.0"
+          imagePullPolicy: "IfNotPresent"
+          env:
+            - name: POD_IP
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: status.podIP
+          args:
+            - --config.file=/etc/config/alertmanager.yml
+            - --storage.path=/data
+            - --cluster.advertise-address=$(POD_IP):6783
+            - --web.external-url=/
+
+          ports:
+            - containerPort: 9093
+          readinessProbe:
+            httpGet:
+              path: /#/status
+              port: 9093
+            initialDelaySeconds: 30
+            timeoutSeconds: 30
+          resources:
+            {}
+            
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config
+            - name: storage-volume
+              mountPath: "/data"
+              subPath: ""
+
+        - name: prometheus-alertmanager-configmap-reload
+          image: "jimmidyson/configmap-reload:v0.2.2"
+          imagePullPolicy: "IfNotPresent"
+          args:
+            - --volume-dir=/etc/config
+            - --webhook-url=http://127.0.0.1:9093/-/reload
+          resources:
+            {}
+            
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config
+              readOnly: true
+      securityContext:
+        fsGroup: 65534
+        runAsGroup: 65534
+        runAsNonRoot: true
+        runAsUser: 65534
+        
+      volumes:
+        - name: config-volume
+          configMap:
+            name: loki-prometheus-alertmanager
+        - name: storage-volume
+          persistentVolumeClaim:
+            claimName: loki-prometheus-alertmanager

+ 19 - 0
loki/prometheus/alertmanager-pvc.yaml

@@ -0,0 +1,19 @@
+---
+# Source: loki-stack/charts/prometheus/templates/alertmanager-pvc.yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  labels:
+    component: "alertmanager"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-alertmanager
+spec:
+  accessModes:
+    - ReadWriteOnce
+    
+  resources:
+    requests:
+      storage: "2Gi"

+ 23 - 0
loki/prometheus/alertmanager-service.yaml

@@ -0,0 +1,23 @@
+---
+# Source: loki-stack/charts/prometheus/templates/alertmanager-service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    component: "alertmanager"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-alertmanager
+spec:
+  ports:
+    - name: http
+      port: 80
+      protocol: TCP
+      targetPort: 9093
+  selector:
+    component: "alertmanager"
+    app: prometheus
+    release: loki
+  type: "ClusterIP"

+ 12 - 0
loki/prometheus/alertmanager-serviceaccount.yaml

@@ -0,0 +1,12 @@
+---
+# Source: loki-stack/charts/prometheus/templates/alertmanager-serviceaccount.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    component: "alertmanager"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-alertmanager

+ 81 - 0
loki/prometheus/kube-state-metrics-clusterrole.yaml

@@ -0,0 +1,81 @@
+---
+# Source: loki-stack/charts/prometheus/templates/kube-state-metrics-clusterrole.yaml
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  labels:
+    component: "kube-state-metrics"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-kube-state-metrics
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - namespaces
+      - nodes
+      - persistentvolumeclaims
+      - pods
+      - services
+      - resourcequotas
+      - replicationcontrollers
+      - limitranges
+      - persistentvolumeclaims
+      - persistentvolumes
+      - endpoints
+      - secrets
+      - configmaps
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - extensions
+    resources:
+      - daemonsets
+      - deployments
+      - ingresses
+      - replicasets
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - apps
+    resources:
+      - daemonsets
+      - deployments
+      - statefulsets
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - batch
+    resources:
+      - cronjobs
+      - jobs
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - autoscaling
+    resources:
+      - horizontalpodautoscalers
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - policy
+    resources:
+      - poddisruptionbudgets
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - certificates.k8s.io
+    resources:
+      - certificatesigningrequests
+    verbs:
+      - list
+      - watch

+ 20 - 0
loki/prometheus/kube-state-metrics-clusterrolebinding.yaml

@@ -0,0 +1,20 @@
+---
+# Source: loki-stack/charts/prometheus/templates/kube-state-metrics-clusterrolebinding.yaml
+# Binds the kube-state-metrics ServiceAccount (namespace "loki") to its
+# ClusterRole.
+# NOTE(review): bumped from the deprecated rbac.authorization.k8s.io/v1beta1
+# (removed in Kubernetes 1.22) to v1.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    component: "kube-state-metrics"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-kube-state-metrics
+subjects:
+  - kind: ServiceAccount
+    name: loki-prometheus-kube-state-metrics
+    namespace: loki
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: loki-prometheus-kube-state-metrics

+ 43 - 0
loki/prometheus/kube-state-metrics-deployment.yaml

@@ -0,0 +1,43 @@
+---
+# Source: loki-stack/charts/prometheus/templates/kube-state-metrics-deployment.yaml
+# Single-replica kube-state-metrics exporter, scraped on the "metrics" port
+# (8080) via the matching headless Service.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    component: "kube-state-metrics"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-kube-state-metrics
+spec:
+  selector:
+    matchLabels:
+      component: "kube-state-metrics"
+      app: prometheus
+      release: loki
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        component: "kube-state-metrics"
+        app: prometheus
+        release: loki
+        chart: prometheus-9.3.1
+        heritage: Tiller
+    spec:
+      serviceAccountName: loki-prometheus-kube-state-metrics
+      containers:
+        - name: prometheus-kube-state-metrics
+          image: "quay.io/coreos/kube-state-metrics:v1.6.0"
+          imagePullPolicy: "IfNotPresent"
+          ports:
+            - name: metrics
+              containerPort: 8080
+          # No resource requests/limits configured (chart default).
+          resources: {}
+      # Run unprivileged as "nobody" (uid 65534).
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 65534

+ 12 - 0
loki/prometheus/kube-state-metrics-serviceaccount.yaml

@@ -0,0 +1,12 @@
+---
+# Source: loki-stack/charts/prometheus/templates/kube-state-metrics-serviceaccount.yaml
+# ServiceAccount for the kube-state-metrics Deployment; granted read access
+# by the loki-prometheus-kube-state-metrics ClusterRoleBinding.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    component: "kube-state-metrics"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-kube-state-metrics

+ 27 - 0
loki/prometheus/kube-state-metrics-svc.yaml

@@ -0,0 +1,27 @@
+---
+# Source: loki-stack/charts/prometheus/templates/kube-state-metrics-svc.yaml
+# Headless (clusterIP: None) Service for kube-state-metrics, port 80 ->
+# container port 8080; annotated for Prometheus endpoint scraping.
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/scrape: "true"
+  labels:
+    component: "kube-state-metrics"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-kube-state-metrics
+spec:
+  clusterIP: None
+  ports:
+    - name: http
+      port: 80
+      protocol: TCP
+      targetPort: 8080
+  selector:
+    component: "kube-state-metrics"
+    app: prometheus
+    release: loki
+  type: "ClusterIP"

+ 61 - 0
loki/prometheus/node-exporter-daemonset.yaml

@@ -0,0 +1,61 @@
+---
+# Source: loki-stack/charts/prometheus/templates/node-exporter-daemonset.yaml
+# node-exporter on every node; hostNetwork/hostPID with the host's /proc and
+# /sys mounted read-only so host metrics are visible from the container.
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    component: "node-exporter"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-node-exporter
+spec:
+  selector:
+    matchLabels:
+      component: "node-exporter"
+      app: prometheus
+      release: loki
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        component: "node-exporter"
+        app: prometheus
+        release: loki
+        chart: prometheus-9.3.1
+        heritage: Tiller
+    spec:
+      serviceAccountName: loki-prometheus-node-exporter
+      containers:
+        - name: prometheus-node-exporter
+          image: "prom/node-exporter:v0.18.0"
+          imagePullPolicy: "IfNotPresent"
+          args:
+            - --path.procfs=/host/proc
+            - --path.sysfs=/host/sys
+          ports:
+            - name: metrics
+              containerPort: 9100
+              hostPort: 9100
+          # No resource requests/limits configured (chart default).
+          resources: {}
+          volumeMounts:
+            - name: proc
+              mountPath: /host/proc
+              readOnly: true
+            - name: sys
+              mountPath: /host/sys
+              readOnly: true
+      hostNetwork: true
+      hostPID: true
+      volumes:
+        - name: proc
+          hostPath:
+            path: /proc
+        - name: sys
+          hostPath:
+            path: /sys

+ 27 - 0
loki/prometheus/node-exporter-service.yaml

@@ -0,0 +1,27 @@
+---
+# Source: loki-stack/charts/prometheus/templates/node-exporter-service.yaml
+# Headless Service fronting the node-exporter DaemonSet on port 9100;
+# annotated for Prometheus endpoint scraping.
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/scrape: "true"
+  labels:
+    component: "node-exporter"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-node-exporter
+spec:
+  clusterIP: None
+  ports:
+    - name: metrics
+      port: 9100
+      protocol: TCP
+      targetPort: 9100
+  selector:
+    component: "node-exporter"
+    app: prometheus
+    release: loki
+  type: "ClusterIP"

+ 12 - 0
loki/prometheus/node-exporter-serviceaccount.yaml

@@ -0,0 +1,12 @@
+---
+# Source: loki-stack/charts/prometheus/templates/node-exporter-serviceaccount.yaml
+# ServiceAccount used by the node-exporter DaemonSet pods.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    component: "node-exporter"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-node-exporter

+ 14 - 0
loki/prometheus/pushgateway-clusterrole.yaml

@@ -0,0 +1,14 @@
+---
+# Source: loki-stack/charts/prometheus/templates/pushgateway-clusterrole.yaml
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  labels:
+    component: "pushgateway"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-pushgateway
+rules:

+ 21 - 0
loki/prometheus/pushgateway-clusterrolebinding.yaml

@@ -0,0 +1,21 @@
+---
+# Source: loki-stack/charts/prometheus/templates/pushgateway-clusterrolebinding.yaml
+# Binds the pushgateway ServiceAccount (namespace "loki") to its (empty)
+# ClusterRole.
+# NOTE(review): bumped from the deprecated rbac.authorization.k8s.io/v1beta1
+# (removed in Kubernetes 1.22) to v1.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    component: "pushgateway"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-pushgateway
+subjects:
+  - kind: ServiceAccount
+    name: loki-prometheus-pushgateway
+    namespace: loki
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: loki-prometheus-pushgateway

+ 49 - 0
loki/prometheus/pushgateway-deployment.yaml

@@ -0,0 +1,49 @@
+---
+# Source: loki-stack/charts/prometheus/templates/pushgateway-deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    component: "pushgateway"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-pushgateway
+spec:
+  selector:
+    matchLabels:
+      component: "pushgateway"
+      app: prometheus
+      release: loki
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        component: "pushgateway"
+        app: prometheus
+        release: loki
+        chart: prometheus-9.3.1
+        heritage: Tiller
+    spec:
+      serviceAccountName: loki-prometheus-pushgateway
+      containers:
+        - name: prometheus-pushgateway
+          image: "prom/pushgateway:v0.8.0"
+          imagePullPolicy: "IfNotPresent"
+          args:
+          ports:
+            - containerPort: 9091
+          readinessProbe:
+            httpGet:
+              path: /#/status
+              port: 9091
+            initialDelaySeconds: 10
+            timeoutSeconds: 10
+          resources:
+            {}
+            
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 65534
+        

+ 26 - 0
loki/prometheus/pushgateway-service.yaml

@@ -0,0 +1,26 @@
+---
+# Source: loki-stack/charts/prometheus/templates/pushgateway-service.yaml
+# ClusterIP Service for the pushgateway on 9091; the prometheus.io/probe
+# annotation is what the server's "prometheus-pushgateway" scrape job keys on.
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/probe: pushgateway
+  labels:
+    component: "pushgateway"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-pushgateway
+spec:
+  ports:
+    - name: http
+      port: 9091
+      protocol: TCP
+      targetPort: 9091
+  selector:
+    component: "pushgateway"
+    app: prometheus
+    release: loki
+  type: "ClusterIP"

+ 12 - 0
loki/prometheus/pushgateway-serviceaccount.yaml

@@ -0,0 +1,12 @@
+---
+# Source: loki-stack/charts/prometheus/templates/pushgateway-serviceaccount.yaml
+# ServiceAccount used by the pushgateway Deployment pods.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    component: "pushgateway"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-pushgateway

+ 42 - 0
loki/prometheus/server-clusterrole.yaml

@@ -0,0 +1,42 @@
+---
+# Source: loki-stack/charts/prometheus/templates/server-clusterrole.yaml
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  labels:
+    component: "server"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-server
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+      - nodes/proxy
+      - nodes/metrics
+      - services
+      - endpoints
+      - pods
+      - ingresses
+      - configmaps
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "extensions"
+    resources:
+      - ingresses/status
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - nonResourceURLs:
+      - "/metrics"
+    verbs:
+      - get

+ 21 - 0
loki/prometheus/server-clusterrolebinding.yaml

@@ -0,0 +1,21 @@
+---
+# Source: loki-stack/charts/prometheus/templates/server-clusterrolebinding.yaml
+# Binds the Prometheus server ServiceAccount (namespace "loki") to its
+# ClusterRole.
+# NOTE(review): bumped from the deprecated rbac.authorization.k8s.io/v1beta1
+# (removed in Kubernetes 1.22) to v1.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    component: "server"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-server
+subjects:
+  - kind: ServiceAccount
+    name: loki-prometheus-server
+    namespace: loki
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: loki-prometheus-server

+ 210 - 0
loki/prometheus/server-configmap.yaml

@@ -0,0 +1,210 @@
+---
+# Source: loki-stack/charts/prometheus/templates/server-configmap.yaml
+# Prometheus server configuration, mounted at /etc/config by the server
+# Deployment; the configmap-reload sidecar POSTs /-/reload when it changes.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  labels:
+    component: "server"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-server
+data:
+  # Alerting rule groups (empty chart default).
+  alerts: |
+    groups:
+
+  # Main config: self-scrape plus Kubernetes SD scrape jobs (apiservers,
+  # nodes, cadvisor, service endpoints, services, pods, pushgateway); the
+  # alerting section discovers the in-namespace alertmanager pod.
+  prometheus.yml: |
+    global:
+      evaluation_interval: 1m
+      scrape_interval: 1m
+      scrape_timeout: 10s
+    rule_files:
+    - /etc/config/rules
+    - /etc/config/alerts
+    scrape_configs:
+    - job_name: prometheus
+      static_configs:
+      - targets:
+        - localhost:9090
+    - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+      job_name: kubernetes-apiservers
+      kubernetes_sd_configs:
+      - role: endpoints
+      relabel_configs:
+      - action: keep
+        regex: default;kubernetes;https
+        source_labels:
+        - __meta_kubernetes_namespace
+        - __meta_kubernetes_service_name
+        - __meta_kubernetes_endpoint_port_name
+      scheme: https
+      tls_config:
+        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+        insecure_skip_verify: true
+    - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+      job_name: kubernetes-nodes
+      kubernetes_sd_configs:
+      - role: node
+      relabel_configs:
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+      - replacement: kubernetes.default.svc:443
+        target_label: __address__
+      - regex: (.+)
+        replacement: /api/v1/nodes/$1/proxy/metrics
+        source_labels:
+        - __meta_kubernetes_node_name
+        target_label: __metrics_path__
+      scheme: https
+      tls_config:
+        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+        insecure_skip_verify: true
+    - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+      job_name: kubernetes-nodes-cadvisor
+      kubernetes_sd_configs:
+      - role: node
+      relabel_configs:
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+      - replacement: kubernetes.default.svc:443
+        target_label: __address__
+      - regex: (.+)
+        replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor
+        source_labels:
+        - __meta_kubernetes_node_name
+        target_label: __metrics_path__
+      scheme: https
+      tls_config:
+        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+        insecure_skip_verify: true
+    - job_name: kubernetes-service-endpoints
+      kubernetes_sd_configs:
+      - role: endpoints
+      relabel_configs:
+      - action: keep
+        regex: true
+        source_labels:
+        - __meta_kubernetes_service_annotation_prometheus_io_scrape
+      - action: replace
+        regex: (https?)
+        source_labels:
+        - __meta_kubernetes_service_annotation_prometheus_io_scheme
+        target_label: __scheme__
+      - action: replace
+        regex: (.+)
+        source_labels:
+        - __meta_kubernetes_service_annotation_prometheus_io_path
+        target_label: __metrics_path__
+      - action: replace
+        regex: ([^:]+)(?::\d+)?;(\d+)
+        replacement: $1:$2
+        source_labels:
+        - __address__
+        - __meta_kubernetes_service_annotation_prometheus_io_port
+        target_label: __address__
+      - action: labelmap
+        regex: __meta_kubernetes_service_label_(.+)
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_namespace
+        target_label: kubernetes_namespace
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_service_name
+        target_label: kubernetes_name
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_pod_node_name
+        target_label: kubernetes_node
+    - honor_labels: true
+      job_name: prometheus-pushgateway
+      kubernetes_sd_configs:
+      - role: service
+      relabel_configs:
+      - action: keep
+        regex: pushgateway
+        source_labels:
+        - __meta_kubernetes_service_annotation_prometheus_io_probe
+    - job_name: kubernetes-services
+      kubernetes_sd_configs:
+      - role: service
+      metrics_path: /probe
+      params:
+        module:
+        - http_2xx
+      relabel_configs:
+      - action: keep
+        regex: true
+        source_labels:
+        - __meta_kubernetes_service_annotation_prometheus_io_probe
+      - source_labels:
+        - __address__
+        target_label: __param_target
+      - replacement: blackbox
+        target_label: __address__
+      - source_labels:
+        - __param_target
+        target_label: instance
+      - action: labelmap
+        regex: __meta_kubernetes_service_label_(.+)
+      - source_labels:
+        - __meta_kubernetes_namespace
+        target_label: kubernetes_namespace
+      - source_labels:
+        - __meta_kubernetes_service_name
+        target_label: kubernetes_name
+    - job_name: kubernetes-pods
+      kubernetes_sd_configs:
+      - role: pod
+      relabel_configs:
+      - action: keep
+        regex: true
+        source_labels:
+        - __meta_kubernetes_pod_annotation_prometheus_io_scrape
+      - action: replace
+        regex: (.+)
+        source_labels:
+        - __meta_kubernetes_pod_annotation_prometheus_io_path
+        target_label: __metrics_path__
+      - action: replace
+        regex: ([^:]+)(?::\d+)?;(\d+)
+        replacement: $1:$2
+        source_labels:
+        - __address__
+        - __meta_kubernetes_pod_annotation_prometheus_io_port
+        target_label: __address__
+      - action: labelmap
+        regex: __meta_kubernetes_pod_label_(.+)
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_namespace
+        target_label: kubernetes_namespace
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_pod_name
+        target_label: kubernetes_pod_name
+    
+    alerting:
+      alertmanagers:
+      - kubernetes_sd_configs:
+          - role: pod
+        tls_config:
+          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+        relabel_configs:
+        - source_labels: [__meta_kubernetes_namespace]
+          regex: loki
+          action: keep
+        - source_labels: [__meta_kubernetes_pod_label_app]
+          regex: prometheus
+          action: keep
+        - source_labels: [__meta_kubernetes_pod_label_component]
+          regex: alertmanager
+          action: keep
+        - source_labels: [__meta_kubernetes_pod_container_port_number]
+          regex:
+          action: drop
+  rules: |
+    {}
+    

+ 91 - 0
loki/prometheus/server-deployment.yaml

@@ -0,0 +1,91 @@
+---
+# Source: loki-stack/charts/prometheus/templates/server-deployment.yaml
+# Prometheus server: a configmap-reload sidecar watches /etc/config and POSTs
+# /-/reload to the server container when the ConfigMap changes.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    component: "server"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-server
+spec:
+  selector:
+    matchLabels:
+      component: "server"
+      app: prometheus
+      release: loki
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        component: "server"
+        app: prometheus
+        release: loki
+        chart: prometheus-9.3.1
+        heritage: Tiller
+    spec:
+      serviceAccountName: loki-prometheus-server
+      containers:
+        - name: prometheus-server-configmap-reload
+          image: "jimmidyson/configmap-reload:v0.2.2"
+          imagePullPolicy: "IfNotPresent"
+          args:
+            - --volume-dir=/etc/config
+            - --webhook-url=http://127.0.0.1:9090/-/reload
+          resources: {}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config
+              readOnly: true
+
+        - name: prometheus-server
+          image: "prom/prometheus:v2.13.1"
+          imagePullPolicy: "IfNotPresent"
+          args:
+            - --storage.tsdb.retention.time=15d
+            - --config.file=/etc/config/prometheus.yml
+            - --storage.tsdb.path=/data
+            - --web.console.libraries=/etc/prometheus/console_libraries
+            - --web.console.templates=/etc/prometheus/consoles
+            - --web.enable-lifecycle
+          ports:
+            - containerPort: 9090
+          readinessProbe:
+            httpGet:
+              path: /-/ready
+              port: 9090
+            initialDelaySeconds: 30
+            timeoutSeconds: 30
+          livenessProbe:
+            httpGet:
+              path: /-/healthy
+              port: 9090
+            initialDelaySeconds: 30
+            timeoutSeconds: 30
+          resources: {}
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config
+            - name: storage-volume
+              mountPath: /data
+              subPath: ""
+      # Run as "nobody"; fsGroup lets the server write the data volume.
+      securityContext:
+        fsGroup: 65534
+        runAsGroup: 65534
+        runAsNonRoot: true
+        runAsUser: 65534
+      # Long grace period so Prometheus can flush the TSDB on shutdown.
+      terminationGracePeriodSeconds: 300
+      volumes:
+        - name: config-volume
+          configMap:
+            name: loki-prometheus-server
+        - name: storage-volume
+          persistentVolumeClaim:
+            claimName: loki-prometheus-server

+ 19 - 0
loki/prometheus/server-pvc.yaml

@@ -0,0 +1,19 @@
+---
+# Source: loki-stack/charts/prometheus/templates/server-pvc.yaml
+# 8Gi RWO claim backing the Prometheus TSDB ("storage-volume" in the server
+# Deployment); no storageClassName, so the cluster default class is used.
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  labels:
+    component: "server"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-server
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: "8Gi"

+ 23 - 0
loki/prometheus/server-service.yaml

@@ -0,0 +1,23 @@
+---
+# Source: loki-stack/charts/prometheus/templates/server-service.yaml
+# ClusterIP Service for the Prometheus server UI/API: port 80 -> 9090.
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    component: "server"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-server
+spec:
+  ports:
+    - name: http
+      port: 80
+      protocol: TCP
+      targetPort: 9090
+  selector:
+    component: "server"
+    app: prometheus
+    release: loki
+  type: "ClusterIP"

+ 13 - 0
loki/prometheus/server-serviceaccount.yaml

@@ -0,0 +1,13 @@
+---
+# Source: loki-stack/charts/prometheus/templates/server-serviceaccount.yaml
+
+# ServiceAccount for the Prometheus server Deployment; bound to the
+# loki-prometheus-server ClusterRole by the matching ClusterRoleBinding.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    component: "server"
+    app: prometheus
+    release: loki
+    chart: prometheus-9.3.1
+    heritage: Tiller
+  name: loki-prometheus-server

+ 22 - 0
loki/promtail/clusterrole.yaml

@@ -0,0 +1,22 @@
+---
+# Source: loki-stack/charts/promtail/templates/clusterrole.yaml
+# Read-only access promtail needs for Kubernetes pod discovery.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  labels:
+    app: promtail
+    chart: promtail-0.13.1
+    release: loki
+    heritage: Tiller
+  name: loki-promtail-clusterrole
+  # NOTE(review): dropped "namespace: loki" — ClusterRoles are cluster-scoped
+  # and the namespace field is ignored by the API server.
+rules:
+- apiGroups: [""] # "" indicates the core API group
+  resources:
+  - nodes
+  - nodes/proxy
+  - services
+  - endpoints
+  - pods
+  verbs: ["get", "watch", "list"]

+ 20 - 0
loki/promtail/clusterrolebinding.yaml

@@ -0,0 +1,20 @@
+---
+# Source: loki-stack/charts/promtail/templates/clusterrolebinding.yaml
+
+# Grants the promtail ServiceAccount (namespace "loki") the promtail
+# ClusterRole used for pod discovery.
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: loki-promtail-clusterrolebinding
+  labels:
+    app: promtail
+    chart: promtail-0.13.1
+    release: loki
+    heritage: Tiller
+subjects:
+  - kind: ServiceAccount
+    name: loki-promtail
+    namespace: loki
+roleRef:
+  kind: ClusterRole
+  name: loki-promtail-clusterrole
+  apiGroup: rbac.authorization.k8s.io

+ 282 - 0
loki/promtail/configmap.yaml

@@ -0,0 +1,282 @@
+---
+# Source: loki-stack/charts/promtail/templates/configmap.yaml
+# Promtail configuration (the Loki push URL itself is supplied separately via
+# the DaemonSet's -client.url flag); the scrape_configs mirror pod labels
+# into log stream labels and build __path__ from the pod UID.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: loki-promtail
+  namespace: loki
+  labels:
+    app: promtail
+    chart: promtail-0.13.1
+    release: loki
+    heritage: Tiller
+data:
+  promtail.yaml: |
+    client:
+      backoff_config:
+        maxbackoff: 5s
+        maxretries: 20
+        minbackoff: 100ms
+      batchsize: 102400
+      batchwait: 1s
+      external_labels: {}
+      timeout: 10s
+    positions:
+      filename: /run/promtail/positions.yaml
+    server:
+      http_listen_port: 3101
+    target_config:
+      sync_period: 10s
+    
+    scrape_configs:
+    - job_name: kubernetes-pods-name
+      pipeline_stages:
+        - docker: {}
+        
+      kubernetes_sd_configs:
+      - role: pod
+      relabel_configs:
+      - source_labels:
+        - __meta_kubernetes_pod_label_name
+        target_label: __service__
+      - source_labels:
+        - __meta_kubernetes_pod_node_name
+        target_label: __host__
+      - action: drop
+        regex: ''
+        source_labels:
+        - __service__
+      - action: labelmap
+        regex: __meta_kubernetes_pod_label_(.+)
+      - action: replace
+        replacement: $1
+        separator: /
+        source_labels:
+        - __meta_kubernetes_namespace
+        - __service__
+        target_label: job
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_namespace
+        target_label: namespace
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_pod_name
+        target_label: instance
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_pod_container_name
+        target_label: container_name
+      - replacement: /var/log/pods/*$1/*.log
+        separator: /
+        source_labels:
+        - __meta_kubernetes_pod_uid
+        - __meta_kubernetes_pod_container_name
+        target_label: __path__
+    - job_name: kubernetes-pods-app
+      pipeline_stages:
+        - docker: {}
+        
+      kubernetes_sd_configs:
+      - role: pod
+      relabel_configs:
+      - action: drop
+        regex: .+
+        source_labels:
+        - __meta_kubernetes_pod_label_name
+      - source_labels:
+        - __meta_kubernetes_pod_label_app
+        target_label: __service__
+      - source_labels:
+        - __meta_kubernetes_pod_node_name
+        target_label: __host__
+      - action: drop
+        regex: ''
+        source_labels:
+        - __service__
+      - action: labelmap
+        regex: __meta_kubernetes_pod_label_(.+)
+      - action: replace
+        replacement: $1
+        separator: /
+        source_labels:
+        - __meta_kubernetes_namespace
+        - __service__
+        target_label: job
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_namespace
+        target_label: namespace
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_pod_name
+        target_label: instance
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_pod_container_name
+        target_label: container_name
+      - replacement: /var/log/pods/*$1/*.log
+        separator: /
+        source_labels:
+        - __meta_kubernetes_pod_uid
+        - __meta_kubernetes_pod_container_name
+        target_label: __path__
+    - job_name: kubernetes-pods-direct-controllers
+      pipeline_stages:
+        - docker: {}
+        
+      kubernetes_sd_configs:
+      - role: pod
+      relabel_configs:
+      - action: drop
+        regex: .+
+        separator: ''
+        source_labels:
+        - __meta_kubernetes_pod_label_name
+        - __meta_kubernetes_pod_label_app
+      - action: drop
+        regex: '[0-9a-z-.]+-[0-9a-f]{8,10}'
+        source_labels:
+        - __meta_kubernetes_pod_controller_name
+      - source_labels:
+        - __meta_kubernetes_pod_controller_name
+        target_label: __service__
+      - source_labels:
+        - __meta_kubernetes_pod_node_name
+        target_label: __host__
+      - action: drop
+        regex: ''
+        source_labels:
+        - __service__
+      - action: labelmap
+        regex: __meta_kubernetes_pod_label_(.+)
+      - action: replace
+        replacement: $1
+        separator: /
+        source_labels:
+        - __meta_kubernetes_namespace
+        - __service__
+        target_label: job
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_namespace
+        target_label: namespace
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_pod_name
+        target_label: instance
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_pod_container_name
+        target_label: container_name
+      - replacement: /var/log/pods/*$1/*.log
+        separator: /
+        source_labels:
+        - __meta_kubernetes_pod_uid
+        - __meta_kubernetes_pod_container_name
+        target_label: __path__
+    - job_name: kubernetes-pods-indirect-controller
+      pipeline_stages:
+        - docker: {}
+        
+      kubernetes_sd_configs:
+      - role: pod
+      relabel_configs:
+      - action: drop
+        regex: .+
+        separator: ''
+        source_labels:
+        - __meta_kubernetes_pod_label_name
+        - __meta_kubernetes_pod_label_app
+      - action: keep
+        regex: '[0-9a-z-.]+-[0-9a-f]{8,10}'
+        source_labels:
+        - __meta_kubernetes_pod_controller_name
+      - action: replace
+        regex: '([0-9a-z-.]+)-[0-9a-f]{8,10}'
+        source_labels:
+        - __meta_kubernetes_pod_controller_name
+        target_label: __service__
+      - source_labels:
+        - __meta_kubernetes_pod_node_name
+        target_label: __host__
+      - action: drop
+        regex: ''
+        source_labels:
+        - __service__
+      - action: labelmap
+        regex: __meta_kubernetes_pod_label_(.+)
+      - action: replace
+        replacement: $1
+        separator: /
+        source_labels:
+        - __meta_kubernetes_namespace
+        - __service__
+        target_label: job
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_namespace
+        target_label: namespace
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_pod_name
+        target_label: instance
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_pod_container_name
+        target_label: container_name
+      - replacement: /var/log/pods/*$1/*.log
+        separator: /
+        source_labels:
+        - __meta_kubernetes_pod_uid
+        - __meta_kubernetes_pod_container_name
+        target_label: __path__
+    - job_name: kubernetes-pods-static
+      pipeline_stages:
+        - docker: {}
+        
+      kubernetes_sd_configs:
+      - role: pod
+      relabel_configs:
+      - action: drop
+        regex: ''
+        source_labels:
+        - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_pod_label_component
+        target_label: __service__
+      - source_labels:
+        - __meta_kubernetes_pod_node_name
+        target_label: __host__
+      - action: drop
+        regex: ''
+        source_labels:
+        - __service__
+      - action: labelmap
+        regex: __meta_kubernetes_pod_label_(.+)
+      - action: replace
+        replacement: $1
+        separator: /
+        source_labels:
+        - __meta_kubernetes_namespace
+        - __service__
+        target_label: job
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_namespace
+        target_label: namespace
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_pod_name
+        target_label: instance
+      - action: replace
+        source_labels:
+        - __meta_kubernetes_pod_container_name
+        target_label: container_name
+      - replacement: /var/log/pods/*$1/*.log
+        separator: /
+        source_labels:
+        - __meta_kubernetes_pod_annotation_kubernetes_io_config_mirror
+        - __meta_kubernetes_pod_container_name
+        target_label: __path__

+ 105 - 0
loki/promtail/daemonset.yaml

@@ -0,0 +1,105 @@
+---
+# Source: loki-stack/charts/promtail/templates/daemonset.yaml
+# Promtail log-collection agent: one pod per node, tails host log
+# directories and pushes entries to the Loki service in this namespace.
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: loki-promtail
+  namespace: loki
+  labels:
+    app: promtail
+    chart: promtail-0.13.1
+    release: loki
+    heritage: Tiller
+  annotations: {}
+spec:
+  selector:
+    matchLabels:
+      app: promtail
+      release: loki
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: promtail
+        release: loki
+      annotations:
+        # Checksum of the rendered promtail ConfigMap; a config change
+        # alters the checksum and therefore rolls the daemonset pods.
+        checksum/config: f67946cf335ae9cf8fd8f3ca85db81366fef63f97f5ad24bf1d271fe4c54c429
+        prometheus.io/port: http-metrics
+        prometheus.io/scrape: "true"
+    spec:
+      serviceAccountName: loki-promtail
+      containers:
+        - name: promtail
+          image: "grafana/promtail:v0.4.0"
+          imagePullPolicy: IfNotPresent
+          args:
+            - "-config.file=/etc/promtail/promtail.yaml"
+            - "-client.url=http://loki:3100/loki/api/v1/push"
+          volumeMounts:
+            - name: config
+              mountPath: /etc/promtail
+            - name: run
+              mountPath: /run/promtail
+            # Host log directories are mounted read-only for tailing.
+            - name: docker
+              mountPath: /var/lib/docker/containers
+              readOnly: true
+            - name: pods
+              mountPath: /var/log/pods
+              readOnly: true
+          env:
+            # Expose the node name to promtail via the downward API.
+            - name: HOSTNAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          ports:
+            - containerPort: 3101
+              name: http-metrics
+          securityContext:
+            readOnlyRootFilesystem: true
+            # Runs as root so it can read node-level log files under
+            # /var/lib/docker/containers and /var/log/pods.
+            runAsGroup: 0
+            runAsUser: 0
+          readinessProbe:
+            failureThreshold: 5
+            httpGet:
+              path: /ready
+              port: http-metrics
+            initialDelaySeconds: 10
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 1
+          resources: {}
+      nodeSelector: {}
+      affinity: {}
+      tolerations:
+        # Also schedule onto (tainted) master nodes so their logs are collected.
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Exists
+      volumes:
+        - name: config
+          configMap:
+            name: loki-promtail
+        - name: run
+          hostPath:
+            path: /run/promtail
+        - name: docker
+          hostPath:
+            path: /var/lib/docker/containers
+        - name: pods
+          hostPath:
+            path: /var/log/pods

+ 34 - 0
loki/promtail/podsecuritypolicy.yaml

@@ -0,0 +1,34 @@
+---
+# Source: loki-stack/charts/promtail/templates/podsecuritypolicy.yaml
+# Pod security policy for the promtail daemonset: unprivileged and
+# read-only root filesystem, but hostPath volumes are allowed so the
+# agent can mount the node's log directories.
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+  name: loki-promtail
+  namespace: loki
+  labels:
+    app: promtail
+    chart: promtail-0.13.1
+    heritage: Tiller
+    release: loki
+spec:
+  privileged: false
+  allowPrivilegeEscalation: false
+  # Only the volume types the daemonset actually uses.
+  volumes:
+    - secret
+    - configMap
+    - hostPath
+  hostNetwork: false
+  hostIPC: false
+  hostPID: false
+  runAsUser:
+    rule: RunAsAny
+  seLinux:
+    rule: RunAsAny
+  supplementalGroups:
+    rule: RunAsAny
+  fsGroup:
+    rule: RunAsAny
+  readOnlyRootFilesystem: true
+  requiredDropCapabilities:
+    - ALL
+ 18 - 0
loki/promtail/role.yaml

@@ -0,0 +1,18 @@
+---
+# Source: loki-stack/charts/promtail/templates/role.yaml
+# Grants the promtail service account permission to use its
+# PodSecurityPolicy (defined in podsecuritypolicy.yaml).
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: loki-promtail
+  namespace: loki
+  labels:
+    app: promtail
+    chart: promtail-0.13.1
+    heritage: Tiller
+    release: loki
+rules:
+  - apiGroups: ['extensions']
+    resources: ['podsecuritypolicies']
+    verbs: ['use']
+    resourceNames: ['loki-promtail']
+ 21 - 0
loki/promtail/rolebinding.yaml

@@ -0,0 +1,21 @@
+---
+# Source: loki-stack/charts/promtail/templates/rolebinding.yaml
+# Binds the loki-promtail Role (PSP "use" permission) to the
+# loki-promtail service account.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: loki-promtail
+  namespace: loki
+  labels:
+    app: promtail
+    chart: promtail-0.13.1
+    heritage: Tiller
+    release: loki
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: loki-promtail
+subjects:
+  - kind: ServiceAccount
+    name: loki-promtail
+    # ServiceAccount subjects require an explicit namespace; without it
+    # RBAC validation can reject the binding on apply.
+    namespace: loki
+ 14 - 0
loki/promtail/serviceaccount.yaml

@@ -0,0 +1,14 @@
+---
+# Source: loki-stack/charts/promtail/templates/serviceaccount.yaml
+# Identity used by the promtail daemonset pods.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: loki-promtail
+  namespace: loki
+  labels:
+    app: promtail
+    chart: promtail-0.13.1
+    heritage: Tiller
+    release: loki