Reputation: 41
I'm trying to deploy Grafana Loki using Terraform with the following configuration:
loki.tf
resource "helm_release" "loki" {
name = "loki"
repository = "https://grafana.github.io/helm-charts"
chart = "loki"
version = "5.10.0"
values = [
templatefile("${path.module}/templates/loki.yaml", {
})
]
}
resource "helm_release" "promtail" {
chart = "promtail"
name = "promtail"
repository = "https://grafana.github.io/helm-charts"
version = "6.15.5"
values = [
templatefile("${path.module}/templates/promtail.yaml", {
loki_svc = "${helm_release.loki.name}"
})
]
depends_on = [helm_release.loki]
}
loki.yaml
auth_enabled: true

server:
  http_listen_port: 3100

common:
  ring:
    instance_addr: 127.0.0.1
    kvstore:
      store: inmemory
  replication_factor: 1
  path_prefix: /loki

schema_config:
  configs:
    - from: 2020-05-15
      store: tsdb
      object_store: s3
      schema: v13
      index:
        prefix: index_
        period: 24h

storage_config:
  aws:
    bucketnames: {name-of-my-bucket}
    endpoint: s3.us-east-1.amazonaws.com
    region: us-east-1
    access_key_id: {my-access-key}
    secret_access_key: {my-secret-key}
    insecure: false
    http_config:
      idle_conn_timeout: 90s
      response_header_timeout: 0s
      insecure_skip_verify: false
    # s3forcepathstyle: false
grafana.tf
resource "helm_release" "grafana" {
chart = "grafana"
name = "grafana"
repository = "https://grafana.github.io/helm-charts"
version = "6.33.1"
values = [
templatefile("${path.module}/templates/grafana-values.yaml", {
admin_existing_secret = kubernetes_secret.grafana.metadata[0].name
admin_user_key = "{my-user-key}"
admin_password_key = "{my-password-key"
prometheus_svc = "${helm_release.prometheus.name}-server"
loki_svc = "${helm_release.loki.name}"
replicas = 1
root_url = "/grafana"
})
]
depends_on = [
helm_release.prometheus,
helm_release.loki,
helm_release.promtail
]
}
grafana-values.yaml
datasources:
  datasources.yaml:
    apiVersion: 1
    datasources:
      - name: Prometheus
        type: prometheus
        url: http://${prometheus_svc}
        access: proxy
        isDefault: true
      - name: Loki
        type: loki
        access: proxy
        url: http://loki-gateway.default.svc.cluster.local
        jsonData:
          httpHeaderName1: 'X-Scope-OrgID'
          maxLines: 1000
        secureJsonData:
          httpHeaderValue1: '1'
          tlsCACert: ""
          tlsClientCert: ""
          tlsClientKey: ""
        version: 1
promtail.yaml
--- # Daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: promtail-daemonset
spec:
  selector:
    matchLabels:
      name: promtail
  template:
    metadata:
      labels:
        name: promtail
    spec:
      serviceAccount: promtail-serviceaccount
      containers:
        - name: promtail-container
          image: grafana/promtail
          args:
            - -config.file=/etc/promtail/promtail.yaml
          env:
            - name: 'HOSTNAME' # needed when using kubernetes_sd_configs
              valueFrom:
                fieldRef:
                  fieldPath: 'spec.nodeName'
          volumeMounts:
            - name: logs
              mountPath: /var/log
            - name: promtail-config
              mountPath: /etc/promtail
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
      volumes:
        - name: logs
          hostPath:
            path: /var/log
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
        - name: promtail-config
          configMap:
            name: promtail-config
--- # configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: promtail-config
data:
  promtail.yaml: |
    server:
      http_listen_port: 9080
      grpc_listen_port: 0
    clients:
      - url: https://loki-gateway/loki/api/v1/push
    positions:
      filename: /tmp/positions.yaml
    target_config:
      sync_period: 10s
    scrape_configs:
      - job_name: pod-logs
        kubernetes_sd_configs:
          - role: pod
        pipeline_stages:
          - docker: {}
        relabel_configs:
          - source_labels:
              - __meta_kubernetes_pod_node_name
            target_label: __host__
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          - action: replace
            replacement: $1
            separator: /
            source_labels:
              - __meta_kubernetes_namespace
              - __meta_kubernetes_pod_name
            target_label: job
          - action: replace
            source_labels:
              - __meta_kubernetes_namespace
            target_label: namespace
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_name
            target_label: pod
          - action: replace
            source_labels:
              - __meta_kubernetes_pod_container_name
            target_label: container
          - replacement: /var/log/pods/*$1/*.log
            separator: /
            source_labels:
              - __meta_kubernetes_pod_uid
              - __meta_kubernetes_pod_container_name
            target_label: __path__
--- # Clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: promtail-clusterrole
rules:
  - apiGroups: [""]
    resources:
      - nodes
      - services
      - pods
    verbs:
      - get
      - watch
      - list
--- # ServiceAccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: promtail-serviceaccount
--- # ClusterRoleBinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: promtail-clusterrolebinding
subjects:
  - kind: ServiceAccount
    name: promtail-serviceaccount
    namespace: default
roleRef:
  kind: ClusterRole
  name: promtail-clusterrole
  apiGroup: rbac.authorization.k8s.io
I have tried multiple loki.yaml files, but I can't get past this. The current error is:
level=error ts=2024-04-23T12:54:33.875038295Z caller=flush.go:144 org_id=self-monitoring msg="failed to flush" err="failed to flush chunks: store put chunk: RequestError: send request failed\ncaused by: Put \"https://chunks.s3.dummy.amazonaws.com/self-monitoring/904272f770f14454/18ec8593247%3A18ec8638029%3A20e8c351%5C%5C": dial tcp: lookup chunks.s3.dummy.amazonaws.com on 172.20.0.10:53: no such host, num_chunks: 1, labels: {app_kubernetes_io_component=\"read\", app_kubernetes_io_instance=\"loki\", app_kubernetes_io_name=\"loki\", app_kubernetes_io_part_of=\"memberlist\", cluster=\"loki\", container=\"loki\", filename=\"/var/log/pods/default_loki-read-84566c7646-lkfjw_19badfe5-087a-458d-a421-58b580e79cdd/loki/0.log\", job=\"default/loki-read\", namespace=\"default\", pod=\"loki-read-84566c7646-lkfjw\", pod_template_hash=\"84566c7646\", stream=\"stderr\"}"
I've searched for this problem here and on other sites, but nothing has helped. I understand Loki should be writing the chunks to the S3 bucket, but I don't know how to fix this.
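As far as I can tell, with s3forcepathstyle left at false the AWS SDK builds the host as <bucket>.<endpoint>, so the failing lookup decomposes as below, and neither part matches what I set in loki.yaml:
# Host from the error: chunks.s3.dummy.amazonaws.com
#   bucket   -> chunks                  (not {name-of-my-bucket})
#   endpoint -> s3.dummy.amazonaws.com  ("dummy" is not a real AWS region)
# Values from my loki.yaml:
#   bucketnames: {name-of-my-bucket}
#   endpoint:    s3.us-east-1.amazonaws.com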
Upvotes: 1
Views: 350
Reputation: 41
I don't know why, but I first added only the following:
loki:
  commonConfig:
    replication_factor: 3
  storage:
    bucketNames:
      chunks: chunks
      ruler: ruler
      admin: admin
    type: 's3'
    s3:
      endpoint: s3.us-east-1.amazonaws.com
      region: us-east-1
      secretAccessKey: {key}
      accessKeyId: {KeyID}
      s3ForcePathStyle: false
      insecure: false
singleBinary:
  replicas: 3
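Note that this file is handed to the chart through the helm_release values, so it has to contain Helm values for the grafana/loki chart, not a raw Loki config file like my original loki.yaml. As a sketch (not verified beyond my setup), the full templates/loki.yaml could look roughly like this once the rest is added back; the loki.schemaConfig block is my assumption for how the schema from the question maps into chart values, and bucket names and keys are placeholders:
loki:
  auth_enabled: true
  commonConfig:
    replication_factor: 3
  storage:
    type: 's3'
    bucketNames:
      chunks: chunks
      ruler: ruler
      admin: admin
    s3:
      endpoint: s3.us-east-1.amazonaws.com
      region: us-east-1
      accessKeyId: {KeyID}
      secretAccessKey: {key}
      s3ForcePathStyle: false
      insecure: false
  # my assumption: the chart exposes schema_config as loki.schemaConfig
  schemaConfig:
    configs:
      - from: 2020-05-15
        store: tsdb
        object_store: s3
        schema: v13
        index:
          prefix: index_
          period: 24h
singleBinary:
  replicas: 3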
After that I ran terraform apply, then added the rest of the code and applied again, and everything started to work. My guess is that, until loki.storage was set in the chart values, the chart rendered Loki with its default storage settings (hence the bucket chunks and the placeholder endpoint s3.dummy.amazonaws.com in the error).
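If you hit the same thing, two quick checks show whether your values actually reached the release and which storage config Loki ended up running with (assuming the chart's default ConfigMap name loki and key config.yaml; depending on the chart version the rendered config may live in a Secret instead):
# values the Helm release actually received
helm get values loki
# storage section of the rendered Loki config
kubectl get configmap loki -o jsonpath='{.data.config\.yaml}' | grep -A 8 storage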
Hope this will help someone
Upvotes: 1