gpt4 book ai didi

elasticsearch - Filebeat无法将数据发送到logstash,导致elastic & kibana中的数据为空

转载 作者:行者123 更新时间:2023-12-02 22:14:20 25 4
gpt4 key购买 nike

我正在尝试在 openshift 平台(OKD - v3.11)中部署 ELK 堆栈并使用 filebeat 自动检测日志。

ELK 堆栈版本:

Filebeat - 6.4.1
Logstash - 6.3.1
elastic - 6.5.4 &
kibana - 6.5.4

所使用的模板如下:
---
# OpenShift Template bundling the Logstash + Filebeat log-shipping stack.
# (Indentation reconstructed — the scraped source had it stripped.)
apiVersion: v1
kind: Template
metadata:
  name: logstash-filebeat
  annotations:
    description: logstash and filebeat template for openshift (version 6.3.1/6.4.1)
    tags: log,storage,data,visualization
# Flush-style sequence: each templated object starts at column 0 below.
objects:
# SecurityContextConstraints allowing privileged containers and hostPath
# volumes — required for the Filebeat DaemonSet to read node log directories.
- apiVersion: v1
  kind: SecurityContextConstraints
  metadata:
    name: hostpath
  allowPrivilegedContainer: true
  allowHostDirVolumePlugin: true
  runAsUser:
    type: RunAsAny
  seLinuxContext:
    type: RunAsAny
  fsGroup:
    type: RunAsAny
  supplementalGroups:
    type: RunAsAny
  # NOTE(review): placeholder principals from the source — replace with the
  # real admin user/group before applying.
  users:
    - my-admin-user
  groups:
    - my-admin-group
# ConfigMap carrying the Logstash, Filebeat, and Kibana configuration files
# that are mounted into the respective pods. The embedded file contents are
# reproduced verbatim; only the surrounding YAML structure was reconstructed.
- apiVersion: v1
  kind: ConfigMap
  metadata:
    name: logging-configmap
  data:
    logstash.yml: |
      http.host: "0.0.0.0"
      http.port: 5044
      path.config: /usr/share/logstash/pipeline
      pipeline.workers: 1
      pipeline.output.workers: 1
      xpack.monitoring.enabled: false
    # NOTE(review): inside the filter, the first grok does
    # remove_field => ["message"], yet the following conditional grok matches
    # on "message" again — presumably that branch never sees the field;
    # confirm against live events.
    logstash.conf: |
      input {
        beats {
          client_inactivity_timeout => 86400
          port => 5044
        }
      }
      filter {
        if "beats_input_codec_plain_applied" in [tags] {
          mutate {
            rename => ["log", "message"]
            add_tag => [ "DBBKUP", "kubernetes" ]
          }
          mutate {
            remove_tag => ["beats_input_codec_plain_applied"]
          }
          date {
            match => ["time", "ISO8601"]
            remove_field => ["time"]
          }
          grok {
            #match => { "source" => "/var/log/containers/%{DATA:pod_name}_%{DATA:namespace}_%{GREEDYDATA:container_name}-%{DATA:container_id}.log" }
            #remove_field => ["source"]
            match => { "message" => "%{TIMESTAMP_ISO8601:LogTimeStamp}%{SPACE}%{GREEDYDATA:Message}" }
            remove_field => ["message"]
            add_tag => ["DBBKUP"]
          }

          if "DBBKUP" in [tags] and "vz1-warrior-job" in [kubernetes][pod][name] {
            grok {
              match => { "message" => "%{GREEDYDATA:bkupLog}" }
              remove_field => ["message"]
              add_tag => ["WARJOBS"]
              remove_tag => ["DBBKUP"]
            }
          }
        }
      }

      output {
        elasticsearch {
          #hosts => "localhost:9200"
          hosts => "index.elastic:9200"
          manage_template => false
          index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
          #document_type => "%{[@metadata][type]}"
        }
      }
    filebeat.yml: |
      #filebeat.registry_file: /var/tmp/filebeat/filebeat_registry # store the registry on the host filesystem so it doesn't get lost when pods are stopped
      filebeat.autodiscover:
        providers:
          - type: kubernetes
            tags:
              - "kube-logs"
            templates:
              - condition:
                  or:
                    - contains:
                        kubernetes.pod.name: "db-backup-ne-mgmt"
                    - contains:
                        kubernetes.pod.name: "db-backup-list-manager"
                    - contains:
                        kubernetes.pod.name: "db-backup-scheduler"
                config:
                  - type: docker
                    containers.ids:
                      - "${data.kubernetes.container.id}"
                    multiline.pattern: '^[[:space:]]'
                    multiline.negate: false
                    multiline.match: after
      processors:
        - drop_event:
            when.or:
              - equals:
                  kubernetes.namespace: "kube-system"
              - equals:
                  kubernetes.namespace: "default"
              - equals:
                  kubernetes.namespace: "logging"
      output.logstash:
        hosts: ["logstash-service.logging:5044"]
        index: filebeat

      setup.template.name: "filebeat"
      setup.template.pattern: "filebeat-*"
    kibana.yml: |
      elasticsearch.url: "http://index.elastic:9200"
# NodePort Service exposing the Logstash beats listener (5044) inside the
# cluster as logstash-service and externally on node port 31481.
- apiVersion: v1
  kind: Service
  metadata:
    name: logstash-service
  spec:
    # Empty string = let the API server auto-assign a cluster IP (the source
    # left the value bare, which parses as null and means the same thing).
    clusterIP: ""
    externalTrafficPolicy: Cluster
    ports:
      - nodePort: 31481
        port: 5044
        protocol: TCP
        targetPort: 5044
    selector:
      app: logstash
    sessionAffinity: None
    type: NodePort
  status:
    loadBalancer: {}
# Logstash Deployment: a single replica receiving beats traffic on 5044,
# with logstash.yml and logstash.conf mounted from the shared ConfigMap.
- apiVersion: extensions/v1beta1
  kind: Deployment
  metadata:
    labels:
      app: logstash
    name: logstash-deployment
  spec:
    replicas: 1
    selector:
      matchLabels:
        app: logstash
    strategy:
      rollingUpdate:
        maxSurge: "25%"
        maxUnavailable: "25%"
      type: RollingUpdate
    template:
      metadata:
        labels:
          app: logstash
      spec:
        containers:
          - env:
              # Quoted defensively so tooling never re-types the version.
              - name: ES_VERSION
                value: "2.4.6"
            image: docker.elastic.co/logstash/logstash:6.3.1
            imagePullPolicy: IfNotPresent
            name: logstash
            ports:
              - containerPort: 5044
                protocol: TCP
            resources:
              limits:
                cpu: "1"
                memory: 4Gi
              requests:
                cpu: "1"
                memory: 4Gi
            volumeMounts:
              - mountPath: /usr/share/logstash/config
                name: config-volume
              - mountPath: /usr/share/logstash/pipeline
                name: logstash-pipeline-volume
        volumes:
          - configMap:
              items:
                - key: logstash.yml
                  path: logstash.yml
              name: logging-configmap
            name: config-volume
          - configMap:
              items:
                - key: logstash.conf
                  path: logstash.conf
              name: logging-configmap
            name: logstash-pipeline-volume
# Filebeat DaemonSet: one log-shipping pod per node, reading container logs
# from hostPath mounts and forwarding them to logstash-service:5044.
- apiVersion: extensions/v1beta1
  kind: DaemonSet
  metadata:
    labels:
      app: filebeat
    name: filebeat
  spec:
    selector:
      matchLabels:
        app: filebeat
    template:
      metadata:
        labels:
          app: filebeat
        name: filebeat
      spec:
        serviceAccountName: filebeat-serviceaccount
        containers:
          - args:
              - -e
              - -path.config
              - /usr/share/filebeat/config
            command:
              - /usr/share/filebeat/filebeat
            env:
              - name: LOGSTASH_HOSTS
                value: logstash-service:5044
              - name: LOG_LEVEL
                value: info
              - name: FILEBEAT_HOST
                valueFrom:
                  fieldRef:
                    apiVersion: v1
                    fieldPath: spec.nodeName
            image: docker.elastic.co/beats/filebeat:6.4.1
            imagePullPolicy: IfNotPresent
            name: filebeat
            resources:
              limits:
                cpu: 500m
                memory: 4Gi
              requests:
                cpu: 500m
                memory: 4Gi
            # NOTE(review): `privileged` is a container-level SecurityContext
            # field; the flattened source listed it under the pod-level
            # securityContext, where no such field exists. Moved here so it
            # takes effect — needed to read /var/lib/docker/containers.
            securityContext:
              privileged: true
            volumeMounts:
              - mountPath: /usr/share/filebeat/config
                name: config-volume
              - mountPath: /var/log/hostlogs
                name: varlog
                readOnly: true
              - mountPath: /var/log/containers
                name: varlogcontainers
                readOnly: true
              - mountPath: /var/log/pods
                name: varlogpods
                readOnly: true
              - mountPath: /var/lib/docker/containers
                name: varlibdockercontainers
                readOnly: true
              - mountPath: /var/tmp/filebeat
                name: vartmp
        dnsPolicy: ClusterFirst
        restartPolicy: Always
        schedulerName: default-scheduler
        securityContext:
          runAsUser: 0
        # Also schedule on master nodes so their logs are collected too.
        tolerations:
          - effect: NoSchedule
            key: node-role.kubernetes.io/master
        volumes:
          - hostPath:
              path: /var/log
              type: ""
            name: varlog
          - hostPath:
              path: /var/tmp
              type: ""
            name: vartmp
          - hostPath:
              path: /var/log/containers
              type: ""
            name: varlogcontainers
          - hostPath:
              path: /var/log/pods
              type: ""
            name: varlogpods
          - hostPath:
              path: /var/lib/docker/containers
              type: ""
            name: varlibdockercontainers
          - configMap:
              items:
                - key: filebeat.yml
                  path: filebeat.yml
              name: logging-configmap
            name: config-volume

# Binds the filebeat ServiceAccount to the read-only ClusterRole it needs
# for Kubernetes autodiscover. (namespace on a cluster-scoped binding is
# accepted but ignored by the API server.)
- apiVersion: rbac.authorization.k8s.io/v1beta1
  kind: ClusterRoleBinding
  metadata:
    name: filebeat-clusterrolebinding
    namespace: logging
  subjects:
    - kind: ServiceAccount
      name: filebeat-serviceaccount
      namespace: logging
  roleRef:
    kind: ClusterRole
    name: filebeat-clusterrole
    apiGroup: rbac.authorization.k8s.io

# Read-only access to namespaces and pods — the minimum the Filebeat
# kubernetes autodiscover provider needs to enrich and match events.
- apiVersion: rbac.authorization.k8s.io/v1beta1
  kind: ClusterRole
  metadata:
    name: filebeat-clusterrole
    namespace: logging
  rules:
    - apiGroups: [""]  # "" indicates the core API group
      resources:
        - namespaces
        - pods
      verbs:
        - get
        - watch
        - list

# Identity the Filebeat DaemonSet pods run under.
- apiVersion: v1
  kind: ServiceAccount
  metadata:
    name: filebeat-serviceaccount
    namespace: logging

kibana 仪表板已启动,elastic 和 logstash api 工作正常,但 filebeat 没有将数据发送到 logstash,因为我在 5044 端口上监听的 logstash 上没有看到任何数据轮询。

所以我从弹性论坛发现以下 iptables 命令可以解决我的问题,但没有运气,
iptables -A OUTPUT -t mangle -p tcp --dport 5044 -j MARK --set-mark 10

仍然没有对 logstash 监听器进行轮询。如果我遗漏了什么,请帮助我,如果您需要更多信息,请告诉我。

注意:
filebeat.yml、logstash.yml 和 logstash.conf 文件在部署在普通 kubernetes 中时运行良好。

最佳答案

我为调试此问题而遵循的步骤是:

  • 检查 Kibana 是否正在运行,
  • 检查 Elastic API 是否正常工作,
  • 检查是否可以从 Filebeat 访问 Logstash。

  • 在我的情况下一切正常。在 filebeat.yml 中调高日志级别后,发现 filebeat 访问 “/var/lib/docker/containers/<容器ID>/” 目录下的 docker 容器日志时报 “权限被拒绝”(permission denied)错误。(原文路径中的容器 ID 占位符被页面渲染吞掉,显示为连续的两个斜杠。)

    通过运行以下命令将 selinux 设置为“Permissive”修复了该问题,
    sudo setenforce Permissive

    在此 ELK 开始同步日志之后。

    关于elasticsearch - Filebeat无法将数据发送到logstash,导致elastic & kibana中的数据为空,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/61584838/

    25 4 0
    Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
    广告合作:1813099741@qq.com 6ren.com