I am using kube-prometheus-stack to monitor a Kubernetes cluster. The Helm chart ships with a set of default Grafana dashboards, and I would like to add more dashboards on top of those.
To achieve this I created the ConfigMap below containing a dummy dashboard, but the new dashboard does not show up in Grafana:
apiVersion: v1
data:
  grafana-dummy-dashboard.json: |-
    {
      "annotations": {
        "list": [
          {
            "builtIn": 1,
            "datasource": "-- Grafana --",
            "enable": true,
            "hide": true,
            "iconColor": "rgba(0, 211, 255, 1)",
            "name": "Annotations & Alerts",
            "type": "dashboard"
          }
        ]
      },
      "editable": true,
      "gnetId": null,
      "graphTooltip": 0,
      "id": 15,
      "links": [],
      "panels": [
        {
          "aliasColors": {},
          "bars": false,
          "dashLength": 10,
          "dashes": false,
          "datasource": "-- Grafana --",
          "fieldConfig": {
            "defaults": {
              "custom": {}
            },
            "overrides": []
          },
          "fill": 1,
          "fillGradient": 0,
          "gridPos": {
            "h": 19,
            "w": 24,
            "x": 0,
            "y": 0
          },
          "hiddenSeries": false,
          "id": 2,
          "legend": {
            "avg": false,
            "current": false,
            "max": false,
            "min": false,
            "show": true,
            "total": false,
            "values": false
          },
          "lines": true,
          "linewidth": 1,
          "nullPointMode": "null",
          "options": {
            "dataLinks": []
          },
          "percentage": false,
          "pointradius": 2,
          "points": false,
          "renderer": "flot",
          "seriesOverrides": [],
          "spaceLength": 10,
          "stack": false,
          "steppedLine": false,
          "thresholds": [],
          "timeFrom": null,
          "timeRegions": [],
          "timeShift": null,
          "title": "Grafana Fake Data",
          "tooltip": {
            "shared": true,
            "sort": 0,
            "value_type": "individual"
          },
          "type": "graph",
          "xaxis": {
            "buckets": null,
            "mode": "time",
            "name": null,
            "show": true,
            "values": []
          },
          "yaxes": [
            {
              "format": "short",
              "label": null,
              "logBase": 1,
              "max": null,
              "min": null,
              "show": true
            },
            {
              "format": "short",
              "label": null,
              "logBase": 1,
              "max": null,
              "min": null,
              "show": true
            }
          ],
          "yaxis": {
            "align": false,
            "alignLevel": null
          }
        }
      ],
      "schemaVersion": 25,
      "style": "dark",
      "tags": [],
      "templating": {
        "list": []
      },
      "time": {
        "from": "now-6h",
        "to": "now"
      },
      "timepicker": {
        "refresh_intervals": [
          "10s",
          "30s",
          "1m",
          "5m",
          "15m",
          "30m",
          "1h",
          "2h",
          "1d"
        ]
      },
      "timezone": "",
      "title": "Grafana dummy dashboard",
      "uid": "UlxCrx4Gk",
      "version": 2
    }
kind: ConfigMap
metadata:
  creationTimestamp: "2021-02-25T18:36:09Z"
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:data:
        .: {}
        f:grafana-dummy-dashboard.json: {}
    manager: kubectl-create
    operation: Update
    time: "2021-02-25T18:36:09Z"
  name: my-config
  namespace: monitoring
  resourceVersion: "1226938"
  selfLink: /api/v1/namespaces/monitoring/configmaps/my-config
  uid: 958764ee-5f84-48a2-93eb-1c6728779af3
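For context: in kube-prometheus-stack, Grafana does not read arbitrary ConfigMaps by itself; a k8s-sidecar container watches for ConfigMaps carrying a specific label and copies their JSON into the dashboards folder. The snippet below is a minimal sketch of the Helm values that control this sidecar, using the key names the bundled Grafana chart commonly exposes (treat the exact keys as an assumption and check the values.yaml of your chart version):

# Hypothetical excerpt of kube-prometheus-stack values (verify against your chart version)
grafana:
  sidecar:
    dashboards:
      enabled: true             # run the k8s-sidecar that imports dashboard ConfigMaps
      label: grafana_dashboard  # only ConfigMaps carrying this label key are picked up
      # searchNamespace: ALL    # optionally watch every namespace instead of the release namespace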
Grafana Pod spec:
apiVersion: v1
kind: Pod
metadata:
annotations:
checksum/config: 6dff7c26bb7ba0d9ef70e1c8c606456a48d31530440dd2eeedeb61881f30031d
checksum/dashboards-json-config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
checksum/sc-dashboard-provider-config: 004de7c58cd5fe4be7264a0e096aae5c620da7f9d019a9eccbe4e94a815078e0
checksum/secret: e4e24c4feb60f298b35a0d758e2358bb89d37d783878f3d51068593a3db7b7d3
creationTimestamp: "2021-02-25T18:40:18Z"
generateName: mon-grafana-565645c6d4-
labels:
app.kubernetes.io/instance: mon
app.kubernetes.io/name: grafana
pod-template-hash: 565645c6d4
managedFields:
apiVersion: v1
fieldsType: FieldsV1
fieldsV1:
f:metadata:
f:annotations:
.: {}
f:checksum/config: {}
f:checksum/dashboards-json-config: {}
f:checksum/sc-dashboard-provider-config: {}
f:checksum/secret: {}
f:generateName: {}
f:labels:
.: {}
f:app.kubernetes.io/instance: {}
f:app.kubernetes.io/name: {}
f:pod-template-hash: {}
f:ownerReferences:
.: {}
k:{"uid":"af69e5b4-aab1-4446-b948-e1eff68802a3"}:
.: {}
f:apiVersion: {}
f:blockOwnerDeletion: {}
f:controller: {}
f:kind: {}
f:name: {}
f:uid: {}
f:spec:
f:containers:
k:{"name":"grafana"}:
.: {}
f:env:
.: {}
k:{"name":"GF_SECURITY_ADMIN_PASSWORD"}:
.: {}
f:name: {}
f:valueFrom:
.: {}
f:secretKeyRef:
.: {}
f:key: {}
f:name: {}
k:{"name":"GF_SECURITY_ADMIN_USER"}:
.: {}
f:name: {}
f:valueFrom:
.: {}
f:secretKeyRef:
.: {}
f:key: {}
f:name: {}
f:image: {}
f:imagePullPolicy: {}
f:livenessProbe:
.: {}
f:failureThreshold: {}
f:httpGet:
.: {}
f:path: {}
f:port: {}
f:scheme: {}
f:initialDelaySeconds: {}
f:periodSeconds: {}
f:successThreshold: {}
f:timeoutSeconds: {}
f:name: {}
f:ports:
.: {}
k:{"containerPort":80,"protocol":"TCP"}:
.: {}
f:containerPort: {}
f:name: {}
f:protocol: {}
k:{"containerPort":3000,"protocol":"TCP"}:
.: {}
f:containerPort: {}
f:name: {}
f:protocol: {}
f:readinessProbe:
.: {}
f:failureThreshold: {}
f:httpGet:
.: {}
f:path: {}
f:port: {}
f:scheme: {}
f:periodSeconds: {}
f:successThreshold: {}
f:timeoutSeconds: {}
f:resources: {}
f:terminationMessagePath: {}
f:terminationMessagePolicy: {}
f:volumeMounts:
.: {}
k:{"mountPath":"/etc/grafana/grafana.ini"}:
.: {}
f:mountPath: {}
f:name: {}
f:subPath: {}
k:{"mountPath":"/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml"}:
.: {}
f:mountPath: {}
f:name: {}
f:subPath: {}
k:{"mountPath":"/etc/grafana/provisioning/datasources"}:
.: {}
f:mountPath: {}
f:name: {}
k:{"mountPath":"/tmp/dashboards"}:
.: {}
f:mountPath: {}
f:name: {}
k:{"mountPath":"/var/lib/grafana"}:
.: {}
f:mountPath: {}
f:name: {}
k:{"name":"grafana-sc-dashboard"}:
.: {}
f:env:
.: {}
k:{"name":"FOLDER"}:
.: {}
f:name: {}
f:value: {}
k:{"name":"LABEL"}:
.: {}
f:name: {}
f:value: {}
k:{"name":"METHOD"}:
.: {}
f:name: {}
k:{"name":"RESOURCE"}:
.: {}
f:name: {}
f:value: {}
f:image: {}
f:imagePullPolicy: {}
f:name: {}
f:resources: {}
f:terminationMessagePath: {}
f:terminationMessagePolicy: {}
f:volumeMounts:
.: {}
k:{"mountPath":"/tmp/dashboards"}:
.: {}
f:mountPath: {}
f:name: {}
f:dnsPolicy: {}
f:enableServiceLinks: {}
f:initContainers:
.: {}
k:{"name":"grafana-sc-datasources"}:
.: {}
f:env:
.: {}
k:{"name":"FOLDER"}:
.: {}
f:name: {}
f:value: {}
k:{"name":"LABEL"}:
.: {}
f:name: {}
f:value: {}
k:{"name":"METHOD"}:
.: {}
f:name: {}
f:value: {}
k:{"name":"RESOURCE"}:
.: {}
f:name: {}
f:value: {}
f:image: {}
f:imagePullPolicy: {}
f:name: {}
f:resources: {}
f:terminationMessagePath: {}
f:terminationMessagePolicy: {}
f:volumeMounts:
.: {}
k:{"mountPath":"/etc/grafana/provisioning/datasources"}:
.: {}
f:mountPath: {}
f:name: {}
f:restartPolicy: {}
f:schedulerName: {}
f:securityContext:
.: {}
f:fsGroup: {}
f:runAsGroup: {}
f:runAsUser: {}
f:serviceAccount: {}
f:serviceAccountName: {}
f:terminationGracePeriodSeconds: {}
f:volumes:
.: {}
k:{"name":"config"}:
.: {}
f:configMap:
.: {}
f:defaultMode: {}
f:name: {}
f:name: {}
k:{"name":"sc-dashboard-provider"}:
.: {}
f:configMap:
.: {}
f:defaultMode: {}
f:name: {}
f:name: {}
k:{"name":"sc-dashboard-volume"}:
.: {}
f:emptyDir: {}
f:name: {}
k:{"name":"sc-datasources-volume"}:
.: {}
f:emptyDir: {}
f:name: {}
k:{"name":"storage"}:
.: {}
f:emptyDir: {}
f:name: {}
manager: kube-controller-manager
operation: Update
time: "2021-02-25T18:40:18Z"
apiVersion: v1
fieldsType: FieldsV1
fieldsV1:
f:status:
f:conditions:
k:{"type":"ContainersReady"}:
.: {}
f:lastProbeTime: {}
f:lastTransitionTime: {}
f:status: {}
f:type: {}
k:{"type":"Initialized"}:
.: {}
f:lastProbeTime: {}
f:lastTransitionTime: {}
f:status: {}
f:type: {}
k:{"type":"Ready"}:
.: {}
f:lastProbeTime: {}
f:lastTransitionTime: {}
f:status: {}
f:type: {}
f:containerStatuses: {}
f:hostIP: {}
f:initContainerStatuses: {}
f:phase: {}
f:podIP: {}
f:podIPs:
.: {}
k:{"ip":"10.233.64.91"}:
.: {}
f:ip: {}
f:startTime: {}
manager: kubelet
operation: Update
time: "2021-02-25T18:40:25Z"
name: mon-grafana-565645c6d4-526hg
namespace: monitoring
ownerReferences:
apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: mon-grafana-565645c6d4
uid: af69e5b4-aab1-4446-b948-e1eff68802a3
resourceVersion: "1227952"
selfLink: /api/v1/namespaces/monitoring/pods/mon-grafana-565645c6d4-526hg
uid: 74487dc2-aac9-4950-81c3-7ab73895eced
spec:
  containers:
  - env:
    - name: METHOD
    - name: LABEL
      value: grafana_dashboard
    - name: FOLDER
      value: /tmp/dashboards
    - name: RESOURCE
      value: both
    image: quay.io/kiwigrid/k8s-sidecar:1.10.6
    imagePullPolicy: IfNotPresent
    name: grafana-sc-dashboard
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /tmp/dashboards
      name: sc-dashboard-volume
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: mon-grafana-token-p9x96
      readOnly: true
  - env:
    - name: GF_SECURITY_ADMIN_USER
      valueFrom:
        secretKeyRef:
          key: admin-user
          name: mon-grafana
    - name: GF_SECURITY_ADMIN_PASSWORD
      valueFrom:
        secretKeyRef:
          key: admin-password
          name: mon-grafana
    image: grafana/grafana:7.4.2
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 10
      httpGet:
        path: /api/health
        port: 3000
        scheme: HTTP
      initialDelaySeconds: 60
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 30
    name: grafana
    ports:
    - containerPort: 80
      name: service
      protocol: TCP
    - containerPort: 3000
      name: grafana
      protocol: TCP
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /api/health
        port: 3000
        scheme: HTTP
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /etc/grafana/grafana.ini
      name: config
      subPath: grafana.ini
    - mountPath: /var/lib/grafana
      name: storage
    - mountPath: /tmp/dashboards
      name: sc-dashboard-volume
    - mountPath: /etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml
      name: sc-dashboard-provider
      subPath: provider.yaml
    - mountPath: /etc/grafana/provisioning/datasources
      name: sc-datasources-volume
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: mon-grafana-token-p9x96
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  initContainers:
  - env:
    - name: METHOD
      value: LIST
    - name: LABEL
      value: grafana_datasource
    - name: FOLDER
      value: /etc/grafana/provisioning/datasources
    - name: RESOURCE
      value: both
    image: quay.io/kiwigrid/k8s-sidecar:1.10.6
    imagePullPolicy: IfNotPresent
    name: grafana-sc-datasources
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /etc/grafana/provisioning/datasources
      name: sc-datasources-volume
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: mon-grafana-token-p9x96
      readOnly: true
  nodeName: iyappan-k8s-node-1
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext:
    fsGroup: 472
    runAsGroup: 472
    runAsUser: 472
  serviceAccount: mon-grafana
  serviceAccountName: mon-grafana
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - configMap:
      defaultMode: 420
      name: mon-grafana
    name: config
  - emptyDir: {}
    name: storage
  - emptyDir: {}
    name: sc-dashboard-volume
  - configMap:
      defaultMode: 420
      name: mon-grafana-config-dashboards
    name: sc-dashboard-provider
  - emptyDir: {}
    name: sc-datasources-volume
  - name: mon-grafana-token-p9x96
    secret:
      defaultMode: 420
      secretName: mon-grafana-token-p9x96
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2021-02-25T18:40:21Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2021-02-25T18:40:25Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2021-02-25T18:40:25Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2021-02-25T18:40:18Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://947d93368594c216e375313ec725e38ec92d2177b9f7d72348cd1101a32eb71a
    image: grafana/grafana:7.4.2
    imageID: docker-pullable://grafana/grafana@sha256:29e4e68a557fac7ead72496acea16a9b89626f3311ba7c4a9e39f7fb99f8f68f
    lastState: {}
    name: grafana
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2021-02-25T18:40:21Z"
  - containerID: docker://5f067be9334d0c431085bc935912addfab19ada57c6c44c6faa265800ad0cc62
    image: quay.io/kiwigrid/k8s-sidecar:1.10.6
    imageID: docker-pullable://quay.io/kiwigrid/k8s-sidecar@sha256:49ffea7ccdbbd5021e2bd20c11a956be23c2f28c558c57b93ac50aa0974fb49c
    lastState: {}
    name: grafana-sc-dashboard
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2021-02-25T18:40:21Z"
  hostIP: 172.168.1.59
  initContainerStatuses:
  - containerID: docker://56bdc029dfcf286bbecec325f8c0036a869b2bb0d97a90d5eb62776b77a5e2e9
    image: quay.io/kiwigrid/k8s-sidecar:1.10.6
    imageID: docker-pullable://quay.io/kiwigrid/k8s-sidecar@sha256:49ffea7ccdbbd5021e2bd20c11a956be23c2f28c558c57b93ac50aa0974fb49c
    lastState: {}
    name: grafana-sc-datasources
    ready: true
    restartCount: 0
    state:
      terminated:
        containerID: docker://56bdc029dfcf286bbecec325f8c0036a869b2bb0d97a90d5eb62776b77a5e2e9
        exitCode: 0
        finishedAt: "2021-02-25T18:40:20Z"
        reason: Completed
        startedAt: "2021-02-25T18:40:19Z"
  phase: Running
  podIP: 10.233.64.91
  podIPs:
  - ip: 10.233.64.91
  qosClass: BestEffort
  startTime: "2021-02-25T18:40:18Z"
@yappansg
Best answer
Solved it: after adding a label to the ConfigMap, the dashboard started working. The updated ConfigMap:
apiVersion: v1
data:
  grafana-dummy-dashboard.json: |-
    {
      "annotations": {
        "list": [
          {
            "builtIn": 1,
            "datasource": "-- Grafana --",
            "enable": true,
            "hide": true,
            "iconColor": "rgba(0, 211, 255, 1)",
            "name": "Annotations & Alerts",
            "type": "dashboard"
          }
        ]
      },
      "editable": true,
      "gnetId": null,
      "graphTooltip": 0,
      "id": 15,
      "links": [],
      "panels": [
        {
          "aliasColors": {},
          "bars": false,
          "dashLength": 10,
          "dashes": false,
          "datasource": "-- Grafana --",
          "fieldConfig": {
            "defaults": {
              "custom": {}
            },
            "overrides": []
          },
          "fill": 1,
          "fillGradient": 0,
          "gridPos": {
            "h": 19,
            "w": 24,
            "x": 0,
            "y": 0
          },
          "hiddenSeries": false,
          "id": 2,
          "legend": {
            "avg": false,
            "current": false,
            "max": false,
            "min": false,
            "show": true,
            "total": false,
            "values": false
          },
          "lines": true,
          "linewidth": 1,
          "nullPointMode": "null",
          "options": {
            "dataLinks": []
          },
          "percentage": false,
          "pointradius": 2,
          "points": false,
          "renderer": "flot",
          "seriesOverrides": [],
          "spaceLength": 10,
          "stack": false,
          "steppedLine": false,
          "thresholds": [],
          "timeFrom": null,
          "timeRegions": [],
          "timeShift": null,
          "title": "Grafana Fake Data",
          "tooltip": {
            "shared": true,
            "sort": 0,
            "value_type": "individual"
          },
          "type": "graph",
          "xaxis": {
            "buckets": null,
            "mode": "time",
            "name": null,
            "show": true,
            "values": []
          },
          "yaxes": [
            {
              "format": "short",
              "label": null,
              "logBase": 1,
              "max": null,
              "min": null,
              "show": true
            },
            {
              "format": "short",
              "label": null,
              "logBase": 1,
              "max": null,
              "min": null,
              "show": true
            }
          ],
          "yaxis": {
            "align": false,
            "alignLevel": null
          }
        }
      ],
      "schemaVersion": 25,
      "style": "dark",
      "tags": [],
      "templating": {
        "list": []
      },
      "time": {
        "from": "now-6h",
        "to": "now"
      },
      "timepicker": {
        "refresh_intervals": [
          "10s",
          "30s",
          "1m",
          "5m",
          "15m",
          "30m",
          "1h",
          "2h",
          "1d"
        ]
      },
      "timezone": "",
      "title": "Grafana dummy dashboard",
      "uid": "UlxCrx4Gk",
      "version": 2
    }
kind: ConfigMap
metadata:
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:data:
        .: {}
        f:grafana-dummy-dashboard.json: {}
    manager: kubectl-create
    operation: Update
    time: "2021-02-25T18:36:09Z"
  name: my-config
  namespace: monitoring
  labels:
    grafana_dashboard: "1"
This question about kubernetes - kube-prometheus-stack - new dashboards in Grafana was originally asked on Stack Overflow: https://stackoverflow.com/questions/66374806/