1. kubernetes k8s 笔记
1.1. 命令
kubectl apply -f datakit.yaml
kubectl logs datakit-5d9hq -n datakit
kubectl get pods -n datakit
kubectl apply -f datakit.yaml
kubectl delete -f datakit.yaml
kubectl exec datakit-4kmmv -n datakit -it -- /bin/bash
1.2. How to include a script and run it inside a Kubernetes YAML?
I’m using this approach in OpenShift, so it should be applicable in Kubernetes as well.
Try to put your script into a configmap key/value, mount this configmap as a volume and run the script from the volume.
# Job that mounts shell scripts from a ConfigMap, copies them to a writable
# location (/tmp), marks them executable and runs the entry script.
apiVersion: batch/v1
kind: Job
metadata:
  name: hello-world-job
spec:
  parallelism: 1
  completions: 1
  template:
    metadata:
      name: hello-world-job
    spec:
      volumes:
        # ConfigMap keys become read-only files under the mount path.
        - name: hello-world-scripts-volume
          configMap:
            name: hello-world-scripts
      containers:
        - name: hello-world-job
          image: alpine
          volumeMounts:
            - mountPath: /hello-world-scripts
              name: hello-world-scripts-volume
          env:
            - name: HOME
              value: /tmp
          command:
            - /bin/sh
            - -c
            # ConfigMap mounts are read-only, so copy to /tmp before chmod +x.
            - |
              echo "scripts in /hello-world-scripts"
              ls -lh /hello-world-scripts
              echo "copy scripts to /tmp"
              cp /hello-world-scripts/*.sh /tmp
              echo "apply 'chmod +x' to /tmp/*.sh"
              chmod +x /tmp/*.sh
              echo "execute script-one.sh now"
              /tmp/script-one.sh
      restartPolicy: Never
---
# ConfigMap (wrapped in a List, as produced by `kubectl get -o yaml`)
# holding the two shell scripts executed by the Job above.
apiVersion: v1
kind: List
metadata: {}
items:
  - apiVersion: v1
    kind: ConfigMap
    metadata:
      creationTimestamp: null
      name: hello-world-scripts
    data:
      # Entry script: prints a marker, then chains into script-2.sh.
      script-one.sh: |
        echo "script-one.sh"
        date
        sleep 1
        echo "run /tmp/script-2.sh now"
        /tmp/script-2.sh
      script-2.sh: |
        echo "script-2.sh"
        sleep 1
        date
1.3. 示例
# Dedicated namespace for all datakit resources.
apiVersion: v1
kind: Namespace
metadata:
  name: datakit
---
# Cluster-wide read-only permissions datakit needs to collect cluster
# metadata, workload objects, logs/events and /metrics endpoints.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: datakit
rules:
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["clusterroles"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["nodes", "nodes/proxy", "namespaces", "pods", "pods/log", "events", "services", "endpoints"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources: ["deployments", "daemonsets", "statefulsets", "replicasets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["batch"]
    resources: ["jobs", "cronjobs"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["guance.com"]
    resources: ["datakits"]
    verbs: ["get", "list"]
  - apiGroups: ["monitoring.coreos.com"]
    resources: ["podmonitors", "servicemonitors"]
    verbs: ["get", "list"]
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list"]
  # Non-resource URL: node/apiserver metrics scraping.
  - nonResourceURLs: ["/metrics"]
    verbs: ["get"]
---
# ServiceAccount the DaemonSet pods run as; bound to the ClusterRole below.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: datakit
  namespace: datakit
---
# ClusterIP Service exposing the datakit HTTP API (port 9529) inside the cluster.
apiVersion: v1
kind: Service
metadata:
  name: datakit-service
  namespace: datakit
spec:
  selector:
    app: daemonset-datakit
  ports:
    - protocol: TCP
      port: 9529
      targetPort: 9529
---
# Grants the datakit ServiceAccount the read-only ClusterRole defined above.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: datakit
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: datakit
subjects:
  - kind: ServiceAccount
    name: datakit
    namespace: datakit
---
# DaemonSet running one privileged datakit agent per node. The host's
# filesystem, /var/run and debugfs are mounted in so the agent can collect
# host-level metrics; the pyroscope sections bolt on eBPF profiling.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: daemonset-datakit
  name: datakit
  namespace: datakit
spec:
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: daemonset-datakit
  template:
    metadata:
      labels:
        app: daemonset-datakit
    spec:
      # Host networking/IPC/PID so the agent observes the node, not just the pod.
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - env:
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.hostIP
            - name: ENV_K8S_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: ENV_DATAWAY
              # Fill in the real dataway address here.
              value: "https://openway.guance.com?token=<your-token>"
            # ---pyroscope-start
            - name: PYROSCOPE_APPLICATION_NAME
              # Quoted: value contains '{' which YAML would otherwise misread.
              value: "my.ebpf.program{host=server-node-1,region=us-west-1,tag2=val2}"
            - name: PYROSCOPE_SERVER_ADDRESS
              value: "http://localhost:4040/"
            - name: PYROSCOPE_SPY_NAME
              value: ebpfspy
            - name: TARGET_NAME
              value: datakit
            # ---pyroscope-end
            - name: ENV_GLOBAL_TAGS
              value: host=__datakit_hostname,host_ip=__datakit_ip
            - name: ENV_DEFAULT_ENABLED_INPUTS
              value: cpu,disk,diskio,mem,swap,system,hostobject,net,host_processes,container
            - name: ENV_ENABLE_ELECTION
              value: enable
            - name: ENV_LOG
              value: stdout
            - name: ENV_HTTP_LISTEN
              value: "0.0.0.0:9529"
            # Host paths are remapped under /rootfs (see the rootfs volume).
            - name: HOST_PROC
              value: /rootfs/proc
            - name: HOST_SYS
              value: /rootfs/sys
            - name: HOST_ETC
              value: /rootfs/etc
            - name: HOST_VAR
              value: /rootfs/var
            - name: HOST_RUN
              value: /rootfs/run
            - name: HOST_DEV
              value: /rootfs/dev
            - name: HOST_ROOT
              value: /rootfs
            # # ---iploc-start
            # - name: ENV_IPDB
            #   value: iploc
            # # ---iploc-end
          image: pubrepo.jxxx.com/datakit/datakit:1.5.7
          imagePullPolicy: Always
          name: datakit
          # ---pyroscope-start
          # Download pyroscope, launch the helper script in the background,
          # then exec the normal datakit entrypoint in the foreground.
          command:
            - /bin/bash
            - -c
            - |
              wget https://df-storage-dev.oss-cn-hangzhou.aliyuncs.com/third-party/pyroscope/pyroscope-0.36.0-linux-amd64.tar.gz -O /tmp/pyroscope-0.36.0-linux-amd64.tar.gz
              tar -zxvf /tmp/pyroscope-0.36.0-linux-amd64.tar.gz -C /tmp
              cp /tmp/tmp/run_py.sh /tmp/run_py.sh
              chmod +x /tmp/*.sh
              nohup /tmp/run_py.sh > /tmp/1.log 2>&1 &
              /usr/local/datakit/datakit --docker
          # ---pyroscope-end
          ports:
            - containerPort: 9529
              hostPort: 9529
              name: port
              protocol: TCP
          resources:
            requests:
              cpu: "200m"
              memory: "128Mi"
            limits:
              cpu: "2000m"
              memory: "4Gi"
          # Privileged: required for host filesystem/debugfs access and eBPF.
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /usr/local/datakit/cache
              name: cache
              readOnly: false
            - mountPath: /rootfs
              name: rootfs
            - mountPath: /var/run
              name: run
            - mountPath: /sys/kernel/debug
              name: debugfs
            # # ---iploc-start
            # - mountPath: /usr/local/datakit/data/ipdb/iploc/
            #   name: datakit-ipdb
            # # ---iploc-end
            # - mountPath: /usr/local/datakit/conf.d/db/mysql.conf
            #   name: datakit-conf
            #   subPath: mysql.conf
            #   readOnly: true
            # ---pyroscope-start
            - mountPath: /usr/local/datakit/conf.d/profile/profile.conf
              name: datakit-conf
              subPath: profile.conf
              readOnly: true
            - mountPath: /tmp/tmp/run_py.sh
              name: datakit-conf
              subPath: run_py.sh
              readOnly: true
            # ---pyroscope-end
          workingDir: /usr/local/datakit
      # # ---iploc-start
      # initContainers:
      #   - args:
      #       - tar -xf /opt/iploc.tar.gz -C /usr/local/datakit/data/ipdb/iploc/
      #     command:
      #       - bash
      #       - -c
      #     image: pubrepo.jiagouyun.com/datakit/iploc:1.0
      #     imagePullPolicy: IfNotPresent
      #     name: init-volume
      #     resources: {}
      #     volumeMounts:
      #       - mountPath: /usr/local/datakit/data/ipdb/iploc/
      #         name: datakit-ipdb
      # # ---iploc-end
      hostIPC: true
      hostPID: true
      restartPolicy: Always
      serviceAccount: datakit
      serviceAccountName: datakit
      # Run on every node, including tainted ones (e.g. control plane).
      tolerations:
        - operator: Exists
      volumes:
        - configMap:
            name: datakit-conf
          name: datakit-conf
        - hostPath:
            path: /root/datakit_cache
          name: cache
        - hostPath:
            path: /
          name: rootfs
        - hostPath:
            path: /var/run
          name: run
        - hostPath:
            path: /sys/kernel/debug
          name: debugfs
        # # ---iploc-start
        # - emptyDir: {}
        #   name: datakit-ipdb
        # # ---iploc-end
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
---
# ConfigMap with datakit input configuration (TOML) and the pyroscope helper
# script mounted into the DaemonSet container via subPath.
apiVersion: v1
kind: ConfigMap
metadata:
  name: datakit-conf
  namespace: datakit
data:
  # mysql.conf: |-
  #   [inputs.mysql]
  #   ...
  # redis.conf: |-
  #   [inputs.redis]
  #   ...
  # ---pyroscope-start
  # Profile input: accepts pyroscope uploads on 0.0.0.0:4040.
  profile.conf: |-
    [[inputs.profile]]
    endpoints = ["/profiling/v1/input"]
    [[inputs.profile.pyroscope]]
    url = "0.0.0.0:4040"
    service = "pyroscope-demo"
    env = "dev"
    version = "0.0.0"
    [inputs.profile.pyroscope.tags]
    tag1 = "val1"
  # # Below is the variant for profiling one specific process (by PID file).
  # run_py.sh: |-
  #   #!/bin/sh
  #   sleep 10s
  #   read pid < /usr/local/datakit/.pid
  #   /tmp/pyroscope connect --pid $pid
  # Below is the variant for profiling all processes matching $TARGET_NAME.
  run_py.sh: |-
    #!/bin/bash
    sleep 10s
    readarray -t my_array < <(pgrep $TARGET_NAME)
    length=${#my_array[@]}
    echo $length
    for (( i=0; i<length; i++ ));
    do
      id=${my_array[$i]}
      echo $id
      nohup /tmp/pyroscope connect --pid $id > /tmp/$id.log 2>&1 &
    done
  # # Below is the variant for profiling the whole system via eBPF.
  # run_py.sh: |-
  #   #!/bin/sh
  #   sleep 10s
  #   /tmp/pyroscope ebpf
  # ---pyroscope-end