diff --git a/practice/chanhyle/elk/k8s.md b/practice/chanhyle/elk/k8s.md
new file mode 100644
index 0000000..d0935a6
--- /dev/null
+++ b/practice/chanhyle/elk/k8s.md
@@ -0,0 +1,495 @@
+## nginx 프로세스, 클라이언트 연결 관련 메트릭 정보 수집
+
+### 1. elasticsearch
+
+```yaml
+# elasticsearch-deploy.yaml
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: elasticsearch-config
+  labels:
+    app: elasticsearch
+data:
+  elasticsearch.yml: |-
+    network.host: 0.0.0.0
+    http.port: 9200
+    discovery.type: single-node
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: elasticsearch-deployment
+  labels:
+    app: elasticsearch
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: elasticsearch
+  template:
+    metadata:
+      labels:
+        app: elasticsearch
+    spec:
+      containers:
+        - name: elasticsearch
+          image: elasticsearch:7.17.18
+          env:
+            - name: discovery.type
+              value: "single-node"
+          ports:
+            - containerPort: 9200
+          volumeMounts:
+            - name: config-volume
+              mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
+              subPath: elasticsearch.yml
+      volumes:
+        - name: config-volume
+          configMap:
+            name: elasticsearch-config
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: elasticsearch-service
+spec:
+  selector:
+    app: elasticsearch
+  ports:
+    - protocol: TCP
+      name: elasticsearch-port
+      port: 9200
+      targetPort: 9200
+  type: LoadBalancer
+```
+
+```shell
+k create -f elasticsearch-deploy.yaml
+
+k delete cm elasticsearch-config
+k delete deploy elasticsearch-deployment
+k delete svc elasticsearch-service
+```
+
+- elasticsearch 관련 설정 : `elasticsearch.yml` 파일은 ConfigMap으로 구성하여 컨테이너에 volume mount
+- elasticsearch service는 kibana와 metricbeat 설정에서 `ELASTICSEARCH_SERVICE_SERVICE_HOST` 환경 변수로 참조되므로 필요함 (다만 외부 노출이 목적이 아니라면 type은 LoadBalancer 대신 ClusterIP로 충분)
+
+### 2. kibana
+
+```yaml
+# kibana-deploy.yaml
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kibana-config
+  labels:
+    app: kibana
+data:
+  kibana.yml: |-
+    server.port: 5601
+    server.host: "0.0.0.0"
+    elasticsearch.hosts: ["http://${ELASTICSEARCH_SERVICE_SERVICE_HOST}:9200"]
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kibana-deployment
+  labels:
+    app: kibana
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: kibana
+  template:
+    metadata:
+      labels:
+        app: kibana
+    spec:
+      containers:
+        - name: kibana
+          image: kibana:7.17.18
+          ports:
+            - containerPort: 5601
+          volumeMounts:
+            - name: config-volume
+              mountPath: /usr/share/kibana/config/kibana.yml
+              subPath: kibana.yml
+      volumes:
+        - name: config-volume
+          configMap:
+            name: kibana-config
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kibana-service
+spec:
+  selector:
+    app: kibana
+  ports:
+    - protocol: TCP
+      name: kibana-port
+      port: 5601
+      targetPort: 5601
+  type: LoadBalancer
+```
+
+```shell
+k create -f kibana-deploy.yaml
+
+k delete cm kibana-config
+k delete deploy kibana-deployment
+k delete svc kibana-service
+
+curl -XGET http://localhost:9200/_cat/indices?v
+```
+
+- elasticsearch와 마찬가지로 설정 파일을 ConfigMap을 이용하여 컨테이너에 적용
+- kibana는 k8s 외부에서 접근할 수 있어야 하므로 LoadBalancer(또는 NodePort) 타입 Service를 이용하여 외부로 expose
+
+### 3. metricbeat
+
+```yaml
+# metricbeat-kubernetes.yaml
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: metricbeat-daemonset-config
+  namespace: kube-system
+  labels:
+    k8s-app: metricbeat
+data:
+  metricbeat.yml: |-
+    metricbeat.config.modules:
+      # Mounted `metricbeat-daemonset-modules` configmap:
+      path: ${path.config}/modules.d/*.yml
+      # Reload module configs as they change:
+      reload.enabled: false
+
+    processors:
+      - add_cloud_metadata:
+
+    cloud.id: ${ELASTIC_CLOUD_ID}
+    cloud.auth: ${ELASTIC_CLOUD_AUTH}
+
+    output.elasticsearch:
+      hosts: ["http://${ELASTICSEARCH_SERVICE_SERVICE_HOST}:9200"]
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: metricbeat-daemonset-modules
+  namespace: kube-system
+  labels:
+    k8s-app: metricbeat
+data:
+  system.yml: |-
+    - module: system
+      period: 10s
+      metricsets:
+        #- cpu
+        #- load
+        #- memory
+        #- network
+        - process
+        #- process_summary
+        #- core
+        #- diskio
+        #- socket
+      processes: ['nginx']
+      #process.include_top_n:
+        #by_cpu: 5      # include top 5 processes by CPU
+        #by_memory: 5   # include top 5 processes by memory
+---
+# Deploy a Metricbeat instance per node for node metrics retrieval
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: metricbeat
+  namespace: kube-system
+  labels:
+    k8s-app: metricbeat
+spec:
+  selector:
+    matchLabels:
+      k8s-app: metricbeat
+  template:
+    metadata:
+      labels:
+        k8s-app: metricbeat
+    spec:
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+      serviceAccountName: metricbeat
+      terminationGracePeriodSeconds: 30
+      hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
+      containers:
+        - name: metricbeat
+          image: docker.elastic.co/beats/metricbeat:7.17.18
+          args: [
+              #"-c", "/etc/metricbeat.yml",
+              "-e",
+              "-system.hostfs=/hostfs",
+            ]
+          env:
+            - name: ELASTICSEARCH_HOST
+              value: elasticsearch
+            - name: ELASTICSEARCH_PORT
+              value: "9200"
+            - name: ELASTICSEARCH_USERNAME
+              value: elastic
+            - name: ELASTICSEARCH_PASSWORD
+              value: changeme
+            - name: ELASTIC_CLOUD_ID
+              value:
+            - name: ELASTIC_CLOUD_AUTH
+              value:
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          securityContext:
+            runAsUser: 0
+            # If using Red Hat OpenShift uncomment this:
+            #privileged: true
+          resources:
+            limits:
+              memory: 200Mi
+            requests:
+              cpu: 100m
+              memory: 100Mi
+          volumeMounts:
+            - name: config
+              mountPath: /usr/share/metricbeat/metricbeat.yml
+              readOnly: true
+              subPath: metricbeat.yml
+            - name: data
+              mountPath: /usr/share/metricbeat/data
+            - name: modules
+              mountPath: /usr/share/metricbeat/modules.d
+              readOnly: true
+            - name: proc
+              mountPath: /hostfs/proc
+              readOnly: true
+            - name: cgroup
+              mountPath: /hostfs/sys/fs/cgroup
+              readOnly: true
+      volumes:
+        - name: proc
+          hostPath:
+            path: /proc
+        - name: cgroup
+          hostPath:
+            path: /sys/fs/cgroup
+        - name: config
+          configMap:
+            defaultMode: 0640
+            name: metricbeat-daemonset-config
+        - name: modules
+          configMap:
+            defaultMode: 0640
+            name: metricbeat-daemonset-modules
+        - name: data
+          hostPath:
+            # When metricbeat runs as non-root user, this directory needs to be writable by group (g+w)
+            path: /var/lib/metricbeat-data
+            type: DirectoryOrCreate
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: metricbeat
+subjects:
+  - kind: ServiceAccount
+    name: metricbeat
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: metricbeat
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: metricbeat
+  namespace: kube-system
+subjects:
+  - kind: ServiceAccount
+    name: metricbeat
+    namespace: kube-system
+roleRef:
+  kind: Role
+  name: metricbeat
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: metricbeat-kubeadm-config
+  namespace: kube-system
+subjects:
+  - kind: ServiceAccount
+    name: metricbeat
+    namespace: kube-system
+roleRef:
+  kind: Role
+  name: metricbeat-kubeadm-config
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: metricbeat
+  labels:
+    k8s-app: metricbeat
+rules:
+  - apiGroups: [""]
+    resources:
+      - nodes
+      - namespaces
+      - events
+      - pods
+      - services
+    verbs: ["get", "list", "watch"]
+  # Enable this rule only if planing to use Kubernetes keystore
+  #- apiGroups: [""]
+  #  resources:
+  #  - secrets
+  #  verbs: ["get"]
+  - apiGroups: ["extensions"]
+    resources:
+      - replicasets
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["apps"]
+    resources:
+      - statefulsets
+      - deployments
+      - replicasets
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["batch"]
+    resources:
+      - jobs
+    verbs: ["get", "list", "watch"]
+  - apiGroups:
+      - ""
+    resources:
+      - nodes/stats
+    verbs:
+      - get
+  - nonResourceURLs:
+      - "/metrics"
+    verbs:
+      - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: metricbeat
+  # should be the namespace where metricbeat is running
+  namespace: kube-system
+  labels:
+    k8s-app: metricbeat
+rules:
+  - apiGroups:
+      - coordination.k8s.io
+    resources:
+      - leases
+    verbs: ["get", "create", "update"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: metricbeat-kubeadm-config
+  namespace: kube-system
+  labels:
+    k8s-app: metricbeat
+rules:
+  - apiGroups: [""]
+    resources:
+      - configmaps
+    resourceNames:
+      - kubeadm-config
+    verbs: ["get"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: metricbeat
+  namespace: kube-system
+  labels:
+    k8s-app: metricbeat
+```
+
+```shell
+k create -f metricbeat-kubernetes.yaml
+
+k delete cm metricbeat-daemonset-config -n kube-system
+k delete cm metricbeat-daemonset-modules -n kube-system
+k delete ds metricbeat -n kube-system
+k delete clusterrolebinding metricbeat
+k delete rolebinding metricbeat -n kube-system
+k delete rolebinding metricbeat-kubeadm-config -n kube-system
+k delete clusterrole metricbeat
+k delete role metricbeat -n kube-system
+k delete role metricbeat-kubeadm-config -n kube-system
+k delete serviceaccount metricbeat -n kube-system
+```
+
+- `kube-system` namespace에 object들 생성
+- `metricbeat`는 DaemonSet으로 생성하여 모든 노드에 pod이 생성되도록 함
+- `metricbeat-daemonset-config` ConfigMap
+  - 메트릭 정보를 어디로 보낼 것인지 설정 : elasticsearch or logstash
+- `metricbeat-daemonset-modules` ConfigMap
+
+  - 어떤 메트릭 정보를 어떻게 수집할 것인지 설정
+  - system module : 호스트와 특정 프로세스의 cpu, memory 등 메트릭을 수집
+
+- 두 ConfigMap 모두 volume mount path 설정 필요
+  - 원래 각각의 config이 있던 path로 덮어쓰기
+  - DaemonSet을 실행할 때, `-c` 옵션으로 `/etc/metricbeat.yml`을 포함하는 argument에서 삭제
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: metricbeat-daemonset-modules
+  namespace: kube-system
+  labels:
+    k8s-app: metricbeat
+data:
+  system.yml: |-
+    - module: nginx
+      period: 10s
+      metricsets: ["stubstatus"]
+      enabled: true
+      hosts: ["http://localhost:31139"]
+      server_status_path: "nginx_status"
+```
+
+- nginx module : nginx server의 클라이언트 관련 메트릭을 수집
+
+```config
+# /etc/nginx/conf.d/default.conf
+...
+
+# 추가
+location = /nginx_status {
+    stub_status;
+}
+...
+```
+
+```shell
+nginx -s reload
+```
+
+- nginx 컨테이너에서 추가 설정 필요
+- `/nginx_status` url로 요청이 온 경우, `stub_status` 옵션을 활성화
+  - 해당 옵션은 클라이언트 관련 메트릭 정보를 주는 nginx 내장 모듈
diff --git a/practice/chanhyle/elk/vm.md b/practice/chanhyle/elk/vm.md
new file mode 100644
index 0000000..d2e8771
--- /dev/null
+++ b/practice/chanhyle/elk/vm.md
@@ -0,0 +1,280 @@
+## nginx access.log 로그 파일 및 시스템 메트릭 정보 수집
+
+### 1. jdk
+
+```shell
+sudo apt-get update
+
+# jdk 설치
+sudo apt-get install openjdk-8-jdk
+java -version
+
+# nginx 설치
+sudo apt-get install nginx
+```
+
+### 2. elasticsearch
+
+```shell
+# elasticsearch 설치
+wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
+sudo apt-get install apt-transport-https
+echo "deb https://artifacts.elastic.co/packages/7.x/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-7.x.list
+sudo apt-get install elasticsearch
+```
+
+```shell
+# /etc/elasticsearch/elasticsearch.yml
+
+network.host: localhost # 수정
+http.port: 9200 # 수정
+discovery.type: single-node # 추가
+```
+
+```shell
+# /etc/elasticsearch/jvm.options
+
+-Xms512m # 추가
+-Xmx512m # 추가
+```
+
+```shell
+sudo systemctl start elasticsearch.service
+sudo systemctl enable elasticsearch.service
+
+curl -X GET "localhost:9200"
+```
+
+```json
+{
+  "name": "test",
+  "cluster_name": "elasticsearch",
+  "cluster_uuid": "PrTln484Rli05y8HP2L7Vg",
+  "version": {
+    "number": "7.17.18",
+    "build_flavor": "default",
+    "build_type": "deb",
+    "build_hash": "8682172c2130b9a411b1bd5ff37c9792367de6b0",
+    "build_date": "2024-02-02T12:04:59.691750271Z",
+    "build_snapshot": false,
+    "lucene_version": "8.11.1",
+    "minimum_wire_compatibility_version": "6.8.0",
+    "minimum_index_compatibility_version": "6.0.0-beta1"
+  },
+  "tagline": "You Know, for Search"
+}
+```
+
+### 3. kibana
+
+```shell
+# kibana 설치
+sudo apt-get install kibana
+```
+
+```shell
+# /etc/kibana/kibana.yml
+
+server.port: 5601 # 수정
+server.host: "localhost" # 수정
+elasticsearch.hosts: ["http://localhost:9200"] # 수정
+```
+
+```shell
+sudo systemctl start kibana
+sudo systemctl enable kibana
+
+# 브라우저에서 vm 프로세스로 접속
+http://192.168.64.19:5601/
+
+curl -XGET http://localhost:9200/_cat/indices?v
+```
+
+### 4. filebeat
+
+```shell
+# filebeat 설치
+sudo apt-get install filebeat
+```
+
+```shell
+# /etc/filebeat/filebeat.yml
+
+...
+# ============================== Filebeat inputs ===============================
+
+filebeat.inputs:
+
+# Each - is an input. Most options can be set at the input level, so
+# you can use different inputs for various configurations.
+# Below are the input specific configurations.
+
+# log is an input for collecting log messages from files.
+- type: log
+
+  # Unique ID among all inputs, an ID is required.
+  id: nginx-log
+
+  # Change to true to enable this input configuration.
+  enabled: true
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  paths:
+    - /var/log/nginx/access.log
+    #- c:\programdata\elasticsearch\logs\*
+    #document_type: syslog
+...
+# ================================== Outputs ===================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+# ---------------------------- Elasticsearch Output ----------------------------
+# logstash를 거쳐서 갈 것이기 때문에 주석 처리
+#output.elasticsearch:
+  # Array of hosts to connect to.
+  #hosts: ["localhost:9200"]
+
+  # Protocol - either `http` (default) or `https`.
+  #protocol: "https"
+
+  # Authentication credentials - either API key or username/password.
+  #api_key: "id:api_key"
+  #username: "elastic"
+  #password: "changeme"
+
+# ------------------------------ Logstash Output -------------------------------
+output.logstash:
+  # The Logstash hosts
+  enabled: true
+  hosts: ["localhost:5044"]
+
+  # Optional SSL. By default is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+...
+```
+
+- inputs
+  - path : 수집할 대상(로그 파일)을 지정
+  - enabled : true로 설정
+- output
+  - elasticsearch 관련 주석 처리
+  - logstash 관련 주석 해제
+
+```shell
+cd modules.d/
+mv system.yml system.yml.disabled
+```
+
+- 기본 설정으로 `Configured paths: [/var/log/auth.log* /var/log/secure*]` 등 system 관련 파일 또한 관련 로그를 파싱하는데, 이를 해제하면 더 보기 편함
+
+```shell
+sudo systemctl start filebeat
+sudo systemctl enable filebeat
+sudo systemctl restart filebeat
+```
+
+### 5. logstash
+
+```shell
+# logstash 설치
+sudo apt-get install logstash
+```
+
+```
+# /etc/logstash/logstash.conf
+
+# Sample Logstash configuration for creating a simple
+# Beats -> Logstash -> Elasticsearch pipeline.
+
+input {
+  beats {
+    port => 5044
+  }
+}
+
+filter {
+  grok {
+    match => { "message" => "%{COMBINEDAPACHELOG}" }
+  }
+}
+
+
+output {
+  elasticsearch {
+    hosts => ["localhost:9200"]
+    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
+    user => "elastic"
+    password => "3L5xGK9WNSiNVTPT5AHR"
+  }
+}
+```
+
+- filter를 통해 원하는 데이터 구조로 파싱
+- output
+  - hosts : elasticsearch 로 전송
+  - index : 저장할 인덱스 설정(새로 생성도 됨)
+  - user, password : elasticsearch에서 id, pw를 설정한 경우에 넣어주어야 함
+
+```yaml
+# /etc/logstash/logstash.yml
+
+# ------------ Pipeline Configuration Settings --------------
+#
+# Where to fetch the pipeline configuration for the main pipeline
+#
+path.config: "/etc/logstash/logstash.conf"
+#
+```
+
+- config file path를 포함시켜야 적용이 됨
+
+```shell
+sudo systemctl start logstash
+sudo systemctl enable logstash
+sudo systemctl status logstash
+
+systemctl restart logstash.service
+# restart가 되지 않는 경우 강제로 process kill
+ps -ef | grep logstash
+kill -9 30898
+```
+
+### 6. metricbeat
+
+```shell
+# metricbeat 설치
+sudo apt-get install metricbeat
+
+sudo systemctl enable metricbeat
+sudo systemctl start metricbeat
+```
+
+```shell
+# metricbeat.yml
+
+metricbeat.modules:
+- module: system
+  metricsets:
+    - cpu
+    - memory
+    #- network
+    #- filesystem
+    #- diskio
+    #- process
+  enabled: true
+  period: 30s  # 30초마다 메트릭을 수집하도록 설정
+
+output.logstash:
+  # The Logstash hosts
+  hosts: ["localhost:5044"]
+```
+
+- system 모듈에서 cpu, memory 관련 메트릭 정보 수집
+- 메트릭 정보를 logstash로 전송