01 - Create a Pod YAML file with two containers that use the image alpine:3.12.0. Provide a command for both containers that keep them running forever.

apiVersion: v1
kind: Pod
metadata:
  name: business-app
  labels:
    section: 08-state-persistence
spec:
  containers:
    # Each container runs an endless sleep loop so the Pod never exits.
    - name: alpine-1
      image: alpine:3.12.0
      command: ["/bin/sh", "-c"]
      args:
        # >- folds the lines into a single command string (same value as
        # the original plain multi-line scalar, but the intent is explicit).
        - >-
          echo "starting alpine-1";
          while true; do sleep 1; done;
    - name: alpine-2
      image: alpine:3.12.0
      command: ["/bin/sh", "-c"]
      args:
        - >-
          echo "starting alpine-2";
          while true; do sleep 1; done;

02 - Define a Volume of type emptyDir for the Pod. Container 1 should mount the Volume to path /etc/a, and container 2 should mount the Volume to path /etc/b.

apiVersion: v1
kind: Pod
metadata:
  name: business-app
  labels:
    section: 08-state-persistence
spec:
  containers:
    # Both containers mount the same emptyDir volume, so files written
    # under /etc/a in alpine-1 are visible under /etc/b in alpine-2.
    - name: alpine-1
      image: alpine:3.12.0
      command: ["/bin/sh", "-c"]
      args:
        # >- folds the lines into one command string (same value as the
        # original plain scalar).
        - >-
          echo "starting alpine-1";
          while true; do sleep 1; done;
      volumeMounts:
        - name: ctnr-vol
          mountPath: /etc/a
    - name: alpine-2
      image: alpine:3.12.0
      command: ["/bin/sh", "-c"]
      args:
        - >-
          echo "starting alpine-2";
          while true; do sleep 1; done;
      volumeMounts:
        - name: ctnr-vol
          mountPath: /etc/b
  volumes:
    # emptyDir lives as long as the Pod and is shared by its containers.
    - name: ctnr-vol
      emptyDir: {}

03 - Open an interactive shell for container 1 and create the directory data in the mount path. Navigate to the directory and create the file hello.txt with the contents “Hello World.” Exit out of the container.

$ kubectl exec -it business-app -c alpine-1 -- sh  
	/ # cd etc/a
	/etc/a # mkdir data
	/etc/a # ls
		data
	/etc/a # cd data/
	/etc/a/data # echo "Hello World" >> hello.txt
	/etc/a/data # ls
		hello.txt
	/etc/a/data # cat hello.txt 
		Hello World
	/etc/a/data # exit

04 - Open an interactive shell for container 2 and navigate to the directory /etc/b/data. Inspect the contents of file hello.txt. Exit out of the container.

$ kubectl exec -it business-app -c alpine-2 -- sh
	/ # cd etc/b/data/
	/etc/b/data # ls
		hello.txt
	/etc/b/data # cat hello.txt 
		Hello World
	/etc/b/data # exit

05 - Create a PersistentVolume named logs-pv that maps to the hostPath /var/logs. The access modes should be ReadWriteOnce and ReadOnlyMany. Provision a storage capacity of 5 Gi. Ensure that the status of the PersistentVolume shows Available.

apiVersion: v1
kind: PersistentVolume
metadata:
  name: logs-pv
  labels:
    section: 08-state-persistence
spec:
  # Empty storageClassName opts this PV out of dynamic provisioning, so
  # only claims that also set storageClassName: "" can bind to it.
  storageClassName: ""
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
    - ReadOnlyMany
  # hostPath backs the volume with a directory on the node's filesystem.
  hostPath:
    path: /var/logs
$ kubectl apply -f pv.yaml                          
	persistentvolume/logs-pv created

$ kubectl get pv           
	NAME      CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
	logs-pv   5Gi        RWO,ROX        Retain           Available                                   2s

06 - Create a PersistentVolumeClaim named logs-pvc. The access mode it uses is ReadWriteOnce. Request a capacity of 2 Gi. Ensure that the status of the PersistentVolumeClaim shows Bound.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: logs-pvc
  labels:
    section: 08-state-persistence
spec:
  # Must match the PV's empty storageClassName for static binding.
  storageClassName: ""
  resources:
    requests:
      # 2Gi is requested; binding to the 5Gi logs-pv still satisfies it.
      storage: 2Gi
  accessModes:
    - ReadWriteOnce
$ kubectl apply -f pvc.yaml                         
	persistentvolumeclaim/logs-pvc created

$ kubectl get pv,pvc
	NAME                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM           STORAGECLASS   REASON   AGE
	persistentvolume/logs-pv   5Gi        RWO,ROX        Retain           Bound    ckad/logs-pvc                           9m37s

	NAME                             STATUS   VOLUME    CAPACITY   ACCESS MODES   STORAGECLASS   AGE
	persistentvolumeclaim/logs-pvc   Bound    logs-pv   5Gi        RWO,ROX                       3s

07 - Mount the PersistentVolumeClaim in a Pod running the image nginx at the mount path /var/log/nginx.

apiVersion: v1
kind: Pod
metadata:
  name: use-pvc
  labels:
    section: 08-state-persistence
spec:
  containers:
    - name: nginx
      image: nginx
      volumeMounts:
        # nginx writes access.log/error.log here; they land on the PV.
        - name: logs-vol
          mountPath: /var/log/nginx
  volumes:
    # The Pod consumes the PV indirectly, through the claim.
    - name: logs-vol
      persistentVolumeClaim:
        claimName: logs-pvc
$ kubectl apply -f pod.yaml                        
	pod/use-pvc created

$ kubectl get pv,pvc,pods                               
	NAME                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM           STORAGECLASS   REASON   AGE
	persistentvolume/logs-pv   5Gi        RWO,ROX        Retain           Bound    ckad/logs-pvc                           14m

	NAME                             STATUS   VOLUME    CAPACITY   ACCESS MODES   STORAGECLASS   AGE
	persistentvolumeclaim/logs-pvc   Bound    logs-pv   5Gi        RWO,ROX                       5m9s

	NAME               READY   STATUS    RESTARTS   AGE
	pod/use-pvc        1/1     Running   0          5s

$ kubectl describe pvc logs-pvc     
	Name:          logs-pvc
	Namespace:     ckad
	Status:        Bound
	Volume:        logs-pv
	Labels:        section=08-state-persistence
	Capacity:      5Gi         # 2Gi was requested; the claim binds the whole 5Gi PV
	Access Modes:  RWO,ROX
	VolumeMode:    Filesystem
	Used By:       use-pvc     # consumed by the Pod named use-pvc

$ kubectl describe pods use-pvc
	Name:         use-pvc
	Namespace:    ckad
	Containers:
	    Mounts:
	      /var/log/nginx from logs-vol (rw)
	Volumes:
	  logs-vol:
	    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
	    ClaimName:  logs-pvc
	    ReadOnly:   false

08 - Open an interactive shell to the container and create a new file named my-nginx.log in /var/log/nginx. Exit out of the Pod.

$ kubectl get pods -o wide            
	NAME      READY   STATUS     IP            NODE           
	use-pvc   1/1     Running    10.244.2.17   kind-worker2

$ kubectl exec -it use-pvc -- bash
	root@use-pvc:/# cd /var/log/nginx/
	root@use-pvc:/var/log/nginx# touch my-nginx.log
	root@use-pvc:/var/log/nginx# ls
		access.log  error.log  my-nginx.log
	root@use-pvc:/var/log/nginx# exit

# Navigating to the directory chosen as the PV's hostPath, on the node.
# Coincidentally the path is similar in the container and in the PV;
# otherwise you would look under <pv-path>/<container-path>.
$ docker exec -it f63ee68308cb bash
    root@kind-worker2:/# cd /var/logs/
	  root@kind-worker2:/var/logs# ls
		  access.log  error.log  my-nginx.log
	  root@kind-worker2:/var/logs#

09 - Delete the Pod and re-create it with the same YAML manifest. Open an interactive shell to the Pod, navigate to the directory /var/log/nginx, and find the file you created before.

$ kubectl delete -f pod.yaml                         
	pod "use-pvc" deleted

$ kubectl apply -f pod.yaml 
	pod/use-pvc created

# Lucky: the Pod was scheduled on the same node (kind-worker2)
$ kubectl get pods -o wide 
	NAME      READY   STATUS    RESTARTS   NODE        
	use-pvc   1/1     Running   0          kind-worker2

$ kubectl exec -it use-pvc -- bash
	root@use-pvc:/# cd var/log/nginx/
	root@use-pvc:/var/log/nginx# ls
		access.log  error.log  my-nginx.log
	root@use-pvc:/var/log/nginx# exit