Pre-check
Verify that CPU virtualization is supported on every node.
egrep -c '(vmx|svm)' /proc/cpuinfo # must be greater than 0
Test environment
ip address    | hostname
192.168.0.85  | vk8s-master01
192.168.0.207 | vk8s-worker01
Kubernetes version
1.32.4-1.1
MetalLB
VIP range: 192.168.0.70-75
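Only the VIP range is recorded here; a minimal sketch of the matching MetalLB L2 configuration (assumes MetalLB is already installed; the pool/advertisement names are arbitrary):

apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default-pool            # arbitrary name
  namespace: metallb-system
spec:
  addresses:
    - 192.168.0.70-192.168.0.75
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default-l2              # arbitrary name
  namespace: metallb-system
spec:
  ipAddressPools:
    - default-pool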
KubeVirt

export RELEASE=$(curl -s https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt)
kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-operator.yaml
kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-cr.yaml
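Optionally wait for the operator to finish rolling out before moving on (standard KubeVirt readiness checks):

kubectl get kubevirt.kubevirt.io/kubevirt -n kubevirt -o=jsonpath="{.status.phase}"   # should eventually report Deployed
kubectl -n kubevirt wait kv kubevirt --for condition=Available --timeout=300s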
virtctl
$ export VERSION=$(curl -s https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt)
$ wget https://github.com/kubevirt/kubevirt/releases/download/${VERSION}/virtctl-${VERSION}-linux-amd64
sudo mv virtctl-${VERSION}-linux-amd64 /usr/local/bin/virtctl
sudo chmod +x /usr/local/bin/virtctl
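Quick sanity check that the client is on PATH and can reach the cluster:

virtctl version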
Verify VM creation
Sample manifest
vi cirros-vm.yaml
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: testvm
spec:
  running: false
  template:
    metadata:
      labels:
        kubevirt.io/size: small
        kubevirt.io/domain: testvm
    spec:
      domain:
        devices:
          disks:
            - name: containerdisk
              disk:
                bus: virtio
            - name: cloudinitdisk
              disk:
                bus: virtio
          interfaces:
            - name: default
              masquerade: {}
        resources:
          requests:
            memory: 64M
      networks:
        - name: default
          pod: {}
      volumes:
        - name: containerdisk
          containerDisk:
            image: quay.io/kubevirt/cirros-container-disk-demo
        - name: cloudinitdisk
          cloudInitNoCloud:
            userDataBase64: SGkuXG4=
$ kubectl apply -f cirros-vm.yaml

Run

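The sample VM is created with running: false, so start it before attaching to the console:

virtctl start testvm
kubectl get vmis        # wait until testvm shows Running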
# Connect to the VM
➜ virtctl console testvm
Successfully connected to testvm console. The escape sequence is ^] # press Enter to get the login prompt
login as 'cirros' user. default password: 'gocubsgo'. use 'sudo' for root.
testvm login: cirros
Password:
$ hostname
testvm
# To close the console, press Ctrl+]
virtctl usage
Stop a VM
virtctl stop testvm
Enable VNC
virtctl vnc win10-hostpath-vm --proxy-only --address=0.0.0.0
Multus setup
WorkerNode
echo "network: {config: disabled}" > /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg
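The bridge NAD below attaches VMs to a host bridge named br-iptimes, so that bridge must already exist on the worker; disabling cloud-init's network config (above) keeps the manual config from being overwritten. A minimal netplan sketch, assuming the worker's physical NIC is enp1s0 (adjust to the real interface), saved as e.g. /etc/netplan/01-br-iptimes.yaml and applied with netplan apply:

network:
  version: 2
  ethernets:
    enp1s0:                 # assumption: the worker's physical NIC name
      dhcp4: false
  bridges:
    br-iptimes:
      interfaces: [enp1s0]
      dhcp4: true           # or give it a static 192.168.0.x address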
Calico configuration change
Run this on the master (or a bastion) node.
(Note: the operator will revert this direct DaemonSet edit back to what the CR says, so it is only temporary.)
$ kubectl edit -n calico-system daemonsets.apps calico-node
# Change the value below in the env section
## Before
- name: IP_AUTODETECTION_METHOD
  value: first-found
## After: point autodetection at a reachable gateway
- name: IP_AUTODETECTION_METHOD
  value: "can-reach=192.168.0.1"
https://docs.tigera.io/calico/latest/reference/configure-calico-node#ip-autodetection-methods

To make it persist, the Installation CR has to be changed.
kubectl edit installation default -n calico-system
spec:
  calicoNetwork:
    nodeAddressAutodetectionV4:
      canReach: 192.168.0.1
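After the change, each node's INTERNAL-IP should sit on the 192.168.0.x LAN:

kubectl get nodes -o wide   # INTERNAL-IP should be 192.168.0.85 / 192.168.0.207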
Multus deployment
kubectl apply -f https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset.yml
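Pod names vary by Multus version, but the daemonset should come up in kube-system:

kubectl get pods -n kube-system | grep -i multus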
NAD deployment
Tested in the default namespace.
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: br-iptimes-net
  namespace: default
# Alternative: DHCP IPAM
#spec:
#  config: '{
#    "cniVersion": "0.3.1",
#    "type": "bridge",
#    "bridge": "br-iptimes",
#    "ipam": {
#      "type": "dhcp"
#    }
#  }'
spec:
  config: '{
    "cniVersion": "0.3.1",
    "type": "bridge",
    "bridge": "br-iptimes",
    "ipam": {
      "type": "static",
      "addresses": [
        {
          "address": "192.168.0.76/24",
          "gateway": "192.168.0.1"
        }
      ]
    }
  }'
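Confirm the NAD was registered:

kubectl get network-attachment-definitions -n default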
Additional KubeVirt configuration
Enable the feature gate required for hostDisk (host path) volumes.
kubectl edit kubevirt kubevirt -n kubevirt
# Before:
# spec:
#   certificateRotateStrategy: {}
#   configuration:
#     developerConfiguration: {}
# After:
spec:
  configuration:
    developerConfiguration:
      featureGates:
        - HostDisk
kubectl rollout restart deployment virt-api -n kubevirt
kubectl rollout restart deployment virt-controller -n kubevirt
kubectl rollout restart daemonset virt-handler -n kubevirt
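Verify the feature gate is in place after the restarts:

kubectl get kubevirt kubevirt -n kubevirt -o jsonpath='{.spec.configuration.developerConfiguration.featureGates}'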
Ubuntu deployment test
# [ubuntu@vk8s-master01 sample (⎈|vk8s:default)]$ cat ubuntu-vm-mutus.yaml
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: ubuntu-vm
  namespace: default
spec:
  running: false
  template:
    metadata:
      labels:
        kubevirt.io/domain: ubuntu-vm
    spec:
      domain:
        cpu:
          cores: 2
        resources:
          requests:
            memory: 4Gi
        devices:
          disks:
            - name: os-disk
              disk:
                bus: virtio
              bootOrder: 2
            - name: ubuntu-iso
              cdrom:
                bus: sata
              bootOrder: 1
          interfaces:
            - name: default
              masquerade: {}
            - name: ext
              bridge: {}
              model: virtio
      networks:
        - name: default
          pod: {}
        - name: ext
          multus:
            networkName: default/br-iptimes-net
      volumes:
        - name: os-disk
          hostDisk:
            path: /data/ubuntu-vm-disk.img # 👉 disk image file the OS will be installed onto
            type: DiskOrCreate             # created automatically
        - name: ubuntu-iso
          hostDisk:
            path: /data/ubuntu-22.04.5-live-server-amd64.iso
            type: Disk

VNC access

sudo apt install tigervnc-viewer
virtctl vnc ubuntu-vm --proxy-only --address=0.0.0.0

A proxy port will be opened on the master node (where virtctl is run); connect to it with a VNC viewer.
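From another machine, point a VNC viewer at the master's address and the port that virtctl prints (the port value varies per run):

vncviewer 192.168.0.85:<port>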



Two network interfaces should be visible inside the VM.

The disk part throws an error, though…

Create the backing file directly on the worker node and test again
# Run the following on the node where the VM will be scheduled
truncate -s 50G /data/ubuntu-vm-disk.img
chown 107:107 /data/ubuntu-vm-disk.img # KubeVirt runs qemu as uid/gid 107
chmod 660 /data/ubuntu-vm-disk.img
# 88-kong-disk.img
# Need to confirm whether the command below works as well...
# dd if=/dev/zero of=/data/ubuntu-vm-disk.img bs=1M count=10240
The disk shows up now….
That suggests mounting a plain Kubernetes PVC would run into the same problem… some additional component seems necessary.
(The underlying issue: the block storage has to be provisioned first.)

Change the boot order
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: ubuntu-vm
  namespace: default
spec:
  running: false
  template:
    metadata:
      labels:
        kubevirt.io/domain: ubuntu-vm
    spec:
      domain:
        cpu:
          cores: 2
        resources:
          requests:
            memory: 4Gi
        devices:
          disks:
            - name: ubuntu-iso
              cdrom:
                bus: sata
              bootOrder: 2 # swapped
            - name: os-disk
              disk:
                bus: sata
              bootOrder: 1 # swapped
          interfaces:
            - name: default
              masquerade: {}
            - name: ext
              bridge: {}
              model: virtio
      networks:
        - name: default
          pod: {}
        - name: ext
          multus:
            networkName: default/br-iptimes-net
      volumes:
        - name: ubuntu-iso
          hostDisk:
            path: /data/ubuntu-22.04.5-live-server-amd64.iso
            type: Disk
        - name: os-disk
          hostDisk:
            path: /data/ubuntu-vm-disk.img
            type: DiskOrCreate
Longhorn
kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.8.1/deploy/longhorn.yaml
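Check that Longhorn came up and registered its StorageClass:

kubectl -n longhorn-system get pods
kubectl get storageclass        # a "longhorn" StorageClass should be listed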

CDI
Without this, PVC/block-based disks cannot be provisioned.
$ export VERSION=$(curl -s https://api.github.com/repos/kubevirt/containerized-data-importer/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')
$ kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-operator.yaml
$ kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/$VERSION/cdi-cr.yaml
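With CDI and Longhorn in place, a DataVolume can import an image into a Longhorn-backed PVC instead of relying on hostDisk. A minimal sketch, assuming a StorageClass named longhorn exists; the Ubuntu cloud image URL is only a placeholder source:

apiVersion: cdi.kubevirt.io/v1beta1
kind: DataVolume
metadata:
  name: ubuntu-dv
  namespace: default
spec:
  source:
    http:
      url: https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img # placeholder: any reachable image URL
  storage:
    storageClassName: longhorn
    resources:
      requests:
        storage: 20Gi

A VM can then reference it with a dataVolume volume (name: ubuntu-dv) instead of a hostDisk volume.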
Create VM
# Storage path
/dev/longhorn/pvc-e0818251-14bb-4150-821f-af71c95ef0fb
Kubevirt-manager
kubectl apply -f https://raw.githubusercontent.com/kubevirt-manager/kubevirt-manager/main/kubernetes/bundled.yaml
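Assuming the bundled manifest creates a Service in the kubevirt-manager namespace (check the actual name and port first), the UI can be reached with a port-forward:

kubectl get svc -n kubevirt-manager
kubectl port-forward -n kubevirt-manager svc/kubevirt-manager 8080:8080   # service name/port are assumptions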

