#SummerChallenge# Flannel Same-Node Communication
whale_life
Published 2022-07-21 16:31
Flannel Same-Node Communication
Flannel same-node communication relies on a veth pair: one end is placed inside the pod, and the other end sits on the host, attached as a port of the cni0 bridge, so pods on the same node reach each other directly at layer 2.
Unlike Calico in VXLAN mode, the pod's host-side interface does not use the proxy-arp feature, so the data path works just like traditional layer-2 switching plus layer-3 routing, which makes it easier to understand.
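To make the mechanism concrete, below is a rough sketch of the kind of setup a bridge-type CNI plugin performs for each pod, written as plain iproute2 commands. The namespace name demo-ns, the interface names veth-host/veth-pod and the address 10.244.1.100/24 are invented for illustration only; Flannel/CNI generates its own names and allocates addresses from the node's pod CIDR.
# create a network namespace that plays the role of a pod
ip netns add demo-ns
# create a veth pair: one end stays on the host, the other moves into the "pod"
ip link add veth-host type veth peer name veth-pod
ip link set veth-pod netns demo-ns
# enslave the host end to the cni0 bridge and bring it up
ip link set veth-host master cni0
ip link set veth-host up
# inside the namespace: rename the interface to eth0, add a pod IP, default route via cni0 (10.244.1.1)
ip netns exec demo-ns ip link set veth-pod name eth0
ip netns exec demo-ns ip addr add 10.244.1.100/24 dev eth0
ip netns exec demo-ns ip link set eth0 up
ip netns exec demo-ns ip route add default via 10.244.1.1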
Cluster environment
kubernetes v1.23.5
master 192.168.0.80
node1 192.168.0.81
node2 192.168.0.82
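To double-check the node names and addresses listed above, they can be read straight from the cluster (output omitted here):
[root@master ~]# kubectl get nodes -o wide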
Run the pods
Create a test deployment in the cluster and look at the two pods scheduled on node1:
pod1 10.244.1.2
pod2 10.244.1.3
[root@master ~]# kubectl create deployment cni-test --image=burlyluo/nettoolbox --replicas=3
[root@master ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
cni-test-777bbd57c8-5zhjd 1/1 Running 0 18s 10.244.2.6 node2.whale.com <none> <none>
cni-test-777bbd57c8-t6xhd 1/1 Running 0 18s 10.244.1.2 node1.whale.com <none> <none>
cni-test-777bbd57c8-whwcm 1/1 Running 0 18s 10.244.1.3 node1.whale.com <none> <none>
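As a side note, if the deployment had many replicas, the pods on node1 could be filtered directly; spec.nodeName is a supported field selector for pods (node name taken from the output above):
[root@master ~]# kubectl get pod -o wide --field-selector spec.nodeName=node1.whale.com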
Inspect pod1's interfaces and the corresponding interface on node1
[root@master ~]# kubectl exec -it cni-test-777bbd57c8-t6xhd -- bash
bash-5.1# ifconfig
eth0 Link encap:Ethernet HWaddr 6A:6D:35:69:37:FE
inet addr:10.244.1.2 Bcast:10.244.1.255 Mask:255.255.255.0
UP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1
RX packets:14 errors:0 dropped:0 overruns:0 frame:0
TX packets:1 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:1220 (1.1 KiB) TX bytes:42 (42.0 B)
bash-5.1# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 10.244.1.1 0.0.0.0 UG 0 0 0 eth0
10.244.0.0 10.244.1.1 255.255.0.0 UG 0 0 0 eth0
10.244.1.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
bash-5.1# ethtool -S eth0
NIC statistics:
peer_ifindex: 6
rx_queue_0_xdp_packets: 0
rx_queue_0_xdp_bytes: 0
rx_queue_0_xdp_drops: 0
# the interface on node1 corresponding to pod1
[root@node1 ~]# ip link show | grep ^6
6: veth9903a3d4@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP mode DEFAULT group default
[root@node1 ~]# ip -d link show veth9903a3d4
6: veth9903a3d4@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP mode DEFAULT group default
link/ether 2a:5e:4a:15:33:1c brd ff:ff:ff:ff:ff:ff link-netnsid 0 promiscuity 1
veth
bridge_slave state forwarding priority 32 cost 2 hairpin on guard off root_block off fastleave off learning on flood on port_id 0x8001 port_no 0x1 designated_port 32769 designated_cost 0 designated_bridge 8000.9a:50:49:2d:65:48 designated_root 8000.9a:50:49:2d:65:48 hold_timer 0.00 message_age_timer 0.00 forward_delay_timer 0.00 topology_change_ack 0 config_pending 0 proxy_arp off proxy_arp_wifi off mcast_router 1 mcast_fast_leave off mcast_flood on addrgenmode eui64 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
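As an aside, the peer ifindex that ethtool -S reports (peer_ifindex: 6) can also be read from sysfs, since for a veth device iflink holds the peer's ifindex. A quick sketch against the same pod and node:
[root@master ~]# kubectl exec -it cni-test-777bbd57c8-t6xhd -- cat /sys/class/net/eth0/iflink
# should print 6 (the same value as peer_ifindex above); then match that index on node1
[root@node1 ~]# ip -o link show | grep '^6:'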
Inspect pod2's interfaces and the corresponding interface on node1
[root@master ~]# kubectl exec -it cni-test-777bbd57c8-whwcm -- bash
bash-5.1# ifconfig
eth0 Link encap:Ethernet HWaddr CA:BB:AF:BD:E5:3A
inet addr:10.244.1.3 Bcast:10.244.1.255 Mask:255.255.255.0
UP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1
RX packets:13 errors:0 dropped:0 overruns:0 frame:0
TX packets:1 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:1178 (1.1 KiB) TX bytes:42 (42.0 B)
bash-5.1# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 10.244.1.1 0.0.0.0 UG 0 0 0 eth0
10.244.0.0 10.244.1.1 255.255.0.0 UG 0 0 0 eth0
10.244.1.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
bash-5.1# ethtool -S eth0
NIC statistics:
peer_ifindex: 7
rx_queue_0_xdp_packets: 0
rx_queue_0_xdp_bytes: 0
rx_queue_0_xdp_drops: 0
# the interface on node1 corresponding to pod2
[root@node1 ~]# ip -d link show vetha61ebf2f
7: vetha61ebf2f@if3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue master cni0 state UP mode DEFAULT group default
link/ether b6:c0:6c:fd:e6:e0 brd ff:ff:ff:ff:ff:ff link-netnsid 1 promiscuity 1
veth
bridge_slave state forwarding priority 32 cost 2 hairpin on guard off root_block off fastleave off learning on flood on port_id 0x8002 port_no 0x2 designated_port 32770 designated_cost 0 designated_bridge 8000.9a:50:49:2d:65:48 designated_root 8000.9a:50:49:2d:65:48 hold_timer 0.00 message_age_timer 0.00 forward_delay_timer 0.00 topology_change_ack 0 config_pending 0 proxy_arp off proxy_arp_wifi off mcast_router 1 mcast_fast_leave off mcast_flood on addrgenmode eui64 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535
Inspect node1's interfaces
Each pod's host-side veth interface shows the master cni0 option, which means it is attached as a port of the cni0 bridge.
By listing the ports of cni0 we can confirm that the host-side ends of both veth pairs are ports on that bridge.
Node1's routing table shows that 10.244.1.0/24 points to cni0, so same-node traffic is handled by the cni0 bridge, whose ports lead to the MAC addresses of the two pods' peer interfaces.
[root@node1 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
10.244.1.0 0.0.0.0 255.255.255.0 U 0 0 0 cni0
[root@node1 ~]# brctl show cni0
bridge name bridge id STP enabled interfaces
cni0 8000.9a50492d6548 no veth9903a3d4
vetha61ebf2f
[root@node1 ~]# brctl showmacs cni0
port no mac addr is local? ageing timer
1 2a:5e:4a:15:33:1c yes 0.00
1 2a:5e:4a:15:33:1c yes 0.00
2 b6:c0:6c:fd:e6:e0 yes 0.00
2 b6:c0:6c:fd:e6:e0 yes 0.00
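The same information is also available through iproute2, assuming those tools are present on the node: ip link show with the master filter lists the ports enslaved to cni0, and bridge fdb shows the bridge's forwarding entries.
[root@node1 ~]# ip link show master cni0
[root@node1 ~]# bridge fdb show br cni0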
Packet capture demo of same-node communication
To demonstrate same-node communication, we ping from pod1 to pod2 and capture the traffic along the path.
Captures are taken on each pod's eth0, on the corresponding host-side veth interfaces, and on cni0:
kubectl exec -it cni-test-777bbd57c8-t6xhd -- ping -c 1 10.244.1.3
pod1.cap
kubectl exec -it cni-test-777bbd57c8-t6xhd -- tcpdump -pne -i eth0 -w pod1.cap
pod1-veth.cap
tcpdump -pne -i veth9903a3d4 -w pod1-veth.cap
cni0.cap
tcpdump -pne -i cni0 -w cni0.cap
pod2-veth.cap
tcpdump -pne -i vetha61ebf2f -w pod2-veth.cap
pod2.cap
kubectl exec -it cni-test-777bbd57c8-whwcm -- tcpdump -pne -i eth0 -w pod2.cap
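After the ping completes, the capture files can be compared offline with tcpdump's read mode. Because a Linux bridge forwards frames without rewriting them, every capture point should show the same ICMP frames with source MAC 6A:6D:35:69:37:FE (pod1 eth0) and destination MAC CA:BB:AF:BD:E5:3A (pod2 eth0), typically preceded by the ARP request/reply that resolves 10.244.1.3 if it is not already cached. A sketch of how to check:
tcpdump -enr pod1.cap
tcpdump -enr cni0.cap icmp
tcpdump -enr pod2.cap arp or icmp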