#
#                                +--------+
#                                |        |
#                                |   MR   |
#                                |        |
#                                +--------+
#                                odl  |6.0.3.100
#6:0:1::2                             |6:0:3::100
#6.0.1.2     vpp1 +--------+          |         +--------+
#       +---------+        |intervpp1 |intervpp2|        |vpp2
#                 |  VPP1  +----------+---------+  VPP2  +---------+
#                 |        |          |         |        |      6.0.2.2
#                 +--------+          |         +--------+      6:0:2::2
#                                     +mr
#                                     6.0.3.200
#

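# The two VPP instances (vpp1, vpp2) expose their CLIs on local TCP ports
# 5002 and 5003 (see start_vpp below); commands are pushed to them by piping
# into "nc 0 <port>". A hypothetical one-off query against vpp1, assuming it
# is already running, would be:
#   echo "show interface" | nc 0 5002
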
function set_arp
{
  odl_mac=`ip a show dev odl | grep "link/ether" | awk '{print $2}'`
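  # ARP entries for the odl address (6.0.3.100) on the host-xtr* interfaces,
  # which are presumably created by the generated VPP configs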
  echo "set ip arp host-xtr1 6.0.3.100 $odl_mac" | nc 0 5002
  echo "set ip arp host-xtr2 6.0.3.100 $odl_mac" | nc 0 5003
  echo "set ip arp host-xtr3 6.0.3.100 $odl_mac" | nc 0 5004

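  # static ARP/ND entries on vpp1 for the client behind veth_vpp1 (vppns1)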
  mac=`ip netns exec vppns1 ip a show dev veth_vpp1  | grep "link/ether" | awk '{print $2}'`
  echo "set ip arp host-vpp1 6.0.1.2 $mac" | nc 0 5002
  echo "set ip6 neighbor host-vpp1 6:0:1::2 $mac" | nc 0 5002

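  # static ARP/ND entries on vpp2 for the client behind veth_vpp2 (vppns2)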
  mac=`ip netns exec vppns2 ip a show dev veth_vpp2  | grep "link/ether" | awk '{print $2}'`
  echo "set ip arp host-vpp2 6.0.2.2 $mac" | nc 0 5003
  echo "set ip6 neighbor host-vpp2 6:0:2::2 $mac" | nc 0 5003

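  # each VPP learns the other's inter-vpp MAC: host-intervpp1 on vpp1 answers
  # for 6.0.3.1/6:0:3::1, host-intervpp2 on vpp2 for 6.0.3.2/6:0:3::2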
  mac=`echo "sh hard host-intervpp1" | nc 0 5002 | grep 'Ethernet address' | awk '{print $3}'`
  echo "set ip arp host-intervpp2 6.0.3.1 $mac" | nc 0 5003
  echo "set ip6 neighbor host-intervpp2 6:0:3::1 $mac" | nc 0 5003

  mac=`echo "sh hard host-intervpp2" | nc 0 5003 | grep 'Ethernet address' | awk '{print $3}'`
  echo "set ip arp host-intervpp1 6.0.3.2 $mac" | nc 0 5002
  echo "set ip6 neighbor host-intervpp1 6:0:3::2 $mac" | nc 0 5002
}
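
# A minimal convenience wrapper for the "pipe a command into a VPP CLI port"
# pattern used above; purely illustrative, nothing in this file relies on it.
function vpp_cli
{
  local port=$1
  shift
  echo "$@" | nc 0 $port
}
# e.g.: vpp_cli 5002 "sh hard host-intervpp1"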

function 2_node_topo_clean
{
  echo "Clearing all VPP instances.."
  pkill vpp --signal 9
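  # drop the shared-memory segments the killed VPP instances leave in /dev/shm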
  rm /dev/shm/*

  echo "Cleaning topology.."
  ip netns exec intervppns ifconfig vppbr down
  ip netns exec intervppns brctl delbr vppbr
  ip link del dev veth_vpp1 &> /dev/null
  ip link del dev veth_vpp2 &> /dev/null
  ip link del dev veth_intervpp1 &> /dev/null
  ip link del dev veth_intervpp2 &> /dev/null
  ip link del dev veth_odl &> /dev/null
  ip link del dev veth_mr &> /dev/null
  ip netns del vppns1 &> /dev/null
  ip netns del vppns2 &> /dev/null
  ip netns del intervppns &> /dev/null

  if [ "$1" != "no_odl" ] ; then
    odl_clear_all
  fi
}

function 2_node_topo_setup
{

  # create the client-side and inter-vpp namespaces
  ip netns add vppns1
  ip netns add vppns2
  ip netns add intervppns

  # create the vpp, odl and mr veth pairs; their veth_* ends go into intervppns
  ip link add veth_intervpp1 type veth peer name intervpp1
  ip link add veth_intervpp2 type veth peer name intervpp2
  ip link add veth_odl type veth peer name odl
  ip link add veth_mr type veth peer name mr
  ip link set dev intervpp1 up
  ip link set dev intervpp2 up
  ip link set dev odl up
  ip link set dev mr up
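  # move the veth peers into the inter-vpp namespace and bring them up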
  ip link set dev veth_intervpp1 up netns intervppns
  ip link set dev veth_intervpp2 up netns intervppns
  ip link set dev veth_odl up netns intervppns
  ip link set dev veth_mr up netns intervppns

  # create a bridge in intervppns and add the vpp, odl and mr interfaces to it
  ip netns exec intervppns brctl addbr vppbr
  ip netns exec intervppns brctl addif vppbr veth_intervpp1
  ip netns exec intervppns brctl addif vppbr veth_intervpp2
  ip netns exec intervppns brctl addif vppbr veth_odl
  ip netns exec intervppns brctl addif vppbr veth_mr
  ip netns exec intervppns ifconfig vppbr up

  # create and configure the 1st client-to-vpp veth pair
  ip link add veth_vpp1 type veth peer name vpp1
  ip link set dev vpp1 up
  ip link set dev veth_vpp1 up netns vppns1

  # create and configure the 2nd client-to-vpp veth pair
  ip link add veth_vpp2 type veth peer name vpp2
  ip link set dev vpp2 up
  ip link set dev veth_vpp2 up netns vppns2

  ip netns exec vppns1 \
  bash -c "
    ip link set dev lo up
    ip addr add 6.0.1.2/24 dev veth_vpp1
    ip route add 6.0.2.0/24 via 6.0.1.1
    ip addr add 6:0:1::2/64 dev veth_vpp1
    ip route add 6:0:2::0/64 via 6:0:1::1
  "

  ip netns exec vppns2 \
  bash -c "
    ip link set dev lo up
    ip addr add 6.0.2.2/24 dev veth_vpp2
    ip route add 6.0.1.0/24 via 6.0.2.1
    ip addr add 6:0:2::2/64 dev veth_vpp2
    ip route add 6:0:1::0/64 via 6:0:2::1
  "

  # set odl iface ip and disable checksum offloading
  ip addr add 6.0.3.100/24 dev odl
  ip addr add 6:0:3::100/64 dev odl
  ethtool --offload odl rx off tx off

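  # mr interface: the 6.0.3.200 endpoint shown below the MR box in the diagram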
  ip addr add 6.0.3.200/24 dev mr
  ethtool --offload mr rx off tx off

  # generate config files
  ./scripts/generate_config.py ${VPP_LITE_CONF} ${CFG_METHOD}

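  # launch the two VPP instances, CLI on port 5002 for vpp1 and 5003 for vpp2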
  start_vpp 5002 vpp1
  start_vpp 5003 vpp2

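  # give the freshly started instances a moment before configuring them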
  sleep 2
  echo "* Selected configuration method: $CFG_METHOD"
  if [ "$CFG_METHOD" == "cli" ] ; then
    echo "exec ${VPP_LITE_CONF}/vpp1.cli" | nc 0 5002
    echo "exec ${VPP_LITE_CONF}/vpp2.cli" | nc 0 5003
  elif [ "$CFG_METHOD" == "vat" ] ; then
    ${VPP_API_TEST} chroot prefix vpp1 script in ${VPP_LITE_CONF}/vpp1.vat
    ${VPP_API_TEST} chroot prefix vpp2 script in ${VPP_LITE_CONF}/vpp2.vat
  else
    echo "=== WARNING:"
    echo "=== Invalid configuration method selected!"
    echo "=== To resolve this, set the env variable CFG_METHOD to 'vat' or 'cli'."
    echo "==="
  fi

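  # push the two mapping definitions to ODL (skipped when called with no_odl)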
  if [ "$1" != "no_odl" ] ; then
    post_curl "add-mapping" ${ODL_CONFIG_FILE1}
    post_curl "add-mapping" ${ODL_CONFIG_FILE2}
  fi

  set_arp
}

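# The helpers and variables used above (start_vpp, post_curl, odl_clear_all,
# VPP_LITE_CONF, VPP_API_TEST, CFG_METHOD, ODL_CONFIG_FILE1/2) are expected to
# be provided by whoever sources this file. A hypothetical driver, assuming a
# config.sh that defines them:
#   source config.sh
#   source 2_node_topo.sh
#   2_node_topo_setup            # or: 2_node_topo_setup no_odl
#   # ... run tests from vppns1/vppns2 ...
#   2_node_topo_clean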