-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy path: ipfailover.sh
executable file
·217 lines (183 loc) · 7.48 KB
/
ipfailover.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
#!/bin/bash
# End-to-end tests for OpenShift ipfailover (keepalived): VRRP id offsets,
# HA services behind a NodePort, and preemption strategy.
#
# Required environment:
#   MASTER_IP      - OpenShift master host (ssh root access + API on :8443)
#   NODE_NAME_1/2  - two schedulable node names used for labelling
#   VIPS           - virtual IP pool, e.g. "10.0.0.10-10.0.0.13" (>= 4 IPs)
#   LOCAL_REGISTRY, VERSION - image registry/tag for router & ipfailover images

# Color variables (BRed, BGreen, BBlue, NC) used by the echo -e calls below.
source ./color.sh

# Pull required settings from the environment. The original no-op
# self-assignments (MASTER_IP=$MASTER_IP) are replaced with explicit
# defaults plus an early warning so a missing variable does not surface
# as an obscure failure deep inside a test.
MASTER_IP=${MASTER_IP:-}
NODE1=${NODE_NAME_1:-}
NODE2=${NODE_NAME_2:-}
VIPS=${VIPS:-}
PROJECT=ipf
for required_var in MASTER_IP NODE1 NODE2 VIPS; do
    [ -n "${!required_var}" ] || echo "WARNING: $required_var is empty" >&2
done
#######################################
# Prepare the test user and a fresh project:
# copy the admin kubeconfig from the master, log in as the test user,
# recreate project $PROJECT, and grant the privileged SCC to its default SA.
# Globals:   MASTER_IP, PROJECT (read); colors BRed/NC
# Outputs:   progress/error messages
# Exits:     non-zero on any unrecoverable setup failure
#######################################
function prepare_user() {
    # Copy the cluster-admin kubeconfig; later admin-level oc calls use it
    # via --config admin.kubeconfig.
    scp "root@$MASTER_IP:/etc/origin/master/admin.kubeconfig" ./
    if [ $? -ne 0 ]; then
        echo -e "${BRed}Failed to copy admin kubeconfig${NC}"
        exit 1
    fi
    # Log in as the regular test user.
    # NOTE(review): credentials are hardcoded in the script — consider
    # moving them to environment variables or a secret store.
    oc login "https://$MASTER_IP:8443" -u bmeng -p redhat --insecure-skip-tls-verify=false
    if [ $? -ne 0 ]; then
        echo -e "${BRed}Failed to login${NC}"
        exit 1
    fi
    # Best-effort delete of a leftover project from a previous run (this is
    # expected to fail when the project does not exist), then wait until the
    # server has finished tearing it down before recreating it.
    oc delete project "$PROJECT"
    until [ "$(oc get project | grep -c "$PROJECT")" -eq 0 ]; do
        echo -e "Waiting for project to be deleted on server"
        sleep 5
    done
    sleep 10
    # Create a fresh project for this run.
    oc new-project "$PROJECT"
    if [ $? -ne 0 ]; then
        echo -e "${BRed}Failed to create project${NC}"
        exit 1
    fi
    # The ipfailover/ha-service pods need host networking, so the project's
    # default service account needs the privileged SCC.
    oc adm policy add-scc-to-user privileged "system:serviceaccount:$PROJECT:default" --config admin.kubeconfig
    if [ $? -ne 0 ]; then
        echo -e "${BRed}Failed to grant privileged permission${NC}"
        exit 1
    fi
}
#######################################
# Expand an IPv4 dash-range into the global array `expandedset`.
# Only the last octet may vary; the base A.B.C is taken from the first
# address (matches the original behaviour).
# Globals:   expandedset (written)
# Arguments: $1 - single IP ("10.0.0.1") or range ("10.0.0.1-10.0.0.4")
#######################################
function expand_ipv4_range(){
    expandedset=()
    # Parameter expansion replaces the per-field awk/cut subprocesses.
    local first=${1%%-*}     # address before the dash (whole arg if no dash)
    local second=${1#*-}     # address after the dash (equals $1 if no dash)
    if [[ "$1" != *-* ]]; then
        # Single address, nothing to expand.
        expandedset=("$first")
    else
        local base=${first%.*}      # first three octets
        local start=${first##*.}    # last octet of the range start
        local end=${second##*.}     # last octet of the range end
        local n
        # Quoted += append is splitting/globbing-safe, unlike the original
        # expandedset=(${expandedset[@]} ...) rebuild.
        for ((n = start; n <= end; n++)); do
            expandedset+=("${base}.${n}")
        done
    fi
}
#######################################
# Verify the candidate VIPs are free, then split them into two pairs.
# Globals:   VIPS (read), expandedset (via expand_ipv4_range),
#            VIP_1, VIP_2 (written)
# Exits:     non-zero when fewer than 4 VIPs were supplied or a VIP
#            already answers ping (i.e. is owned by some host).
#######################################
function check_ips(){
    expand_ipv4_range "$VIPS"
    # VIP_1/VIP_2 index elements 0..3 below; fail fast if the pool is short.
    if [ "${#expandedset[@]}" -lt 4 ]; then
        echo -e "${BRed}Need at least 4 VIPs, got ${#expandedset[@]}${NC}"
        exit 1
    fi
    local i
    for i in "${expandedset[@]}"; do
        # BUGFIX: the original tested `$? -ne 1`, which silently exited with
        # status 0 both when the ping SUCCEEDED and when ping itself errored
        # (exit 2, e.g. resolution failure). Only a reachable address — one
        # already claimed by another host — should abort, and loudly.
        if ping -c 1 "$i" > /dev/null 2>&1; then
            echo -e "${BRed}VIP $i is already in use${NC}"
            exit 1
        fi
    done
    VIP_1="${expandedset[0]},${expandedset[1]}"
    VIP_2="${expandedset[2]},${expandedset[3]}"
    echo "$VIP_1"
    echo "$VIP_2"
}
#######################################
# Scenario: two ipfailover deployments, one with the default VRRP id and
# one with --vrrp-id-offset=50, each watching a router pinned to its node.
# Verifies the generated keepalived.conf ids and that the route is
# reachable through every VIP.
# Globals: NODE1, NODE2, VIP_1, VIP_2, expandedset, LOCAL_REGISTRY, VERSION
#######################################
function test_offset(){
    echo -e "$BGreen Test ipfailover with vrrp_id_offset $NC"
    # Pin one router per node via labels.
    oc label node "$NODE1" ha=red --overwrite --config admin.kubeconfig
    oc label node "$NODE2" ha=blue --overwrite --config admin.kubeconfig
    # One router deployment per label.
    oc adm policy add-scc-to-user hostnetwork -z router --config admin.kubeconfig
    oc adm router router-red --selector=ha=red --config admin.kubeconfig --images="$LOCAL_REGISTRY/openshift3/ose-haproxy-router:$VERSION"
    oc adm router router-blue --selector=ha=blue --config admin.kubeconfig --images="$LOCAL_REGISTRY/openshift3/ose-haproxy-router:$VERSION"
    # Block until both router pods report Running.
    until [ "$(oc get pod --config admin.kubeconfig | grep -v deploy | grep router | grep -c Running)" -ge 2 ]; do
        sleep 5
    done
    echo -e "$BBlue Create ipfailover $NC"
    # One ipfailover deployment per router; blue gets the id offset.
    oc adm policy add-scc-to-user privileged -z ipfailover --config admin.kubeconfig
    oc adm ipfailover ipf-red --create --selector=ha=red --virtual-ips="${VIP_1}" --watch-port=80 --replicas=1 --service-account=ipfailover --config admin.kubeconfig --images="$LOCAL_REGISTRY/openshift3/ose-keepalived-ipfailover:$VERSION"
    oc adm ipfailover ipf-blue --create --selector=ha=blue --virtual-ips="${VIP_2}" --watch-port=80 --replicas=1 --service-account=ipfailover --vrrp-id-offset=50 --config admin.kubeconfig --images="$LOCAL_REGISTRY/openshift3/ose-keepalived-ipfailover:$VERSION"
    # Block until both keepalived pods report Running.
    until [ "$(oc get pod --config admin.kubeconfig | grep -v deploy | grep ipf | grep -c Running)" -ge 2 ]; do
        sleep 5
    done
    echo -e "$BBlue Check the value in keepalived.conf $NC"
    # Show the virtual_router_id lines actually rendered in each pod.
    local pod
    pod=$(oc get po --config admin.kubeconfig | grep ipf-red | grep -v deploy | cut -d " " -f1)
    oc exec "$pod" --config admin.kubeconfig -- grep -i id /etc/keepalived/keepalived.conf
    pod=$(oc get po --config admin.kubeconfig | grep ipf-blue | grep -v deploy | cut -d " " -f1)
    oc exec "$pod" --config admin.kubeconfig -- grep -i id /etc/keepalived/keepalived.conf
    echo -e "$BBlue Create pod svc route for test $NC"
    oc create -f https://raw.githubusercontent.com/openshift-qe/v3-testfiles/master/routing/unsecure/list_for_unsecure.json
    until [ "$(oc get pod | grep caddy | grep -c Running)" -ge 2 ]; do
        sleep 5
    done
    echo -e "$BBlue Access the ipfailover via port $NC"
    # The route must answer on every VIP in the pool.
    local vip
    for vip in "${expandedset[@]}"; do
        set -x
        curl -s --resolve "unsecure.example.com:80:$vip" http://unsecure.example.com/
        set +x
    done
}
#######################################
# Scenario: ipfailover guarding an HA service exposed via NodePort.
# Both nodes get the same label, the service's nodePort becomes the
# keepalived watch-port, and the service is then probed through the VIPs.
# Globals: NODE1, NODE2, PROJECT, VIP_1, expandedset, LOCAL_REGISTRY, VERSION
#######################################
function test_svc(){
    echo -e "$BGreen Test ipfailover for ha service $NC"
    # Same label on both nodes: the service's pods may land on either.
    oc label node "$NODE1" ha-service=ha --overwrite --config admin.kubeconfig
    oc label node "$NODE2" ha-service=ha --overwrite --config admin.kubeconfig
    echo -e "$BBlue Create ha service $NC"
    # The HA pods need host networking, hence the privileged SCC.
    oc adm policy add-scc-to-user privileged -z default -n "$PROJECT" --config admin.kubeconfig
    oc create -f https://raw.githubusercontent.com/openshift-qe/v3-testfiles/master/networking/ha-network-service.json
    # Block until both backend pods report Running.
    until [ "$(oc get pod | grep ha | grep -c Running)" -ge 2 ]; do
        sleep 5
    done
    # Repoint the service at the pods' port and expose it as a NodePort,
    # then read back the allocated nodePort for keepalived to watch.
    oc patch svc ha-service -p '{"spec": {"ports": [{"port":9736,"targetPort":8080}]}}'
    oc patch svc ha-service -p '{"spec": {"type":"NodePort"}}'
    local nodeport
    nodeport=$(oc get svc ha-service -o jsonpath={.spec.ports[0].nodePort})
    echo -e "$BBlue Create ipfailover $NC"
    oc adm ipfailover ipf --create --selector=ha-service=ha --virtual-ips="${VIP_1}" --watch-port="${nodeport}" --replicas=2 --service-account=ipfailover --config admin.kubeconfig --images="$LOCAL_REGISTRY/openshift3/ose-keepalived-ipfailover:$VERSION"
    # Block until both keepalived pods report Running.
    until [ "$(oc get pod --config admin.kubeconfig | grep ipf | grep -v deploy | grep -c Running)" -ge 2 ]; do
        sleep 5
    done
    echo -e "$BBlue Access the ipfailover via port $NC"
    # Probe the service through the first two VIPs (the ones in VIP_1).
    local vip
    for vip in "${expandedset[@]:0:2}"; do
        set -x
        curl -s "$vip:$nodeport"
        set +x
    done
}
#######################################
# Scenario: ipfailover created with --preemption-strategy=nopreempt;
# verifies the "nopreempt" directive lands in keepalived.conf.
# Globals: NODE1, NODE2, VIP_1, LOCAL_REGISTRY, VERSION
#######################################
function test_preemption_strategy(){
    echo -e "$BGreen Test ipfailover with preemption strategy $NC"
    oc label node "$NODE1" router=enabled --config admin.kubeconfig
    oc label node "$NODE2" router=enabled --config admin.kubeconfig
    oc adm ipfailover ipfnopre --create --selector=router=enabled --virtual-ips="${VIP_1}" --replicas=1 --service-account=ipfailover --config admin.kubeconfig --images="$LOCAL_REGISTRY/openshift3/ose-keepalived-ipfailover:$VERSION" --preemption-strategy=nopreempt
    # Block until the keepalived pod reports Running.
    until [ "$(oc get pod --config admin.kubeconfig | grep -v deploy | grep ipf | grep -c Running)" -ge 1 ]; do
        sleep 5
    done
    echo -e "$BBlue Check the value in keepalived.conf $NC"
    # Show the preempt-related directives rendered in the pod's config.
    local pod
    pod=$(oc get po --config admin.kubeconfig | grep ipfnopre | grep -v deploy | cut -d " " -f1)
    oc exec "$pod" --config admin.kubeconfig -- grep -i preempt /etc/keepalived/keepalived.conf
}
#######################################
# Tear down the objects a scenario may have created so the next scenario
# starts from a clean slate. Deletes are best-effort: most runs only
# created a subset of these, so some deletes are expected to fail.
#######################################
function clean_up(){
    echo -e "$BGreen Clean up the pods $NC"
    local name
    for name in router-red router-blue; do
        oc delete dc,svc "$name" --config admin.kubeconfig
    done
    for name in ipf-red ipf-blue ipf ipfnopre; do
        oc delete dc "$name" --config admin.kubeconfig
    done
    oc delete all --all
    # Give the deletions time to settle before the next scenario.
    sleep 15
}
# --- main ------------------------------------------------------------------
# Set up the user/project, validate and partition the VIP pool, then run the
# three scenarios with a clean-up pass between each so they do not interfere.
prepare_user
check_ips
test_offset
clean_up
test_svc
clean_up
test_preemption_strategy
clean_up
# Final teardown of the test project created by prepare_user.
oc delete project ipf