forked from kiali/kiali
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathaws-openshift.sh
executable file
·1145 lines (1008 loc) · 42.9 KB
/
aws-openshift.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/bin/bash
##############################################################################
# aws-openshift.sh
#
# Run this script to create/destroy an OpenShift 4 cluster on AWS.
# This can also optionally install Maistra.
#
# This script takes one command whose value is one of the following:
# create: starts the OpenShift environment
# destroy: deletes the OpenShift environment removing persisted data
# status: outputs the current status of the OpenShift environment
# routes: outputs all known route URLs
# services: outputs all known service endpoints (excluding internal openshift services)
# oc-env: used to configure a shell for 'oc'
# sm-install: installs service mesh into the cluster
# sm-uninstall: removes all service mesh components
# bi-install: installs bookinfo demo into the cluster
# k-uninstall: removes only kiali components
#
# This script accepts several options - see --help for details.
#
##############################################################################
########################################
# START FUNCTIONS
# Print a user-facing message, prefixed so hack-script output is easy to spot.
infomsg() {
  printf 'HACK: %s\n' "$1"
}
# Print a debug message, but only when verbose mode (_VERBOSE=true) is on.
debug() {
  case "${_VERBOSE}" in
    true) infomsg "DEBUG: $1" ;;
  esac
}
# Lazily determine the download command to use, preferring wget over curl.
# Sets DOWNLOADER (e.g. "wget -O" or "curl -L -o"); exits if neither exists.
get_downloader() {
  if [ -z "$DOWNLOADER" ] ; then
    if which wget > /dev/null 2>&1 ; then
      DOWNLOADER="wget -O"
    elif which curl > /dev/null 2>&1 ; then
      DOWNLOADER="curl -L -o"
    fi
    if [ ! "$DOWNLOADER" ] ; then
      infomsg "ERROR: You must install either curl or wget to allow downloading."
      exit 1
    fi
  fi
  debug "Downloader command to be used: ${DOWNLOADER}"
}
# Lazily determine the package installer to use, preferring dnf over yum.
# Sets INSTALLER (e.g. "sudo dnf"); exits if neither tool is available.
get_installer() {
  if [ -z "$INSTALLER" ] ; then
    if which dnf > /dev/null 2>&1 ; then
      INSTALLER="sudo dnf"
    elif which yum > /dev/null 2>&1 ; then
      INSTALLER="sudo yum"
    fi
    if [ ! "$INSTALLER" ] ; then
      infomsg "ERROR: Cannot determine your machine's installer (cannot find dnf or yum)."
      exit 1
    fi
  fi
  debug "Installer command to be used: ${INSTALLER}"
}
# Probe the cluster with 'oc status'; set _IS_RUNNING to "true" or "false".
check_is_running() {
  _IS_RUNNING="false"
  if ${OC} status > /dev/null 2>&1; then
    _IS_RUNNING="true"
  fi
}
# Log into the cluster as system:admin, discarding all command output.
oc_login() {
  ${OC} login -u system:admin >/dev/null 2>&1
}
# Determine the web console URL of the cluster and store it in CONSOLE_URL.
# Falls back to the sentinel "console-not-available" when the query fails or
# returns nothing (e.g. cluster down or console operator not ready yet).
get_console_url() {
  # On failure, discard any partial output so the empty-check below fires.
  # (Replaces the old deprecated '[ ... -o ... ]' test on "$?".)
  CONSOLE_URL="$(${OC} get console cluster -o jsonpath='{.status.consoleURL}' 2>/dev/null)" || CONSOLE_URL=""
  if [ -z "${CONSOLE_URL}" ]; then
    CONSOLE_URL="console-not-available"
  fi
}
# Ask the cluster for its API endpoint and store it in OPENSHIFT_API_SERVER_URL.
get_api_server_url() {
  OPENSHIFT_API_SERVER_URL=$(${OC} whoami --show-server)
}
# Ensure the AWS CLI has been configured on this machine.
# On success, exports AWS_PROFILE taken from the first "[profile]" section
# header in ~/.aws/credentials. Otherwise installs the AWS CLI if missing,
# tells the user how to configure it, and exits.
check_aws_config() {
  if [ -f "${HOME}/.aws/config" ] && [ -f "${HOME}/.aws/credentials" ]; then
    # First line of the credentials file is "[profile-name]" - strip the brackets.
    export AWS_PROFILE="$(head -n 1 "${HOME}/.aws/credentials" | sed ${SEDOPTIONS} -E 's/\[(.*)\]/\1/')"
  else
    if ! which aws > /dev/null 2>&1 ; then
      infomsg "You need the AWS CLI - installing the awscli package"
      get_installer
      eval ${INSTALLER} install awscli
    fi
    infomsg "===== AWS IS NOT CONFIGURED ====="
    infomsg "You must configure AWS first."
    infomsg "Run this command:"
    infomsg "  aws configure --profile=openshift-dev"
    infomsg "Once you complete that step,"
    infomsg "you can re-run this hack script."
    infomsg "================================="
    exit 1
  fi
}
# Print a human-oriented status report for the cluster: how to configure a
# shell for 'oc', cluster version, worker node count, console/API URLs,
# image registry names, known passwords, and image-push login hints.
# Reads many globals set during environment setup (OC, KUBECONFIG,
# OPENSHIFT_DOWNLOAD_PATH, AWS_KUBEADMIN_PASSWORD_FILE, ...).
get_status() {
  echo "====================================================================="
  echo "oc: ${OC}"
  echo "To configure your shell to use 'oc', set these environment variables:"
  echo " export KUBECONFIG=\"${KUBECONFIG}\""
  echo " export PATH=\"${OPENSHIFT_DOWNLOAD_PATH}:\$PATH\""
  echo "To do this, you can run this command:"
  echo " eval \$($0 oc-env)"
  echo "====================================================================="
  check_is_running
  if [ "${_IS_RUNNING}" == "true" ]; then
    # re-login if the cached session has expired
    if ! ${OC} whoami > /dev/null 2>&1; then
      oc_login
    fi
    # populate EXTERNAL/INTERNAL_IMAGE_REGISTRY, CONSOLE_URL, etc.
    get_registry_names
    check_insecure_registry
    get_console_url
    get_api_server_url
    get_worker_node_count
    echo "Version from oc command [${OC}]"
    ${OC} version
    echo "====================================================================="
    echo "Number of worker nodes in cluster: ${OPENSHIFT_WORKER_NODE_COUNT}"
    echo "====================================================================="
    echo "whoami: $(${OC} whoami)"
    echo "====================================================================="
    echo "Status from oc command [${OC}]"
    ${OC} status
    echo "====================================================================="
    echo "Console: ${CONSOLE_URL}"
    echo "API URL: ${OPENSHIFT_API_SERVER_URL}"
    echo "Image Repo: ${EXTERNAL_IMAGE_REGISTRY} (${INTERNAL_IMAGE_REGISTRY})"
    echo "====================================================================="
    echo "kubeadmin password: $(cat ${AWS_KUBEADMIN_PASSWORD_FILE})"
    echo "kiali password: kiali"
    echo "johndoe password: johndoe"
    echo "====================================================================="
    echo "To push images to the image repo you need to log in."
    echo "You can use docker or podman, and you can use kubeadmin or kiali user."
    echo "  oc login -u kubeadmin -p $(cat ${AWS_KUBEADMIN_PASSWORD_FILE}) ${OPENSHIFT_API_SERVER_URL}"
    # single quotes below are intentional: the $(oc whoami -t) must be printed
    # literally so the user runs it themselves with their own token
    echo '  docker login -u kubeadmin -p $(oc whoami -t)' ${EXTERNAL_IMAGE_REGISTRY}
    echo "or"
    echo "  oc login -u kiali -p kiali ${OPENSHIFT_API_SERVER_URL}"
    echo '  podman login --tls-verify=false -u kiali -p $(oc whoami -t)' ${EXTERNAL_IMAGE_REGISTRY}
    echo "====================================================================="
  else
    echo "Cluster appears to be down."
  fi
}
# Return 0 when a deployment named $1 exists in the control plane namespace,
# 1 otherwise. The list of deployment names is cached in the global 'apps'.
check_istio_app() {
  local expected="$1"
  apps=$(${OC} get deployment.apps -n ${CONTROL_PLANE_NAMESPACE} -o jsonpath='{range .items[*]}{.metadata.name}{" "}{end}' 2> /dev/null)
  for app in ${apps}
  do
    if [ "${app}" = "${expected}" ]; then
      return 0
    fi
  done
  return 1
}
# Query the cluster image config for the external and internal image registry
# hostnames; sets EXTERNAL_IMAGE_REGISTRY / INTERNAL_IMAGE_REGISTRY, using
# the literal string "<unknown>" when a lookup fails or returns nothing.
get_registry_names() {
  local ext
  local int
  ext=$(${OC} get image.config.openshift.io/cluster -o custom-columns=EXT:.status.externalRegistryHostnames[0] --no-headers 2>/dev/null)
  int=$(${OC} get image.config.openshift.io/cluster -o custom-columns=INT:.status.internalRegistryHostname --no-headers 2>/dev/null)
  EXTERNAL_IMAGE_REGISTRY="${ext:-<unknown>}"
  INTERNAL_IMAGE_REGISTRY="${int:-<unknown>}"
}
# make sure docker insecure registry is defined
# Checks, in order: the running dockerd args, /etc/sysconfig/docker, and
# /etc/docker/daemon.json. Emits a warning if none mention the registry;
# never modifies anything.
check_insecure_registry() {
  if pgrep -a dockerd | grep "[-]-insecure-registry.*${EXTERNAL_IMAGE_REGISTRY}" > /dev/null 2>&1; then
    debug "Docker daemon is running with --insecure-registry setting. This is good."
  elif grep "OPTIONS=.*--insecure-registry.*${EXTERNAL_IMAGE_REGISTRY}" /etc/sysconfig/docker > /dev/null 2>&1; then
    debug "/etc/sysconfig/docker has defined the insecure-registry setting. This is good."
  elif grep "insecure-registries.*${EXTERNAL_IMAGE_REGISTRY}" /etc/docker/daemon.json > /dev/null 2>&1; then
    debug "/etc/docker/daemon.json has the insecure-registry setting. This is good."
  else
    infomsg "WARNING: You must tell Docker about the insecure image registry (e.g. --insecure-registry ${EXTERNAL_IMAGE_REGISTRY})."
  fi
}
# takes as input "routeName:routeNamespace"
# Sets ROUTE_URL to "http(s)://<host>": http when the route has no TLS
# termination ("<none>"), https otherwise.
get_route_url() {
  local name
  local ns
  name=$(echo ${1} | cut -d: -f1)
  ns=$(echo ${1} | cut -d: -f2)
  local scheme="https"
  local tls
  tls=$(${OC} get route ${name} -n ${ns} -o custom-columns=T:spec.tls.termination --no-headers)
  if [ "${tls}" = "<none>" ]; then
    scheme="http"
  fi
  local hostname
  hostname=$(${OC} get route ${name} -n ${ns} -o custom-columns=H:spec.host --no-headers)
  ROUTE_URL="${scheme}://${hostname}"
}
# Print the URL of every route in the cluster, one "=====" banner per route.
print_all_route_urls() {
  local route_list
  route_list="$(${OC} get routes --all-namespaces --no-headers -o custom-columns=NAME:.metadata.name,NS:.metadata.namespace | sed ${SEDOPTIONS} 's/ */:/g')"
  local entry
  for entry in ${route_list}; do
    get_route_url ${entry}
    printf '=====\n%s\n %s\n' "${entry}" "${ROUTE_URL}"
  done
}
# TODO this needs to be fixed - the host is not right
# takes as input "serviceName:serviceNamespace"
# Sets SERVICE_ENDPOINT: "<cluster-host>:<nodePorts>" for NodePort services;
# every other service type is reported as "inaccessible" with its details.
get_service_endpoint() {
  local servicename=$(echo ${1} | cut -d: -f1)
  local servicenamespace=$(echo ${1} | cut -d: -f2)
  local data="$(${OC} get service ${servicename} -n ${servicenamespace} -o custom-columns=I:spec.clusterIP,T:spec.type,NP:spec.ports[*].nodePort,P:spec.ports[*].port --no-headers | sed ${SEDOPTIONS} 's/ */:/g')"
  local clusterIP=$(echo ${data} | cut -d: -f1)
  local servicetype=$(echo ${data} | cut -d: -f2)
  local nodeports=$(echo ${data} | cut -d: -f3)
  local ports=$(echo ${data} | cut -d: -f4)
  local host="${AWS_CLUSTER_NAME}.${AWS_BASE_DOMAIN}"
  # only NodePort services are exposed outside so we just show those
  # ${servicetype} must be quoted: when the 'oc get' fails it expands to
  # nothing and the unquoted form was a test syntax error
  # ("[: ==: unary operator expected").
  if [ "${servicetype}" == "NodePort" ]; then
    SERVICE_ENDPOINT="${host}:${nodeports}"
  else
    if [ "${nodeports}" == "<none>" ]; then
      SERVICE_ENDPOINT="inaccessible - (${servicetype}) ${clusterIP}, ports=${ports}"
    else
      SERVICE_ENDPOINT="inaccessible - (${servicetype}) ${clusterIP}, ports=${nodeports}"
    fi
  fi
}
# we do filter out services from the internal openshift* and default namespaces
# Print the endpoint of every remaining service, one banner per service.
print_all_service_endpoints() {
  local svc_list
  svc_list="$(${OC} get services --all-namespaces --no-headers -o custom-columns=NAME:.metadata.name,NS:.metadata.namespace | sed ${SEDOPTIONS} 's/ */:/g' | grep -v ':openshift*' | grep -v ':default')"
  local entry
  for entry in ${svc_list}; do
    get_service_endpoint ${entry}
    printf '=====\n%s\n %s\n' "${entry}" "${SERVICE_ENDPOINT}"
  done
}
# Install the Service Mesh (Maistra) operator via an OLM Subscription.
# When $1 is "true", additionally waits for the operator CRDs and Deployments
# to come up, creates ${CONTROL_PLANE_NAMESPACE}, and creates a
# ServiceMeshControlPlane CR (either ${MAISTRA_SMCP_YAML} or a downloaded
# example) to trigger the actual mesh installation.
install_service_mesh() {
  local create_smcp="$1"
  infomsg "Installing the Service Mesh operator..."
  cat <<EOM | ${OC} apply -f -
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: servicemeshoperator
  namespace: openshift-operators
spec:
  channel: '1.0'
  installPlanApproval: Automatic
  name: servicemeshoperator
  source: redhat-operators
  sourceNamespace: openshift-marketplace
EOM
  if [ "${create_smcp}" == "true" ] ; then
    infomsg "Waiting for the operator CRDs to come online"
    #### TODO: when 1.0.7 is released, add elasticsearches.logging.openshift.io
    for crd in servicemeshcontrolplanes.maistra.io servicemeshmemberrolls.maistra.io kialis.kiali.io jaegers.jaegertracing.io
    do
      echo -n "Waiting for $crd ..."
      while ! ${OC} get crd $crd > /dev/null 2>&1
      do
        sleep 2
        echo -n '.'
      done
      echo "done."
    done
    infomsg "Waiting for operator Deployments to be created..."
    debug "Waiting for service mesh deployment to be created..."
    local servicemesh_deployment=$(${OC} get deployment -n openshift-operators -o name 2>/dev/null | grep istio)
    while [ "${servicemesh_deployment}" == "" ]
    do
      sleep 2
      servicemesh_deployment=$(${OC} get deployment -n openshift-operators -o name 2>/dev/null | grep istio)
    done
    debug "Waiting for kiali deployment to be created..."
    local kiali_deployment=$(${OC} get deployment -n openshift-operators -o name 2>/dev/null | grep kiali)
    while [ "${kiali_deployment}" == "" ]
    do
      sleep 2
      kiali_deployment=$(${OC} get deployment -n openshift-operators -o name 2>/dev/null | grep kiali)
    done
    debug "Waiting for jaeger deployment to be created..."
    local jaeger_deployment=$(${OC} get deployment -n openshift-operators -o name 2>/dev/null | grep jaeger)
    while [ "${jaeger_deployment}" == "" ]
    do
      sleep 2
      jaeger_deployment=$(${OC} get deployment -n openshift-operators -o name 2>/dev/null | grep jaeger)
    done
    infomsg "Waiting for operator Deployments to start..."
    for op in ${servicemesh_deployment} ${kiali_deployment} ${jaeger_deployment}
    do
      echo -n "Waiting for ${op} to be ready..."
      readyReplicas="0"
      # NOTE(review): "$?" here tests the status of the most recent
      # readyReplicas assignment (the 'oc get' command substitution); on the
      # very first evaluation it is the literal assignment above, which is
      # always 0 - so the first pass is driven only by the "0" value. Confirm
      # this is intended before restructuring.
      while [ "$?" != "0" -o "$readyReplicas" == "0" ]
      do
        sleep 1
        echo -n '.'
        readyReplicas="$(${OC} get ${op} -n openshift-operators -o jsonpath='{.status.readyReplicas}' 2> /dev/null)"
      done
      echo "done."
    done
    infomsg "Creating control plane namespace: ${CONTROL_PLANE_NAMESPACE}"
    ${OC} create namespace ${CONTROL_PLANE_NAMESPACE}
    infomsg "Installing Maistra via ServiceMeshControlPlane Custom Resource."
    if [ "${MAISTRA_SMCP_YAML}" != "" ]; then
      ${OC} create -n ${CONTROL_PLANE_NAMESPACE} -f ${MAISTRA_SMCP_YAML}
    else
      debug "Using example SMCP/SMMR"
      rm -f /tmp/maistra-smcp.yaml
      get_downloader
      eval ${DOWNLOADER} /tmp/maistra-smcp.yaml "https://raw.githubusercontent.com/Maistra/istio-operator/maistra-1.0/deploy/examples/maistra_v1_servicemeshcontrolplane_cr_full.yaml"
      ${OC} create -n ${CONTROL_PLANE_NAMESPACE} -f /tmp/maistra-smcp.yaml
    fi
  else
    infomsg "The operators should be available but the Maistra SMCP CR will not be created."
  fi
}
# Count the worker nodes in the cluster, storing the result in
# OPENSHIFT_WORKER_NODE_COUNT. Yields 0 when the cluster is unreachable
# (errors are discarded). Uses 'grep -c' instead of the 'grep | wc -l' idiom.
get_worker_node_count() {
  OPENSHIFT_WORKER_NODE_COUNT="$(${OC} get nodes 2>/dev/null | grep -c worker)"
}
# Scale the cluster up (never down) so it has at least $1 worker nodes, by
# growing the replica count of the first machine set found. Refuses to
# request more than 9 additional nodes in one invocation. Only warns (and
# returns) on any problem; never exits the script.
scale_worker_nodes() {
  if [ -z "${1}" ]; then
    infomsg "ERROR: did not provide the number of worker nodes that are desired"
    return
  fi
  local desired_worker_nodes=${1}
  get_worker_node_count
  if [ "${OPENSHIFT_WORKER_NODE_COUNT}" -ge "${desired_worker_nodes}" ]; then
    infomsg "Cluster has [${OPENSHIFT_WORKER_NODE_COUNT}] worker nodes which is enough to satify the requested [${desired_worker_nodes}] worker nodes. No new nodes will be created."
  else
    # arithmetic expansion replaces the external 'expr' calls
    local additional_worker_nodes_needed=$(( desired_worker_nodes - OPENSHIFT_WORKER_NODE_COUNT ))
    infomsg "Cluster has [${OPENSHIFT_WORKER_NODE_COUNT}] worker nodes but [${desired_worker_nodes}] worker nodes are desired. [${additional_worker_nodes_needed}] new nodes will be created."
    if [ "${additional_worker_nodes_needed}" -gt "9" ]; then
      infomsg "WARNING: This hack script will not request more than 9 additional new nodes. You must do so manually."
      return
    fi
    local machineset=$(${OC} get machinesets -n openshift-machine-api -o name 2>/dev/null | head -n 1)
    if [ -z "${machineset}" ]; then
      infomsg "WARNING: Cannot determine a valid machine set - cannot create new nodes"
      return
    fi
    local current_replicas
    current_replicas=$(${OC} get ${machineset} -n openshift-machine-api -o jsonpath='{.spec.replicas}')
    # guard BEFORE the arithmetic: an empty value would silently become 0
    if [ -z "${current_replicas}" ]; then
      infomsg "WARNING: Cannot determine how many additional replicas are needed - cannot create new nodes"
      return
    fi
    local additional_replicas_needed=$(( current_replicas + additional_worker_nodes_needed ))
    debug "Will scale the machine set [${machineset}] from [${current_replicas}] to [${additional_replicas_needed}] replicas"
    ${OC} scale --replicas=${additional_replicas_needed} ${machineset} -n openshift-machine-api
  fi
}
# END FUNCTIONS
########################################
# Change to the directory where this script is and set our environment
SCRIPT_ROOT="$( cd "$(dirname "$0")" ; pwd -P )"
cd ${SCRIPT_ROOT}
# The default version of OpenShift to be downloaded
DEFAULT_OPENSHIFT_DOWNLOAD_VERSION="4.2.1"
# The default number of worker nodes that should be in the cluster.
DEFAULT_OPENSHIFT_REQUIRED_WORKER_NODES="4"
# The default domain for the AWS OpenShift cluster
DEFAULT_AWS_BASE_DOMAIN="devcluster.openshift.com"
# The name of the OpenShift cluster - Kerberos username must be the prefix
DEFAULT_AWS_CLUSTER_NAME="${USER}-dev"
# The AWS region where the cluster will be installed.
DEFAULT_AWS_REGION="us-east-1"
# Default control plane namespace - where the CRs and the Istio components are installed
DEFAULT_CONTROL_PLANE_NAMESPACE="istio-system"
# Default namespace where bookinfo is to be installed
DEFAULT_BOOKINFO_NAMESPACE="bookinfo"
# process command line args to override environment
# The first non-option word selects the command (_CMD); everything else is an
# option that overrides the corresponding environment variable.
_CMD=""
while [[ $# -gt 0 ]]; do
  key="$1"
  case $key in
    # COMMANDS
    create|start|up)
      _CMD="create"
      shift
      ;;
    destroy|delete|stop|down)
      _CMD="destroy"
      shift
      ;;
    status)
      _CMD="status"
      shift
      ;;
    routes)
      _CMD="routes"
      shift
      ;;
    services)
      _CMD="services"
      shift
      ;;
    oc-env)
      _CMD="oc-env"
      shift
      ;;
    sm-install)
      _CMD="sm-install"
      shift
      ;;
    sm-uninstall)
      _CMD="sm-uninstall"
      shift
      ;;
    bi-install)
      _CMD="bi-install"
      shift
      ;;
    k-uninstall)
      _CMD="k-uninstall"
      shift
      ;;
    # OPTIONS CONFIGURING THE HACK SCRIPT ITSELF AND THE CLUSTER
    -ar|--aws-region)
      AWS_REGION="$2"
      shift;shift
      ;;
    -bd|--base-domain)
      AWS_BASE_DOMAIN="$2"
      shift;shift
      ;;
    -cn|--cluster-name)
      AWS_CLUSTER_NAME="$2"
      shift;shift
      ;;
    -dd|--download-dir)
      OPENSHIFT_DOWNLOAD_BASEPATH="$2"
      shift;shift
      ;;
    -lp|--local-platform)
      LOCAL_PLATFORM="$2"
      shift;shift
      ;;
    -ov|--openshift-version)
      OPENSHIFT_DOWNLOAD_VERSION="$2"
      shift;shift
      ;;
    -p|--pull-secret-file)
      PULL_SECRET_FILE="$2"
      # fail fast if the given pull secret file does not exist
      if [ ! -f ${PULL_SECRET_FILE} ]; then
        infomsg "ERROR: Pull secret file is invalid: ${PULL_SECRET_FILE}"
        exit 1
      fi
      shift;shift
      ;;
    -rn|--required-nodes)
      OPENSHIFT_REQUIRED_WORKER_NODES="$2"
      shift;shift
      ;;
    -sk|--ssh-key)
      SSH_PUBLIC_KEY_FILE="$2"
      # fail fast if the given SSH public key file does not exist
      if [ ! -f ${SSH_PUBLIC_KEY_FILE} ]; then
        infomsg "ERROR: SSH public key file is invalid: ${SSH_PUBLIC_KEY_FILE}"
        exit 1
      fi
      shift;shift
      ;;
    -v|--verbose)
      _VERBOSE=true
      shift
      ;;
    # OPTIONS CONFIGURING THE SERVICE MESH AND ITS COMPONENTS
    -bin|--bookinfo-namespace)
      BOOKINFO_NAMESPACE="$2"
      shift;shift
      ;;
    -cpn|--control-plane-namespace)
      CONTROL_PLANE_NAMESPACE="$2"
      shift;shift
      ;;
    -ie|--istio-enabled)
      ISTIO_ENABLED="$2"
      shift;shift
      ;;
    -kuca|--kiali-user-cluster-admin)
      KIALI_USER_IS_CLUSTER_ADMIN="$2"
      shift;shift
      ;;
    -nw|--no-wait-for-istio)
      WAIT_FOR_ISTIO=false
      shift
      ;;
    -smcp|--maistra-smcp-yaml)
      MAISTRA_SMCP_YAML="$2"
      shift;shift
      ;;
    # HELP
    -h|--help)
      cat <<HELPMSG
$0 [option...] command
Valid options that configure the hack script itself and the cluster:
  -ar|--aws-region <name>
      The AWS region where the cluster will be deployed.
      Options: us-east-1, us-east-2, us-west-1, us-west-2, eu-west-2, eu-west-3, sa-east-1
      Default: ${DEFAULT_AWS_REGION}
  -bd|--base-domain <name>
      The base domain name for the OpenShift cluster.
      Default: ${DEFAULT_AWS_BASE_DOMAIN}
  -cn|--cluster-name <name>
      The name of the OpenShift cluster.
      Default: ${DEFAULT_AWS_CLUSTER_NAME}
  -dd|--download-dir <dir>
      Directory where the OpenShift binaries are or will be stored when downloaded.
      Default: ${HOME}/openshift
  -lp|--local-platform <platform>
      The platform indicator to determine what binaries to download.
      Default: linux (mac if MacOS is detected)
  -ov|--openshift-version <version>
      The version of OpenShift to use.
      Default: ${DEFAULT_OPENSHIFT_DOWNLOAD_VERSION}
  -p|--pull-secret-file <filename>
      Specifies the file containing your Image pull secret.
      You can download it from https://cloud.redhat.com/openshift/install/metal/user-provisioned
      Used only for the 'create' command.
      Default: not set (you will be prompted for the pull secret json at startup if it does not exist yet)
  -rn|--required-nodes <node count>
      The number of required worker nodes in the cluster. If the number of worker nodes in the cluster is less than
      the given value, new nodes will be requested to bring it up to the number of nodes specified by the given value.
      Default: ${DEFAULT_OPENSHIFT_REQUIRED_WORKER_NODES}
      Used only for the 'create' command.
  -sk|--ssh-key <path to SSH public key file>
      If provided, this is a file containing the SSH public key that will be used when performing installation
      debugging. This is optional, but without it you may have difficulty debugging installation errors.
      Used only for the 'create' command.
  -v|--verbose
      Enable logging of debug messages from this script.
Valid options that configure the service mesh components:
  -bin|--bookinfo-namespace
      The namespace where the bookinfo demo will be installed.
      Default: ${DEFAULT_BOOKINFO_NAMESPACE}
      Used only for the 'bi-install' command.
  -cpn|--control-plane-namespace
      The namespace where the service mesh components are or will be installed. The operator CRs are installed here also.
      Default: ${DEFAULT_CONTROL_PLANE_NAMESPACE}
  -ie|--istio-enabled (true|false)
      When set to true, Maistra will be installed in OpenShift.
      Default: true
      Used only for the 'create' command.
  -kuca|--kiali-user-cluster-admin (true|false)
      Determines if the "kiali" OpenShift user is to be given cluster admin rights.
      Default: not set - you will be prompted during startup
      Used only for the 'create' command.
  -nw|--no-wait-for-istio
      When specified, this script will not wait for Maistra to be up and running before exiting.
      This will be ignored when --istio-enabled is false.
      Used only for the 'create' command.
  -smcp|--maistra-smcp-yaml <file or url>
      Points to the YAML file that defines the ServiceMeshControlPlane custom resource which declares what to install.
      If not defined, a basic one will be used.
      Used only for the 'create' command.
The command must be one of:
  * create: Starts OpenShift and optionally installs Maistra.
  * destroy: Stops OpenShift and removes all persistent data.
  * status: Information about the OpenShift cluster.
  * routes: Outputs URLs for all known routes.
  * services: Outputs URLs for all known service endpoints (excluding internal openshift services).
  * oc-env: Used to configure a shell for 'oc'.
  * sm-install: Installs Service Mesh into the cluster.
  * sm-uninstall: Removes Service Mesh from the cluster.
  * bi-install: Installs Bookinfo demo into the cluster.
  * k-uninstall: Removes Kiali from the cluster.
HELPMSG
      exit 1
      ;;
    *)
      infomsg "Unknown argument [$key]. Aborting."
      exit 1
      ;;
  esac
done
########################################
# Environment setup section starts here.
# This avoids some timeout problems during cluster create
unset SSH_AUTH_SOCK
#--------------------------------------------------------------
# Variables below have values that can be overridden by
# command line options (see above) or by environment variables.
#--------------------------------------------------------------
# if sed is gnu-sed then set option to work in posix mode to be compatible with non-gnu-sed versions
if sed --posix 's/ / /' < /dev/null > /dev/null 2>&1 ; then
  SEDOPTIONS="--posix"
fi
# the OpenShift binaries are available for linux and mac platforms
if [ "${LOCAL_PLATFORM}" == "" ]; then
  LOCAL_PLATFORM="linux"
  if [ "$(uname | tr '[:upper:]' '[:lower:]')" == "darwin" ]; then
    LOCAL_PLATFORM="mac"
  fi
fi
debug "The local operating system platform: ${LOCAL_PLATFORM}"
# This is where you want the OpenShift binaries to go
OPENSHIFT_DOWNLOAD_BASEPATH="${OPENSHIFT_DOWNLOAD_BASEPATH:-${HOME}/openshift}"
# If ISTIO_ENABLED=true, then a version of Maistra will be installed for you.
ISTIO_ENABLED="${ISTIO_ENABLED:-true}"
# By default, wait for Maistra to be up and running before the script ends.
WAIT_FOR_ISTIO="${WAIT_FOR_ISTIO:-true}"
# Settings for the install-config.yaml configuration settings
AWS_BASE_DOMAIN="${AWS_BASE_DOMAIN:-${DEFAULT_AWS_BASE_DOMAIN}}"
AWS_CLUSTER_NAME="${AWS_CLUSTER_NAME:-${DEFAULT_AWS_CLUSTER_NAME}}"
AWS_REGION="${AWS_REGION:-${DEFAULT_AWS_REGION}}"
# The minimum number of worker nodes the cluster needs to have
OPENSHIFT_REQUIRED_WORKER_NODES=${OPENSHIFT_REQUIRED_WORKER_NODES:-${DEFAULT_OPENSHIFT_REQUIRED_WORKER_NODES}}
# Namespaces for the components
CONTROL_PLANE_NAMESPACE="${CONTROL_PLANE_NAMESPACE:-${DEFAULT_CONTROL_PLANE_NAMESPACE}}"
BOOKINFO_NAMESPACE="${BOOKINFO_NAMESPACE:-${DEFAULT_BOOKINFO_NAMESPACE}}"
#--------------------------------------------------------------
# Variables below have values derived from the variables above.
# These variables below are not meant for users to change.
#--------------------------------------------------------------
OPENSHIFT_DOWNLOAD_VERSION="${OPENSHIFT_DOWNLOAD_VERSION:-${DEFAULT_OPENSHIFT_DOWNLOAD_VERSION}}"
# each OpenShift version gets its own download subdirectory
OPENSHIFT_DOWNLOAD_PATH="${OPENSHIFT_DOWNLOAD_BASEPATH}/${OPENSHIFT_DOWNLOAD_VERSION}"
mkdir -p "${OPENSHIFT_DOWNLOAD_PATH}" 2> /dev/null
if [ ! -d "${OPENSHIFT_DOWNLOAD_PATH}" ]; then
  infomsg "ERROR: Cannot find or create the download directory. It is currently set to: ${OPENSHIFT_DOWNLOAD_PATH}"
  exit 1
fi
# openshift-install writes its state and auth files under this directory
OPENSHIFT_INSTALL_PATH="${OPENSHIFT_DOWNLOAD_PATH}/install_dir"
mkdir -p "${OPENSHIFT_INSTALL_PATH}"
debug "The OpenShift binaries will be downloaded into directory: ${OPENSHIFT_DOWNLOAD_PATH}"
debug "The OpenShift installer install directory will be: ${OPENSHIFT_INSTALL_PATH}"
# Determine where to get the binaries and their full paths and how to execute them.
OPENSHIFT_INSTALLER_DOWNLOAD_LOCATION="https://mirror.openshift.com/pub/openshift-v4/clients/ocp/${OPENSHIFT_DOWNLOAD_VERSION}/openshift-install-${LOCAL_PLATFORM}-${OPENSHIFT_DOWNLOAD_VERSION}.tar.gz"
OPENSHIFT_CLIENT_DOWNLOAD_LOCATION="https://mirror.openshift.com/pub/openshift-v4/clients/ocp/${OPENSHIFT_DOWNLOAD_VERSION}/openshift-client-${LOCAL_PLATFORM}-${OPENSHIFT_DOWNLOAD_VERSION}.tar.gz"
OPENSHIFT_INSTALLER_EXE="${OPENSHIFT_DOWNLOAD_PATH}/openshift-install"
OC="${OPENSHIFT_DOWNLOAD_PATH}/oc"
# credentials files that openshift-install generates during 'create'
AWS_KUBEADMIN_PASSWORD_FILE="${OPENSHIFT_INSTALL_PATH}/auth/kubeadmin-password"
AWS_KUBECONFIG="${OPENSHIFT_INSTALL_PATH}/auth/kubeconfig"
check_aws_config
if [ "$_VERBOSE" == "true" ]; then
  LOG_LEVEL_ARG="--log-level debug"
fi
# Environment setup section stops here.
########################################
debug "ENVIRONMENT:
command=$_CMD
AWS_BASE_DOMAIN=$AWS_BASE_DOMAIN
AWS_CLUSTER_NAME=$AWS_CLUSTER_NAME
AWS_KUBEADMIN_PASSWORD_FILE=$AWS_KUBEADMIN_PASSWORD_FILE
AWS_KUBECONFIG=$AWS_KUBECONFIG
AWS_PROFILE=$AWS_PROFILE
AWS_REGION=$AWS_REGION
BOOKINFO_NAMESPACE=$BOOKINFO_NAMESPACE
CONTROL_PLANE_NAMESPACE=$CONTROL_PLANE_NAMESPACE
ISTIO_ENABLED=$ISTIO_ENABLED
LOCAL_PLATFORM=$LOCAL_PLATFORM
MAISTRA_SMCP_YAML=$MAISTRA_SMCP_YAML
OC=$OC
OPENSHIFT_CLIENT_DOWNLOAD_LOCATION=$OPENSHIFT_CLIENT_DOWNLOAD_LOCATION
OPENSHIFT_DOWNLOAD_BASEPATH=$OPENSHIFT_DOWNLOAD_BASEPATH
OPENSHIFT_DOWNLOAD_PATH=$OPENSHIFT_DOWNLOAD_PATH
OPENSHIFT_DOWNLOAD_VERSION=$OPENSHIFT_DOWNLOAD_VERSION
OPENSHIFT_INSTALL_PATH=$OPENSHIFT_INSTALL_PATH
OPENSHIFT_INSTALLER_DOWNLOAD_LOCATION=$OPENSHIFT_INSTALLER_DOWNLOAD_LOCATION
OPENSHIFT_INSTALLER_EXE=$OPENSHIFT_INSTALLER_EXE
OPENSHIFT_REQUIRED_WORKER_NODES=$OPENSHIFT_REQUIRED_WORKER_NODES
SEDOPTIONS=$SEDOPTIONS
SSH_PUBLIC_KEY_FILE=$SSH_PUBLIC_KEY_FILE
"
# Download the installer if we do not have it yet
if [ -f "${OPENSHIFT_INSTALLER_EXE}" ]; then
  # A binary already exists - verify its version matches the requested one.
  _existingVersion=$(${OPENSHIFT_INSTALLER_EXE} version | head -n 1 | sed ${SEDOPTIONS} 's/^.*v\([0-9.]*\).*/\1/')
  # normalize the requested version to just the leading digits-and-dots
  _desiredVersion=$(echo -n ${OPENSHIFT_DOWNLOAD_VERSION} | sed ${SEDOPTIONS} 's/^\([0-9.]*\).*/\1/')
  if [ "${_existingVersion}" != "${_desiredVersion}" ]; then
    infomsg "===== WARNING ====="
    infomsg "You already have the OpenShift installer but it does not match the version you want."
    infomsg "This appears incorrect: ${OPENSHIFT_INSTALLER_EXE}"
    infomsg "The version of the installer is: ${_existingVersion}"
    infomsg "You asked for version: ${_desiredVersion} (${OPENSHIFT_DOWNLOAD_VERSION})"
    infomsg "===== WARNING ====="
    exit 1
  fi
  debug "Existing OpenShift installer version (${_existingVersion}) matches the desired version (${_desiredVersion}; download version ${OPENSHIFT_DOWNLOAD_VERSION})"
else
  infomsg "Downloading OpenShift installer to ${OPENSHIFT_DOWNLOAD_PATH}"
  get_downloader
  eval ${DOWNLOADER} "${OPENSHIFT_DOWNLOAD_PATH}/installer.tar.gz" "${OPENSHIFT_INSTALLER_DOWNLOAD_LOCATION}"
  if [ "$?" != "0" ]; then
    infomsg "===== WARNING ====="
    infomsg "Could not download the OpenShift installer for the version you want."
    infomsg "Make sure this is valid: ${OPENSHIFT_INSTALLER_DOWNLOAD_LOCATION}"
    infomsg "===== WARNING ====="
    # remove the partial download so the next run starts clean
    rm "${OPENSHIFT_DOWNLOAD_PATH}/installer.tar.gz"
    exit 1
  fi
  tar xvfz "${OPENSHIFT_DOWNLOAD_PATH}/installer.tar.gz" -C "${OPENSHIFT_DOWNLOAD_PATH}"
  if [ ! -f "${OPENSHIFT_INSTALLER_EXE}" ]; then
    infomsg "===== WARNING ====="
    infomsg "Failed to extract the OpenShift installer."
    infomsg "Expecting: ${OPENSHIFT_INSTALLER_EXE}"
    infomsg "Make sure this is valid: ${OPENSHIFT_DOWNLOAD_PATH}/installer.tar.gz"
    infomsg "===== WARNING ====="
    exit 1
  fi
fi
debug "OpenShift installer that will be used: ${OPENSHIFT_INSTALLER_EXE}"
debug "$(${OPENSHIFT_INSTALLER_EXE} version)"
# Download the client tarball if we do not have it yet
if [ -f "${OC}" ]; then
  # A binary already exists - verify its version matches the requested one.
  _existingVersion=$(${OC} version --client | head -n 1 | sed ${SEDOPTIONS} 's/^[^0-9]*\([0-9.]*\).*/\1/')
  # normalize the requested version to just the leading digits-and-dots
  _desiredVersion=$(echo -n ${OPENSHIFT_DOWNLOAD_VERSION} | sed ${SEDOPTIONS} 's/^\([0-9.]*\).*/\1/')
  if [ "${_existingVersion}" != "${_desiredVersion}" ]; then
    infomsg "===== WARNING ====="
    infomsg "You already have the OpenShift oc client but it does not match the version you want."
    infomsg "This appears incorrect: ${OC}"
    infomsg "The version of the oc client is: ${_existingVersion}"
    infomsg "You asked for version: ${_desiredVersion} (${OPENSHIFT_DOWNLOAD_VERSION})"
    infomsg "===== WARNING ====="
    exit 1
  fi
  debug "Existing OpenShift oc client version (${_existingVersion}) matches the desired version (${_desiredVersion}; download version ${OPENSHIFT_DOWNLOAD_VERSION})"
else
  infomsg "Downloading OpenShift oc client to ${OPENSHIFT_DOWNLOAD_PATH}"
  get_downloader
  eval ${DOWNLOADER} "${OPENSHIFT_DOWNLOAD_PATH}/client.tar.gz" "${OPENSHIFT_CLIENT_DOWNLOAD_LOCATION}"
  if [ "$?" != "0" ]; then
    infomsg "===== WARNING ====="
    infomsg "Could not download the OpenShift oc client for the version you want."
    infomsg "Make sure this is valid: ${OPENSHIFT_CLIENT_DOWNLOAD_LOCATION}"
    infomsg "===== WARNING ====="
    # remove the partial download so the next run starts clean
    rm "${OPENSHIFT_DOWNLOAD_PATH}/client.tar.gz"
    exit 1
  fi
  tar xvfz "${OPENSHIFT_DOWNLOAD_PATH}/client.tar.gz" -C "${OPENSHIFT_DOWNLOAD_PATH}"
  if [ ! -f "${OC}" ]; then
    infomsg "===== WARNING ====="
    infomsg "Failed to extract the OpenShift oc client."
    infomsg "Expecting: ${OC}"
    infomsg "Make sure this is valid: ${OPENSHIFT_DOWNLOAD_PATH}/client.tar.gz"
    infomsg "===== WARNING ====="
    exit 1
  fi
fi
debug "OpenShift oc client that will be used: ${OC}"
debug "$(${OC} version --client)"
# run all remaining commands from the download directory, talking to the
# cluster described by the generated kubeconfig
cd ${OPENSHIFT_DOWNLOAD_PATH}
export KUBECONFIG="${AWS_KUBECONFIG}"
# "create" command: bring up a brand new OpenShift cluster in AWS.
if [ "$_CMD" = "create" ]; then
# check_is_running sets _IS_RUNNING; nothing to do if a cluster is already up.
check_is_running
if [ "${_IS_RUNNING}" == "true" ]; then
infomsg "The OpenShift cluster is already up - nothing to do."
exit 0
fi
infomsg "Starting the OpenShift cluster..."
# Obtain the Red Hat pull secret - from a file when provided, otherwise
# prompt the user interactively.
if [ -f "${PULL_SECRET_FILE}" ]; then
_PULL_SECRET="$(cat "${PULL_SECRET_FILE}")"
else
# read -r preserves backslashes in the secret; -s keeps it off the terminal.
# "$val" is quoted so the secret is not subject to word-splitting/globbing.
_PULL_SECRET="$(read -rsp 'Enter your pull secret - get it from https://cloud.redhat.com/openshift/install/aws/installer-provisioned : ' val && echo -n "$val")"
# Echo a masked version so the user can see that something was entered.
echo "${_PULL_SECRET}" | sed ${SEDOPTIONS} 's/./*/g'
fi
# Optionally embed the user's public SSH key into the install config.
if [ "${SSH_PUBLIC_KEY_FILE}" != "" ]; then
_SSH_KEY_YAML="sshKey: $(cat "${SSH_PUBLIC_KEY_FILE}")"
fi
# Generate the install-config.yaml consumed by "openshift-install create cluster".
# The heredoc delimiter is deliberately unquoted so variables are expanded.
# Note: the YAML nesting (name under metadata, region under platform.aws) is
# required - without it the installer rejects the config.
cat <<EOM > "${OPENSHIFT_INSTALL_PATH}/install-config.yaml"
apiVersion: v1
baseDomain: ${AWS_BASE_DOMAIN}
metadata:
  name: ${AWS_CLUSTER_NAME}
platform:
  aws:
    region: ${AWS_REGION}
pullSecret: '${_PULL_SECRET}'
${_SSH_KEY_YAML}
EOM
# Run the IPI installer - this provisions the AWS infrastructure and brings
# up the cluster. LOG_LEVEL_ARG stays unquoted on purpose (it may be empty).
if ! ${OPENSHIFT_INSTALLER_EXE} ${LOG_LEVEL_ARG} create cluster --dir "${OPENSHIFT_INSTALL_PATH}"; then
infomsg "===== ERROR ====="
infomsg "ERROR: Failed to start the OpenShift cluster."
infomsg "If you get an error that looks like this:"
infomsg " Tried to create resource record set [name='XXX', type='A'] but it already exists"
infomsg "then you need to remove that record following these instructions:"
infomsg " https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-deleting.html"
infomsg "===== ERROR ====="
exit 1
fi
oc_login
get_console_url
echo -n "Waiting for OpenShift console at ${CONSOLE_URL} ..."
# Poll until the console answers with HTTP 200. Match only on the status
# code: HTTP/2 status lines (e.g. "HTTP/2 200") carry no "OK" reason phrase,
# so the previous pattern "200[[:space:]]*OK" would loop forever whenever
# curl negotiated HTTP/2.
while ! curl --head -s -k ${CONSOLE_URL} | head -n 1 | grep -qE "HTTP/[0-9.]+[[:space:]]+200"
do
sleep 5
# The console route can change while the cluster settles - refresh the URL.
get_console_url
echo -n "."
done
echo "Done."
infomsg "OpenShift is ready and the console is accessible."
# see https://docs.openshift.com/container-platform/4.1/authentication/identity_providers/configuring-htpasswd-identity-provider.html
# we need to be admin in order to create the htpasswd oauth and users
infomsg "Creating users 'kiali' and 'johndoe'"
# The heredoc below must be valid YAML - the nesting under metadata/data/spec
# is required, otherwise "oc apply" rejects the documents.
cat <<EOM | ${OC} apply -f -
---
# Secret containing two htpasswd credentials:
# kiali:kiali
# johndoe:johndoe
apiVersion: v1
metadata:
  name: htpasswd
  namespace: openshift-config
data:
  htpasswd: a2lhbGk6JDJ5JDA1JHhrV1NNY0ZIUXkwZ2RDMUltLnJDZnVsV2NuYkhDQ2w2bDhEdjFETWEwV1hLRzc4U2tVcHQ2CmpvaG5kb2U6JGFwcjEkRzhhL2x1My4kRnc5RjJUczFKNUFKRUNJc05KN1RWLgo=
kind: Secret
type: Opaque
---
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: htpasswd
    type: HTPasswd
    mappingMethod: claim
    htpasswd:
      fileData:
        name: htpasswd
EOM
# Interactively ask whether the kiali user should get the cluster-admin role,
# unless KIALI_USER_IS_CLUSTER_ADMIN was already set by the caller/env.
# 'select' renders a numbered menu ("1) Yes 2) No") and loops until a valid
# choice is made.
if [ "${KIALI_USER_IS_CLUSTER_ADMIN}" == "" ]; then
infomsg 'Do you want the kiali user to be assigned the cluster-admin role?'
infomsg 'Select "1" for Yes and "2" for No:'
select yn in "Yes" "No"; do
case $yn in
Yes )
KIALI_USER_IS_CLUSTER_ADMIN="true"
break;;
No )
KIALI_USER_IS_CLUSTER_ADMIN="false"
break;;
esac
done
fi
# The SMCP resource is attempted in either case - installing Maistra should
# work with system:admin logged in even when kiali is not cluster-admin.
_CREATE_SMCP_RESOURCE="true"
if [ "${KIALI_USER_IS_CLUSTER_ADMIN}" == "true" ]; then
infomsg "Will assign the cluster-admin role to the kiali user."
${OC} adm policy add-cluster-role-to-user cluster-admin kiali
else
infomsg "Kiali user will not be assigned the cluster-admin role."
fi
# Make sure the image registry is exposed via the default route
if [ "$(${OC} get config.imageregistry.operator.openshift.io/cluster -o jsonpath='{.spec.defaultRoute}')" == "true" ]; then
debug "The image registry operator has exposed the internal image registry"
else
infomsg "Manually patching image registry operator to expose the internal image registry"
${OC} patch config.imageregistry.operator.openshift.io/cluster --patch '{"spec":{"defaultRoute":true}}' --type=merge
fi
# Ask for enough nodes that will be required for Maistra/Service Mesh/Kiali to run
# (scale_worker_nodes is defined elsewhere in this script).
scale_worker_nodes ${OPENSHIFT_REQUIRED_WORKER_NODES}
# Install Maistra
# Decide based on whether a ServiceMeshControlPlane already exists and on
# whether the user asked for Istio/Maistra at all.
if ${OC} get -n ${CONTROL_PLANE_NAMESPACE} ServiceMeshControlPlane > /dev/null 2>&1; then
if [ "${ISTIO_ENABLED}" == "true" ] ; then
infomsg "It appears Maistra has already been installed - will not attempt to do so again."
else
infomsg "You asked that Maistra not be enabled, but it appears to have already been installed. You might want to uninstall it."
fi
else
if [ "${ISTIO_ENABLED}" == "true" ] ; then
install_service_mesh "${_CREATE_SMCP_RESOURCE}"
else
infomsg "You asked that Maistra not be enabled - neither the operators nor a SMCP CR will be created."
fi
fi
# If Maistra is enabled, it should be installing now - if we need to, wait for it to finish
if [ "${ISTIO_ENABLED}" == "true" ] ; then
if [ "${WAIT_FOR_ISTIO}" == "true" ]; then
infomsg "Wait for Maistra to fully start (this is going to take a while)..."
infomsg "Waiting for Maistra Deployments to be created."
# Control-plane components Maistra is expected to deploy; check_istio_app
# (defined elsewhere in this script) reports whether each exists yet.
_EXPECTED_APPS=(istio-citadel prometheus istio-galley istio-policy istio-telemetry istio-pilot istio-egressgateway istio-ingressgateway istio-sidecar-injector)
for expected in ${_EXPECTED_APPS[@]}
do
echo -n "Waiting for $expected ..."
# Poll every 5 seconds until the component's Deployment has been created.
while ! check_istio_app $expected
do
sleep 5
echo -n '.'
done
echo "done."
done
infomsg "Waiting for Maistra Deployments to start..."
# For every Deployment in the control plane namespace, wait until it reports
# at least one ready replica.
for app in $(${OC} get deployment.apps -n ${CONTROL_PLANE_NAMESPACE} -o jsonpath='{range .items[*]}{.metadata.name}{" "}{end}' 2> /dev/null)
do
echo -n "Waiting for ${app} to be ready..."
# An empty string (status not yet populated, or a transient oc failure)
# counts as "not ready". The previous test ('"$readyReplicas" == "0"'
# combined with the $? of the assignment) could exit the loop prematurely
# when readyReplicas came back empty.
readyReplicas=""
while [ -z "${readyReplicas}" ] || [ "${readyReplicas}" = "0" ]
do
sleep 1
echo -n '.'
# Use the fully-qualified "deployment.apps" (matching the query above);
# "deployment.app" is not a valid resource.group name.
readyReplicas="$(${OC} get deployment.apps/${app} -n ${CONTROL_PLANE_NAMESPACE} -o jsonpath='{.status.readyReplicas}' 2> /dev/null)"
done
echo "done."
done