#!/bin/bash
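#
# test-datetime-future.sh (from a fork of openshift/origin-aggregated-logging)
#
# Write a syslog message stamped 1 hour in the future directly to
# /var/log/messages, then verify that fluentd still picks it up and that
# the expected number of records arrives in the Elasticsearch .operations
# index. Not applicable when docker logs to the journal.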
if [[ $VERBOSE ]]; then
    set -ex
else
    set -e
    VERBOSE=
fi
set -o nounset
set -o pipefail
if [[ $# -ne 1 || "$1" = "false" ]]; then
    # assuming not using OPS cluster
    CLUSTER="false"
    ops=
else
    CLUSTER="$1"
    ops="-ops"
fi
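# note: "$ops" is appended to the Elasticsearch service name below, so it
# selects which cluster is queried (logging-es vs. logging-es-ops)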
ARTIFACT_DIR=${ARTIFACT_DIR:-${TMPDIR:-/tmp}/origin-aggregated-logging}
if [ ! -d $ARTIFACT_DIR ] ; then
    mkdir -p $ARTIFACT_DIR
fi
# $1 - shell command or function to call to test if wait is over -
# this command/function should return true if the condition
# has been met, or false if still waiting for condition to be met
# $2 - shell command or function to call if we timed out for error handling
# $3 - timeout in seconds - should be a multiple of $4 (interval)
# $4 - loop interval in seconds
wait_until_cmd_or_err() {
    let ii=$3
    interval=${4:-1}
    while [ $ii -gt 0 ] ; do
        $1 && break
        sleep $interval
        let ii=ii-$interval
    done
    if [ $ii -le 0 ] ; then
        $2
        return 1
    fi
    return 0
}
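# usage sketch (hypothetical condition and error handler):
#   wait_until_cmd_or_err "test -f /tmp/ready" "echo timed out waiting for /tmp/ready" 10 1
# polls "test -f /tmp/ready" once per second for up to 10 seconds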
get_running_pod() {
    # $1 is component for selector
    oc get pods -l component=$1 | awk -v sel=$1 '$1 ~ sel && $3 == "Running" {print $1}'
}
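# e.g. `get_running_pod kibana` prints the name of the running kibana pod
# (a hypothetical example: logging-kibana-1-abcde), or nothing if none is running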
# $1 - kibana pod name
# $2 - es hostname (e.g. logging-es or logging-es-ops)
# $3 - project name (e.g. logging, test, .operations, etc.)
# $4 - _count or _search
# $5 - field to search
# $6 - search string
# stdout is the JSON output from Elasticsearch
# stderr is curl errors
curl_es_from_kibana() {
    oc exec $1 -- curl --connect-timeout 1 -s -k \
       --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key \
       https://${2}:9200/${3}*/${4}\?q=${5}:${6}
}
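# e.g. (hypothetical values) `curl_es_from_kibana $kpod logging-es .operations _count ident abc123`
# requests https://logging-es:9200/.operations*/_count?q=ident:abc123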
# stdin is JSON output from Elasticsearch for _count search
# stdout is the integer count
# stderr is JSON parsing errors if bogus input (i.e. search error, empty JSON)
get_count_from_json() {
    python -c 'import json, sys; print(json.loads(sys.stdin.read())["count"])'
}
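# e.g. `echo '{"count": 3}' | get_count_from_json` prints 3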
# return true if the actual count matches the expected count, false otherwise
test_count_expected() {
    myfield=${myfield:-message}
    nrecs=`curl_es_from_kibana $kpod $myhost $myproject _count $myfield $mymessage | \
           get_count_from_json`
    test "$nrecs" = $expected
}
# display an appropriate error message if the expected count did not match
# the actual count
test_count_err() {
    myfield=${myfield:-message}
    nrecs=`curl_es_from_kibana $kpod $myhost $myproject _count $myfield $mymessage | \
           get_count_from_json`
    echo Error: found $nrecs for project $myproject message $mymessage - expected $expected
    for thetype in _count _search ; do
        curl_es_from_kibana $kpod $myhost $myproject $thetype $myfield $mymessage | python -mjson.tool
    done
}
write_and_verify_logs() {
    # expected number of matches
    expected=$1
    # generate a log message with a timestamp 1 hour in the future
    dt=`date -u +"%b %d %H:%M:%S" --date="1 hour hence"`
    uq=`uuidgen`
    # NOTE: can't use `logger` for this because we need complete control over
    # the date and format, so use sudo to write directly to /var/log/messages
    echo "$dt localhost $uq: $uq message from test-datetime-future" | sudo tee -a /var/log/messages > /dev/null
    # get current kibana pod
    kpod=`get_running_pod kibana`
    if [ -z "$kpod" ] ; then
        echo Error: no kibana pod found
        oc get pods
        return 1
    fi
    rc=0
    # wait for the message to show up in the ops log
    if myhost=logging-es${ops} myproject=.operations mymessage=$uq expected=$expected myfield=ident \
        wait_until_cmd_or_err test_count_expected test_count_err 20 ; then
        if [ -n "$VERBOSE" ] ; then
            echo good - found $expected records project .operations for $uq
        fi
    else
        rc=1
    fi
    return $rc
}
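# usage: write_and_verify_logs <expected-count> - the single call below expects
# exactly 1 record tagged with the generated uuid to reach .operations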
if [ -z "${USE_JOURNAL:-}" ] ; then
docker_uses_journal() {
# need to be able to handle cases like
# OPTIONS='--log-driver=json-file ....' # or use --log-driver=journald
grep -q "^OPTIONS='[^']*--log-driver=journald" /etc/sysconfig/docker
}
else
docker_uses_journal() {
test $USE_JOURNAL = true
}
fi
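# a hypothetical /etc/sysconfig/docker line that the grep above matches:
#   OPTIONS='--selinux-enabled --log-driver=journald'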
TEST_DIVIDER="------------------------------------------"
# make sure the host/node TZ is the same as the fluentd pod
fpod=`get_running_pod fluentd`
nodetz=`date +%z`
podtz=`oc exec $fpod -- date +%z`
if [ x"$nodetz" = x"$podtz" ] ; then
echo Good - node timezone $nodetz `date +%Z` is equal to the fluentd pod timezone
else
echo Error - node timezone $nodetz is not equal to the fluentd pod timezone $podtz
exit 1
fi
if docker_uses_journal ; then
    # no need to test the /var/log/messages code path when docker logs to the journal
    echo The rest of the test is not applicable when using the journal - skipping
    exit 0
fi
cleanup() {
    rc=$?
    if [ -n "${before:-}" -a -f "${before:-}" ] ; then
        if [ "$rc" != "0" ] ; then
            echo fluentd log before:
            cat $before
            echo ""
        fi
        rm -f $before
    fi
    if [ -n "${after:-}" -a -f "${after:-}" ] ; then
        if [ "$rc" != "0" ] ; then
            echo fluentd log after:
            cat $after
            echo ""
        fi
        rm -f $after
    fi
}
trap "cleanup" INT TERM EXIT
# save log of fluentd before test
before=`mktemp`
oc logs $fpod > $before
# write syslog message and verify in ES
write_and_verify_logs 1
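# compare the fluentd pod log from before and after the test - any new output
# makes diff exit non-zero, which fails the test under set -e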
after=`mktemp`
oc logs $fpod > $after
diff $before $after