benchmark.yml
name: Benchmark Express Performance

on:
  pull_request:
    branches:
      - main
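
# On every PR against main, benchmark ultimate-express and stock Express with
# wrk and post a comparison table as a pull request comment.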
jobs:
  benchmark:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
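
      # Reuse the npm cache between runs, keyed on package-lock.json.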
      - name: Cache Node.js dependencies
        uses: actions/cache@v3
        with:
          path: ~/.npm
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-
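
      # Both benchmark servers run on the same pinned Node.js version.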
      - name: Set up Node.js
        uses: actions/setup-node@v3
        with:
          node-version: '18'

      - name: Install dependencies
        run: npm install
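
      # wrk generates the load, bc does the ratio arithmetic, and tmux keeps
      # each server running in the background while wrk hits it.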
      - name: Cache wrk
        # The id is required so the cache-hit check in the next step resolves.
        id: cache
        uses: actions/cache@v3
        with:
          path: /usr/local/bin/wrk
          key: ${{ runner.os }}-wrk-1

      - name: Install wrk
        if: steps.cache.outputs.cache-hit != 'true'
        run: |
          sudo apt-get update
          sudo apt-get install -y bc wrk tmux
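
      # benchmark/ultimate-express.js is expected to listen on port 3000,
      # which is the target of the first wrk run below.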
      - name: Start ultimate-express server in tmux session
        run: |
          tmux new-session -d -s ultimate-session 'node benchmark/ultimate-express.js'
          sleep 3
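
      # wrk settings: 1 thread, 200 concurrent connections, 30 seconds.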
      - name: Run wrk on ultimate-express
        run: |
          echo "Benchmarking ultimate-express..."
          wrk -t 1 -c 200 -d 30 http://localhost:3000 > ultimate-express-benchmark.txt
          cat ultimate-express-benchmark.txt
          tmux kill-session -t ultimate-session
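
      # Same measurement for stock Express; benchmark/express.js is expected
      # to listen on port 3001.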
      - name: Start express server in tmux session
        run: |
          tmux new-session -d -s express-session 'node benchmark/express.js'
          sleep 3

      - name: Run wrk on express
        run: |
          echo "Benchmarking express..."
          wrk -t 1 -c 200 -d 30 http://localhost:3001 > express-benchmark.txt
          cat express-benchmark.txt
          tmux kill-session -t express-session
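
      # Parse both wrk reports and compute ultimate-express vs. express ratios
      # with bc. A wrk summary looks roughly like:
      #       Latency   643.20us  ...
      #   Requests/sec: 104312.50
      #   Transfer/sec:     18.35MB
      # so the second whitespace-separated field of each matching line holds
      # the value of interest.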
      - name: Prepare benchmark output
        id: benchmark_output
        run: |
          # Parse Requests/sec values
          ultimate_requests=$(grep "Requests/sec" ultimate-express-benchmark.txt | awk '{print $2}')
          express_requests=$(grep "Requests/sec" express-benchmark.txt | awk '{print $2}')
          requests_ratio=$(echo "scale=2; $ultimate_requests / $express_requests" | bc)

          # Parse latency values and convert to ms
          ultimate_latency_raw=$(grep "Latency" ultimate-express-benchmark.txt | awk '{print $2}')
          express_latency_raw=$(grep "Latency" express-benchmark.txt | awk '{print $2}')

          # Convert ultimate-express latency to ms if reported in us
          if [[ "$ultimate_latency_raw" == *us ]]; then
            ultimate_latency=$(echo "scale=3; ${ultimate_latency_raw%us} / 1000" | bc)
          else
            ultimate_latency=${ultimate_latency_raw%ms}
          fi

          # Convert express latency to ms if reported in us
          if [[ "$express_latency_raw" == *us ]]; then
            express_latency=$(echo "scale=3; ${express_latency_raw%us} / 1000" | bc)
          else
            express_latency=${express_latency_raw%ms}
          fi

          # Calculate latency ratio (higher means ultimate-express is faster)
          latency_ratio=$(echo "scale=2; $express_latency / $ultimate_latency" | bc)

          # Parse Transfer/sec values and strip the MB suffix
          ultimate_transfer=$(grep "Transfer/sec" ultimate-express-benchmark.txt | awk '{print $2}' | sed 's/MB//')
          express_transfer=$(grep "Transfer/sec" express-benchmark.txt | awk '{print $2}' | sed 's/MB//')
          transfer_ratio=$(echo "scale=2; $ultimate_transfer / $express_transfer" | bc)

          # Use printf to format each row with fixed column widths
          {
            echo "### Comparison Summary"
            printf "| %-18s | %-18s | %-15s | %-15s |\n" "Metric" "Ultimate Express" "Express" "Difference"
            printf "|--------------------|--------------------|-----------------|-----------------|\n"
            printf "| %-18s | %-18s | %-15s | %-15s |\n" "Requests/sec" "$ultimate_requests" "$express_requests" "${requests_ratio}x faster"
            printf "| %-18s | %-16sms | %-13sms | %-15s |\n" "Avg Latency" "$ultimate_latency" "$express_latency" "${latency_ratio}x faster"
            printf "| %-18s | %-16sMB | %-13sMB | %-15s |\n" "Transfer/sec" "$ultimate_transfer" "$express_transfer" "${transfer_ratio}x faster"
          } >> benchmark_summary.md

          # Display the summary in the Actions log
          cat benchmark_summary.md

          # Expose the summary as a multiline step output so the PR comment
          # step can read it (shell substitution does not run inside the
          # action's `body` input).
          {
            echo "summary<<SUMMARY_EOF"
            cat benchmark_summary.md
            echo "SUMMARY_EOF"
          } >> "$GITHUB_OUTPUT"
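
      # Post the comparison table as a comment on the pull request, using the
      # step output set above.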
      - name: Comment on Pull Request
        if: github.event_name == 'pull_request'
        continue-on-error: true
        uses: peter-evans/create-or-update-comment@v3
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            ## Benchmark Results

            ${{ steps.benchmark_output.outputs.summary }}