Skip to content

Commit

Permalink
Merge pull request #273 from nulib/add-load-tests
Browse files Browse the repository at this point in the history
Add Locust load tests and instructions
  • Loading branch information
mbklein authored Nov 18, 2024
2 parents 053978b + 979fada commit f77bc55
Show file tree
Hide file tree
Showing 4 changed files with 99 additions and 1 deletion.
2 changes: 1 addition & 1 deletion .tool-versions
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
nodejs 20.15.0
java corretto-19.0.1.10.1
aws-sam-cli 1.107.0
python 3.10.5
python 3.12.2
24 changes: 24 additions & 0 deletions loadtest/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# API Load Testing

The API's load tests are written using the [Locust](https://locust.io/) load testing framework.

## Usage

### Set up dependencies
```shell
python -m venv ./.venv
. ./.venv/bin/activate
pip install -r requirements.txt
```

### Start server
```shell
API_BASE_URL=https://dcapi.rdc-staging.library.northwestern.edu/api/v2 # or the base URL of the environment you want to test
locust -f locustfile.py --host=${API_BASE_URL} --processes -1
```

### Run load tests
1. Open http://localhost:8089/ in a browser.
2. Customize test parameters (peak user count, ramp-up rate, run time, etc.).
3. Click **Start**.
4. You can click around the UI while the test is running to see statistics, graphs, etc.
73 changes: 73 additions & 0 deletions loadtest/locustfile.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
from locust import HttpUser, task, between
import random

class DcApiUser(HttpUser):
    """Simulated DC API user.

    On start, fetches a random sample of work, file-set, and collection IDs,
    then repeatedly exercises the API's search, document, IIIF, and thumbnail
    endpoints using those IDs. Task weights favor single-document loads (3)
    over paged searches (1).
    """

    # Wait 1-5 seconds between tasks to approximate human browsing pace.
    wait_time = between(1, 5)

    @task(1)
    def page_through_works(self):
        """Load the first page of works, then follow pagination for up to 4 more pages."""
        with self.client.rename_request("/search/works (paged)"):
            response = self.client.get('/search/works').json()
            for _ in range(4):
                # The last page has no next_url; stop early instead of raising KeyError.
                next_url = response.get('pagination', {}).get('next_url')
                if not next_url:
                    break
                response = self.client.get(next_url).json()

    @task(1)
    def search_works(self):
        """Run a term query against works, then follow pagination for up to 4 more pages."""
        with self.client.rename_request("/search/works with query (paged)"):
            query = { 'query': { 'term': { 'title': 'baez' } } }
            response = self.client.post('/search/works', json=query).json()
            for _ in range(4):
                # Small result sets may have no next_url; stop early instead of raising KeyError.
                next_url = response.get('pagination', {}).get('next_url')
                if not next_url:
                    break
                response = self.client.get(next_url).json()

    @task(3)
    def load_collection_as_json(self):
        """Fetch a random collection as JSON."""
        collection_id = random.choice(self.collection_ids)
        self.client.get(f'/collections/{collection_id}', name="/collections/:id")

    @task(3)
    def load_collection_as_iiif(self):
        """Fetch a random collection as a IIIF manifest."""
        collection_id = random.choice(self.collection_ids)
        self.client.get(f'/collections/{collection_id}?as=iiif', name="/collections/:id?as=iiif")

    @task(3)
    def load_work_as_json(self):
        """Fetch a random work as JSON."""
        work_id = random.choice(self.work_ids)
        self.client.get(f'/works/{work_id}', name="/works/:id")

    @task(3)
    def load_work_as_iiif(self):
        """Fetch a random work as a IIIF manifest."""
        work_id = random.choice(self.work_ids)
        self.client.get(f'/works/{work_id}?as=iiif', name="/works/:id?as=iiif")

    @task(3)
    def load_work_thumbnail(self):
        """Fetch a random work's thumbnail image."""
        work_id = random.choice(self.work_ids)
        self.client.get(f'/works/{work_id}/thumbnail', name="/works/:id/thumbnail")

    @task(3)
    def load_file_set(self):
        """Fetch a random file set as JSON."""
        file_set_id = random.choice(self.file_set_ids)
        self.client.get(f'/file-sets/{file_set_id}', name="/file-sets/:id")

    def on_start(self):
        """Seed the ID pools (works, file sets, collections) used by the tasks above."""
        response = self.random_docs('works', include='id,file_sets.id')
        self.work_ids = [doc['id'] for doc in response['data']]
        self.file_set_ids = [file_set['id'] for item in response['data'] for file_set in item['file_sets']]
        self.collection_ids = [doc['id'] for doc in self.random_docs('collections')['data']]

    def random_docs(self, type, count=100, include='id'):
        """Return the parsed search response for `count` randomly-scored docs of `type`.

        `include` is passed as the `_source_includes` query parameter to limit
        which fields the API returns.
        """
        # function_score + random_score shuffles the match_all results so each
        # simulated user works against a different sample of documents.
        query = {
            "size": count,
            "query": {
                "function_score": {
                    "query": { "match_all": {} },
                    "random_score": {}
                }
            }
        }

        response = self.client.post(f'/search/{type}?_source_includes={include}', json=query, name=f'Load {count} random {type}')
        return response.json()
1 change: 1 addition & 0 deletions loadtest/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
locust==2.32.2

0 comments on commit f77bc55

Please sign in to comment.