Skip to content

Commit

Permalink
Merge branch 'branch-24.02' into 24.02-pylibraft-build
Browse files Browse the repository at this point in the history
  • Loading branch information
cjnolet authored Jan 16, 2024
2 parents 1795d12 + 1d9adab commit 399433a
Show file tree
Hide file tree
Showing 6 changed files with 76 additions and 9 deletions.
35 changes: 35 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -354,3 +354,38 @@ If citing CAGRA, please consider the following bibtex:
primaryClass={cs.DS}
}
```

If citing the k-selection routines, please consider the following bibtex:

```bibtex
@proceedings{10.1145/3581784,
title = {SC '23: Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis},
year = {2023},
isbn = {9798400701092},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Started in 1988, the SC Conference has become the annual nexus for researchers and practitioners from academia, industry and government to share information and foster collaborations to advance the state of the art in High Performance Computing (HPC), Networking, Storage, and Analysis.},
location = {Denver, CO, USA}
}
```

If citing the nearest neighbors descent API, please consider the following bibtex:
```bibtex
@inproceedings{10.1145/3459637.3482344,
author = {Wang, Hui and Zhao, Wan-Lei and Zeng, Xiangxiang and Yang, Jianye},
title = {Fast K-NN Graph Construction by GPU Based NN-Descent},
year = {2021},
isbn = {9781450384469},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3459637.3482344},
doi = {10.1145/3459637.3482344},
abstract = {NN-Descent is a classic k-NN graph construction approach. It is still widely employed in machine learning, computer vision, and information retrieval tasks due to its efficiency and genericness. However, the current design only works well on CPU. In this paper, NN-Descent has been redesigned to adapt to the GPU architecture. A new graph update strategy called selective update is proposed. It reduces the data exchange between GPU cores and GPU global memory significantly, which is the processing bottleneck under GPU computation architecture. This redesign leads to full exploitation of the parallelism of the GPU hardware. In the meantime, the genericness, as well as the simplicity of NN-Descent, are well-preserved. Moreover, a procedure that allows to k-NN graph to be merged efficiently on GPU is proposed. It makes the construction of high-quality k-NN graphs for out-of-GPU-memory datasets tractable. Our approach is 100-250\texttimes{} faster than the single-thread NN-Descent and is 2.5-5\texttimes{} faster than the existing GPU-based approaches as we tested on million as well as billion scale datasets.},
booktitle = {Proceedings of the 30th ACM International Conference on Information \& Knowledge Management},
pages = {1929–1938},
numpages = {10},
keywords = {high-dimensional, nn-descent, gpu, k-nearest neighbor graph},
location = {Virtual Event, Queensland, Australia},
series = {CIKM '21}
}
```
8 changes: 6 additions & 2 deletions ci/build_cpp.sh
Original file line number Diff line number Diff line change
@@ -1,9 +1,13 @@
#!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Copyright (c) 2022-2024, NVIDIA CORPORATION.

set -euo pipefail

source rapids-env-update
rapids-configure-conda-channels

source rapids-configure-sccache

source rapids-date-string

export CMAKE_GENERATOR=Ninja

Expand Down
8 changes: 6 additions & 2 deletions ci/build_python.sh
Original file line number Diff line number Diff line change
@@ -1,9 +1,13 @@
#!/bin/bash
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Copyright (c) 2022-2024, NVIDIA CORPORATION.

set -euo pipefail

source rapids-env-update
rapids-configure-conda-channels

source rapids-configure-sccache

source rapids-date-string

export CMAKE_GENERATOR=Ninja

Expand Down
20 changes: 16 additions & 4 deletions cpp/include/raft/comms/detail/std_comms.hpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -81,6 +81,7 @@ class std_comms : public comms_iface {
num_ranks_(num_ranks),
rank_(rank),
subcomms_ucp_(subcomms_ucp),
own_nccl_comm_(false),
ucp_worker_(ucp_worker),
ucp_eps_(eps),
next_request_id_(0)
Expand All @@ -95,13 +96,18 @@ class std_comms : public comms_iface {
* @param rank rank of the current worker
* @param stream stream for ordering collective operations
*/
std_comms(const ncclComm_t nccl_comm, int num_ranks, int rank, rmm::cuda_stream_view stream)
std_comms(const ncclComm_t nccl_comm,
int num_ranks,
int rank,
rmm::cuda_stream_view stream,
bool own_nccl_comm = false)
: nccl_comm_(nccl_comm),
stream_(stream),
status_(stream),
num_ranks_(num_ranks),
rank_(rank),
subcomms_ucp_(false)
subcomms_ucp_(false),
own_nccl_comm_(own_nccl_comm)
{
initialize();
};
Expand All @@ -116,6 +122,11 @@ class std_comms : public comms_iface {
{
requests_in_flight_.clear();
free_requests_.clear();

if (own_nccl_comm_) {
RAFT_NCCL_TRY_NO_THROW(ncclCommDestroy(nccl_comm_));
nccl_comm_ = nullptr;
}
}

int get_size() const { return num_ranks_; }
Expand Down Expand Up @@ -172,7 +183,7 @@ class std_comms : public comms_iface {

RAFT_NCCL_TRY(ncclCommInitRank(&nccl_comm, subcomm_size, id, key));

return std::unique_ptr<comms_iface>(new std_comms(nccl_comm, subcomm_size, key, stream_));
return std::unique_ptr<comms_iface>(new std_comms(nccl_comm, subcomm_size, key, stream_, true));
}

void barrier() const
Expand Down Expand Up @@ -515,6 +526,7 @@ class std_comms : public comms_iface {
int rank_;

bool subcomms_ucp_;
bool own_nccl_comm_;

comms_ucp_handler ucp_handler_;
ucp_worker_h ucp_worker_;
Expand Down
8 changes: 7 additions & 1 deletion cpp/include/raft/matrix/detail/select_radix.cuh
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
* Copyright (c) 2022-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -1141,6 +1141,12 @@ void radix_topk_one_block(const T* in,
*
* Note, the output is NOT sorted within the groups of `k` selected elements.
*
* Reference:
* Jingrong Zhang, Akira Naruse, Xipeng Li, and Yong Wang. 2023. Parallel Top-K Algorithms on GPU:
* A Comprehensive Study and New Methods. In The International Conference for High Performance
* Computing, Networking, Storage and Analysis (SC ’23), November 12–17, 2023, Denver, CO, USA.
* ACM, New York, NY, USA. https://doi.org/10.1145/3581784.3607062
*
* @tparam T
* the type of the keys (what is being compared).
* @tparam IdxT
Expand Down
6 changes: 6 additions & 0 deletions cpp/include/raft/neighbors/nn_descent_types.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,12 @@ struct index_params : ann::index_params {
* The index contains an all-neighbors graph of the input dataset
* stored in host memory of dimensions (n_rows, n_cols)
*
* Reference:
* Hui Wang, Wan-Lei Zhao, Xiangxiang Zeng, and Jianye Yang. 2021.
* Fast k-NN Graph Construction by GPU based NN-Descent. In Proceedings of the 30th ACM
* International Conference on Information & Knowledge Management (CIKM '21). Association for
* Computing Machinery, New York, NY, USA, 1929–1938. https://doi.org/10.1145/3459637.3482344
*
* @tparam IdxT dtype to be used for constructing knn-graph
*/
template <typename IdxT>
Expand Down

0 comments on commit 399433a

Please sign in to comment.