feat(transformer-nvidia): copy the parameters into page-locked memory first
Signed-off-by: YdrMaster <[email protected]>
Showing 8 changed files with 188 additions and 8 deletions.
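Background for the change: a host-to-device copy from ordinary pageable memory is staged by the CUDA driver through an internal pinned buffer, while a copy from page-locked (pinned) memory is a direct DMA transfer that can also run asynchronously. Copying the parameters into pinned memory once, right after loading, therefore makes every later upload to the GPU cheaper; the upload to device memory itself is not part of this commit.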
Cargo.toml (workspace):

@@ -5,6 +5,7 @@ members = [
     "model-parameters",
     "tokenizer",
     "transformer-cpu",
+    "transformer-nvidia",
     "xtask",
 ]
 resolver = "2"
model-parameters (new file):

@@ -0,0 +1,77 @@
use crate::{memory::Layer, ConfigJson, Llama2, Memory, Storage};
use std::{ops::Deref, ptr::NonNull, slice::from_raw_parts_mut, sync::Arc};
use tensor::Tensor;

/// Supplies the single buffer that `realloc_with` packs all tensors into.
/// Implementations decide where that memory lives (heap, pinned host memory, …).
pub trait Allocator {
    unsafe fn allocate(&self, size: usize) -> NonNull<u8>;
    unsafe fn deallocate(&self, ptr: NonNull<u8>);
}

/// One contiguous buffer holding every tensor of the model.
struct TotalStorage<A: Allocator> {
    ptr: NonNull<u8>,
    len: usize,
    allocator: A,
}

impl<A: Allocator> Deref for TotalStorage<A> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        unsafe { std::slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
    }
}

impl<A: Allocator> Drop for TotalStorage<A> {
    fn drop(&mut self) {
        // Return the buffer through the same allocator that produced it.
        unsafe { self.allocator.deallocate(self.ptr) }
    }
}

impl Memory {
    pub fn realloc_with(src: &dyn Llama2, allocator: impl Allocator + 'static) -> Self {
        let len = src.size();
        let ptr = unsafe { allocator.allocate(len) };
        let total = Arc::new(TotalStorage {
            ptr,
            len,
            allocator,
        });

        /// Appends tensors one after another into the shared buffer.
        struct Writer<A: Allocator> {
            total: Arc<TotalStorage<A>>,
            offset: usize,
        }
        impl<A: Allocator + 'static> Writer<A> {
            fn write(&mut self, tensor: Tensor<Storage>) -> Tensor<Storage> {
                let offset = self.offset;
                let ptr = self.total.ptr.as_ptr();
                let len = tensor.bytes_size();
                self.offset += len;
                // Copy (and re-lay-out, if needed) the tensor into the buffer…
                unsafe { tensor.reform_to_raw(from_raw_parts_mut(ptr.add(offset), len)) };
                // …and hand back a tensor that references the new location.
                Tensor::new(
                    tensor.data_type(),
                    tensor.shape(),
                    Storage::new(self.total.clone(), offset, len),
                )
            }
        }

        let mut writer = Writer { total, offset: 0 };
        Self {
            config: ConfigJson::from(src),
            embed_tokens: writer.write(src.embed_tokens()),
            layers: (0..src.num_hidden_layers())
                .map(|layer| Layer {
                    input_layernorm: writer.write(src.input_layernorm(layer)),
                    w_qkv: writer.write(src.w_qkv(layer)),
                    self_attn_o_proj: writer.write(src.self_attn_o_proj(layer)),
                    post_attention_layernorm: writer.write(src.post_attention_layernorm(layer)),
                    mlp_gate_up: writer.write(src.mlp_gate(layer)),
                    mlp_down: writer.write(src.mlp_down(layer)),
                })
                .collect(),
            model_norm: writer.write(src.model_norm()),
            lm_head: writer.write(src.lm_head()),
        }
    }
}
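The Allocator trait keeps model-parameters free of any CUDA dependency: the caller decides where the single packed buffer lives. For illustration, here is a minimal sketch of a pageable-heap implementation built on std::alloc that could drive realloc_with in a CPU-only test; the HeapAllocator name, the 64-byte alignment, and the stored-size trick are assumptions, not part of this commit:

use model_parameters::{Allocator, Memory};
use std::{
    alloc::{alloc, dealloc, Layout},
    ptr::NonNull,
    sync::atomic::{AtomicUsize, Ordering},
};

/// Hypothetical allocator: plain heap memory instead of pinned memory.
/// It remembers the requested size so `deallocate` can rebuild the layout;
/// `realloc_with` performs exactly one allocate/deallocate pair, so one slot suffices.
struct HeapAllocator(AtomicUsize);

impl Allocator for HeapAllocator {
    unsafe fn allocate(&self, size: usize) -> NonNull<u8> {
        self.0.store(size, Ordering::Relaxed);
        let layout = Layout::from_size_align(size, 64).unwrap(); // alignment is an assumption
        NonNull::new(alloc(layout)).expect("out of memory")
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>) {
        let layout = Layout::from_size_align(self.0.load(Ordering::Relaxed), 64).unwrap();
        dealloc(ptr.as_ptr(), layout);
    }
}

// Usage sketch: let packed = Memory::realloc_with(&src, HeapAllocator(AtomicUsize::new(0)));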
transformer-nvidia/Cargo.toml (new file):

@@ -0,0 +1,20 @@
[package]
name = "transformer-nvidia"
version = "0.0.0"
edition = "2021"
authors = ["YdrMaster <[email protected]>"]

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
common = { path = "../common" }
tensor = { path = "../tensor" }
model-parameters = { path = "../model-parameters" }
# cuda = { git = "https://github.com/YdrMaster/cuda-bench" }
cuda = { path = "../../cuda-bench/cuda" }

[dev-dependencies]
tokenizer = { path = "../tokenizer" }

[build-dependencies]
find_cuda_helper = "0.2"
transformer-nvidia/build.rs (new file):

@@ -0,0 +1,5 @@
// Emit a custom cfg flag when a CUDA toolkit is present, so that
// GPU-specific code can be compiled conditionally.
fn main() {
    if find_cuda_helper::find_cuda_root().is_some() {
        println!("cargo:rustc-cfg=detected_cuda");
    }
}
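On machines without a CUDA toolkit the flag is never emitted, and the crate-wide #![cfg(detected_cuda)] below makes the crate compile to an empty library. Items inside the crate can be gated on the same flag individually; an illustrative sketch (not part of the diff):

// Illustrative: item-level gating on the cfg emitted by build.rs.
#[cfg(detected_cuda)]
fn upload_to_gpu() { /* CUDA-specific path */ }

#[cfg(not(detected_cuda))]
fn upload_to_gpu() { /* fallback when no toolkit was found at build time */ }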
transformer-nvidia/src/lib.rs (new file):

@@ -0,0 +1,55 @@
#![cfg(detected_cuda)]

use cuda::{driver, Context};
use std::{
    ptr::{null_mut, NonNull},
    sync::Arc,
};

pub extern crate model_parameters;

/// Allocates page-locked (pinned) host memory in a CUDA context, so that
/// later host-to-device copies can run as direct DMA transfers.
struct HostAllocator(Arc<Context>);

impl model_parameters::Allocator for HostAllocator {
    #[inline]
    unsafe fn allocate(&self, size: usize) -> NonNull<u8> {
        let mut ptr = null_mut();
        self.0.apply(|_| driver!(cuMemHostAlloc(&mut ptr, size, 0)));
        NonNull::new(ptr.cast()).unwrap()
    }

    #[inline]
    unsafe fn deallocate(&self, ptr: NonNull<u8>) {
        self.0
            .apply(|_| driver!(cuMemFreeHost(ptr.as_ptr().cast())));
    }
}

#[test]
fn test_load() {
    use model_parameters::{Memory, SafeTensorError};
    use std::{io::ErrorKind::NotFound, time::Instant};

    cuda::init();
    // Skip the test quietly on machines without a CUDA device.
    let Some(dev) = cuda::Device::fetch() else {
        return;
    };

    let t0 = Instant::now();
    let safetensors = Memory::load_safetensors("../../TinyLlama-1.1B-Chat-v1.0_F16");
    let t1 = Instant::now();
    println!("mmap {:?}", t1 - t0);

    // Skip quietly if the model directory is absent.
    let safetensors = match safetensors {
        Ok(m) => m,
        Err(SafeTensorError::Io(e)) if e.kind() == NotFound => return,
        Err(e) => panic!("{e:?}"),
    };

    dev.context().apply(|ctx| {
        // Copy every parameter tensor from the memory-mapped file into pinned memory.
        let t0 = Instant::now();
        let _model = Memory::realloc_with(&safetensors, HostAllocator(ctx.clone_ctx()));
        let t1 = Instant::now();
        println!("realloc {:?}", t1 - t0);
    });
}
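With the parameters pinned, the follow-up step (not in this commit) is the actual upload to device memory. A sketch of what that could look like: cuMemAlloc_v2 and cuMemcpyHtoD_v2 are real CUDA driver entry points, but the way the driver! macro resolves them here is an assumption modeled on HostAllocator above, and the helper itself is hypothetical:

/// Hypothetical helper: copy one pinned host slice into fresh device memory.
/// The caller is responsible for freeing the result with cuMemFree_v2.
unsafe fn upload(ctx: &Arc<Context>, host: &[u8]) -> u64 {
    let mut dev_ptr = 0u64; // CUdeviceptr is a 64-bit device address
    ctx.apply(|_| {
        driver!(cuMemAlloc_v2(&mut dev_ptr, host.len()));
        // Because `host` is page-locked, this copy is a direct DMA transfer,
        // with no hidden staging through a driver-internal pinned buffer.
        driver!(cuMemcpyHtoD_v2(dev_ptr, host.as_ptr().cast(), host.len()));
    });
    dev_ptr
}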