Commit

Legend in Fig11 and changed paths in llama7b.cpp
Neha J committed May 28, 2024
1 parent 877453b commit 70689c0
Showing 2 changed files with 11 additions and 10 deletions.
GPU-MPC/experiments/sigma/run_experiment.py (2 additions, 1 deletion)
@@ -117,7 +117,8 @@ def run_perf(party, dealer_gpu, eval_gpu, dealer_key_dir, peer_ip, cpu_threads):
 with open('output/P{}/Fig11_data.csv'.format(party),'w') as out_file:
 online_time = list(map(lambda model: stats['evaluator'][model]['total']['time'], ['gpt-neo', 'gpt-neo-large', 'llama7b', 'llama13b']))
 X = ('1.3', '2.7', '7', '13')
-plt.plot(X, online_time, marker='s')
+plt.plot(X, online_time, marker='s', label='SIGMA-GPU')
+plt.legend(loc='upper left')
 plt.xlabel('Number of parameters (in billions)')
 plt.ylabel('Time (s)')
 plt.savefig("output/P{}/Fig11.png".format(party), dpi=300, bbox_inches='tight')
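For context, the change above follows the standard matplotlib label-plus-legend pattern. The sketch below is a minimal standalone illustration, not the repository code: the timing values are placeholders, whereas the real script pulls them from stats['evaluator'] inside run_perf.

    # Minimal sketch of the Fig11 plot with the new legend.
    # The online_time values here are hypothetical placeholders.
    import matplotlib.pyplot as plt

    X = ('1.3', '2.7', '7', '13')        # model sizes in billions of parameters
    online_time = [0.5, 0.9, 2.1, 3.8]   # placeholder online times in seconds

    plt.plot(X, online_time, marker='s', label='SIGMA-GPU')
    plt.legend(loc='upper left')
    plt.xlabel('Number of parameters (in billions)')
    plt.ylabel('Time (s)')
    plt.savefig('Fig11.png', dpi=300, bbox_inches='tight')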
GPU-MPC/ext/sytorch/examples/llama7b.cpp (9 additions, 9 deletions)
@@ -251,11 +251,11 @@ void ct_main(std::string inpName)
 ct->bw = 48;
 llama_model.setBackend(ct);
 
-// llama_model.load("/home/t-nejawalkar/ananta/meta_llama2_7b.dat");
-llama_model.load("/home/t-nejawalkar/ananta/meta_llama2_13b.dat");
+// llama_model.load("meta_llama2_7b.dat");
+llama_model.load("meta_llama2_13b.dat");
 
-// std::string fname = std::string("/home/t-nejawalkar/ananta/lambada-meta-llama2-7b/") + /*std::to_string(i)*/ +"999.dat";
-std::string fname = std::string("/home/t-nejawalkar/ananta/lambada-meta-llama2-13b/") + /*std::to_string(i)*/ inpName;
+// std::string fname = std::string("lambada-meta-llama2-7b/") + /*std::to_string(i)*/ +"999.dat";
+std::string fname = std::string("lambada-meta-llama2-13b/") + /*std::to_string(i)*/ inpName;
 input.load(fname, scale);
 auto &res = llama_model.forward(input);
 auto signedAct = Tensor<i64>((i64 *)res.data, res.shape);
@@ -282,7 +282,7 @@ void lt_main(std::string inpName, int party)
 const u64 n_ctx = 4096;
 const u64 n_embd = 5120;
 const u64 n_head = 40; // 40;
-const u64 n_layer = 1; // 40;
+const u64 n_layer = 40; // 40;
 const u64 intermediate_size = 13824;
 const u64 scale = 12;
 
@@ -303,13 +303,13 @@
 
 if (party != DEALER)
 {
-// llama_model.load("/home/t-nejawalkar/ananta/meta_llama2_7b.dat");
-llama_model.load("/home/t-nejawalkar/ananta/meta_llama2_13b.dat");
-std::string fname = std::string("/home/t-nejawalkar/ananta/lambada-meta-llama2-13b/") + /*std::to_string(i)*/ inpName;
+// llama_model.load("meta_llama2_7b.dat");
+llama_model.load("meta_llama2_13b.dat");
+std::string fname = std::string("lambada-meta-llama2-13b/") + /*std::to_string(i)*/ inpName;
 input.load(fname, scale);
 }
 
-// std::string fname = std::string("/home/t-nejawalkar/ananta/lambada-meta-llama2-7b/") + /*std::to_string(i)*/ +"999.dat";
+// std::string fname = std::string("lambada-meta-llama2-7b/") + /*std::to_string(i)*/ +"999.dat";
 llama->initializeInferencePartyA(llama_model.root);
 llama->initializeInferencePartyB(input);
 
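Since the hard-coded /home/t-nejawalkar/ananta/ prefixes are gone, both builds of this example now resolve meta_llama2_13b.dat and the lambada-meta-llama2-13b/ input directory relative to the working directory they are launched from. The sketch below is a hypothetical pre-flight check, not part of the repository; only the file and directory names are taken from the diff.

    # Hypothetical check that the now-relative model and input paths exist
    # in the current working directory before running the llama7b example.
    import os
    import sys

    required = ['meta_llama2_13b.dat', 'lambada-meta-llama2-13b']
    missing = [p for p in required if not os.path.exists(p)]
    if missing:
        sys.exit('Missing in working directory: ' + ', '.join(missing))
    print('Model weights and input directory found.')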
