runner.py
"""Training entry points for the distillation experiments: encoder, decoder, and autoencoder runs."""
import torch
from diffusers.optimization import get_cosine_schedule_with_warmup

from Distillation.CNN import CNN, CNNdecoder
from Distillation.data import get_dataloader_dist, get_dataloader_dist_no_class_cifar
from Distillation.train_utils import train_dist, train_AE_dist, train_dec_dist


def main_dist_no_class():
    """Train the CNN encoder with distillation on the class-free CIFAR dataloader."""
    use_gpu = torch.cuda.is_available()
    img_size = 256
    batch_size = 16
    dataloader = get_dataloader_dist_no_class_cifar(img_size, batch_size, use_gpu=use_gpu)
    device = torch.device("cuda" if use_gpu else "cpu")
    num_epochs = 500

    model = CNN().to(device)
    print(model)
    # state_dict = torch.load("./checkpoints/run_norm/model_best.pt")
    # model.load_state_dict(state_dict)

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-7)
    lr_scheduler = get_cosine_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=1000,
        # total steps: (~50,000 CIFAR training images / batch size) * number of epochs
        num_training_steps=50000 // batch_size * num_epochs,
    )
    train_dist(model, optimizer, num_epochs, dataloader, device, lr_scheduler, "checkpoints/dist_cifar_class")


def main_dec_dist():
    """Train the CNN decoder with distillation on the default distillation dataloader."""
    use_gpu = torch.cuda.is_available()
    img_size = 256
    batch_size = 16
    dataloader = get_dataloader_dist(img_size, batch_size, use_gpu=use_gpu)
    device = torch.device("cuda" if use_gpu else "cpu")
    num_epochs = 500

    model = CNNdecoder().to(device)
    print(model)
    # state_dict = torch.load("./checkpoints/run_norm/model_best.pt")
    # model.load_state_dict(state_dict)

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-7)
    lr_scheduler = get_cosine_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=1000,
        num_training_steps=50000 // batch_size * num_epochs,
    )
    train_dec_dist(model, optimizer, num_epochs, dataloader, device, lr_scheduler, "checkpoints/run_dist_dec_cifar_norm")


def main_dist():
    """Train the CNN encoder with distillation; this is the run launched from __main__."""
    use_gpu = torch.cuda.is_available()
    img_size = 256
    batch_size = 16
    dataloader = get_dataloader_dist(img_size, batch_size, use_gpu=use_gpu)
    device = torch.device("cuda" if use_gpu else "cpu")
    num_epochs = 500

    model = CNN().to(device)
    print(model)
    # state_dict = torch.load("./checkpoints/run_norm/model_best.pt")
    # model.load_state_dict(state_dict)

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-7)
    lr_scheduler = get_cosine_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=1000,
        num_training_steps=50000 // batch_size * num_epochs,
    )
    train_dist(model, optimizer, num_epochs, dataloader, device, lr_scheduler, "checkpoints/run_dist_cifar_norm")


def main_AE_dist():
    """Jointly train the CNN encoder and CNNdecoder as an autoencoder with distillation."""
    use_gpu = torch.cuda.is_available()
    img_size = 256
    batch_size = 16
    dataloader = get_dataloader_dist(img_size, batch_size, use_gpu=use_gpu)
    device = torch.device("cuda" if use_gpu else "cpu")
    num_epochs = 500

    model = CNN().to(device)
    dec = CNNdecoder().to(device)
    print("------------------------------")
    print("-----------Encoder------------")
    print("------------------------------")
    print(model)
    print("------------------------------")
    print("-----------Decoder------------")
    print("------------------------------")
    print(dec)
    # state_dict = torch.load("./checkpoints/run_norm/model_best.pt")
    # model.load_state_dict(state_dict)

    # Separate Adam optimizer and cosine-warmup schedule for the encoder and the decoder.
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-7)
    dec_optimizer = torch.optim.Adam(dec.parameters(), lr=1e-4, weight_decay=1e-7)
    lr_scheduler = get_cosine_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=1000,
        num_training_steps=50000 // batch_size * num_epochs,
    )
    dec_lr_scheduler = get_cosine_schedule_with_warmup(
        optimizer=dec_optimizer,
        num_warmup_steps=1000,
        num_training_steps=50000 // batch_size * num_epochs,
    )
    train_AE_dist(model, dec, optimizer, dec_optimizer, num_epochs, dataloader, device,
                  lr_scheduler, dec_lr_scheduler, "checkpoints/run_AE")


if __name__ == "__main__":
    # Swap in main_dist_no_class(), main_dec_dist(), or main_AE_dist() to launch the other runs.
    main_dist()
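

# Usage sketch (hypothetical, not part of the original repo): instead of editing the
# __main__ guard above, a small companion script, say run.py, could dispatch between
# the entry points using only the standard library:
#
#   import argparse
#   import runner
#
#   ENTRY_POINTS = {
#       "dist": runner.main_dist,
#       "dist_no_class": runner.main_dist_no_class,
#       "dec_dist": runner.main_dec_dist,
#       "ae_dist": runner.main_AE_dist,
#   }
#
#   if __name__ == "__main__":
#       parser = argparse.ArgumentParser(description="Start one distillation training run.")
#       parser.add_argument("run", choices=sorted(ENTRY_POINTS))
#       ENTRY_POINTS[parser.parse_args().run]()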