utils.py
import gc
import os

import numpy as np
import torch
from sklearn.preprocessing import MinMaxScaler


def sliding_windows(data, seq_length):
    """Split a series into input windows of length seq_length and the value that follows each window."""
    x = []
    y = []
    for i in range(len(data) - seq_length - 1):
        _x = data[i:(i + seq_length)]
        _y = data[i + seq_length]  # the value right after the window is the prediction target
        x.append(_x)
        y.append(_y)
    return np.array(x), np.array(y)


def real_seq(data, seq_length):
    """Collect overlapping windows of length seq_length, starting from index seq_length."""
    y = []
    for i in range(seq_length, len(data) - seq_length - 1):
        _y = data[i:(i + seq_length)]
        y.append(_y)
    return np.array(y)


def noise(Xt, device):
    """Append a channel of Gaussian noise to the batch Xt (shape: batch x channels x length) and move it to device."""
    bs = Xt.size(0)
    target_len = Xt.size(2)
    z = torch.randn((bs, 1, target_len), device=device)
    Xt = Xt.to(device)
    Xt = torch.cat((Xt, z), dim=1)
    return Xt


def create_folder(path):
    """Create the directory at path if it does not already exist."""
    if not os.path.exists(path):
        os.makedirs(path)
        print("New models directory created:", path)
    else:
        print("Directory already exists:", path)


def load_model(path, name):
    """Load the saved generator and discriminator for the model called name."""
    G = torch.load(os.path.join(path, name, name + '_generator.pt'))
    D = torch.load(os.path.join(path, name, name + '_discriminator.pt'))
    print("Model loaded successfully:", name)
    return G, D


def scale(generated):
    """Rescale samples to [0, 1] with a feature-wise min-max transform."""
    sc = MinMaxScaler()
    return sc.fit_transform(generated)


def assign_device(gpu):
    """Return 'cuda' when a GPU is requested and available, otherwise 'cpu'."""
    if torch.cuda.is_available() and gpu:
        device = "cuda"
        print("Cuda enabled: using GPU")
    else:
        device = "cpu"
        print("Cuda not available or not requested: using CPU")
    return device


def moment_loss(fake_data, real_data):
    """Penalise the gap between the first two moments (mean and std) of the fake and real batches."""
    fake_mean = fake_data.mean()
    real_mean = real_data.mean()
    fake_std = fake_data.std()
    real_std = real_data.std()
    return abs(fake_mean - real_mean) + abs(fake_std - real_std)


def write_file(path, values):
    with open(path + '.txt', 'w') as file:
        for value in values:
            file.write(str(value) + '\n')


def free_gpu(G):
    """Move a model back to the CPU and release cached GPU memory."""
    G.cpu()
    del G  # drops only the local reference; the caller should also release theirs
    gc.collect()
    torch.cuda.empty_cache()
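

# Minimal usage sketch of how these helpers compose. The toy sine series, batch
# shapes, and the "models/demo" name below are assumptions for illustration only;
# substitute your own data and saved checkpoint names.
if __name__ == "__main__":
    device = assign_device(gpu=True)

    # Build (window, next-value) training pairs from a toy series.
    series = np.sin(np.linspace(0, 20, 500)).reshape(-1, 1)
    series = scale(series)                          # min-max scale to [0, 1]
    X, Y = sliding_windows(series, seq_length=24)
    print("windows:", X.shape, "targets:", Y.shape)

    # Append a noise channel to a batch shaped (batch, channels, length).
    batch = torch.tensor(X[:8], dtype=torch.float32).permute(0, 2, 1)  # -> (8, 1, 24)
    batch_with_noise = noise(batch, device)
    print("with noise channel:", batch_with_noise.shape)

    # Folder and checkpoint handling (hypothetical model name).
    create_folder("models/demo")
    # G, D = load_model("models", "demo")   # needs saved *_generator.pt / *_discriminator.pt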