Showing 17 changed files with 708 additions and 0 deletions.
@@ -0,0 +1,121 @@
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import cv2
from os.path import join
import torch
import numpy as np
from sklearn import preprocessing

Scaler = preprocessing.MinMaxScaler()

# our spectra data has 200 points; since indexing starts at 1, the end index is 201
dataindex = 201

class MMIUnseenDataset(Dataset):
    """
    MMIUnseenDataset is the dataset used by Unseenprediction.py,
    which loads the unseen data.
    """

    def __init__(self, z_dim, points_path):
        self.data = pd.read_csv(points_path, header=None).to_numpy()
        self.z_dim = z_dim

    def __getitem__(self, index):
        item = self.data[index]
        # points = item[0:dataindex - 1].astype(np.float64)
        points = torch.from_numpy(item.astype(np.float64))
        # pad the spectrum with Gaussian noise up to z_dim, then reshape for the Generator
        points = torch.hstack([points, torch.randn(self.z_dim - len(points))])
        points = points.reshape([self.z_dim, 1, 1])
        return points

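# Hypothetical usage sketch (not part of the original file): the CSV path and
# z_dim value below are placeholders. Each item is the unseen spectrum padded
# with Gaussian noise up to z_dim and reshaped to [z_dim, 1, 1], ready to be
# fed to the trained Generator.
# unseen = MMIUnseenDataset(z_dim=250, points_path='unseen_points.csv')
# noise_input = unseen[0].unsqueeze(0).float()   # [1, z_dim, 1, 1]
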
class MMIDataset(Dataset):
    """
    MMIDataset is the dataset used by main for training.
    `points` refers to the original 200 data points,
    while `points21` refers to the same 200 data points but is used for the gradient penalty.
    """

    def __init__(self, img_size, z_dim, points_path, img_folder):
        self.data = pd.read_csv(points_path, header=0, index_col=None).to_numpy()
        # self.data = pd.read_csv(points_path, header=0).to_numpy()
        self.img_folder = img_folder
        self.img_size = img_size
        self.z_dim = z_dim

    def __getitem__(self, index):
        item = self.data[index]
        # load the greyscale image named by the first column and scale it to [0, 1]
        img = cv2.imread(join(self.img_folder, '{}.png'.format(item[0])), cv2.IMREAD_GRAYSCALE)
        img = cv2.resize(img, (self.img_size, self.img_size))[:, :, np.newaxis]
        img = img / 255.0
        img = img.transpose(2, 0, 1)
        img = torch.from_numpy(img)

        # points21: min-max scaled spectrum, kept flat for the gradient penalty
        points21 = item[1:dataindex].astype(np.float64).reshape(-1, 1)
        points21 = Scaler.fit_transform(points21)
        points21 = torch.from_numpy(points21).flatten(0)

        # points: the same scaled spectrum, padded with noise up to z_dim
        points = item[1:dataindex].astype(np.float64).reshape(-1, 1)
        points = Scaler.fit_transform(points)
        points = torch.from_numpy(points).flatten(0)
        assert len(points) <= self.z_dim
        points = torch.hstack([points, torch.randn(self.z_dim - len(points))])
        points = points.reshape([self.z_dim, 1, 1])
        # the shape of points should be [Z_DIM, CHANNELS_IMG, FEATURES_GEN]

        return points, img, points21

    def __len__(self):
        return len(self.data)

# remember to fill in points_path and img_folder before calling get_loader
def get_loader(
        img_size,
        batch_size,
        z_dim,
        points_path='',
        img_folder='',
        shuffle=True,
):
    return DataLoader(MMIDataset(img_size, z_dim, points_path, img_folder),
                      batch_size=batch_size, shuffle=shuffle)

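# Hypothetical usage sketch (not part of the original commit): the paths and
# hyper-parameter values below are placeholders for illustration only.
# loader = get_loader(img_size=64, batch_size=32, z_dim=250,
#                     points_path='points.csv', img_folder='images')
# for points, img, points21 in loader:
#     # points:   [batch, z_dim, 1, 1]  scaled spectrum padded with noise
#     # img:      [batch, 1, img_size, img_size]  greyscale target in [0, 1]
#     # points21: [batch, 200]  scaled spectrum for the gradient penalty
#     break
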
# text one-hot encoding, please ignore
# class Condition(nn.Module):
#     def __init__(self, alpha: float):
#         super().__init__()
#
#         # From one-hot encoding to features: 21 => 784
#         self.fc = nn.Sequential(
#             nn.Linear(21, 256),
#             nn.BatchNorm1d(256),
#             nn.LeakyReLU(alpha))
#
#     def forward(self, labels: torch.Tensor):
#         # One-hot encode labels
#         x = F.one_hot(labels, num_classes=21)
#
#         # From Long to Float
#         x = x.float()
#
#         # To feature vectors
#         return self.fc(x)
#
#
# # Reshape helper
# class Reshape(nn.Module):
#     def __init__(self, *shape):
#         super().__init__()
#
#         self.shape = shape
#
#     def forward(self, x):
#         return x.reshape(-1, *self.shape)
@@ -0,0 +1,42 @@
import os
import torch
from torch import nn
import cv2
from Dataloader import MMIDataset
import numpy as np

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# path of the saved, trained Generator
model_path = r''

# Load the dataset
dataset = MMIDataset(img_size=64,
                     z_dim=300,
                     points_path=r'',
                     img_folder=r'')

# Output results path & load the trained Generator
results_folder = r''
gen = torch.load(model_path)
gen = gen.to(device)
gen = gen.eval()

# Generate the image array for a given input
def predict(net: nn.Module, points):
    return net(points).squeeze(0).squeeze(0).cpu().detach().numpy()

# Generate the desired number of results and save them to the results folder
stop_p = 10
i = 0
for p in dataset:
    if i >= stop_p:
        break
    data = p[0].to(device, dtype=torch.float).unsqueeze(0)
    img_out = predict(gen, data)
    img = (img_out + 1) / 2          # map the tanh output from [-1, 1] to [0, 1]
    img = np.round(255 * img)
    img = cv2.normalize(img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)

    cv2.imwrite(os.path.join(results_folder, str(i) + '-test.png'), img)
    i += 1
@@ -0,0 +1,92 @@
import torch
import torch.nn as nn

nc = 1
image_size = 64
ngpu = 1
features_d = 64
features_g = 64
Z_dim = 250
channels_noise = Z_dim

"""
The ngpu setting is kept here; you can simply change the reshape in Critic.forward
to input.view(-1) to match the batch size.
nc is the number of image channels: you can switch to RGB,
but then you need to check the in_channels of the first layer.
"""

class Critic(nn.Module):
    def __init__(self, ngpu):
        super(Critic, self).__init__()
        self.ngpu = ngpu
        self.image_size = image_size
        # project the 200-point spectrum onto an image-sized plane so it can be
        # concatenated with the input image as an extra channel
        self.l1 = nn.Linear(200, image_size * image_size * nc)
        self.disc = nn.Sequential(
            nn.Conv2d(nc * 2, features_d, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2),
            self._block(features_d, features_d * 2, 4, 2, 1),
            self._block(features_d * 2, features_d * 4, 4, 2, 1),
            self._block(features_d * 4, features_d * 8, 4, 2, 1),
            nn.Conv2d(features_d * 8, 1, kernel_size=4, stride=1, padding=0),
            # nn.Sigmoid(),
        )

    def _block(self, in_channels, out_channels, kernel_size, stride, padding):
        return nn.Sequential(
            nn.Conv2d(
                in_channels, out_channels, kernel_size, stride, padding, bias=False,
            ),
            nn.InstanceNorm2d(out_channels, affine=True),
            nn.LeakyReLU(0.2, inplace=True),
        )

    def forward(self, img, points21):
        x1 = img
        x2 = self.l1(points21)
        # x2 = x2.reshape(int(b_size / ngpu), nc, image_size, image_size)
        x2 = x2.reshape(-1, nc, image_size, image_size)
        combine = torch.cat((x1, x2), dim=1)
        return self.disc(combine)

# ///////////////////////////////////////////////////////////
class Generator(nn.Module):
    def __init__(self, ngpu):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        self.net = nn.Sequential(
            # Input: N x channels_noise x 1 x 1
            self._block(channels_noise, features_g * 16, 4, 1, 0),  # img: 4x4 (example, depends on your size)
            self._block(features_g * 16, features_g * 8, 4, 2, 1),  # img: 8x8
            self._block(features_g * 8, features_g * 4, 4, 2, 1),   # img: 16x16
            self._block(features_g * 4, features_g * 2, 4, 2, 1),   # img: 32x32
            nn.ConvTranspose2d(
                features_g * 2, nc, kernel_size=4, stride=2, padding=1
            ),
            # Output: N x channels_img x 64 x 64
            nn.Tanh(),
        )

    def _block(self, in_channels, out_channels, kernel_size, stride, padding):
        return nn.Sequential(
            nn.ConvTranspose2d(
                in_channels, out_channels, kernel_size, stride, padding, bias=False,
            ),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

    def forward(self, points):
        return self.net(points)


def initialize_weights(model):
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.BatchNorm2d)):
            nn.init.normal_(m.weight.data, 0.0, 0.02)
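
# Hypothetical shape-check sketch (not from the original commit): the batch
# size of 8 is a placeholder. It confirms that a Z_dim-length noise vector
# flows through the Generator to a 64x64 image and that the Critic accepts
# that image together with a 200-point spectrum.
if __name__ == '__main__':
    gen = Generator(ngpu)
    critic = Critic(ngpu)
    initialize_weights(gen)
    initialize_weights(critic)

    noise = torch.randn(8, channels_noise, 1, 1)   # [N, Z_dim, 1, 1]
    spectrum = torch.randn(8, 200)                 # [N, 200] conditioning points
    fake = gen(noise)                              # [N, nc, 64, 64]
    score = critic(fake, spectrum)                 # [N, 1, 1, 1] critic score
    print(fake.shape, score.shape)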
@@ -0,0 +1,40 @@
import os
from PIL import Image
import numpy as np
import torch

# Set the input and output directories
input_dir = ''
output_dir = ''

# Create the output directory if it doesn't already exist
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# Loop through each file in the input directory
for filename in os.listdir(input_dir):
    # Open the image file with PIL and convert it to greyscale
    img_path = os.path.join(input_dir, filename)
    img = Image.open(img_path)
    img_gray = img.convert('L')

    # Convert the image to a NumPy array
    img_np = np.array(img_gray)

    # Convert the NumPy array to a PyTorch tensor
    img_tensor = torch.from_numpy(img_np)

    # Threshold at 80: pixels <= 80 become 3, all other pixels become 1
    img_tensor[img_tensor <= 80] = 3
    img_tensor[img_tensor > 80] = 1

    # Reshape the tensor from 64 x 64 to 4096 x 1 (column-major via the transpose)
    # img_tensor = img_tensor.reshape((64, 64))
    img_tensor = img_tensor.t()
    img_tensor = img_tensor.reshape((4096, 1))
    img = torch.flatten(img_tensor, 0, -1)

    # Save the altered tensor as a text file
    tensor_name = os.path.splitext(filename)[0] + '.txt'
    tensor_path = os.path.join(output_dir, tensor_name)
    np.savetxt(tensor_path, img_tensor.numpy(), fmt='%d')