forked from bnsreenu/python_for_image_processing_APEER
Showing 9 changed files with 658 additions and 0 deletions.
@@ -0,0 +1,47 @@
#Video Playlist: https://www.youtube.com/playlist?list=PLHae9ggVvqPgyRQQOtENr6hK0m1UquGaG

"""
Manual and auto thresholding
"""

import cv2
import matplotlib.pyplot as plt

img = cv2.imread("images/Osteosarcoma_01.tif", 1)

#########################MANUAL##################
#Separate the blue channel as it contains the nuclei pixels (DAPI).
#OpenCV loads images as BGR, so channel 0 is blue.
blue_channel = img[:,:,0]
plt.imshow(blue_channel, cmap='gray')

#plt.hist(blue_channel.flat, bins=100, range=(0,150))  #.flat returns the flattened numpy array (1D)

#Manual thresholding by applying a threshold value to the numpy array.
#After thresholding we get a binary image.
background = (blue_channel <= 40)
nuclei = (blue_channel > 40)
plt.imshow(nuclei, cmap='gray')

#Using OpenCV to perform the same manual threshold.
#All pixels above 40 are set to 255.
#The result should be exactly the same as the numpy method above.
ret1, thresh1 = cv2.threshold(blue_channel, 40, 255, cv2.THRESH_BINARY)
plt.imshow(thresh1, cmap='gray')
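
#Optional sanity check (a sketch, not part of the original tutorial):
#the OpenCV binary image should match the numpy comparison above,
#just scaled to 0/255 instead of False/True.
import numpy as np
print("Masks identical:", np.array_equal(nuclei, thresh1 > 0))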

######################################################

############# AUTO using OTSU ##########################
#Using OpenCV for Otsu-based automatic thresholding
ret2, thresh2 = cv2.threshold(blue_channel, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#Reports a value of 50 as the threshold for the nuclei.

#Now, let us segment the image, meaning assign values of 0, 1, 2, ... to pixels
import numpy as np
#np.digitize needs the bins to be defined as an array,
#so convert the threshold value to a one-element array.
#np.digitize assigns values 0, 1, 2, 3, ... to the pixels in each class.
#For a binary image it would be 0 and 1.
regions1 = np.digitize(blue_channel, bins=np.array([ret2]))
plt.imshow(regions1)
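
#Optional follow-up (a sketch, not part of the original script): use the
#Otsu threshold ret2 to estimate what fraction of the image is nuclei.
nuclei_fraction = np.mean(blue_channel > ret2)
print("Otsu threshold:", ret2, " Nuclei area fraction:", nuclei_fraction)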

####################################################################
@@ -0,0 +1,73 @@
#Video Playlist: https://www.youtube.com/playlist?list=PLHae9ggVvqPgyRQQOtENr6hK0m1UquGaG

# Image segmentation and morphological operators

from matplotlib import pyplot as plt
import numpy as np
from skimage.filters import threshold_multiotsu
import cv2

img = cv2.imread("images/BSE.tif", 0)

#Denoise for better results
#from skimage.restoration import denoise_tv_chambolle
#denoised_img = denoise_tv_chambolle(img, weight=0.1, eps=0.0002, n_iter_max=200, multichannel=False)
plt.imshow(img, cmap='gray')
plt.hist(img.flat, bins=100, range=(100,255))  #.flat returns the flattened numpy array (1D)

##################MANUAL########################
#Manual segmentation is possible, but the automatic approach below works fine.
region1 = (img >= 0) & (img < 75)
region2 = (img >= 75) & (img < 140)
region3 = (img >= 140) & (img < 200)
region4 = (img >= 200) & (img <= 255)
all_regions = np.zeros((img.shape[0], img.shape[1], 3))  #Create a 3-channel blank image of the same size as the original
all_regions[region1] = (1,0,0)
all_regions[region2] = (0,1,0)
all_regions[region3] = (0,0,1)
all_regions[region4] = (1,1,0)
plt.imshow(all_regions)
##############################################
####AUTO###########################
# Apply multi-Otsu threshold
thresholds = threshold_multiotsu(img, classes=4)

# Digitize (segment) the original image into multiple classes.
#np.digitize assigns values 0, 1, 2, 3, ... to the pixels in each class.
regions = np.digitize(img, bins=thresholds)
plt.imshow(regions)
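
#Optional check (a sketch, not in the original): print the multi-Otsu
#thresholds and how many pixels fall into each of the four classes.
print("Multi-Otsu thresholds:", thresholds)
print("Pixels per class:", np.bincount(regions.ravel(), minlength=4))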

segm1 = (regions == 0)
segm2 = (regions == 1)
segm3 = (regions == 2)
segm4 = (regions == 3)

#We can use binary opening and closing operations to clean up.
#Opening removes isolated foreground pixels within the structuring window.
#Closing fills isolated holes within the structuring window.

from scipy import ndimage as nd

segm1_opened = nd.binary_opening(segm1, np.ones((3,3)))
segm1_closed = nd.binary_closing(segm1_opened, np.ones((3,3)))

segm2_opened = nd.binary_opening(segm2, np.ones((3,3)))
segm2_closed = nd.binary_closing(segm2_opened, np.ones((3,3)))

segm3_opened = nd.binary_opening(segm3, np.ones((3,3)))
segm3_closed = nd.binary_closing(segm3_opened, np.ones((3,3)))

segm4_opened = nd.binary_opening(segm4, np.ones((3,3)))
segm4_closed = nd.binary_closing(segm4_opened, np.ones((3,3)))
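
#Alternative sketch (not part of the original): the four opening/closing
#pairs above can also be written as a loop over the class labels.
cleaned_masks = []
for class_id in range(4):
    mask = (regions == class_id)
    mask = nd.binary_opening(mask, np.ones((3,3)))
    mask = nd.binary_closing(mask, np.ones((3,3)))
    cleaned_masks.append(mask)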

all_segments_cleaned = np.zeros((img.shape[0], img.shape[1], 3))

all_segments_cleaned[segm1_closed] = (1,0,0)
all_segments_cleaned[segm2_closed] = (0,1,0)
all_segments_cleaned[segm3_closed] = (0,0,1)
all_segments_cleaned[segm4_closed] = (1,1,0)

plt.imshow(all_segments_cleaned)  #All the noise should be cleaned now
plt.imsave("images/BSE_segmented.jpg", all_segments_cleaned)
@@ -0,0 +1,59 @@
#Video Playlist: https://www.youtube.com/playlist?list=PLHae9ggVvqPgyRQQOtENr6hK0m1UquGaG

#Scratch assay - single image segmentation

import matplotlib.pyplot as plt
from skimage import io

import numpy as np
from skimage.filters import threshold_otsu
import cv2

img = io.imread("images/scratch_assay/Scratch0.jpg", as_gray=True)

##################################################
#Variance - not a great way to quantify texture
#Local variance over a k x k window: E[x^2] - (E[x])^2
from scipy import ndimage
k = 7
img_mean = ndimage.uniform_filter(img, (k, k))
img_sqr_mean = ndimage.uniform_filter(img**2, (k, k))
img_var = img_sqr_mean - img_mean**2
plt.imshow(img_var, cmap='gray')
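
#Optional check (a sketch, not in the original): the uniform-filter trick
#above is just var = E[x^2] - (E[x])^2 computed over each k x k window.
#For a single patch this agrees with np.var (population variance).
patch = img[:k, :k]
print(np.isclose(patch.var(), (patch**2).mean() - patch.mean()**2))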

#######################################################
#GABOR - A great filter for texture, but usually only effective
#if we know the exact parameters. A good choice for generating
#features for machine learning.

ksize = 45
theta = np.pi/4
kernel = cv2.getGaborKernel((ksize, ksize), 5.0, theta, 10.0, 0.9, 0, ktype=cv2.CV_32F)
filtered_image = cv2.filter2D(img, cv2.CV_8UC3, kernel)
plt.imshow(filtered_image, cmap='gray')

###########################################################
#Entropy
#Entropy quantifies disorder.
#Since the cell region has high variation in pixel values, its entropy is
#higher than that of the scratch region.
from skimage.filters.rank import entropy
from skimage.morphology import disk
entropy_img = entropy(img, disk(3))
plt.imshow(entropy_img)

#Scratch analysis - single image
#Now let us use Otsu to separate high- from low-entropy regions.
plt.hist(entropy_img.flat, bins=100, range=(0,5))  #.flat returns the flattened numpy array (1D)

thresh = threshold_otsu(entropy_img)

#Now let us binarize the entropy image
binary = entropy_img <= thresh
plt.imshow(binary)

#Count all pixels in the scratch region (value = 1)
scratch_area = np.sum(binary == 1)
print("Scratched area is: ", scratch_area, "square pixels")

scale = 0.45 # microns/pixel
print("Scratched area is: ", scratch_area*((scale)**2), "square microns")
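
#Optional sketch (not in the original): express the scratch as a fraction
#of the full field of view, which is unit-free and easy to compare.
scratch_fraction = scratch_area / binary.size
print("Scratch fraction of the image: ", scratch_fraction)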
@@ -0,0 +1,48 @@
#Video Playlist: https://www.youtube.com/playlist?list=PLHae9ggVvqPgyRQQOtENr6hK0m1UquGaG

#Scratch assay on time series images
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5154238/

import matplotlib.pyplot as plt
from skimage import io
from skimage.filters.rank import entropy
from skimage.morphology import disk
import numpy as np
from skimage.filters import threshold_otsu

#Use glob to extract image names and load them.
import glob

time = 0
scale = 0.45 # microns/pixel
time_list = []
area_list = []
path = "images/scratch_assay/*.*"

#Put the code from the single-image segmentation into a for loop
#to apply the segmentation to all images.
#Note: glob does not guarantee any particular file order, so sort the list
#if the time sequence matters.
for file in glob.glob(path):
    img = io.imread(file)
    entropy_img = entropy(img, disk(3))
    thresh = threshold_otsu(entropy_img)
    binary = entropy_img <= thresh
    scratch_area = np.sum(binary == 1)
    scratch_area = scratch_area*((scale)**2)  #Convert from pixel units to square microns
    print("time=", time, "hr ", "Scratch area=", scratch_area, "um\N{SUPERSCRIPT TWO}")
    time_list.append(time)
    area_list.append(scratch_area)
    time += 1

#print(time_list, area_list)
plt.plot(time_list, area_list, 'bo')  #Plot as a blue-dot scatter plot
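
#Optional sketch (not in the original): label the axes so the plot reads on its own.
plt.xlabel("Time (hr)")
plt.ylabel("Scratch area (um\N{SUPERSCRIPT TWO})")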

#Print slope, intercept
from scipy.stats import linregress  #Linear regression
#print(linregress(time_list, area_list))

slope, intercept, r_value, p_value, std_err = linregress(time_list, area_list)
print("y = ", slope, "x + ", intercept)
print("R\N{SUPERSCRIPT TWO} = ", r_value**2)
#print("r-squared: %f" % r_value**2)
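
#Optional sketch (not in the original): overlay the fitted line on the scatter plot.
times = np.array(time_list)
plt.plot(times, area_list, 'bo', times, intercept + slope*times, 'r-')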
@@ -0,0 +1,84 @@
#Video Playlist: https://www.youtube.com/playlist?list=PLHae9ggVvqPgyRQQOtENr6hK0m1UquGaG

"""
Measure properties of labeled image regions.
https://scikit-image.org/docs/stable/api/skimage.measure.html#skimage.measure.regionprops
https://github.com/scikit-image/scikit-image/blob/v0.17.2/skimage/measure/_regionprops.py#L643
"""

from skimage import measure, io, img_as_ubyte
import matplotlib.pyplot as plt
from skimage.color import label2rgb, rgb2gray
import numpy as np

# The input image.
image = img_as_ubyte(rgb2gray(io.imread("images/cast_iron1.tif")))
plt.imshow(image, cmap='gray')
scale = 0.6  #microns/pixel

#plt.hist(image.flat, bins=100, range=(0,150))  #.flat returns the flattened numpy array (1D)

from skimage.filters import threshold_otsu
threshold = threshold_otsu(image)

#Generate the thresholded image (pixels darker than the Otsu threshold become foreground)
thresholded_img = image < threshold
plt.imshow(thresholded_img)

#Remove regions touching the image border
from skimage.segmentation import clear_border
edge_touching_removed = clear_border(thresholded_img)
plt.imshow(edge_touching_removed)

#Label connected regions of an integer array using measure.label
#Labels each connected entity as one object
#Connectivity = maximum number of orthogonal hops to consider a pixel/voxel as a neighbor.
#If None, full connectivity of input.ndim (the number of dimensions of the image) is used.
#For a 2D image that would be 2.

label_image = measure.label(edge_touching_removed, connectivity=image.ndim)

plt.imshow(label_image)
#Return an RGB image where color-coded labels are painted over the image
#using label2rgb.

image_label_overlay = label2rgb(label_image, image=image)
plt.imshow(image_label_overlay)

plt.imsave("labeled_cast_iron.jpg", image_label_overlay)
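
#Optional check (a sketch, not in the original): the number of connected
#regions found; labels run from 1 to N, with 0 being the background.
print("Number of labeled regions:", label_image.max())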

#################################################
#Calculate properties
#using regionprops or regionprops_table
all_props = measure.regionprops(label_image, image)
#Print selected parameters for all objects
for prop in all_props:
    print('Label: {} Area: {}'.format(prop.label, prop.area))

#Compute image properties and return them as a pandas-compatible table.
#Available regionprops include: area, bbox, centroid, convex_area, coords, eccentricity,
#equivalent_diameter, euler_number, label, intensity_image, major_axis_length,
#max_intensity, mean_intensity, moments, orientation, perimeter, solidity, and many more.

props = measure.regionprops_table(label_image, image,
                          properties=['label',
                                      'area', 'equivalent_diameter',
                                      'mean_intensity', 'solidity'])

import pandas as pd
df = pd.DataFrame(props)
print(df.head())

#To delete small regions...
df = df[df['area'] > 50]
print(df.head())

#######################################################
#Convert to micron scale
df['area_sq_microns'] = df['area'] * (scale**2)
df['equivalent_diameter_microns'] = df['equivalent_diameter'] * (scale)
print(df.head())
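
#Optional sketch (not in the original): quick summary statistics of the
#region areas in square microns.
print(df['area_sq_microns'].describe())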

df.to_csv('data/cast_iron_measurements.csv')