11 changes: 10 additions & 1 deletion CNN/datasets/DatasetLoader.py
@@ -13,21 +13,30 @@ def __init__(self, preprocessors=None):
def load(self, image_paths, verbose=-1):
data = []
labels = []
for (i, image_paths) in enumerate(image_paths):
        for i, image_path in enumerate(image_paths):
            # use a distinct loop variable so len(image_paths) below still refers to the full list
            image = cv2.imread(image_path)
            # the class label is the name of the image's parent directory
            label = image_path.split(os.path.sep)[-2]

            if self.preprocessors is not None:
                for p in self.preprocessors:
                    image = p.preprocess(image)

            data.append(image)
            labels.append(label)

            # show progress every `verbose` images
            if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
                print("[INFO] processed {}/{}".format(i + 1, len(image_paths)))

return np.array(data), np.array(labels)
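For context, a minimal sketch of how this loader is typically driven, assuming `CNN/` is importable as a package, the class in DatasetLoader.py is named `DatasetLoader`, and the data is laid out as `dataset/<class_name>/<image>.jpeg`; none of these names, nor the scaling step, come from this diff.

```python
import glob
import os

from CNN.datasets.DatasetLoader import DatasetLoader                 # assumed class name
from CNN.preprocessing.ImageToArray import ImageToArrayPreprocessor

# assumed layout: dataset/<class_name>/<image>.jpeg — the label is the parent folder name,
# which is exactly what load() extracts with split(os.path.sep)[-2]
image_paths = glob.glob(os.path.join("dataset", "*", "*.jpeg"))
loader = DatasetLoader(preprocessors=[ImageToArrayPreprocessor()])
data, labels = loader.load(image_paths, verbose=100)   # progress line every 100 images
data = data.astype("float") / 255.0                    # scale pixel intensities to [0, 1]
```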
65 changes: 44 additions & 21 deletions CNN/nn/conv/IncludeNet.py
@@ -1,41 +1,64 @@
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dense, Dropout
from keras.api.models import Sequential
from keras.api.layers import (
Conv2D,
Activation,
Dense,
Dropout,
MaxPooling2D,
BatchNormalization,
GlobalAveragePooling2D,
)
from keras.api.regularizers import l2
from keras import backend as K
from keras.layers import MaxPooling2D

size = 50


class IncludeNet:
@staticmethod
def build(width, height, depth, classes):
def build(width, height, depth, classes=4, size=32, reg=0.001):
        # `classes` defaults to 4 for this dataset; `reg` sets the L2 regularization strength
model = Sequential()
input_shape = (height, width, depth)
if K.image_data_format() == "channels_first":
input_shape = (depth, height, width)
# model.add(Dropout(0.2,input_shape=inputShape))
model.add(Conv2D(size, (3, 3), padding="same", input_shape=input_shape))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(3, 3)))

model.add(Conv2D(size, (3, 3), padding="same", input_shape=input_shape))
# Conv Block 1 with L2 Regularization
model.add(
Conv2D(
size,
(3, 3),
padding="same",
input_shape=input_shape,
kernel_regularizer=l2(reg),
)
)
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(size, (3, 3), padding="same", input_shape=input_shape))
# Conv Block 2 with L2 Regularization
model.add(Conv2D(size * 2, (3, 3), padding="same", kernel_regularizer=l2(reg)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Dropout(0.5))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(size, (3, 3), padding="same", input_shape=input_shape))
# Conv Block 3 with L2 Regularization
model.add(Conv2D(size * 4, (3, 3), padding="same", kernel_regularizer=l2(reg)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5)) # Consider experimenting with dropout rate

# Adding GlobalAveragePooling2D layer
model.add(GlobalAveragePooling2D())

model.add(Flatten())
# Dense Layer with L2 Regularization
model.add(Dense(size * 8, kernel_regularizer=l2(reg)))
model.add(Activation("relu"))
model.add(Dropout(0.5)) # Consider experimenting with dropout rate

model.add(Dense(classes))
# Output Layer
model.add(
Dense(classes, kernel_regularizer=l2(reg))
) # Adding L2 regularization here as well
model.add(Activation("softmax"))
return model
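A hedged smoke test of the revised `IncludeNet`: the 50x50 RGB input size, the Adam optimizer, and the random one-batch fit below are illustrative assumptions (and the import assumes `CNN/` is an importable package), not training settings taken from this PR.

```python
import numpy as np
from keras.utils import to_categorical
from CNN.nn.conv.IncludeNet import IncludeNet   # module path taken from the file above

# assumed 50x50 RGB input; four classes as in the new default
model = IncludeNet.build(width=50, height=50, depth=3, classes=4, size=32, reg=0.001)
model.compile(
    optimizer="adam",
    loss="categorical_crossentropy",   # pairs with the softmax output layer
    metrics=["accuracy"],
)

# one tiny random batch, only to confirm the model builds and trains end to end
x_dummy = np.random.rand(8, 50, 50, 3).astype("float32")
y_dummy = to_categorical(np.random.randint(0, 4, size=(8,)), num_classes=4)
model.fit(x_dummy, y_dummy, batch_size=4, epochs=1)
```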
3 changes: 2 additions & 1 deletion CNN/preprocessing/ImageToArray.py
@@ -1,9 +1,10 @@
from keras.preprocessing.image import img_to_array
from keras.api.preprocessing.image import img_to_array


class ImageToArrayPreprocessor:
def __init__(self, data_format=None):
self.dataFormat = data_format

def preprocess(self, image):
# print(image)
return img_to_array(image, data_format=self.dataFormat)
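A quick sanity check of this preprocessor on a dummy array; the shapes follow from `img_to_array`'s documented behaviour, while the dummy image itself is an illustrative assumption.

```python
import numpy as np
from CNN.preprocessing.ImageToArray import ImageToArrayPreprocessor

dummy = np.zeros((32, 32, 3), dtype="uint8")
print(ImageToArrayPreprocessor().preprocess(dummy).shape)                              # (32, 32, 3)
print(ImageToArrayPreprocessor(data_format="channels_first").preprocess(dummy).shape)  # (3, 32, 32)
```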
21 changes: 16 additions & 5 deletions CNN/preprocessing/PreProcessor.py
@@ -9,6 +9,7 @@ def __init__(self, width, height, inter=cv2.INTER_AREA):
self.inter = inter

def preprocess(self, input_image):
# print("12")
try:
            # if training for 100 epochs at size 50 gives poor results, remove all of this and just resize; convert the whole dataset before training the network
# input_image = cv2.imread(image_paths)
@@ -18,6 +19,7 @@ def preprocess(self, input_image):
min_red = np.array([80, 60, 140])
max_red = np.array([255, 255, 255])
image_red1 = cv2.inRange(image_blur_hsv, min_red, max_red)
# print("inja")
big_contour, mask = self.find_biggest_contour(image_red1)
(x, y), radius = cv2.minEnclosingCircle(big_contour)
center = (int(x), int(y))
@@ -30,7 +32,7 @@ def preprocess(self, input_image):
extera = (center[0] + radius) - width
border[3] = extera + 1

if (center[0] - radius < 0):
if center[0] - radius < 0:
extera = width - (center[0] + radius)
border[2] = extera + 1

@@ -54,17 +56,26 @@ def preprocess(self, input_image):

cropped_image = input_image[y:y2, x:x2]

return cv2.resize(cropped_image, (self.width, self.height),
interpolation=self.inter)
# print("sal;am")

return cv2.resize(
cropped_image, (self.width, self.height), interpolation=self.inter
)
except Exception as a:
print("preprocessor", a)

def find_biggest_contour(self, image):
image = image.copy()
s, contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# print("69")
# print(res)
contours, hierarchy = cv2.findContours(
image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
)

# print("70")
biggest_contour = max(contours, key=cv2.contourArea)
mask = np.zeros(image.shape, np.uint8)
        cv2.drawContours(mask, [biggest_contour], -1, 255, -1)  # scalar intensity: the mask is single-channel, so a BGR tuple would fill it with 0
return biggest_contour, mask

def overlay_mask(self, mask, image):
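The hunk above moves `find_biggest_contour` from the OpenCV 3.x three-value return of `cv2.findContours` to the 4.x two-value return. A small version-agnostic sketch of that detail, offered as an illustration rather than code from this PR:

```python
import cv2
import numpy as np


def find_contours_compat(binary_image):
    """Return the contour list regardless of the installed OpenCV major version."""
    result = cv2.findContours(binary_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 2.x/4.x return (contours, hierarchy); 3.x returns (image, contours, hierarchy)
    return result[0] if len(result) == 2 else result[1]


if __name__ == "__main__":
    canvas = np.zeros((64, 64), np.uint8)
    cv2.circle(canvas, (32, 32), 10, 255, -1)   # one filled blob
    print(len(find_contours_compat(canvas)))    # expect 1
```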
Binary file modified TrainedModel/model_epoch_100.hdf5
Binary file not shown.
Binary file added TrainedModel/model_epoch_120.hdf5
Binary file not shown.
Binary file added TrainedModel/model_epoch_120.keras
Binary file not shown.
35 changes: 20 additions & 15 deletions WBC-Detection/WBC-Detection.py
@@ -1,27 +1,35 @@
import cv2
import matplotlib
from matplotlib import colors
from matplotlib import pyplot as plt
import numpy as np


def find_biggest_contour(image):
image = image.copy()
s, contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours, hierarchy = cv2.findContours(
        image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
    )  # two-value return, matching the same change in PreProcessor.py
biggest_contour = max(contours, key=cv2.contourArea)
mask = np.zeros(image.shape, np.uint8)
cv2.drawContours(mask, [biggest_contour], -1, 255, -1)
return biggest_contour, mask


def overlay_mask(mask, image):
rgb_mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
img = cv2.addWeighted(rgb_mask, 0.5, image, 0.5, 0)


def show_mask(mask):
plt.figure(figsize=(10, 10))
plt.imshow(mask, cmap='gray')
plt.imshow(mask, cmap="gray")


def show(image):
plt.figure(figsize=(15, 15))
plt.imshow(image, interpolation='nearest')
plt.imshow(image, interpolation="nearest")
plt.show()


im = cv2.imread("4.jpeg")
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

@@ -34,12 +42,12 @@ def show(image):
show(image_blur_hsv)
show(image_red1)
big_contour, mask = find_biggest_contour(image_red1)
overlay_mask(mask,im)
overlay_mask(mask, im)

moments = cv2.moments(mask)
centre_of_mass = (
int(moments['m10'] / moments['m00']),
int(moments['m01'] / moments['m00'])
int(moments["m10"] / moments["m00"]),
int(moments["m01"] / moments["m00"]),
)
image_with_com = im.copy()

@@ -48,15 +56,12 @@ def show(image):
ellipse = cv2.fitEllipse(big_contour)




img =cv2.ellipse(image_with_ellipse, ellipse, (0, 255, 0), 2)
dst= cv2.bitwise_and(im,im,mask=mask)
img = cv2.ellipse(image_with_ellipse, ellipse, (0, 255, 0), 2)
dst = cv2.bitwise_and(im, im, mask=mask)

r = 100.0 / dst.shape[1]
dim = (100, int(dst.shape[0] * r))


resized = cv2.resize(dst, dim, interpolation = cv2.INTER_AREA)
show(image_with_ellipse)

resized = cv2.resize(dst, dim, interpolation=cv2.INTER_AREA)
show(image_with_ellipse)
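The centre of mass above comes from the image moments, centre = (m10/m00, m01/m00). A tiny self-contained check of that formula on a synthetic mask (the rectangle is an illustrative assumption):

```python
import cv2
import numpy as np

mask = np.zeros((100, 100), np.uint8)
mask[20:40, 30:70] = 255                 # white rectangle centred at (x=49.5, y=29.5)
m = cv2.moments(mask)
centre = (int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"]))
print(centre)                            # (49, 29)
```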
Empty file added include_net/__init__.py
Empty file.
64 changes: 64 additions & 0 deletions include_net/model.py
@@ -0,0 +1,64 @@
from keras.api.models import Sequential
from keras.api.layers import (
Conv2D,
Activation,
Dense,
Dropout,
MaxPooling2D,
BatchNormalization,
GlobalAveragePooling2D,
)
from keras.api.regularizers import l2
from keras import backend as K


class IncludeNet:
@staticmethod
def build(width, height, depth, classes=4, size=32, reg=0.001):
        # `classes` defaults to 4 for this dataset; `reg` sets the L2 regularization strength
model = Sequential()
input_shape = (height, width, depth)
if K.image_data_format() == "channels_first":
input_shape = (depth, height, width)

# Conv Block 1 with L2 Regularization
model.add(
Conv2D(
size,
(3, 3),
padding="same",
input_shape=input_shape,
kernel_regularizer=l2(reg),
)
)
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

# Conv Block 2 with L2 Regularization
model.add(Conv2D(size * 2, (3, 3), padding="same", kernel_regularizer=l2(reg)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

# Conv Block 3 with L2 Regularization
model.add(Conv2D(size * 4, (3, 3), padding="same", kernel_regularizer=l2(reg)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5)) # Consider experimenting with dropout rate

# Adding GlobalAveragePooling2D layer
model.add(GlobalAveragePooling2D())

# Dense Layer with L2 Regularization
model.add(Dense(size * 8, kernel_regularizer=l2(reg)))
model.add(Activation("relu"))
model.add(Dropout(0.5)) # Consider experimenting with dropout rate

# Output Layer
model.add(
Dense(classes, kernel_regularizer=l2(reg))
) # Adding L2 regularization here as well
model.add(Activation("softmax"))
return model
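This PR also adds a TrainedModel/model_epoch_120.keras artifact. A hedged sketch of saving and reloading the model in that native format; the 50x50 input size and the local file path are assumptions, not taken from this diff.

```python
import keras
from include_net.model import IncludeNet

model = IncludeNet.build(width=50, height=50, depth=3)       # assumed 50x50 RGB input
model.save("model_epoch_120.keras")                          # native .keras format, as in TrainedModel/
reloaded = keras.models.load_model("model_epoch_120.keras")
reloaded.summary()
```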