Add files via upload

Tanqiu Liu
2017-09-18 16:31:54 -05:00
committed by GitHub
parent 8f4fa6526b
commit a26f473bca
13 changed files with 1275 additions and 0 deletions

9
background_screening.py Normal file

@@ -0,0 +1,9 @@
from util import *
from skimage.morphology import disk
from skimage.filters import rank

def background_screening(img):
    # Flag low-contrast pixels as background: local (max - min) / mean below 0.1.
    img = skimage.img_as_ubyte(img)
    max_img = rank.maximum(img, disk(3))
    min_img = rank.minimum(img, disk(3))
    mean_img = rank.mean(img, disk(3))
    bg = (max_img - min_img) / mean_img < 0.1
    return bg
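
A minimal usage sketch (the example image path is an assumption, borrowed from the __main__ blocks in detect_yeast.py and segment_seed.py); the returned boolean mask is True where local contrast is low, i.e. background:

import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from background_screening import background_screening

img = np.array(Image.open('./examples/example1.tif'))  # assumed example path
bg_mask = background_screening(img)
plt.imshow(bg_mask, cmap='gray')
plt.title('low-contrast (background) pixels')
plt.show()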

78
csgReader.py Normal file

@@ -0,0 +1,78 @@
import scipy.io as sio
import os
from PIL import Image
import matplotlib as mpl
import matplotlib.pyplot as plt
from functools import reduce
import numpy as np
import glob
EXPR_PATH = ['/Volumes/LTQ/QYM data/data/gal nacl02_gal nacl03_gal nacl04_gly nacl02 20160711',
'/Volumes/LTQ/QYM data/data/raf caf4_raf dtt05_gal dtt05_gal nacl05 20160703'
]
def correct_image_path(path_imageBF):
pl = path_imageBF.split(':')
correct_path = '/Volumes/LTQ' + pl[1]
correct_path = reduce(lambda a,b: a+'/'+b,correct_path.split('\\'))
return correct_path
def load_csg(filename_csg):
    print('loading ' + filename_csg)
mat_contents = sio.loadmat(filename_csg)
hh = mat_contents['hh']
val = hh[0, 0]
seg_data = dict()
seg_data['cellsegperim'] = val['cellsegperim']
seg_data['filenameBF'] = val['filenameBF']
seg_data['path_imageBF'] = str(val['pathnameBF'][0])
    # The next line only corrects for drive-name differences between machines.
seg_data['path_imageBF'] = correct_image_path(seg_data['path_imageBF'])
return seg_data
def transform_cellseg(cellseg):
cellsegs = list()
for i in range(cellseg.shape[0]):
seg = cellseg[i, 0]
if(seg.shape[1]==2):
cellsegs.append(seg)
return cellsegs
def get_seg_im(seg_data, idx):
seg_im = dict()
seg_im['cellseg'] = transform_cellseg(seg_data['cellsegperim'][0, idx])
seg_im['filenameBF'] = str(seg_data['filenameBF'][0, idx][0])
image_file = os.path.join(seg_data['path_imageBF'], seg_im['filenameBF'])
seg_im['imageBF'] = np.array(Image.open(image_file))
return seg_im
def extract_xypoint(filepath_csg):
# extract data from a single xypoint's images
seg_data = load_csg(filepath_csg)
n_frame = seg_data['cellsegperim'].shape[1]
seg_ims = list()
    for frame in range(0, n_frame, max(1, n_frame // 5)):  # for each xypoint, sample roughly 5 evenly spaced frames
seg_im = get_seg_im(seg_data, frame)
seg_ims.append(seg_im)
return seg_ims
def extract_expr(expr_path):
csg_paths = glob.glob(expr_path + '/*.csg')
seg_im_list = list()
for csg_file in csg_paths:
seg_ims = extract_xypoint(csg_file)
seg_im_list.extend(seg_ims)
return seg_im_list
def get_seg_im_data():
seg_im_data = list()
for path in EXPR_PATH:
seg_im_data.extend(extract_expr(path))
return seg_im_data
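
A quick sketch of how these loaders chain together; it assumes the /Volumes/LTQ experiment folders listed in EXPR_PATH are mounted. get_seg_im_data() walks every .csg file and returns a list of dicts with keys 'cellseg', 'filenameBF', and 'imageBF':

from csgReader import get_seg_im_data

seg_im_data = get_seg_im_data()  # requires the EXPR_PATH volumes to be available
print('loaded %d annotated frames' % len(seg_im_data))
first = seg_im_data[0]
print(first['filenameBF'], first['imageBF'].shape, len(first['cellseg']))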

173
detect_yeast.py Normal file

@@ -0,0 +1,173 @@
from keras.models import load_model
from keras.utils import np_utils
from util import *
from segment_seed import *
def cut_window(imageBF, center):
r1 = int(center[0] - WINDOW_SHAPE[0] / 2)
r2 = int(center[0] + WINDOW_SHAPE[0] / 2)
c1 = int(center[1] - WINDOW_SHAPE[1] / 2)
c2 = int(center[1] + WINDOW_SHAPE[1] / 2)
return imageBF[r1:r2, c1:c2]
def windows_generator(imageBF, step_size):
r_range = (int(WINDOW_SHAPE[0] / 2) + 1, IMAGE_SHAPE[0] - int(WINDOW_SHAPE[0] / 2) - 1)
c_range = (int(WINDOW_SHAPE[1] / 2) + 1, IMAGE_SHAPE[1] - int(WINDOW_SHAPE[1] / 2) - 1)
for r in range(r_range[0], r_range[1], step_size[0]):
for c in range(c_range[0], c_range[1], step_size[1]):
win = cut_window(imageBF, (r, c))
center = (r, c)
yield(win, center)
def test_win_std(win):
return win.std()/win.mean() < 0.1
def test_stripes_std(win):
r1 = int(WINDOW_SHAPE[0]/3)
r2 = 2 * int(WINDOW_SHAPE[0]/3)
c1 = int(WINDOW_SHAPE[1]/3)
c2 = 2 * int(WINDOW_SHAPE[1]/3)
if(win[r1:r2, :].std()/win[r1:r2, :].mean() < 0.1 or win[:, c1:c2].std()/win[:, c1:c2].mean() < 0.1):
return True
else:
return False
def judge_yeast(win, model_detect):
    # Filter out flat windows using std/mean within the window; if std/mean < 0.1, discard.
    if(test_win_std(win)):
        return False
    # Same idea applied to the horizontal/vertical middle stripes, another way to filter out flat windows.
    elif(test_stripes_std(win)):
        return False
    else:
        im = win.reshape(1, WINDOW_SHAPE[0], WINDOW_SHAPE[1], 1)
        result = model_detect.predict(im)
        # Compare the two class scores rather than testing the softmax output for exact
        # 0.0 / 1.0 values, which would return None for any non-saturated prediction.
        return bool(result[0, 1] > result[0, 0])
def get_neighbor_list(center_list, center, neighbor_list):
pos_up = (center[0]-STEP_SIZE[0], center[1])
pos_down = (center[0]+STEP_SIZE[0], center[1])
pos_left = (center[0], center[1]-STEP_SIZE[1])
pos_right = (center[0], center[1]+STEP_SIZE[1])
poss = [pos_up, pos_down, pos_left, pos_right]
# center_list.remove(center)
neighbor_list.append(center)
for pos in poss:
if(pos in center_list and not pos in neighbor_list):
get_neighbor_list(center_list, pos, neighbor_list)
def get_neighbors(center_list, center):
neighbors = []
get_neighbor_list(center_list, center, neighbors)
return list(set(neighbors))
def merge_multi_detections(center_list):
    # Collapse clusters of neighboring grid detections into their mean position.
    # center_list is mutated while being iterated over; get_center_list() calls this
    # function repeatedly until the number of centers stops changing, which keeps it safe in practice.
    for center in center_list:
        center_list1 = center_list[:]
neighbors = get_neighbors(center_list1, center)
if(len(neighbors) > 1):
for n in neighbors:
center_list.remove(n)
center_list.append(tuple(np.mean(np.array(neighbors), axis=0).astype(np.int32)))
return center_list
def detect_centers(imageBF, model_detect):
center_list = list()
for (win, center) in windows_generator(imageBF, STEP_SIZE):
if(judge_yeast(win, model_detect) == True):
center_list.append(center)
# center_list = merge_multi_detections(center_list)
return center_list
def compute_vertex(win, model_rect):
im = win.reshape(1, WINDOW_SHAPE[0], WINDOW_SHAPE[1], 1)/65535.
vertex = model_rect.predict(im).astype(np.int32)
return (vertex[0, 0], vertex[0, 1], vertex[0, 2], vertex[0, 3])
def get_center_list(imageBF, model_detect):
raw_center_list = detect_centers(imageBF, model_detect)
center_list = raw_center_list #postprocessing of centers e.g. merge
count = len(raw_center_list)
new_count = 0
while(new_count != count):
count = new_count
center_list = merge_multi_detections(center_list)
new_count = len(center_list)
return center_list
def get_vertex_list(imageBF, model_detect, model_rect):
center_list = get_center_list(imageBF, model_detect)
vertex_list = list()
for center in center_list:
win = cut_window(imageBF, center)
vertex = compute_vertex(win, model_rect)
true_vertex = (vertex[0]+center[0]-WINDOW_SHAPE[0]//2,
vertex[1]+center[0]-WINDOW_SHAPE[0]//2,
vertex[2]+center[1]-WINDOW_SHAPE[1]//2,
vertex[3]+center[1]-WINDOW_SHAPE[1]//2,)
vertex_list.append(true_vertex)
return vertex_list
def get_polygon_list(image, center_list):
(slopes, gradx, grady, slopes2, grad2x, grad2y, gradxy) = findslopes(image)
polygon_list = list()
for i in range(len(center_list)):
print("processing %s" %i)
polygon = get_polygon(image, gradx, grady, center_list[i])
polygon_list.append(polygon)
return polygon_list
def plot_detection_center(imageBF, center_list):
colormap = mpl.cm.gray
plt.imshow(imageBF, cmap=colormap)
for center in center_list:
plt.plot(center[1], center[0], 'r*')
plt.xlim(0, 512)
plt.ylim(512, 0)
plt.show()
def plot_detection_rect(imageBF, vertex_list):
colormap = mpl.cm.gray
plt.imshow(imageBF, cmap=colormap)
for (r1,r2,c1,c2) in vertex_list:
plt.plot(np.ones(r2-r1)*c1, np.array(range(r1, r2)), 'r')
plt.plot(np.ones(r2-r1)*c2, np.array(range(r1, r2)), 'r')
plt.plot(np.array(range(c1, c2)), np.ones(c2-c1)*r1, 'r')
plt.plot(np.array(range(c1, c2)), np.ones(c2-c1)*r2, 'r')
plt.xlim(0, IMAGE_SHAPE[1])
plt.ylim(IMAGE_SHAPE[0], 0)
plt.show()
def plot_polygons(img, polygon_list):
plt.imshow(img, cmap=mpl.cm.gray)
for i in range(len(polygon_list)):
plt.plot(polygon_list[i][:,1], polygon_list[i][:, 0], 'r')
plt.xlim(0, IMAGE_SHAPE[1])
plt.ylim(IMAGE_SHAPE[0], 0)
plt.show()
if __name__ == '__main__':
image = np.array(Image.open('./examples/example1.tif'))
model_detect = load_model('./models/CNN_detect6.h5')
center_list = get_center_list(image, model_detect)
polygon_list = get_polygon_list(image, center_list)
plot_polygons(image, polygon_list)
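
The __main__ above runs the seed-based polygon segmentation; for the bounding-box variant, a sketch could look like the following, assuming a trained rectangle-regression model saved as './CNN_rect2.h5' (the MODEL_SAVE_PATH used in train_CNN_rect.py):

from keras.models import load_model
from detect_yeast import get_vertex_list, plot_detection_rect
import numpy as np
from PIL import Image

image = np.array(Image.open('./examples/example1.tif'))
model_detect = load_model('./models/CNN_detect6.h5')  # detection model, as in the __main__ above
model_rect = load_model('./CNN_rect2.h5')             # assumed path, from train_CNN_rect.py
vertex_list = get_vertex_list(image, model_detect, model_rect)
plot_detection_rect(image, vertex_list)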

14
loadseg.py Normal file

@@ -0,0 +1,14 @@
import scipy.io as sio
def load_seg_im_data(filename):
mat_contents = sio.loadmat(filename)
data = mat_contents['data']
seg_im_data = list()
for idx in range(data.shape[1]):
data_im = data[0, idx][0, 0]
seg_im = dict()
seg_im['cellseg'] = data_im['cellseg'].transpose()
seg_im['filenameBF'] = str(data_im['filenameBF'][0])
seg_im['imageBF'] = data_im['imageBF']
seg_im_data.append(seg_im)
return seg_im_data
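
A short check of the loader, assuming the exported 'seg_im_data.mat' referenced in prepare_data.py is available; plot_cellseg from util.py overlays each stored contour on its brightfield image:

from loadseg import load_seg_im_data
from util import plot_cellseg

seg_im_data = load_seg_im_data('seg_im_data.mat')  # assumed MATLAB export
print('%d annotated frames' % len(seg_im_data))
plot_cellseg(seg_im_data[0])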

327
prepare_data.py Normal file

@@ -0,0 +1,327 @@
from util import *
import glob
from skimage.morphology import binary_dilation
# Raw string literals so the backslashes in these Windows paths are not read as escape sequences.
EXPR_PATH = [r'I:\QYM data\data\gal nacl02_gal nacl03_gal nacl04_gly nacl02 20160711',
             r'I:\QYM data\data/raf caf4_raf dtt05_gal dtt05_gal nacl05 20160703',
             r'I:\QYM data\data\Gly H2O2009_DTT02_DTT05_Gal004 dtt075 20160627',
             r'I:\QYM data\data\SD_NaCl1_Eth_Glu0005 20160612',
             r'I:\QYM data\data/002_004Gal 20160513',
             r'I:\QYM data\data\SD-DTT075_H2O203_Glu005 20160511',
             r'I:\QYM data\data/ura1-his2_3_5_6_8 20160505.nd2'
             ]
def extract_xypoint(filepath_csg):
# extract data from a single xypoint's images
seg_data = load_csg(filepath_csg)
n_frame = seg_data['cellsegperim'].shape[1]
seg_ims = list()
    for frame in range(0, n_frame, max(1, n_frame // 5)):  # for each xypoint, sample roughly 5 evenly spaced frames
seg_im = get_seg_im(seg_data, frame)
seg_ims.append(seg_im)
return seg_ims
def extract_expr(expr_path):
csg_paths = glob.glob(expr_path + '/*.csg')
seg_im_list = list()
for csg_file in csg_paths:
seg_ims = extract_xypoint(csg_file)
seg_im_list.extend(seg_ims)
return seg_im_list
# Note: this .csg-based loader is shadowed by the .mat-based load_seg_im_data(filename) defined just below.
def load_seg_im_data():
seg_im_data = list()
for path in EXPR_PATH:
seg_im_data.extend(extract_expr(path))
return seg_im_data
def load_seg_im_data(filename):
mat_contents = sio.loadmat(filename)
data = mat_contents['data']
seg_im_data = list()
for idx in range(data.shape[1]):
data_im = data[0, idx][0, 0]
seg_im = dict()
seg_im['cellseg'] = data_im['cellseg'].transpose()
seg_im['filenameBF'] = str(data_im['filenameBF'][0])
seg_im['imageBF'] = data_im['imageBF']
seg_im_data.append(seg_im)
return seg_im_data
# ----------------------------------------------------
def reduce_image_size():
pass
def cut_window(image, center):
r1 = int(center[0] - WINDOW_SHAPE[0] / 2)
r2 = int(center[0] + WINDOW_SHAPE[0] / 2)
c1 = int(center[1] - WINDOW_SHAPE[1] / 2)
c2 = int(center[1] + WINDOW_SHAPE[1] / 2)
return image[r1:r2, c1:c2]
def gen_pos(seg_im):
r_range = (int(WINDOW_SHAPE[0] / 2) + 1, IMAGE_SHAPE[0] - int(WINDOW_SHAPE[0] / 2) - 1)
c_range = (int(WINDOW_SHAPE[1] / 2) + 1, IMAGE_SHAPE[1] - int(WINDOW_SHAPE[1] / 2) - 1)
pos_data = list()
for idx in range(len(seg_im['cellseg'])):
seg = seg_im['cellseg'][idx, 0]
center = find_center(seg)
if(center[0] > r_range[0] and center[0] < r_range[1] and center[1] > c_range[0] and center[1] < c_range[1]):
window = cut_window(seg_im['imageBF'], center)
pos_data.append(window.reshape(1, WINDOW_SHAPE[0] * WINDOW_SHAPE[1]))
feat = np.vstack(tuple(pos_data))
target = np.ones((feat.shape[0], 1))
return np.hstack((target, feat))
def gen_pos_example(seg_im_data):
tables = list()
for seg_im in seg_im_data:
pos_table = gen_pos(seg_im)
tables.append(pos_table)
return np.vstack(tuple(tables)).astype(np.int32)
"""
seg_im_data = load_seg_im_data('seg_im_data.mat')
pos_data_table = gen_pos_example(seg_im_data)
np.savetxt('positive_data.csv', pos_data_table, fmt='%d', header = 'positive_examples, first col is target')
"""
def get_cor_range(seg):
r1 = np.min(seg[:, 0])
r2 = np.max(seg[:, 0])
c1 = np.min(seg[:, 1])
c2 = np.max(seg[:, 1])
return (r1, r2, c1, c2)
def gen_mask(seg_im):
    mask = np.zeros((IMAGE_SHAPE[0], IMAGE_SHAPE[1]))
for idx in range(len(seg_im['cellseg'])):
seg = seg_im['cellseg'][idx, 0]
(r1, r2, c1, c2) = get_cor_range(seg)
mask[r1:r2, c1:c2] = 1
return mask.astype(np.int32)
def get_pos_mask(seg_im):
    pos_mask = np.zeros((IMAGE_SHAPE[0], IMAGE_SHAPE[1]))
for idx in range(len(seg_im['cellseg'])):
seg = seg_im['cellseg'][idx, 0]
(r, c) = find_center(seg)
pos_mask[r-int(STEP_SIZE[0]/2):r+int(STEP_SIZE[0]/2), c-int(STEP_SIZE[1]/2):c+int(STEP_SIZE[1]/2)] = 1
pos_mask = pos_mask.astype(np.int32)
return pos_mask
def gen_neg(seg_im):
r_range = (int(WINDOW_SHAPE[0] / 2) + 1, IMAGE_SHAPE[0] - int(WINDOW_SHAPE[0] / 2) - 1)
c_range = (int(WINDOW_SHAPE[1] / 2) + 1, IMAGE_SHAPE[1] - int(WINDOW_SHAPE[1] / 2) - 1)
neg_data = list()
mask = get_pos_mask(seg_im)
num_points = int(len(seg_im['cellseg']) * 2)
count = 0
while(count < num_points):
rand_cor = ((r_range[1] - r_range[0]) * np.random.random() + r_range[0],
(c_range[1] - c_range[0]) * np.random.random() + c_range[0])
rand_cor = (int(rand_cor[0]), int(rand_cor[1]))
if(mask[rand_cor[0], rand_cor[1]] != 1):
window = cut_window(seg_im['imageBF'], rand_cor)
if((np.max(window)-np.min(window))/np.mean(window) > 0.1):
neg_data.append(window.reshape(1, WINDOW_SHAPE[0] * WINDOW_SHAPE[1]))
count += 1
feat = np.vstack(tuple(neg_data))
target = np.zeros((feat.shape[0], 1))
return np.hstack((target, feat))
def gen_neg_example(seg_im_data):
tables = list()
idx = 1
for seg_im in seg_im_data:
print('processing %s/%s...' %(idx, len(seg_im_data)))
neg_table = gen_neg(seg_im)
tables.append(neg_table)
idx += 1
return np.vstack(tuple(tables)).astype(np.int32)
"""
seg_im_data = load_seg_im_data('seg_im_data.mat')
neg_data_table = gen_neg_example(seg_im_data)
np.savetxt('negative_not_empty4.csv', neg_data_table, fmt='%d', header = 'negative_examples, first col is target')
"""
"""
id_list = []
for idx in range(Xy.shape[0]):
im = Xy[idx, 1:]
if((np.max(im) - np.min(im))/np.mean(im) < 0.1):
id_list.append(idx)
id_list2 = list(set(range(X_neg.shape[0])) - set(id_list))
"""
def get_neg_mask(seg_im):
    pos_mask = np.zeros((IMAGE_SHAPE[0], IMAGE_SHAPE[1]))
for idx in range(len(seg_im['cellseg'])):
seg = seg_im['cellseg'][idx, 0]
(r, c) = find_center(seg)
pos_mask[r-int(STEP_SIZE[0]/2):r+int(STEP_SIZE[0]/2), c-int(STEP_SIZE[1]/2):c+int(STEP_SIZE[1]/2)] = 1
pos_mask = pos_mask.astype(np.int32)
    # Cast the boolean dilation to int before subtracting; newer numpy versions forbid boolean subtraction.
    neg_mask = binary_dilation(gen_mask(seg_im), np.ones((WINDOW_SHAPE[0], WINDOW_SHAPE[1]))).astype(np.int32) - pos_mask
return neg_mask
def gen_pos2(seg_im):
r_range = (int(WINDOW_SHAPE[0]/2+STEP_SIZE[0]/2)+1, IMAGE_SHAPE[0] - int(WINDOW_SHAPE[0]/2+STEP_SIZE[0]/2)-1)
c_range = (int(WINDOW_SHAPE[1]/2+STEP_SIZE[1]/2)+1, IMAGE_SHAPE[1] - int(WINDOW_SHAPE[1]/2+STEP_SIZE[1]/2)-1)
pos_data = list()
for idx in range(len(seg_im['cellseg'])):
seg = seg_im['cellseg'][idx, 0]
center = find_center(seg)
if (center[0] > r_range[0] and center[0] < r_range[1] and center[1] > c_range[0] and center[1] < c_range[1]):
for ii in range(2):
cor = (STEP_SIZE[0] * np.random.random() - STEP_SIZE[0] / 2, STEP_SIZE[1] * np.random.random() - STEP_SIZE[1] / 2)
cor = (center[0]+int(cor[0]), center[1]+int(cor[1]))
window = cut_window(seg_im['imageBF'], cor)
pos_data.append(window.reshape(1, WINDOW_SHAPE[0] * WINDOW_SHAPE[1]))
feat = np.vstack(tuple(pos_data))
target = np.ones((feat.shape[0], 1))
return np.hstack((target, feat))
def gen_pos_example2(seg_im_data):
tables = list()
idx = 1
for seg_im in seg_im_data:
pos_table = gen_pos2(seg_im)
print('processing %s/%s...' %(idx, len(seg_im_data)))
tables.append(pos_table)
idx += 1
return np.vstack(tuple(tables)).astype(np.int32)
"""
seg_im_data = load_seg_im_data('seg_im_data.mat')
pos_data_table = gen_pos_example2(seg_im_data)
np.savetxt('positive_data3.csv', pos_data_table, fmt='%d', header = 'positive_examples3, first col is target')
"""
def gen_neg2(seg_im):
r_range = (int(WINDOW_SHAPE[0] / 2) + 1, IMAGE_SHAPE[0] - int(WINDOW_SHAPE[0] / 2) - 1)
c_range = (int(WINDOW_SHAPE[1] / 2) + 1, IMAGE_SHAPE[1] - int(WINDOW_SHAPE[1] / 2) - 1)
neg_data = list()
neg_mask = get_neg_mask(seg_im)
num_points = int(len(seg_im['cellseg']) * 3)
count = 0
for r in range(r_range[0], r_range[1], STEP_SIZE[0]*3):
for c in range(c_range[0], c_range[1], STEP_SIZE[1]*3):
if (neg_mask[r, c] != 1):
window = cut_window(seg_im['imageBF'], (r, c))
neg_data.append(window.reshape(1, WINDOW_SHAPE[0] * WINDOW_SHAPE[1]))
feat = np.vstack(tuple(neg_data))
target = np.zeros((feat.shape[0], 1))
return np.hstack((target, feat))
def gen_neg_example2(seg_im_data):
tables = list()
idx = 1
seg_im_data = seg_im_data[0:100]
for seg_im in seg_im_data:
print('processing %s/%s...' %(idx, len(seg_im_data)))
neg_table = gen_neg2(seg_im)
tables.append(neg_table)
idx += 1
return np.vstack(tuple(tables)).astype(np.int32)
"""
seg_im_data = load_seg_im_data('seg_im_data.mat')
neg_data_table = gen_neg_example2(seg_im_data)
np.savetxt('negative_data3.csv', neg_data_table, fmt='%d', header = 'negative_examples2, first col is target')
"""
def get_vertex(seg):
r1 = np.min(seg[:, 0])
r2 = np.max(seg[:, 0])
c1 = np.min(seg[:, 1])
c2 = np.max(seg[:, 1])
return (r1, r2, c1, c2)
def gen_rectangle(seg_im):
r_range = (int(WINDOW_SHAPE[0]/2+STEP_SIZE[0]/2)+1, IMAGE_SHAPE[0] - int(WINDOW_SHAPE[0]/2+STEP_SIZE[0]/2)-1)
c_range = (int(WINDOW_SHAPE[1]/2+STEP_SIZE[1]/2)+1, IMAGE_SHAPE[1] - int(WINDOW_SHAPE[1]/2+STEP_SIZE[1]/2)-1)
rect_data = list()
for idx in range(len(seg_im['cellseg'])):
seg = seg_im['cellseg'][idx, 0]
center = find_center(seg)
vertex = get_vertex(seg)
if (center[0] > r_range[0] and center[0] < r_range[1] and center[1] > c_range[0] and center[1] < c_range[1]):
for ii in range(2):
cor = (STEP_SIZE[0] * np.random.random() - STEP_SIZE[0] / 2, STEP_SIZE[1] * np.random.random() - STEP_SIZE[1] / 2)
cor = (center[0]+int(cor[0]), center[1]+int(cor[1]))
window = cut_window(seg_im['imageBF'], cor)
new_vertex = np.array([vertex[i]-cor[i//2]+WINDOW_SHAPE[i//2]//2 for i in range(4)]).astype(np.int32)
data = window.reshape( WINDOW_SHAPE[0] * WINDOW_SHAPE[1])
line = np.concatenate((new_vertex,data))
rect_data.append(line)
return np.vstack(tuple(rect_data))
def gen_rect_example(seg_im_data):
tables = list()
idx = 1
for seg_im in seg_im_data:
rect_table = gen_rectangle(seg_im)
print('processing %s/%s...' %(idx, len(seg_im_data)))
tables.append(rect_table)
idx += 1
return np.vstack(tuple(tables)).astype(np.int32)
"""
seg_im_data = load_seg_im_data('seg_im_data.mat')
rect_data_table = gen_rect_example(seg_im_data)
np.savetxt('rect_data.csv', rect_data_table, fmt='%d', header = 'rect_examples, first 4 cols are target')
"""
def gen_mask_data(seg_im):
r_range = (int(WINDOW_SHAPE[0]/2+STEP_SIZE[0]/2)+1, IMAGE_SHAPE[0] - int(WINDOW_SHAPE[0]/2+STEP_SIZE[0]/2)-1)
c_range = (int(WINDOW_SHAPE[1]/2+STEP_SIZE[1]/2)+1, IMAGE_SHAPE[1] - int(WINDOW_SHAPE[1]/2+STEP_SIZE[1]/2)-1)
mask_data = list()
for idx in range(len(seg_im['cellseg'])):
seg = seg_im['cellseg'][idx, 0]
center = find_center(seg)
if (center[0] > r_range[0] and center[0] < r_range[1] and center[1] > c_range[0] and center[1] < c_range[1]):
for ii in range(2):
cor = (STEP_SIZE[0] * np.random.random() - STEP_SIZE[0] / 2, STEP_SIZE[1] * np.random.random() - STEP_SIZE[1] / 2)
cor = (center[0]+int(cor[0]), center[1]+int(cor[1]))
window = cut_window(seg_im['imageBF'], cor)
new_seg = (seg - center + np.array([WINDOW_SHAPE[0]//2,WINDOW_SHAPE[1]//2])).astype(np.int32)
data = window.reshape( WINDOW_SHAPE[0] * WINDOW_SHAPE[1])
mask = poly2mask(new_seg[:,0], new_seg[:,1], WINDOW_SHAPE).astype(np.int32)
line = np.concatenate((mask.reshape(WINDOW_SHAPE[0] * WINDOW_SHAPE[1]), data))
mask_data.append(line)
return np.vstack(tuple(mask_data))
def gen_mask_example(seg_im_data):
tables = list()
idx = 1
for seg_im in seg_im_data:
mask_table = gen_mask_data(seg_im)
print('processing %s/%s...' %(idx, len(seg_im_data)))
tables.append(mask_table)
idx += 1
return np.vstack(tuple(tables)).astype(np.int32)
"""
seg_im_data = load_seg_im_data('seg_im_data.mat')
mask_data_table = gen_mask_example(seg_im_data)
np.savetxt('./data/mask_data.csv', mask_data_table, fmt='%d', header = 'mask_examples, first 2500 cols are target')
"""

3
prepare_data2.py Normal file

@@ -0,0 +1,3 @@
from util import *
import glob
from skimage.morphology import binary_dilation

178
segment_seed.py Normal file

@@ -0,0 +1,178 @@
from util import *
import cv2
from scipy.signal import savgol_filter
from scipy.spatial import ConvexHull
"""
This file contains functions used to segment the contour of single cell with a seed point.
The principal algorithm is :
1. generate a series of rays of different direnctions from the seed of the cell detected,
2. compute several images which include pix_image, gradx, grady, grad_from_center...
3. get the values of each image on all points on the rays, which is named tab, pixtab, gxtab, gytab...
4. find out the optimal pathway(dynamic programming) through all of the rays which best represents
the cell contour (manually defined scoring function)
5. filter the optimal pathway and get the polygon/mask of the cell
"""
NRAY = 100 # the result is quite sensitive to this parameter
RHO = 30 # the max distance of rays from the center
RHO_SKIP = 5 # the min distance of rays from the center
AOFF = 1/2/np.pi
MINCELLRADIUS = 5
MAXCELLRADIUS = 50
def findslopes(img):
"""
    use different kernels to filter the raw image and obtain the gradient and second-order gradient images
"""
img = img.astype(np.float32)
DY = np.array([[-1,-1,-1],[0, 0, 0],[1, 1, 1]]) * 1/6
DX = DY.transpose()
gradx = cv2.filter2D(src=img, ddepth=-1, kernel=DX)
grady = cv2.filter2D(src=img, ddepth=-1, kernel=DY)
D2Y = np.array([[0.5, 1, 0.5], [-1, -2, -1], [0.5, 1, 0.5]]) * 0.5
D2X = D2Y.transpose()
DXY = np.array([[-1, 0, 1], [0, 0, 0], [1, 0, -1]]) * 1/4
grad2x = cv2.filter2D(src=img, ddepth=-1, kernel=D2X)
grad2y = cv2.filter2D(src=img, ddepth=-1, kernel=D2Y)
gradxy = cv2.filter2D(src=img, ddepth=-1, kernel=DXY)
slopes = gradx**2 + grady**2
slopes2 = grad2x**2 + grad2y**2 + 2 * gradxy**2
return (slopes, gradx, grady, slopes2, grad2x, grad2y, gradxy)
def get_rays(nray, rho, rho_skip):
"""
generate a list of rays which start from the seed point
"""
aoff = 1/2/np.pi
rays = list()
for i in range(nray):
rays.append(np.zeros((rho - rho_skip,2)).astype(np.float32))
for j in range(rho_skip, RHO):
[x, y] = pol2cart(2*np.pi*i/nray+aoff, j)
x = round(x)
y = round(y)
rays[i][j - rho_skip, :] = [x, y]
return rays
def findminpath(tab, gxtab, gytab, pixtab):
"""
    Dynamic programming to find an optimal path through the rays that optimizes
    the defined scoring function, yielding the points that best represent the cell
    contour.
"""
    pathdist = 2 # how many points up or down on the previous ray each point can connect to
    pathdist_penalty = 0.3 # penalty on the step size between consecutive rays
    pathpix_penalty = 2 # penalty on the pixel-value difference between a point and its predecessor
nray = tab.shape[1]
#tab = np.hstack((tab,tab[:, 0].reshape(tab.shape[0], 1)))
#pixtab = np.hstack((pixtab,pixtab[:, 0].reshape(pixtab.shape[0], 1)))
#gxtab = np.hstack((gxtab,gxtab[:, 0].reshape(gxtab.shape[0], 1)))
#gytab = np.hstack((gytab,gytab[:, 0].reshape(gytab.shape[0], 1)))
    tab = np.hstack((tab,tab,tab)) # stack the tables three times horizontally so the path and the later smoothing can wrap around the ray circle
pixtab = np.hstack((pixtab,pixtab,pixtab))
gxtab = np.hstack((gxtab,gxtab,gxtab))
gytab = np.hstack((gytab,gytab,gytab))
    tab = (tab - tab.min()) / (tab.max() - tab.min()) # normalize the tab matrix
    pixtab = (pixtab - pixtab.min()) / (pixtab.max() - pixtab.min()) * -1 # we want the bright (white) contour of the cell, so pixtab is negated
# tab = tab / np.median(tab)
# pixtab = pixtab / np.median(pixtab)
path = np.zeros(tab.shape)
path[:, 0] = np.array(range(0, tab.shape[0]))
score = np.zeros(tab.shape)
score[:, 1] = tab[:, 1]
for i in range(1, tab.shape[1]):
for j in range(tab.shape[0]):
mins = np.Inf # record the min value of the ray
minat = 0
for k in range(-pathdist, pathdist+1):
if(0 <= (j+k) and (j+k) < tab.shape[0]):
s = pixtab[j, i]
pixdiff = abs(pixtab[j, i] - pixtab[j+k, i-1])
s += pixdiff * pathpix_penalty # two kinds of penalty
s += abs(k) * pathdist_penalty
s += score[j+k, i-1]
if(s < mins):
mins = s
minat = j + k
path[j, i] = minat
score[j, i]= mins
start = int(np.argmin(score[:, -1]))
path = path.astype(np.int32)
minpath = [start]
for i in range(tab.shape[1]-1, 0, -1):
minpath.append(path[minpath[-1], i])
minpath = minpath[::-1]
# print(len(minpath))
minpath = savgol_filter(minpath, 15, 3) # apply a Savitzky-Golay filter to the raw minpath signal
minpath = minpath[nray:nray*2] # cut the middle part of minpath whose length is nray
return np.array(minpath).astype(np.int32)
def get_polygon(img, gradx, grady, seed):
"""
    Take the raw image, its gradient images, and the seed point as parameters;
    return the polygon coordinates of the detected cell contour.
"""
rays = get_rays(NRAY, RHO, RHO_SKIP)
# minCellSize = np.pi * MINCELLRADIUS**2
# maxCellSize = np.pi * MAXCELLRADIUS**2
assert 0<seed[0]<img.shape[0] and 0<seed[1]<img.shape[1]
(cr,cc) = seed # cr, cc is the coordinates of the seed
[ac, ar] = np.meshgrid(np.array(range(img.shape[0])), np.array(range(img.shape[1])))
    cac = (ac-cc).astype(np.float32) # cac, car are the signed column/row offsets of each pixel from the seed
car = (ar-cr).astype(np.float32)
with np.errstate(all='ignore'):
        unitx = np.cos(np.arctan(np.abs(car/cac))) * np.sign(cac) # unitx, unity are the cosines of the angle between each pixel's offset from the seed and the column/row axes
unity = np.cos(np.arctan(np.abs(cac/car))) * np.sign(car)
    dirslopes = gradx * unitx + grady * unity # dirslopes is the gradient projected onto the radial direction from the seed
tab = np.zeros((RHO - RHO_SKIP, NRAY))
gxtab = np.zeros((RHO - RHO_SKIP, NRAY))
gytab = np.zeros((RHO - RHO_SKIP, NRAY))
pixtab = np.zeros((RHO - RHO_SKIP, NRAY))
for i in range(NRAY):
for j in range(RHO-RHO_SKIP):
pr = int(cr + rays[i][j, 0])
pc = int(cc + rays[i][j, 1])
tab[j, i] = dirslopes[pr, pc]
gxtab[j, i] = gradx[pr, pc]
gytab[j, i] = grady[pr, pc]
pixtab[j, i] = img[pr, pc]
minpath = findminpath(tab, gxtab, gytab, pixtab) # get the minpath
polygon = np.zeros((NRAY, 2))
for i in range(NRAY):
polygon[i, 0] = cr + rays[i][minpath[i], 0]
polygon[i, 1] = cc + rays[i][minpath[i], 1]
#hull = ConvexHull(polygon)
#polygon = polygon[hull.vertices]
#print(polygon.shape[0])
return polygon
def plot_polygon(img, polygon):
plt.imshow(img, cmap=mpl.cm.gray)
plt.plot(polygon[:,1], polygon[:, 0], 'r')
plt.show()
if __name__ == '__main__':
img = np.array(Image.open('./examples/example1.tif'))
seed = [90, 300]
(slopes, gradx, grady, slopes2, grad2x, grad2y, gradxy) = findslopes(img)
polygon = get_polygon(img, gradx, grady, seed)
plot_polygon(img, polygon)

32
temp.py Normal file

@@ -0,0 +1,32 @@
from util import *
"""将positive data中各种size的cell均衡化,避免某一类cell过多"""
NB_EACH_BIN = 1000
filename = './data/rect_data.csv'
data = np.loadtxt(filename, dtype=np.int32, comments='#', delimiter=' ')
np.random.shuffle(data)
X = data[:,4:]
y = data[:,0:4]
area = np.array([(yy[1]-yy[0])*(yy[3]-yy[2]) for yy in list(y)])
sepes = np.histogram(area, bins=20)[1]
counts = np.histogram(area, bins=20)[0]
data2 = list()
for idx in range(20):
data2.append([])
for idx in range(data.shape[0]):
for ii in range(20):
if(sepes[ii] <= area[idx] < sepes[ii+1]):
data2[ii].append(data[idx])
data3 = []
for idx_bin in range(20):
if(len(data2[idx_bin]) != 0):
data3.append(np.vstack(tuple(data2[idx_bin])))
for idx_bin in range(len(data3)):
if(data3[idx_bin].shape[0] > NB_EACH_BIN):
np.random.shuffle(data3[idx_bin])
data3[idx_bin] = data3[idx_bin][0:NB_EACH_BIN, :]
data4 = np.vstack(tuple(data3))
np.savetxt('./data/rect_data2.csv', data4, fmt='%d', header = 'rect_examples, first 4 cols are target')

74
train_CNN.py Normal file

@@ -0,0 +1,74 @@
"""
related to CNN_model 1-6
"""
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from util import *
BATCH_SIZE = 100
NB_CLASSES = 2
NB_EPOCH = 5
img_rows, img_cols = (50, 50)
INPUT_SHAPE = (img_rows, img_cols, 1)
MODEL_SAVE_PATH = './CNN_model7.h5'
print("loading data...")
X, y = load_data(['./data/positive_data3.csv', './data/negative_not_empty4.csv'])
train_size = int(0.9 * X.shape[0])
X_train = X[0:train_size, :]
y_train = y[0:train_size]
X_test = X[train_size:, :]
y_test = y[train_size:]
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.transpose(0, 2, 3, 1)
X_test = X_test.transpose(0, 2, 3, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 65535.
X_test /= 65535.
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)
# define the CNN
model = Sequential()
model.add(Convolution2D(nb_filter=16, nb_row=3, nb_col=3, border_mode='valid', input_shape=INPUT_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(nb_filter=32, nb_row=3, nb_col=3, border_mode='valid'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(nb_filter=32, nb_row=3, nb_col=3, border_mode='valid'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1000))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=BATCH_SIZE, nb_epoch=NB_EPOCH, shuffle=True,
verbose=1, validation_split=0.1)
model.save(MODEL_SAVE_PATH)
"""
model = load_model(MODEL_SAVE_PATH)
"""
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
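
Once saved, the detector can be plugged straight into the sliding-window pipeline from detect_yeast.py. A sketch, assuming the freshly trained './CNN_model7.h5' is the model to use for detection:

from keras.models import load_model
from detect_yeast import get_center_list, plot_detection_center
import numpy as np
from PIL import Image

image = np.array(Image.open('./examples/example1.tif'))
model_detect = load_model('./CNN_model7.h5')  # MODEL_SAVE_PATH from the script above
centers = get_center_list(image, model_detect)
plot_detection_center(image, centers)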

74
train_CNN2.py Normal file

@@ -0,0 +1,74 @@
"""
related to CNN_detect 7,8
"""
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from util import *
BATCH_SIZE = 100
NB_CLASSES = 2
NB_EPOCH = 5
img_rows, img_cols = (50, 50)
INPUT_SHAPE = (img_rows, img_cols, 1)
MODEL_SAVE_PATH = './CNN_model9.h5'
print("loading data...")
X, y = load_data(['./data/positive_data3.csv', './data/negative_not_empty4.csv'])
train_size = int(0.9 * X.shape[0])
X_train = X[0:train_size, :]
y_train = y[0:train_size]
X_test = X[train_size:, :]
y_test = y[train_size:]
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.transpose(0, 2, 3, 1)
X_test = X_test.transpose(0, 2, 3, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 65535.
X_test /= 65535.
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)
# define the CNN
model = Sequential()
model.add(Convolution2D(nb_filter=16, nb_row=3, nb_col=3, border_mode='valid', input_shape=INPUT_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(nb_filter=32, nb_row=3, nb_col=3, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(nb_filter=32, nb_row=3, nb_col=3, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=BATCH_SIZE, nb_epoch=NB_EPOCH, shuffle=True,
verbose=1, validation_split=0.1)
model.save(MODEL_SAVE_PATH)
"""
model = load_model(MODEL_SAVE_PATH)
"""
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])

78
train_CNN_mask.py Normal file

@@ -0,0 +1,78 @@
"""
related to CNN_mask
"""
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from util import *
from prepare_data import *
def load_mask_data():
Xy_list = list()
seg_im_data = load_seg_im_data('seg_im_data.mat')
Xy = gen_mask_example(seg_im_data)
np.random.shuffle(Xy)
X = Xy[:, 2500:]
y = Xy[:, 0:2500]
X = X.reshape(X.shape[0], WINDOW_SHAPE[0], WINDOW_SHAPE[1])
return X, y
BATCH_SIZE = 100
NB_EPOCH = 10
NB_OUTPUT = 2500
img_rows, img_cols = (50, 50)
INPUT_SHAPE = (img_rows, img_cols, 1)
MODEL_SAVE_PATH = './CNN_mask.h5'
print("loading data...")
X, y = load_mask_data()
train_size = int(0.9 * X.shape[0])
X_train = X[0:train_size, :]
y_train = y[0:train_size]
X_test = X[train_size:, :]
y_test = y[train_size:]
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.transpose(0, 2, 3, 1)
X_test = X_test.transpose(0, 2, 3, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 65535.
X_test /= 65535.
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
Y_train = y_train
Y_test = y_test
# define the CNN
model = Sequential()
model.add(Convolution2D(nb_filter=16, nb_row=3, nb_col=3, border_mode='valid', input_shape=INPUT_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(nb_filter=32, nb_row=3, nb_col=3, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(nb_filter=32, nb_row=3, nb_col=3, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NB_OUTPUT))
model.add(Activation('tanh'))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, Y_train, batch_size=BATCH_SIZE, nb_epoch=NB_EPOCH, shuffle=True,
verbose=1, validation_split=0.1)
model.save(MODEL_SAVE_PATH)
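
The mask network predicts a flat 2500-vector per 50x50 window. A sketch of visualizing one prediction, meant to run at the end of the script above (it reuses model, X_test, and WINDOW_SHAPE from that scope; the 0.5 threshold is an assumption, since the tanh output is never binarized in this commit):

pred = model.predict(X_test[:1])                      # shape (1, 2500)
mask = (pred.reshape(WINDOW_SHAPE[0], WINDOW_SHAPE[1]) > 0.5).astype(np.int32)
plt.subplot(1, 2, 1); plt.imshow(X_test[0, :, :, 0], cmap='gray'); plt.title('window')
plt.subplot(1, 2, 2); plt.imshow(mask, cmap='gray'); plt.title('predicted mask')
plt.show()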

73
train_CNN_rect.py Normal file

@@ -0,0 +1,73 @@
"""
related to CNN_rect
"""
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from util import *
BATCH_SIZE = 100
NB_EPOCH = 10
NB_OUTPUT = 4
img_rows, img_cols = (50, 50)
INPUT_SHAPE = (img_rows, img_cols, 1)
MODEL_SAVE_PATH = './CNN_rect2.h5'
print("loading data...")
X, y = load_rect_data(['./data/rect_data2.csv'])
train_size = int(0.9 * X.shape[0])
X_train = X[0:train_size, :]
y_train = y[0:train_size]
X_test = X[train_size:, :]
y_test = y[train_size:]
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.transpose(0, 2, 3, 1)
X_test = X_test.transpose(0, 2, 3, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 65535.
X_test /= 65535.
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
Y_train = y_train
Y_test = y_test
# define the CNN
model = Sequential()
model.add(Convolution2D(nb_filter=16, nb_row=3, nb_col=3, border_mode='valid', input_shape=INPUT_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(nb_filter=32, nb_row=3, nb_col=3, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(nb_filter=32, nb_row=3, nb_col=3, border_mode='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NB_OUTPUT))
model.add(Activation('linear'))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, Y_train, batch_size=BATCH_SIZE, nb_epoch=NB_EPOCH, shuffle=True,
verbose=1, validation_split=0.1)
model.save(MODEL_SAVE_PATH)
"""
model = load_model(MODEL_SAVE_PATH)
"""
#score = model.evaluate(X_test, Y_test, verbose=0)
#print('Test score:', score[0])
#print('Test accuracy:', score[1])

162
util.py Normal file

@@ -0,0 +1,162 @@
import scipy.io as sio
import os
from PIL import Image
import matplotlib as mpl
import matplotlib.pyplot as plt
import time
import numpy as np
import skimage
from skimage import draw
#import cv2
IMAGE_SHAPE = (512, 512, 1)
WINDOW_SHAPE = (50, 50)
STEP_SIZE = (7, 7)
def correct_image_path(path_imageBF):
pl = path_imageBF.split(':')
correct_path = 'I:' + pl[1]
return correct_path
def load_csg(filename_csg):
mat_contents = sio.loadmat(filename_csg)
hh = mat_contents['hh']
val = hh[0, 0]
seg_data = dict()
seg_data['cellsegperim'] = val['cellsegperim']
seg_data['filenameBF'] = val['filenameBF']
seg_data['path_imageBF'] = str(val['pathnameBF'][0])
    # The next line only corrects for drive-name differences between machines.
seg_data['path_imageBF'] = correct_image_path(seg_data['path_imageBF'])
return seg_data
def transform_cellseg(cellseg):
cellsegs = list()
for i in range(cellseg.shape[0]):
seg = cellseg[i, 0]
if(seg.shape[1]==2):
cellsegs.append(seg)
return cellsegs
def get_seg_im(seg_data, idx):
seg_im = dict()
seg_im['cellseg'] = transform_cellseg(seg_data['cellsegperim'][0, idx])
seg_im['filenameBF'] = str(seg_data['filenameBF'][0, idx][0])
image_file = os.path.join(seg_data['path_imageBF'], seg_im['filenameBF'])
seg_im['imageBF'] = np.array(Image.open(image_file))
return seg_im
def segperim_generator(seg_data):
for i in range(seg_data['cellsegperim'].shape[1]):
seg_im = get_seg_im(seg_data, i)
yield seg_im
def plot_cellseg(seg_im):
colormap = mpl.cm.gray
plt.imshow(seg_im['imageBF'], cmap=colormap)
for idx in range(len(seg_im['cellseg'])):
seg = seg_im['cellseg'][idx, 0]
plt.plot(seg[:, 1], seg[:, 0], 'r')
plt.plot(find_center(seg)[1], find_center(seg)[0], 'r*')
plt.show()
def find_center(seg):
c_mean = np.mean(seg[:, 1])
r_mean = np.mean(seg[:, 0])
return (int(r_mean), int(c_mean))
"""
def get_circle(seg):
circle = np.zeros(IMAGE_SHAPE)
circle[seg[:, 0], seg[:, 1]] = 1
return circle
def get_circles(seg_im):
circles = np.zeros(IMAGE_SHAPE)
for idx in range(len(seg_im['cellseg'])):
seg = seg_im['cellseg'][idx]
circles[seg[:, 0], seg[:, 1]] = 1
return circles
"""
"""
seg_data = load_csg('E:/LTQ work/tanglab/deepYeast/xy01 1-120.csg')
seg_im = next(segperim_generator(seg_data))
plot_cellseg(seg_im)
"""
"""
# It turns out the data cannot simply be saved as .mat; the internal structure and data types get scrambled when it is loaded again.
def load_seg_im_data(filename):
mat_contents = sio.loadmat(filename)
data = mat_contents['data']
"""
def load_data(filename_list):
Xy_list = list()
for filename in filename_list:
Xy_list.append(np.loadtxt(filename, dtype=np.int32, comments='#', delimiter=' '))
Xy = np.vstack(tuple(Xy_list))
np.random.shuffle(Xy)
X = Xy[:, 1:]
y = Xy[:, 0]
X = X.reshape(X.shape[0], WINDOW_SHAPE[0], WINDOW_SHAPE[1])
return X, y
def load_rect_data(filename_list):
Xy_list = list()
for filename in filename_list:
Xy_list.append(np.loadtxt(filename, dtype=np.int32, comments='#', delimiter=' '))
Xy = np.vstack(tuple(Xy_list))
np.random.shuffle(Xy)
X = Xy[:, 4:]
y = Xy[:, 0:4]
X = X.reshape(X.shape[0], WINDOW_SHAPE[0], WINDOW_SHAPE[1])
return X, y
def save_image(data, path):
for idx in range(data.shape[0]):
im = data[idx]
img = Image.fromarray(np.uint16(im))
img.save(os.path.join(path, '%s.tif'%idx))
def plot_rect(imageBF, vertex):
colormap = mpl.cm.gray
plt.imshow(imageBF, cmap=colormap)
(r1, r2, c1, c2) = vertex
plt.plot(np.ones(r2-r1)*c1, np.array(range(r1, r2)), 'r')
plt.plot(np.ones(r2-r1)*c2, np.array(range(r1, r2)), 'r')
plt.plot(np.array(range(c1, c2)), np.ones(c2-c1)*r1, 'r')
plt.plot(np.array(range(c1, c2)), np.ones(c2-c1)*r2, 'r')
plt.xlim(0, WINDOW_SHAPE[1])
plt.ylim(WINDOW_SHAPE[0], 0)
plt.show()
def poly2mask(vertex_row_coords, vertex_col_coords, shape):
fill_row_coords, fill_col_coords = draw.polygon(vertex_row_coords, vertex_col_coords, shape)
    mask = np.zeros(shape, dtype=bool)  # np.bool is deprecated; the builtin bool is equivalent here
mask[fill_row_coords, fill_col_coords] = True
return mask
def pol2cart(phi, rho):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return (x, y)
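
A small self-contained check of the two geometry helpers, handy when validating the mask-generation path in prepare_data.py (the triangle coordinates are arbitrary test values):

import numpy as np
from util import poly2mask, pol2cart, WINDOW_SHAPE

rows = np.array([5, 45, 25])
cols = np.array([10, 10, 40])
mask = poly2mask(rows, cols, WINDOW_SHAPE)
print(mask.shape, mask.sum())      # (50, 50) and the filled triangle's pixel count

x, y = pol2cart(np.pi / 4, 10.0)
print(round(x, 3), round(y, 3))    # both approximately 7.071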