From ac4e3e12a7a7c521eb46347addaa8bba03fb6e57 Mon Sep 17 00:00:00 2001 From: Gwendolien Sergeant Date: Tue, 27 Jun 2017 14:59:52 +0200 Subject: [PATCH] Fixing some imports and method calls, moving more stuff from main to common --- scripts/application.py | 1 + scripts/common.py | 270 +++++++++++++++++-------- scripts/main.py | 72 +------ scripts/morphology/morph_extraction.py | 20 +- scripts/tracking/kcf_tracker.py | 53 +++-- scripts/tracking/knn_tracker.py | 13 +- scripts/tracking/opt_flow_tracker.py | 55 ++--- 7 files changed, 261 insertions(+), 223 deletions(-) diff --git a/scripts/application.py b/scripts/application.py index 7f8a556..3dd0a57 100644 --- a/scripts/application.py +++ b/scripts/application.py @@ -2,6 +2,7 @@ import re import matplotlib.animation as animation +import matplotlib.pyplot as plt from PIL import Image as img2 from PIL import ImageSequence diff --git a/scripts/common.py b/scripts/common.py index 01a56cd..aac3940 100644 --- a/scripts/common.py +++ b/scripts/common.py @@ -1,7 +1,6 @@ """ Contains common functionality that is used by different classes (e.g. saving images, writing to files) - The idea of this class is to reduce importing the same packages over and over in different classes. Methods will be added to this class as more code gets reformatted """ @@ -9,11 +8,13 @@ # imports import os.path -import mahotas import csv +import mahotas import cv2 + import scripts.preprocessing.preprocessing as preprocessing import scripts.segmentation.segmentation as segmentation +from . import main try: import tkinter as tk # for python 3 @@ -28,21 +29,9 @@ import ntpath as path -def join_path(pathpart1, pathpart2): - """ - Join the path parts using the intelligent joiner from the imported module - """ - - return path.join(pathpart1, pathpart2) - - -def concatenateList(root): - if isinstance(root, (list, tuple)): - for element in root: - for e in concatenateList(element): - yield e - else: - yield root +""" +Save/read/write methods +""" def save_image(pathpart1, pathpart2, imagefile): @@ -71,6 +60,18 @@ def write_image(pathpart1, pathpart2, imagefile): cv2.imwrite(joined_path, imagefile) +def csv_writer(openedfile): + """ + Return a csv writer for a file + """ + return csv.writer(openedfile, lineterminator='\n') + + +""" +Methods for image and GUI manipulation +""" + + def resize_image(imagefile, dimension): """ OpenCV method, return resized image. 
@@ -83,115 +84,208 @@ def tkinter_photoimage(filepath):
     """
     Tkinter method, return image
     """
-    return tk.PhotoImage(file=filepath)
+    return tk.PhotoImage(file=filepath)
 
 
-def csv_writer(openedfile):
+def display_image(image):
+    """Use the application root to display an image
     """
-    Return a csv writer for a file
+    root = main.get_root()
+    root.displayimage = image
+
+
+def draw_str(dst, target, s):
+    """ Puts text onto an image
+    :param dst: the image on which to put a string
+    :param target: a tuple designating the image coordinates of the desired text
+    :param s: the string to draw on the image
     """
-    return csv.writer(openedfile, lineterminator='\n')
+    x, y = target
+    cv2.putText(dst, s, (x + 1, y + 1), cv2.FONT_HERSHEY_PLAIN,
+                1.0, (0, 0, 0), thickness=3, lineType=cv2.LINE_AA)
+    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN,
+                1.0, (255, 255, 255), lineType=cv2.LINE_AA)
+
+
+def displaycoordinates(self, objectLabel, a, b, tm):
+    """ Display the time tm and coordinates (a, b) of cell objectLabel on the panel"""
+
+    if objectLabel == 0:
+        self.label_10.configure(text=int(tm))
+        self.label_43.configure(text=a)
+        self.label_63.configure(text=b)
+    if objectLabel == 1:
+        self.label_11.configure(text=int(tm))
+        self.label_44.configure(text=a)
+        self.label_64.configure(text=b)
+
+    if objectLabel == 2:
+        self.label_12.configure(text=int(tm))
+        self.label_45.configure(text=a)
+        self.label_65.configure(text=b)
+
+    if objectLabel == 3:
+        self.label_13.configure(text=int(tm))
+        self.label_46.configure(text=a)
+        self.label_66.configure(text=b)
+
+    if objectLabel == 4:
+        self.label_14.configure(text=int(tm))
+        self.label_47.configure(text=a)
+        self.label_67.configure(text=b)
+
+    if objectLabel == 5:
+        self.label_15.configure(text=int(tm))
+        self.label_48.configure(text=a)
+        self.label_68.configure(text=b)
+    if objectLabel == 6:
+        self.label_16.configure(text=int(tm))
+        self.label_49.configure(text=a)
+        self.label_69.configure(text=b)
+    if objectLabel == 7:
+        self.label_17.configure(text=int(tm))
+        self.label_50.configure(text=a)
+        self.label_70.configure(text=b)
+
+    if objectLabel == 8:
+        self.label_18.configure(text=int(tm))
+        self.label_51.configure(text=a)
+        self.label_71.configure(text=b)
+
+    if objectLabel == 9:
+        self.label_19.configure(text=int(tm))
+        self.label_52.configure(text=a)
+        self.label_72.configure(text=b)
+
+
+"""
+Methods for forwarding external modules
+"""
+
+
+def join_path(pathpart1, pathpart2):
+    """
+    Join the path parts using the intelligent joiner from the imported module
+    """
+
+    return path.join(pathpart1, pathpart2)
+
+
+"""
+Other methods
+"""
+
+
+def concatenate_list(arg):
+    """Flatten a nested list or tuple, yielding the individual elements
+    """
+    if isinstance(arg, (list, tuple)):
+        for element in arg:
+            for elem in concatenate_list(element):
+                yield elem
+    else:
+        yield arg
 
 
-def call_preprocessing(image, preprocessingMethod):
+def call_preprocessing(image, preprocessing_method):
     """
     Execute a preprocessing method on an image
     """
-    if preprocessingMethod == 1:
-        preprocessedImage = preprocessing.histEqualize(image)
+    if preprocessing_method == 1:
+        preprocessed_image = preprocessing.histEqualize(image)
 
-    elif preprocessingMethod == 2:
-        preprocessedImage = preprocessing.brightening(image)
+    elif preprocessing_method == 2:
+        preprocessed_image = preprocessing.brightening(image)
 
-    elif preprocessingMethod == 3:
-        preprocessedImage = preprocessing.GaussianBlurring(image)
+    elif preprocessing_method == 3:
+        preprocessed_image = preprocessing.GaussianBlurring(image)
 
-    elif preprocessingMethod == 4:
-        preprocessedImage = 
preprocessing.darkening(image)
+    elif preprocessing_method == 4:
+        preprocessed_image = preprocessing.darkening(image)
 
-    elif preprocessingMethod == 5:
-        preprocessedImage = preprocessing.denoising(image)
+    elif preprocessing_method == 5:
+        preprocessed_image = preprocessing.denoising(image)
 
-    elif preprocessingMethod == 6:
-        preprocessedImage = preprocessing.binaryThresholding(image)
+    elif preprocessing_method == 6:
+        preprocessed_image = preprocessing.binaryThresholding(image)
 
-    elif preprocessingMethod == 8:
-        preprocessedImage = preprocessing.sharpening(image)
+    elif preprocessing_method == 8:
+        preprocessed_image = preprocessing.sharpening(image)
 
-    if preprocessingMethod == 7:
+    if preprocessing_method == 7:
 
-        preprocessedImage = image
+        preprocessed_image = image
 
-    return preprocessedImage
+    return preprocessed_image
 
 
-def call_segmentation(segMeth, preImage, rawImg, minAreaSize, maxAreaSize, fixscale, minDistance, cellEstimate, color, thre):
+def call_segmentation(segmentationmethod, preimage, rawimage, min_areasize, max_areasize, fixscale, min_distance, cell_estimate, color, thre):
     """
     Call segmentation methods
-    :param segMeth: segmentation methods
-    :param processedImage1: input image to segment
-    :param rawImg: raw image without preprocessing
-    :param minAreaSize: estimated minimum area size of the cell
-    :param maxAreaSize: estimated maximum area size of the cell
+    :param segmentationmethod: which segmentation method to run (1-12)
+    :param preimage: input image to segment
+    :param rawimage: raw image without preprocessing
+    :param min_areasize: estimated minimum area size of the cell
+    :param max_areasize: estimated maximum area size of the cell
     :param fixscale: pixel intensity from 0.1-1.0
-    :param minDistance: the minimum distance between the cells
-    :param cellEstimate: minimum estimated number of cells per image
+    :param min_distance: the minimum distance between the cells
+    :param cell_estimate: minimum estimated number of cells per image
     :param color: color cell path
     """
-    initialpoints, boxes, maskIMage, Image, CellMorph, processedImage = [], [], [], [], [], []
-    processedImage = preImage
+    initialpoints, boxes, mask_image, image, cellmorph, processedimage = [], [], [], [], [], []
+    processedimage = preimage
 
-    if segMeth == 1:
-        initialpoints, boxes, maskIMage, mage = segmentation.blob_seg(
-            processedImage)
+    if segmentationmethod == 1:
+        initialpoints, boxes, mask_image, image = segmentation.blob_seg(
+            processedimage)
 
-    if segMeth == 2:
+    if segmentationmethod == 2:
         if color == 1:
-            initialpoints, boxes, maskIMage, Image, CellMorph = segmentation.black_background(
-                processedImage, rawImg, minAreaSize, maxAreaSize)
+            initialpoints, boxes, mask_image, image, cellmorph = segmentation.black_background(
+                processedimage, rawimage, min_areasize, max_areasize)
 
         if color == 2:
-            initialpoints, boxes, maskIMage, Image, CellMorph = segmentation.white_background(
-                processedImage, rawImg, minAreaSize, maxAreaSize)
+            initialpoints, boxes, mask_image, image, cellmorph = segmentation.white_background(
+                processedimage, rawimage, min_areasize, max_areasize)
 
-    if segMeth == 3:
-        initialpoints, boxes, Image = segmentation.harris_corner(
-            processedImage, int(cellEstimate), float(fixscale), int(minDistance))
+    if segmentationmethod == 3:
+        initialpoints, boxes, image = segmentation.harris_corner(
+            processedimage, int(cell_estimate), float(fixscale), int(min_distance))
 
-    if segMeth == 4:
-        initialpoints, boxes, Image = segmentation.shi_tomasi(
-            processedImage, int(cellEstimate), float(fixscale), 
int(minDistance)) + if segmentationmethod == 4: + initialpoints, boxes, image = segmentation.shi_tomasi( + processedimage, int(cell_estimate), float(fixscale), int(min_distance)) - if segMeth == 5: - initialpoints, boxes, maskIMage, Image, CellMorph = segmentation.kmeansSegment( - processedImage, rawImg, 1, minAreaSize, maxAreaSize) + if segmentationmethod == 5: + initialpoints, boxes, mask_image, image, cellmorph = segmentation.kmeansSegment( + processedimage, rawimage, 1, min_areasize, max_areasize) - if segMeth == 6: - initialpoints, boxes, maskIMage, Image, CellMorph = segmentation.graphSegmentation( - processedImage, rawImg, minAreaSize, minAreaSize, maxAreaSize) + if segmentationmethod == 6: + initialpoints, boxes, mask_image, image, cellmorph = segmentation.graphSegmentation( + processedimage, rawimage, min_areasize, min_areasize, max_areasize) - if segMeth == 7: - initialpoints, boxes, maskIMage, Image, CellMorph = segmentation.meanshif( - processedImage, rawImg, minAreaSize, maxAreaSize, int(fixscale * 100)) + if segmentationmethod == 7: + initialpoints, boxes, mask_image, image, cellmorph = segmentation.meanshif( + processedimage, rawimage, min_areasize, max_areasize, int(fixscale * 100)) - if segMeth == 8: - initialpoints, boxes, maskIMage, Image, CellMorph = segmentation.sheetSegment( - processedImage, rawImg, minAreaSize, maxAreaSize) + if segmentationmethod == 8: + initialpoints, boxes, mask_image, image, cellmorph = segmentation.sheetSegment( + processedimage, rawimage, min_areasize, max_areasize) - if segMeth == 9: - initialpoints, boxes, maskIMage, Image, CellMorph = segmentation.findContour( - processedImage, rawImg, minAreaSize, maxAreaSize) + if segmentationmethod == 9: + initialpoints, boxes, mask_image, image, cellmorph = segmentation.findContour( + processedimage, rawimage, min_areasize, max_areasize) - if segMeth == 10: - initialpoints, boxes, maskIMage, Image, CellMorph = segmentation.threshold( - processedImage, rawImg, minAreaSize, maxAreaSize) + if segmentationmethod == 10: + initialpoints, boxes, mask_image, image, cellmorph = segmentation.threshold( + processedimage, rawimage, min_areasize, max_areasize) - if segMeth == 11: - initialpoints, boxes, maskIMage, Image, CellMorph = segmentation.overlapped_seg( - processedImage, rawImg, minAreaSize, maxAreaSize) + if segmentationmethod == 11: + initialpoints, boxes, mask_image, image, cellmorph = segmentation.overlapped_seg( + processedimage, rawimage, min_areasize, max_areasize) - if segMeth == 12: - initialpoints, boxes, maskIMage, Image, CellMorph = segmentation.gredientSeg(processedImage, rawImg, - minAreaSize, maxAreaSize, thre) + if segmentationmethod == 12: + initialpoints, boxes, mask_image, image, cellmorph = segmentation.gredientSeg(processedimage, rawimage, + min_areasize, max_areasize, thre) - return initialpoints, boxes, maskIMage, Image, CellMorph + return initialpoints, boxes, mask_image, image, cellmorph \ No newline at end of file diff --git a/scripts/main.py b/scripts/main.py index 2eeab20..4072090 100644 --- a/scripts/main.py +++ b/scripts/main.py @@ -11,13 +11,10 @@ from itertools import groupby from operator import itemgetter -import matplotlib.pyplot as plt import numpy as np import pandas as pd # import scipy # from pylab import tk -from scipy.ndimage import gaussian_filter1d -from sklearn.neighbors import NearestNeighbors import application import call_back_preprocessing @@ -39,18 +36,12 @@ current_time = time.strftime('%a, %d %b %Y %H:%M:%S GMT', current_time) global prev_image, thre - -def 
drawStr(dst, target, s): - """ Puts text onto an image - :param dst: the image on which to put a string - :param target: a tuple designating the image coordinates of the desired text - :param s: the string to draw on the image +def get_root(): + """Return the application root. + Needed for methods in other packages that display on GUI + Replace with more efficient way in the future """ - x, y = target - cv2.putText(dst, s, (x + 1, y + 1), cv2.FONT_HERSHEY_PLAIN, - 1.0, (0, 0, 0), thickness=3, lineType=cv2.LINE_AA) - cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, - 1.0, (255, 255, 255), lineType=cv2.LINE_AA) + return root def resizeImage(image): @@ -103,57 +94,6 @@ def deleteLostTracks(trackNr, frameIdx, currentFrameIdx): return deleteTrack -def displayCoordinates(self, objectLabel, a, b, tm): - """ Display coordinates of the cell to the panel""" - - if objectLabel == 0: - self.label_10.configure(text=int(tm)) - self.label_43.configure(text=a) - self.label_63.configure(text=b) - if objectLabel == 1: - self.label_11.configure(text=int(tm)) - self.label_44.configure(text=a) - self.label_64.configure(text=b) - - if objectLabel == 2: - self.label_12.configure(text=int(tm)) - self.label_45.configure(text=a) - self.label_65.configure(text=b) - - if objectLabel == 3: - self.label_13.configure(text=int(tm)) - self.label_46.configure(text=a) - self.label_66.configure(text=b) - - if objectLabel == 4: - self.label_14.configure(text=int(tm)) - self.label_47.configure(text=a) - self.label_67.configure(text=b) - - if objectLabel == 5: - self.label_15.configure(text=int(tm)) - self.label_48.configure(text=a) - self.label_68.configure(text=b) - if objectLabel == 6: - self.label_16.configure(text=int(tm)) - self.label_49.configure(text=a) - self.label_69.configure(text=b) - if objectLabel == 7: - self.label_17.configure(text=int(tm)) - self.label_50.configure(text=a) - self.label_70.configure(text=b) - - if objectLabel == 8: - self.label_18.configure(text=int(tm)) - self.label_51.configure(text=a) - self.label_71.configure(text=b) - - if objectLabel == 9: - self.label_19.configure(text=int(tm)) - self.label_52.configure(text=a) - self.label_72.configure(text=b) - - if __name__ == '__main__': root = tk.Tk() menu = tk.Menu(root) @@ -180,4 +120,4 @@ def displayCoordinates(self, objectLabel, a, b, tm): # root.tk.call('wm', 'iconphoto', root._w, img) root.title("CellMojo: Cell Segmentation and Tracking") app = application.Application(root) - root.mainloop() + root.mainloop() \ No newline at end of file diff --git a/scripts/morphology/morph_extraction.py b/scripts/morphology/morph_extraction.py index a65cd96..29e5fa6 100644 --- a/scripts/morphology/morph_extraction.py +++ b/scripts/morphology/morph_extraction.py @@ -21,20 +21,20 @@ def morph_extraction(frames, smoothingmethod, seg_method, for i, frame in enumerate(frames): old_gray = common.call_preprocessing(frame, smoothingmethod) # unused: bounding_box and mask_image - initialpoints, _, _, displayed_image, cell_info = common.call_segmentation(seg_method, preImage=old_gray, - rawImg=frame, - minAreaSize=exp_parameter[2], - maxAreaSize=exp_parameter[3], + initialpoints, _, _, displayed_image, cell_info = common.call_segmentation(seg_method, preimage=old_gray, + rawimage=frame, + min_areasize=exp_parameter[2], + max_areasize=exp_parameter[3], fixscale=exp_parameter[4], - minDistance=exp_parameter[5], - cellEstimate=exp_parameter[1], + min_distance=exp_parameter[5], + cell_estimate=exp_parameter[1], color=int(exp_parameter[6]), thre=int(exp_parameter[7])) for 
ii, tmp_inf in enumerate(cell_info): if tmp_inf: tmp_inf = tmp_inf[1:] - temp_list = list(common.concatenateList([i, int(ii), tmp_inf])) + temp_list = list(common.concatenate_list([i, int(ii), tmp_inf])) cell_morphology.append(temp_list) # cell centroids @@ -59,7 +59,7 @@ def morph_extraction(frames, smoothingmethod, seg_method, common.save_image(tmp_dir[3], '%d.gif' % i, resized) display_image = common.tkinter_photoimage(str(common.join_path(tmp_dir[3], '%d.gif' % i))) - root.displayImage = display_image + common.display_image(display_image) imagesprite = updateconvax.create_image( 263, 187, image=display_image) updateconvax.update_idletasks() # Force redraw @@ -67,7 +67,7 @@ def morph_extraction(frames, smoothingmethod, seg_method, if i == amount_frames - 1 or i == amount_frames: display_image = common.tkinter_photoimage(str(common.join_path(tmp_dir[3], '%d.gif' % i))) - root.displayImage = display_image + common.display_image(display_image) imagesprite = updateconvax.create_image( 263, 187, image=display_image) @@ -85,4 +85,4 @@ def morph_extraction(frames, smoothingmethod, seg_method, writer = common.csv_writer(f3) writer.writerow(('frameID', 'x', 'y')) for obj in detections: - writer.writerow(obj) + writer.writerow(obj) \ No newline at end of file diff --git a/scripts/tracking/kcf_tracker.py b/scripts/tracking/kcf_tracker.py index 3b4a6ee..dec5b45 100644 --- a/scripts/tracking/kcf_tracker.py +++ b/scripts/tracking/kcf_tracker.py @@ -1,4 +1,6 @@ # imports +import cv2 +import numpy as np from .. import common def KCFTrack(self, frames, firstImage, smoothingmethod, segMeth, exp_parameter, updateconvax, progessbar, timelapse, tmp_dir): @@ -8,15 +10,14 @@ def KCFTrack(self, frames, firstImage, smoothingmethod, segMeth, exp_parameter, tracker = cv2.MultiTracker("KCF") init_once = False - old_gray = call_back_preprocessing.call_preprocessing( - firstImage, smoothingmethod) - initialpoints, boundingBox, _, _, CellInfo = call_back_segmentation.call_segmentation(segMeth, processedImage=old_gray, - rawImg=firstImage, - minAreaSize=exp_parameter[2], - maxAreaSize=exp_parameter[3], + old_gray = common.call_preprocessing(firstImage, smoothingmethod) + initialpoints, boundingBox, _, _, CellInfo = common.call_segmentation(segMeth, preimage=old_gray, + rawimage=firstImage, + min_areasize=exp_parameter[2], + max_areasize=exp_parameter[3], fixscale=exp_parameter[4], - minDistance=exp_parameter[5], - cellEstimate=exp_parameter[1], + min_distance=exp_parameter[5], + cell_estimate=exp_parameter[1], color=int(exp_parameter[6]), thre=exp_parameter[7]) @@ -28,7 +29,7 @@ def KCFTrack(self, frames, firstImage, smoothingmethod, segMeth, exp_parameter, r = 500.0 / firstImage.shape[1] dim = (500, int(firstImage.shape[0] * r)) - firstImage = cv2.resize(firstImage, dim, interpolation=cv2.INTER_AREA) + firstImage = common.resize_image(firstImage, dim) noFrames = len(frames) masks = np.zeros_like(firstImage,) @@ -40,10 +41,10 @@ def KCFTrack(self, frames, firstImage, smoothingmethod, segMeth, exp_parameter, r = 500.0 / image.shape[1] dim = (500, int(image.shape[0] * r)) - image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA) + image = common.resize_image(image, dim) IM = image - old_gray = call_back_preprocessing.call_preprocessing(image, smoothingmethod) + old_gray = common.call_preprocessing(image, smoothingmethod) # check if its two dim or more if len(old_gray.shape) > 3: old_gray = cv2.cvtColor(old_gray, cv2.COLOR_GRAY2BGR) @@ -81,50 +82,48 @@ def KCFTrack(self, frames, firstImage, smoothingmethod, segMeth, 
exp_parameter, frameID.append(ii) # display to the panel the location - displayCoordinates(self, c, cX, cY) + common.displaycoordinates(self, c, cX, cY) old_points2 = old_points IM = np.add(IM, masks) - tmp_img = path.join(str(tmp_dir[1]), 'frame{}.png'.format(ii)) - save_path = path.join(str(tmp_dir[0]), 'frame{}.png'.format(ii)) - cv2.imwrite(save_path, IM) + tmp_img = common.join_path(str(tmp_dir[1]), 'frame{}.png'.format(ii)) + common.write_image(str(tmp_dir[0]), 'frame{}.png'.format(ii), IM) if ii == noFrames - 1 or ii == noFrames: common.save_image(str(tmp_dir[0]), 'frame{}.png'.format(ii), IM) # handle image in the displace panel - img = cv2.imread(save_path) + img = common.read_image(str(tmp_dir[0]), 'frame{}.png'.format(ii)) r = 600.0 / img.shape[1] dim = (600, int(img.shape[0] * r)) # perform the actual resizing of the image and display it to the panel - resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA) + resized = common.resize_image(img, dim) common.save_image(tmp_dir[3], '%d.gif' % ii, resized) - displayImage = tk.PhotoImage(file=str(path.join(tmp_dir[3], '%d.gif' % ii))) - root.displayImage = displayImage - imagesprite = updateconvax.create_image( - 263, 187, image=displayImage) + displayImage = tk.PhotoImage(file=str(common.join_path(tmp_dir[3], '%d.gif' % ii))) + common.display_image(displayImage) + imagesprite = updateconvax.create_image(263, 187, image=displayImage) updateconvax.update_idletasks() # Force redraw updateconvax.delete(imagesprite) if ii == noFrames - 1 or ii == noFrames: - displayImage = tk.PhotoImage( - file=str(path.join(tmp_dir[3], '%d.gif' % ii))) - root.displayImage = displayImage + displayImage = tk.PhotoImage(file=str(common.join_path(tmp_dir[3], '%d.gif' % ii))) + common.display_image(displayImage) imagesprite = updateconvax.create_image( 263, 187, image=displayImage) + except EOFError: continue # timelapse += Initialtime unpacked = zip(frameID, cellIDs, trajectoriesX, trajectoriesY) - with open(path.join(tmp_dir[2], 'data.csv'), 'wt') as f1: - writer = csv.writer(f1, lineterminator='\n') + with open(common.join_path(tmp_dir[2], 'data.csv'), 'wt') as f1: + writer = common.csv_writer(f1) writer.writerow(('frameID', 'track_no', 'x', "y",)) for value in unpacked: - writer.writerow(value) + writer.writerow(value) \ No newline at end of file diff --git a/scripts/tracking/knn_tracker.py b/scripts/tracking/knn_tracker.py index edde3c4..4c5760d 100644 --- a/scripts/tracking/knn_tracker.py +++ b/scripts/tracking/knn_tracker.py @@ -1,4 +1,7 @@ #imports +import matplotlib.pyplot as plt +from scipy.ndimage import gaussian_filter1d + from .. 
import common def KNNTracker(self, frames, firstImage, smoothingmethod, segMeth, exp_parameter, updateconvax, progessbar, timelapse, tmp_dir): @@ -126,17 +129,17 @@ def KNNTracker(self, frames, firstImage, smoothingmethod, segMeth, exp_parameter # than 15 frames if CellInfo: tmp_inf = CellInfo[ii] - tmpList = list(extra_modules.concatenateList([i, int(cellIdx), tmp_inf])) + tmpList = list(common.concatenate_list([i, int(cellIdx), tmp_inf])) CellMorph.append(tmpList) # display some info to the user interface - displayCoordinates(self, ii, a, b, Initialtime) + common.displaycoordinates(self, ii, a, b, Initialtime) dataFrame = pd.DataFrame(track_history, columns=[ 'frame_idx', 'track_no', 'x', 'y', 'time']) # review tracking - drawStr(imagePlot, (20, 20), 'track count: %d' % len(good_new)) - drawStr(morphImage, (20, 20), 'track count: %d' % len(good_new)) + common.draw_str(imagePlot, (20, 20), 'track count: %d' % len(good_new)) + common.draw_str(morphImage, (20, 20), 'track count: %d' % len(good_new)) if dataFrame is not None: index_Values = dataFrame["track_no"] x_Values = dataFrame["x"] @@ -270,4 +273,4 @@ def KNNTracker(self, frames, firstImage, smoothingmethod, segMeth, exp_parameter # opens file with name of "totalProcessingTime.txt" f = open("totalProcessingTime.txt", "w") f.write(str(endProcessingTime)) - f.close() + f.close() \ No newline at end of file diff --git a/scripts/tracking/opt_flow_tracker.py b/scripts/tracking/opt_flow_tracker.py index 6566f21..7b37fa0 100644 --- a/scripts/tracking/opt_flow_tracker.py +++ b/scripts/tracking/opt_flow_tracker.py @@ -1,19 +1,24 @@ # imports +import numpy as np +import matplotlib.pyplot as plt +from scipy.ndimage import gaussian_filter1d +from sklearn.neighbors import NearestNeighbors + from .. import common def OptflowTracker(self, frames, firstImage, smoothingmethod, segMeth, exp_parameter, updateconvax, progessbar, timelapse, tmp_dir): """ Optical Flow-based tracker """ - old_gray = call_back_preprocessing.call_preprocessing( + old_gray = common.call_preprocessing( firstImage, smoothingmethod) - initialpoints, boundingBox, _, _, CellInfo = call_back_segmentation.call_segmentation(segMeth, processedImage=old_gray, - rawImg=firstImage, - minAreaSize=exp_parameter[2], - maxAreaSize=exp_parameter[3], + initialpoints, boundingBox, _, _, CellInfo = common.call_segmentation(segMeth, preimage=old_gray, + rawimage=firstImage, + min_areasize=exp_parameter[2], + max_areasize=exp_parameter[3], fixscale=exp_parameter[4], - minDistance=exp_parameter[5], - cellEstimate=exp_parameter[1], + min_distance=exp_parameter[5], + cell_estimate=exp_parameter[1], color=int(exp_parameter[6]), thre=int(exp_parameter[7])) @@ -50,8 +55,7 @@ def OptflowTracker(self, frames, firstImage, smoothingmethod, segMeth, exp_param r = 500.0 / frame.shape[1] dim = (500, int(frame.shape[0] * r)) - frame = cv2.resize( - frame, dim, interpolation=cv2.INTER_AREA) + frame = common.resize_image(frame, dim) # make a copy of the frame for ploting reasons imagePlot = frame.copy() @@ -59,8 +63,7 @@ def OptflowTracker(self, frames, firstImage, smoothingmethod, segMeth, exp_param # show the progress bar progessbar.step(i * 2) - im = call_back_preprocessing.call_preprocessing( - frame, smoothingmethod) + im = common.call_preprocessing(frame, smoothingmethod) # Parameters for lucas kanade optical flow lk_params = dict(winSize=(20, 20), maxLevel=3, @@ -119,19 +122,18 @@ def OptflowTracker(self, frames, firstImage, smoothingmethod, segMeth, exp_param if CellInfo: tmp_inf = CellInfo[ii] tmp_inf = 
tmp_inf[1:] - tmpList = list(extra_modules.concatenateList( + tmpList = list(common.concatenate_list( [i, int(cellId), tmp_inf])) CellMorph.append(tmpList) # manage the displaying label - - displayCoordinates(self, ii, a, b, Initialtime) + common.displaycoordinates(self, ii, a, b, Initialtime) dataFrame = pd.DataFrame(track_history, columns=[ 'frame_idx', 'track_no', 'x', 'y']) # review tracking - drawStr(imagePlot, (20, 20), 'track count: %d' % len(good_new)) + common.draw_str(imagePlot, (20, 20), 'track count: %d' % len(good_new)) if dataFrame is not None: index_Values = dataFrame["track_no"] @@ -183,13 +185,12 @@ def OptflowTracker(self, frames, firstImage, smoothingmethod, segMeth, exp_param plt.axis('off') mng = plt.get_current_fig_manager() mng.full_screen_toggle() - tmp_img = path.join(str(tmp_dir[1]), 'frame{}.png'.format(i)) - fig.savefig(tmp_img) + fig.savefig(common.join_path(str(tmp_dir[1]), 'frame{}.png'.format(i))) if i == noFrames - 1: fig.savefig( - path.join(str(tmp_dir[0]), 'frame{}.png'.format(i))) + common.join_path(str(tmp_dir[0]), 'frame{}.png'.format(i))) del fig # Now update the previous frame and previous points @@ -197,19 +198,19 @@ def OptflowTracker(self, frames, firstImage, smoothingmethod, segMeth, exp_param initialpoints = p1.reshape(-1, 1, 2) # handle image in the displace panel - img = cv2.imread(tmp_img) + img = common.read_image(str(tmp_dir[1]), 'frame{}.png'.format(i)) r = 600.0 / img.shape[1] dim = (600, int(img.shape[0] * r)) # perform the actual resizing of the image and display it to the # panel - resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA) + resized = common.resize_image(img, dim) common.save_image(tmp_dir[3], '%d.gif' % i, resized) displayImage = tk.PhotoImage( - file=str(path.join(tmp_dir[3], '%d.gif' % i))) - root.displayImage = displayImage + file=str(common.join_path(tmp_dir[3], '%d.gif' % i))) + common.display_image(displayImage) imagesprite = updateconvax.create_image( 263, 187, image=displayImage) updateconvax.update_idletasks() # Force redraw @@ -218,8 +219,8 @@ def OptflowTracker(self, frames, firstImage, smoothingmethod, segMeth, exp_param if i == noFrames - 1: displayImage = tk.PhotoImage( - file=str(path.join(tmp_dir[3], '%d.gif' % i))) - root.displayImage = displayImage + file=str(common.join_path(tmp_dir[3], '%d.gif' % i))) + common.display_image(displayImage) imagesprite = updateconvax.create_image( 263, 187, image=displayImage) @@ -228,8 +229,8 @@ def OptflowTracker(self, frames, firstImage, smoothingmethod, segMeth, exp_param # timelapse += Initialtime unpacked = zip(frameID, cellIDs, trajectoriesX, trajectoriesY) - with open(path.join(tmp_dir[2], 'data.csv'), 'wt') as f1: - writer = csv.writer(f1, lineterminator='\n') + with open(common.join_path(tmp_dir[2], 'data.csv'), 'wt') as f1: + writer = common.csv_writer(f1) writer.writerow(('frameID', 'track_no', 'x', "y",)) for value in unpacked: - writer.writerow(value) + writer.writerow(value) \ No newline at end of file
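
-- 
Usage sketch (not part of the patch): a minimal example of how a tracker is expected
to drive the consolidated helpers in scripts/common.py after this change. The frame,
method ids, and numeric parameters below are made up for illustration; only the
function names and keyword arguments come from common.py as patched above.

    import cv2
    from scripts import common

    frame = cv2.imread('example_frame.png')    # hypothetical input frame
    pre = common.call_preprocessing(frame, 3)  # 3 selects the GaussianBlurring branch
    initialpoints, boxes, mask_image, seg_image, cellmorph = common.call_segmentation(
        9,                    # 9 selects the findContour branch
        preimage=pre,
        rawimage=frame,
        min_areasize=50,
        max_areasize=500,
        fixscale=0.5,
        min_distance=10,
        cell_estimate=20,
        color=1,              # background flag used by method 2 (1 = black, 2 = white)
        thre=100)
    common.draw_str(seg_image, (20, 20), 'cells: %d' % len(initialpoints))
    common.write_image('/tmp', 'segmented.png', seg_image)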