Commit 65649f41 authored by Christoph Sommer

Merge branch 'master' of git.ist.ac.at:csommer/careless

parents 90ad1ef6 478ce527
.ipynb_checkpoints/
bif_care.egg-info/
build/
dist/
exp/
*.pyc
*.db
*.tif
# bif_care|n2v
Simple IPython-based user interface to [CARE](http://csbdeep.bioimagecomputing.com/), a toolbox for Content-aware Image Restoration, and to [Noise2void](https://github.com/juglab/n2v)
# care-*less* CARE|n2v
Simple IPython-based user interface to [CARE](http://csbdeep.bioimagecomputing.com/), a toolbox for Content-aware Image Restoration, and to [Noise2Void](https://github.com/juglab/n2v)
# care
# CARE
## How to use:
CARE needs pairs of registered images - low-quality (input) and high-quality (output). It trains a convolutional neural network to transform low-quality images - which may even have lower physical resolution - into high-quality images. After training, the network can be applied to newly recorded low-quality images or movies. 2D, 3D and multi-channel images are supported; a separate network is trained for each channel.
@@ -12,7 +12,7 @@ CARE needs pairs of registered images - low (input) and high (output) quality. I
0. Clone this repository with `git clone https://....`
1. Copy and rename the IPython notebook template file: `bif_care_templ.ipynb` to `my_care_project.ipynb`
2. Open your renamed `my_care_project.ipynb` file in Jupyter or IPython notebook.
3. In order to train CARE, the path to the image pairs needs to be specified. Then, choose the low- and high-quality images using wildcards (e.g. `low*.tif` and `high*.tif`); a sketch of what this matching amounts to follows below. The images are converted and image patches are extracted, which is required for efficient GPU execution. Choose patch sizes for your input dimensions `(Z)YX` and set how many patches should be extracted per image pair. The extracted patches are saved to the output directory.
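A minimal sketch of what the wildcard selection amounts to (illustrative only; the folder name and file patterns are assumptions):

```python
from pathlib import Path

in_dir = Path("/data/my_experiment")           # assumed project folder
low_files = sorted(in_dir.glob("low*.tif"))    # low-quality inputs
high_files = sorted(in_dir.glob("high*.tif"))  # matching high-quality targets
assert len(low_files) == len(high_files), "every low image needs a high partner"
```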
#### Training the network
The training of a neural network proceeds iteratively in `epochs`. In each epoch, the network's weights are updated by optimizing a loss function on `steps_per_epoch` batches of image patches; the size of each batch is given by `batch_size`. To make use of all your image data, select `steps_per_epoch = #patches / batch_size` (a worked example follows below). By default, 10% of the patches are held out for validation and not used in training.
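For example, a back-of-the-envelope calculation (the patch count and batch size here are assumptions, not recommendations):

```python
n_patches = 2048                           # assumed number of extracted patches
batch_size = 16
steps_per_epoch = n_patches // batch_size  # 128 steps, so each patch is seen about once per epoch
```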
@@ -39,7 +39,9 @@ You can predict new images in the IPython notebook directly using the prediction
3. Select the network file `<bif_care-out-folder>/models/CH_X_model/TF_SavedModel.zip` as 'Import model (.zip)' for your trained channel
4. Set additional parameters such as the number of tiles (increase it if the entire image does not fit into your GPU memory) and press OK. A notebook-based equivalent is sketched below.
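Notebook-based prediction follows csbdeep's model API; a minimal sketch, assuming a trained channel-0 model in the bif_care output folder (paths, the stand-in image, and tile counts are assumptions):

```python
import numpy
from csbdeep.models import CARE

# load the trained model for channel 0 (folder layout as produced by bif_care)
model = CARE(None, 'CH_0_model', basedir='bif_care_out/models')

img = numpy.random.rand(32, 512, 512).astype(numpy.float32)  # stand-in ZYX stack

# n_tiles splits the volume so each tile fits into GPU memory;
# increase the tile counts if prediction runs out of memory
restored = model.predict(img, axes='ZYX', n_tiles=(1, 4, 4))
```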
# Noise2void
---
# Noise2Void
## How to use:
Noise2Void does not require pairs of images; it trains directly on the noisy images themselves.
1. Copy and rename the IPython notebook template file: `bif_n2v_templ.ipynb` to `my_n2v_project.ipynb`
@@ -104,7 +106,7 @@ Unzip, copy and rename (e.g. *_low.tif*, *_high.tif*) the images from `low` and
### Troubleshooting and known issues
* tensorflow 1.13.x requires the NVIDIA toolkit 10.0 for the latest csbdeep 0.3.0 release.
* Currently, NVIDIA toolkit 10.1 is not supported by the latest tensorflow==1.13.1 release
* To install bioformats/javabridge you need the Microsoft Visual C++ 14.0 compiler (Visual Studio 2015), which is included in Microsoft Visual Studio Community Edition >= 2017
* To install bioformats/javabridge you need the Java JDK 8 (1.8), or download [pre-compiled .whl](https://www.lfd.uci.edu/~gohlke/pythonlibs/) packages and install them by:
@@ -11,6 +11,9 @@ from matplotlib import pyplot as plt
from skimage.transform import rescale
from tqdm import tqdm_notebook as tqdm
import warnings
warnings.simplefilter("ignore")
import tensorflow as tf
from csbdeep.utils import plot_some
from csbdeep.models import Config, CARE
@@ -29,7 +32,7 @@ tf.logging.set_verbosity(tf.logging.ERROR)
if type(tf.contrib) != type(tf): tf.contrib._warning = None  # silence the tf.contrib deprecation notice
class BifCareInputConverter(object):
def __init__(self, **params):
self.order = 0  # spline order for skimage.transform.rescale (0 = nearest neighbor)
self.__dict__.update(**params)
@@ -51,8 +54,8 @@ class BifCareInputConverter(object):
else:
print(" -- Error: Pixel-type not supported. Pixel type must be 8- or 16-bit")
return
series = 0
z_size = reader.getSizeZ()
y_size = reader.getSizeY()
x_size = reader.getSizeX()
@@ -61,7 +64,7 @@ class BifCareInputConverter(object):
t_size = reader.getSizeT()
for t in range(t_size):
img_3d = numpy.zeros((z_size, c_size, y_size, x_size), dtype=dtype)
for z in range(z_size):
for c in range(c_size):
@@ -71,22 +74,22 @@ class BifCareInputConverter(object):
c=c, rescale=False)
tmp_dir = pathlib.Path(self.out_dir) / "train_data" / "raw"
for c in range(c_size):
low_dir = tmp_dir / "CH_{}".format(c) / conv_token
low_dir.mkdir(parents=True, exist_ok=True)
out_tif = low_dir / "training_file_{:04d}_t{:04d}.tif".format(f_i, t)
img_3d_ch = img_3d[:, c, :, :]
if conv_scaling:
img_3d_ch = rescale(img_3d_ch, conv_scaling, preserve_range=True,
order=self.order,
multichannel=False,
mode="reflect",
anti_aliasing=True)
tifffile.imsave(out_tif, img_3d_ch[:, None, :, :].astype(dtype),
imagej=True,
metadata={'axes': 'ZCYX'})
ir.close()
@@ -101,9 +104,9 @@ class BifCareInputConverter(object):
print("Done")
class BifCareTrainer(object):
def __init__(self, **params):
self.order = 0
self.__dict__.update(**params)
def create_patches(self):
for ch in self.train_channels:
@@ -125,15 +128,15 @@ class BifCareTrainer(object):
)
plt.figure(figsize=(16,4))
rand_sel = numpy.random.randint(low=0, high=len(X), size=6)
plot_some(X[rand_sel, 0], Y[rand_sel, 0], title_list=[range(6)], cmap="gray")
plt.show()
print("Done")
return
def get_training_patch_path(self):
return pathlib.Path(self.out_dir) / 'train_data' / 'patches'
@@ -155,7 +158,7 @@ class BifCareTrainer(object):
config = Config(axes, n_channel_in, n_channel_out, train_epochs=self.train_epochs,
train_steps_per_epoch=self.train_steps_per_epoch,
train_batch_size=self.train_batch_size,
**config_args)
# Training
model = CARE(config, 'CH_{}_model'.format(ch), basedir=pathlib.Path(self.out_dir) / 'models')
@@ -174,15 +177,15 @@ class BifCareTrainer(object):
_P = model.keras_model.predict(X_val[:5])
plot_some(X_val[:5], Y_val[:5], _P, pmax=99.5, cmap="gray")
plt.suptitle('5 example validation patches\n'
'top row: input (source), '
'middle row: target (ground truth), '
'bottom row: predicted from source')
plt.show()
print("-- Export model for use in Fiji...")
model.export_TF()
print("Done")
@@ -213,8 +216,8 @@ class BifCareTrainer(object):
else:
print("Error: Pixel-type not supported. Pixel type must be 8- or 16-bit")
return
series = 0
z_size = reader.getSizeZ()
y_size = reader.getSizeY()
x_size = reader.getSizeX()
@@ -227,7 +230,7 @@ class BifCareTrainer(object):
if c_size != len(self.train_channels):
print(" -- Warning: Number of Channels during training and prediction do not match. Using channels {} for prediction".format(self.train_channels))
for ch in self.train_channels:
model = CARE(None, 'CH_{}_model'.format(ch), basedir=pathlib.Path(self.out_dir) / 'models')
res_image_ch = numpy.zeros(shape=(t_size, z_out_size, 1, y_out_size, x_out_size), dtype=dtype)
@@ -237,11 +240,11 @@ class BifCareTrainer(object):
for z in range(z_size):
img_3d[z, :, :] = ir.read(series=series,
z=z,
c=ch,
t=t, rescale=False)
img_3d_ch_ex = rescale(img_3d, self.low_scaling, preserve_range=True,
order=self.order,
multichannel=False,
mode="reflect",
anti_aliasing=True)
@@ -252,21 +255,21 @@ class BifCareTrainer(object):
pred = pred.clip(di.min, di.max).astype(dtype)
res_image_ch[t, :, 0, :, :] = pred
if False:  # disabled: per-timepoint export kept for reference
ch_t_out_fn = os.path.join(os.path.dirname(file_fn), os.path.splitext(os.path.basename(file_fn))[0] + "_care_predict_tp{:04d}_ch{}.tif".format(t, ch))
print("Saving time-point {} and channel {} to file '{}'".format(t, ch, ch_t_out_fn))
tifffile.imsave(ch_t_out_fn, pred[None,:, None, :, :], imagej=True, metadata={'axes': 'TZCYX'})
ch_out_fn = os.path.join(os.path.dirname(file_fn),
os.path.splitext(os.path.basename(file_fn))[0]
+ "_care_predict_ch{}.tif".format(ch))
print(" -- Saving channel {} CARE prediction to file '{}'".format(ch, ch_out_fn))
if keep_meta:
reso = (1 / (pixel_reso.X / self.low_scaling[2]),
1 / (pixel_reso.Y / self.low_scaling[1]))
spacing = pixel_reso.Z / self.low_scaling[0]
unit = pixel_reso.Xunit
@@ -274,7 +277,7 @@ class BifCareTrainer(object):
tifffile.imsave(ch_out_fn, res_image_ch, imagej=True, resolution=reso, metadata={'axes' : 'TZCYX',
'finterval': finterval,
'spacing' : spacing,
'unit' : unit})
else:
tifffile.imsave(ch_out_fn, res_image_ch)
@@ -282,7 +285,7 @@ class BifCareTrainer(object):
res_image_ch = None # should trigger gc and free the memory
@@ -22,21 +22,21 @@ def get_space_time_resolution(img_path):
C = i.Pixels.get_SizeC()
Z = i.Pixels.get_SizeZ()
T = i.Pixels.get_SizeT()
X_res = i.Pixels.get_PhysicalSizeX()
Y_res = i.Pixels.get_PhysicalSizeY()
Z_res = i.Pixels.get_PhysicalSizeZ()
X_res_unit = i.Pixels.get_PhysicalSizeXUnit()
Y_res_unit = i.Pixels.get_PhysicalSizeYUnit()
Z_res_unit = i.Pixels.get_PhysicalSizeZUnit()
X_res_unit = i.Pixels.get_PhysicalSizeXUnit() or "micron"
Y_res_unit = i.Pixels.get_PhysicalSizeYUnit() or "micron"
Z_res_unit = i.Pixels.get_PhysicalSizeZUnit() or "micron"
if None in [X_res, Y_res]:
X_res = Y_res = 1
if None in [X_res_unit, Y_res_unit]:
X_res_unit = Y_res_unit = "pixel"
if Z_res is None:
Z_res = 1
Z_res_unit = "pixel"
@@ -47,17 +47,17 @@ def get_space_time_resolution(img_path):
Y_res_unit = "micron"
if '\xb5' in Z_res_unit:
Z_res_unit = "micron"
Z_res_unit = "micron"
i.Pixels.get_PhysicalSizeZUnit()
frame_interval = 0
Tunit = "sec."
if i.Pixels.node.get("TimeIncrement"):
frame_interval = i.Pixels.node.get("TimeIncrement")
Tunit = i.Pixels.node.get("TimeIncrementUnit")
elif (T > 1) and i.Pixels.get_plane_count() > 0:
plane_axes = i.Pixels.get_DimensionOrder().replace("X", "").replace("Y", "")
@@ -85,8 +85,8 @@ class JVM(object):
def start(self):
if not JVM.started:
jv.start_vm(class_path=bf.JARS,
max_heap_size='8G',
args=["-Dlog4j.configuration=file:{}".format(self.log_config),],
run_headless=True)
JVM.started = True
@@ -98,15 +98,15 @@ class JVM(object):
def get_pixel_dimensions(fn):
JVM().start()
ir = bf.ImageReader(str(fn))
t_size = ir.rdr.getSizeT()
z_size = ir.rdr.getSizeZ()
c_size = ir.rdr.getSizeC()
y_size = ir.rdr.getSizeY()
x_size = ir.rdr.getSizeX()
ir.close()
return Axes(t_size, z_size, c_size, y_size, x_size)
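# Example (hypothetical file and values):
#   dims = get_pixel_dimensions("cells.tif")
#   -> Axes(t=1, z=25, c=2, y=1024, x=1024), accessed as dims.t, dims.z, ...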
@@ -137,7 +137,7 @@ def check_file_lists(in_dir, low_wc, high_wc):
if dim_low.t != dim_high.t:
return False, "Low and high quality images have different number of time points\n '{}' != '{}'".format(fl, fh)
if (dim_low.x > dim_high.x) or \
(dim_low.y > dim_high.y) or \
(dim_low.z > dim_high.z):
@@ -147,9 +147,9 @@ def check_file_lists(in_dir, low_wc, high_wc):
return True, "OK"
def get_upscale_factors(in_dir, low_wc, high_wc):
low_fl = get_file_list(in_dir, low_wc)
high_fl = get_file_list(in_dir, high_wc)
low_dim = get_pixel_dimensions(str(low_fl[0]))
high_dim = get_pixel_dimensions(str(high_fl[0]))
@@ -221,15 +221,40 @@ class BFListReader(object):
for c in range(dims.c):
img[t, z, :, :, c] = ir.read(series=0,
z=z,
c=c,
t=t, rescale=False)
if "Z" not in axes:
img = img[:, 0, ...]
res.append(img)
ir.close()
return res
def load_imgs_generator(self):
# lazily yields one image at a time instead of loading all files into memory
axes = self.check_dims_equal()
for fn in self.img_fns:
dims = get_pixel_dimensions(fn)
print(" -- ", fn, dims)
ir = bf.ImageReader(str(fn))
img = numpy.zeros((dims.t, dims.z, dims.y, dims.x, dims.c), numpy.float32)
for t in range(dims.t):
for z in range(dims.z):
for c in range(dims.c):
img[t, z, :, :, c] = ir.read(series=0,
z=z,
c=c,
t=t, rescale=False)
if "Z" not in axes:
img = img[:, 0, ...]
yield img
ir.close()  # executes on the next iteration; exhaust the generator so all readers are closed
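# Usage sketch (hypothetical; assumes a BFListReader holding a valid img_fns list):
#   reader = BFListReader(...)                 # construction details assumed
#   for img in reader.load_imgs_generator():
#       process(img)                           # one (T, Z, Y, X, C) array at a time
#                                              # (Z axis dropped for 2D data)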
class Timer(object):
csbdeep==0.3.0
csbdeep==0.4.1
tqdm>=4.23.4
scipy>=1.2.0
scikit_image>=0.14.2
@@ -11,4 +11,4 @@ tensorflow-gpu>=1.12.0
javabridge>=1.0.18
python-bioformats>=1.5.2
jupyterlab>=0.35.6
n2v==0.1.3
\ No newline at end of file
n2v==0.1.8
\ No newline at end of file
@@ -7,9 +7,10 @@ with open("README.md", "rb") as f:
setup(
name = "bif_care",
packages = ["bif_care"],
version = "0.1",
version = "0.2",
description = description,
long_description = description,
entry_points = {'console_scripts': ['bif_n2v=bif_n2v.bif_n2v:cmd_line']},
author = "Christoph Sommer",
author_email = "christoph.sommer@gmail.com",
author_email = "christoph.sommer23@gmail.com",
)
\ No newline at end of file