Commit e27ca614 authored by Christoph Sommer

initial commit

frapdiff.egg-info/
__pycache__/
build/
dist/
.vscode/
include README.md
include LICENSE
# FRAPdiff
## Example
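A minimal single-movie sketch using `extract_frap_profiles_and_run` (defined below). The module path `frapdiff.frapdiff` and the input file name are placeholder assumptions; the function name and its parameters come from the code in this commit:

```python
from pathlib import Path

# Module path is an assumption about the package layout; only the function
# itself is defined in this commit.
from frapdiff.frapdiff import extract_frap_profiles_and_run

# The movie must be an ImageJ TIFF with an embedded rectangular FRAP ROI
# (or a sidecar .roi file with the same base name next to it).
result = extract_frap_profiles_and_run(
    mov_fn=Path("example_movie.tif"),   # placeholder file name
    bleach_correction=True,             # simple ratio-based bleach correction
    correction_region_size=150,         # size of the upper-left correction window (pixels)
    project_on="v",                     # "v": profile along y (mean over x), "h": along x
    mirror="first_half",                # mirror the first half of the profile
    D_guess=0.05,                       # initial guess for the diffusion coefficient
    koff_guess=0.1,                     # initial guess for koff
    min_l_f=8,
    max_l_f=16,
)
print(result)  # the same dict is also written next to the movie as *_results.json
```

For batch processing, the `main_cli` entry point below exposes the same parameters as Gooey/argparse options (`--input_dir`, `--output`, `--recursive`, `--bleach_correction`, …) and collects the per-movie results into a single tab-separated table.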
## Installation
#### pip
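Assuming the package is published under the name suggested by the `frapdiff.egg-info/` entry above, installation should be possible with `pip install frapdiff`; from a local checkout of this repository, `pip install .` works as well.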
#### Dependencies
* numpy
* pandas
* networkx
* scipy
* matplotlib
* tifffile
* roifile
* gooey
import os
import re
import sys
import json
import numpy
import pandas
import roifile
import tifffile
import traceback
from pathlib import Path
from matplotlib import pyplot as plt
from gooey import Gooey, GooeyParser
from .reflecting_diffusion_fitter import run_fitter
def get_physical_units_from_imagej_tif(tif_fn):
"""Return (pixel_size, frame_interval) read from ImageJ TIFF metadata.
YResolution is stored as a (numerator, denominator) rational giving pixels
per physical unit, so the pixel size is denominator / numerator."""
with tifffile.TiffFile(tif_fn) as tif:
assert tif.is_imagej
tags = tif.pages[0].tags
y_resolution = tags["YResolution"].value
finterval = tif.imagej_metadata["finterval"]
pixel_size = y_resolution[1] / y_resolution[0]
return pixel_size, finterval
def simple_bleach_correction(mov, win_size):
"""Ratio-based bleach correction: divide every frame by the mean intensity
of a win_size x win_size window in the upper-left corner of the movie."""
roi_values = mov[:, :win_size, :win_size]
roi_values_mean = roi_values.mean(axis=(1, 2))
mov_corr = (mov.T / roi_values_mean).T
return mov_corr
def extract_frap_profiles_and_run(
mov_fn,
bleach_correction=True,
roi_ext_factor=1.5,
project_on="v",
mirror="first_half",
D_guess=0.05,
koff_guess=0.1,
min_l_f=8,
max_l_f=16,
correction_region_size=150,
):
"""Extract the FRAP recovery profile from an ImageJ TIFF movie using its
embedded rectangular ROI, optionally bleach-correct and mirror it, then fit
the reflecting-diffusion model and write the result to a JSON file."""
roi = roifile.ImagejRoi.fromfile(mov_fn)
if len(roi) == 0:
print("cannot read ROI from TIFF file, falling back to sidecar .roi file")
roi = roifile.ImagejRoi.fromfile(str(mov_fn)[:-4] + ".roi")
else:
roi = roi[0]
pixel_size, finterval = get_physical_units_from_imagej_tif(mov_fn)
mov = tifffile.imread(mov_fn)
if bleach_correction:
mov = simple_bleach_correction(mov, correction_region_size)
if project_on == "v":
roi_height = roi.bottom - roi.top
roi_extension = int(roi_height * roi_ext_factor)
roi_values = mov[
:,
max(roi.top - roi_extension, 0) : min(
roi.bottom + roi_extension, mov.shape[1] - 1
),
roi.left : roi.right,
]
roi_values_projected = roi_values.mean(axis=2)
elif project_on == "h":
roi_width = roi.right - roi.left
roi_extension = int(roi_width * roi_ext_factor)
roi_values = mov[
:,
roi.top : roi.bottom,
max(roi.left - roi_extension, 0) : min(
roi.right + roi_extension, mov.shape[2] - 1
),
]
roi_values_projected = roi_values.mean(axis=1)
else:
raise ValueError(f"Value for 'project_on' not understood. Use 'v' or 'h'")
# Find frame of bleaching
time_bleach = numpy.argmax(numpy.abs(numpy.diff(roi_values_projected.mean(1)))) + 1
# Normalize
roi_values_projected = (
roi_values_projected / roi_values_projected[time_bleach - 1, :].mean()
)
data = roi_values_projected
if mirror == "first_half":
data1 = data[:, : (data.shape[1] // 2)]
data = numpy.c_[data1, numpy.fliplr(data1)]
elif mirror == "second_half":
data2 = data[:, (data.shape[1] // 2) :]
data = numpy.c_[numpy.fliplr(data2), data2]
elif mirror.lower() == "no":
pass
else:
raise ValueError(
"mirror parameter not understood. Use 'first_half', 'second_half', or 'no'"
)
# Optional visual check of the projected, normalized profiles. The bare
# `return` would skip the fit below, so the debug block is left disabled.
# plt.figure()
# for line in data:
#     plt.plot(line)
# plt.show()
# return
I0 = data[time_bleach - 1, :].mean()
data = data[time_bleach:, :]
# tifffile.imsave(mov_fn[:-4] + "_bc.tiff", (mov / I0).astype("float32")[:, None, ...])
print("I0", I0)
print("time of bleach", time_bleach)
data = pandas.DataFrame(data.T)
data.insert(0, "loc", pixel_size * numpy.arange(data.shape[0]))
data_fn = str(mov_fn)[:-4] + "_frap_recovery_proj.txt"
data.to_csv(data_fn, sep="\t", header=False, index=False)
result_fn = str(mov_fn)[:-4] + "_results.json"
result = run_fitter(
data_fn,
mov_fn.stem,
I0=I0,
t_step_size=float(finterval),
D_guess=D_guess,
koff_guess=koff_guess,
min_l_f=min_l_f,
max_l_f=max_l_f,
)
result["frameInteval"] = float(finterval)
result["pixelSize"] = float(pixel_size)
result["frameOfFrap"] = int(time_bleach)
result["I0"] = I0
with open(result_fn, "w") as fh:
json.dump(result, fh)
return result
# This needs to be *before* the @Gooey decorator!
# (It makes the script skip the Gooey GUI whenever command-line arguments are
# passed, and only open the GUI when the script is started without arguments.)
if len(sys.argv) >= 2:
if "--ignore-gooey" not in sys.argv:
sys.argv.append("--ignore-gooey")
@Gooey(
program_name="FRAPdiff",
program_description="Frap2Diffusion",
tabbed_groups=True,
target="frapdiff.exe",  # see https://github.com/chriskiehl/Gooey/issues/219
)
def main_cli():
"""
input_dir
output
recursive :: True
bleach_correction :: True
correction_region_size :: 150
project_values :: vertical
extend :: 1.5
mirror_values :: first_half
D_initial :: 0.05
Koff_initial :: 0.1
minimum_Lf :: 8.0
maximum_Lf :: 16.0
"""
parser = GooeyParser()
in_movies_parser = parser.add_argument_group("General")
in_movies_parser.add_argument(
"-d",
"--input_dir",
required=True,
help="Input Folder containing movies (.tif)",
widget="DirChooser",
)
in_movies_parser.add_argument(
"-r",
"--recursive",
action="store_true",
gooey_options={"initial_value": True},
help="Seach movies in input folder recursively",
)
in_movies_parser.add_argument(
"-o",
"--output",
required=True,
help="Output file (.tab)",
widget="FileSaver",
gooey_options={
"wildcard": "TAB (*.tab)|*.tab|" "All files (*.*)|*.*",
"default_file": "results.tab",
},
)
bleach_corr_parser = parser.add_argument_group("Bleach correction")
bleach_corr_parser.add_argument(
"-b",
"--bleach_correction",
action="store_true",
gooey_options={"initial_value": True},
help="Perfom simple ratio-based bleach correction",
)
bleach_corr_parser.add_argument(
"-bs",
"--correction_region_size",
widget="IntegerField",
gooey_options={"min": 0, "max": 999, "increment": 1, "initial_value": 150},
help="Size of bleach-correction region (upper left corner)",
type=int,
)
frap_region_parser = parser.add_argument_group("FRAP region")
frap_region_parser.add_argument(
"-p",
"--project_values",
widget="Dropdown",
choices=["vertical", "horizontal"],
gooey_options={"initial_value": "vertical"},
type=str,
)
frap_region_parser.add_argument(
"-e",
"--extend",
widget="DecimalField",
gooey_options={"min": 0, "max": 3.0, "increment": 0.1, "initial_value": 1.5},
help="Extend original FRAP window to both sides",
type=float,
)
frap_region_parser.add_argument(
"-m",
"--mirror_values",
widget="Dropdown",
choices=["No", "first_half", "second_half"],
help="Mirror values",
gooey_options={"initial_value": "first_half"},
type=str,
)
fitting_parser = parser.add_argument_group("Fitting")
fitting_parser.add_argument(
"-D",
"--D_initial",
widget="DecimalField",
gooey_options={"min": 0, "max": 3.0, "increment": 0.01, "initial_value": 0.05},
help="Initial guess for diffusion",
type=float,
)
fitting_parser.add_argument(
"-K",
"--Koff_initial",
widget="DecimalField",
gooey_options={"min": 0, "max": 3.0, "increment": 0.01, "initial_value": 0.1},
help="Initial guess for Koff",
type=float,
)
fitting_parser.add_argument(
"-min_lf",
"--minimum_Lf",
widget="DecimalField",
gooey_options={"min": 0, "max": 20.0, "increment": 1.0, "initial_value": 8},
help="Minimum Lf",
type=float,
)
fitting_parser.add_argument(
"-max_lf",
"--maximum_Lf",
widget="DecimalField",
gooey_options={"min": 0, "max": 20.0, "increment": 1.0, "initial_value": 16},
help="Maximum Lf",
type=float,
)
args = parser.parse_args()
for key, value in vars(args).items():
print(f"{key} :: {value}")
if args.recursive:
all_mov_fns = [path for path in Path(args.input_dir).rglob("*.tif")]
else:
all_mov_fns = [path for path in Path(args.input_dir).glob("*.tif")]
results = []
for mov_fn in all_mov_fns:
print(mov_fn)
try:
result_dict = extract_frap_profiles_and_run(
mov_fn=mov_fn,
bleach_correction=args.bleach_correction,
roi_ext_factor=args.extend,
project_on=args.project_values[0],  # "vertical"/"horizontal" -> "v"/"h"
mirror=args.mirror_values,
D_guess=args.D_initial,
koff_guess=args.Koff_initial,
min_l_f=args.minimum_Lf,
max_l_f=args.maximum_Lf,
correction_region_size=args.correction_region_size,
)
results.append(result_dict)
except Exception:
print(f"ERROR for file '{mov_fn}'")
traceback.print_exc()
tab = pandas.DataFrame(results)
tab.to_csv(args.output, sep="\t")
if __name__ == "__main__":
main_cli()
"""
* Copyright (C) 2021 Lehigh University.
*
* This program is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see http://www.gnu.org/licenses/.
*
* Author: David Rutkowski (dmr518@lehigh.edu)
*
* The model used is described in further detail in Gerganova et al.
* https://www.biorxiv.org/content/10.1101/2020.12.18.423457v3
"""
import os
import numpy as np
import scipy, scipy.optimize
import matplotlib.pyplot as plt
def run_fitter(
filepath,
cell_name,
I0,
t_step_size,
D_guess,
koff_guess,
min_l_f=2.0,
max_l_f=10.0,
max_num_points=1000,
max_n=500,
x_d=0.0,
x_e=0.0,
):
I1 = I0
x_initial = []
z_initial = []
# number of additional neighbors to include in averaging
include_neighbor_count = 0
def diffusion_reflect(data, D, koff, Iinf):
x = data[0]
t = data[1]
result = 0.0
for i in range(0, len(x_initial) - 1):
avg_intensity = 0.0
avg_intensity_count = 0
for j in range(i - include_neighbor_count, i + include_neighbor_count + 2):
if j >= 0 and j < len(x_initial):
avg_intensity += z_initial[j]
avg_intensity_count += 1
avg_intensity = avg_intensity / float(avg_intensity_count)
x_diff = x_initial[i + 1] - x_initial[i]
result += avg_intensity * x_diff
x_l = (Iinf * (x_d - x_e) + result) / (2 * Iinf - I0 - I1)
# x_l = 25
print(
"D = {0:0.6f}, koff = {1:0.6f}, Iinf = {2:0.6f}, x_l = {3:0.6f}, x_e = {5:0.3f}, x_d = {4:0.3f}".format(
D, koff, Iinf, x_l, x_d, x_e
)
)
if x_l < 0.0:
raise ValueError(
"x_l less than zero: " + str(x_l) + ", Iinf may not be maintained"
)
# Note: the fallback below is unreachable because of the raise above.
x_l = 0.0
x_a = x_d
x_b = x_e
else:
x_a = x_d - x_l
x_b = x_e + x_l
x_diff = x_d - x_a
result += I0 * x_diff
x_diff = x_b - x_e
result += I1 * x_diff
result = result / (x_b - x_a)
sums = [0.0] * (len(x_initial) - 1)
sum_left = 0.0
sum_right = 0.0
result_two = 0.0
for n in range(1, max_n + 1):
lambda_n = n * np.pi / (x_b - x_a)
middle_term = (
np.exp(-D * t * lambda_n * lambda_n)
/ lambda_n
* np.cos(lambda_n * (x - x_a))
)
for i in range(0, len(x_initial) - 1):
avg_intensity = 0.0
avg_intensity_count = 0
for j in range(
i - include_neighbor_count, i + include_neighbor_count + 2
):
if j >= 0 and j < len(x_initial):
avg_intensity += z_initial[j]
avg_intensity_count += 1
avg_intensity = avg_intensity / float(avg_intensity_count)
sums[i] += (
avg_intensity
* middle_term
* (
np.sin(lambda_n * (x_initial[i + 1] - x_a))
- np.sin(lambda_n * (x_initial[i] - x_a))
)
)
sum_left += (
I0
* middle_term
* (np.sin(lambda_n * (x_d - x_a)) - np.sin(lambda_n * (x_a - x_a)))
)
sum_right += (
I1
* middle_term
* (np.sin(lambda_n * (x_b - x_a)) - np.sin(lambda_n * (x_e - x_a)))
)
for i in range(0, len(x_initial) - 1):
result_two += sums[i]
result_two += sum_left
result_two += sum_right
result_two = 2.0 * np.exp(-koff * t) / (x_b - x_a) * result_two
return result + result_two
def IndividualLineComparisons(func, data, fittedParameters):
x_data = data[0]
y_data = data[1]
z_data = data[2]
x_step = 0.1
x_a = x_d
x_b = x_e
result = 0.0
for i in range(0, len(x_initial) - 1):
avg_intensity = 0.0
avg_intensity_count = 0
for j in range(i - include_neighbor_count, i + include_neighbor_count + 2):
if j >= 0 and j < len(x_initial):
avg_intensity += z_initial[j]
avg_intensity_count += 1
avg_intensity = avg_intensity / float(avg_intensity_count)
x_diff = x_initial[i + 1] - x_initial[i]
result += avg_intensity * x_diff
Iinf = fittedParameters[2]
x_l = (Iinf * (x_d - x_e) + result) / (2 * Iinf - I0 - I1)
if x_l < 0.0:
raise ValueError(
"x_l less than zero: " + str(x_l) + ", Iinf may not be maintained"
)
# Note: the fallback below is unreachable because of the raise above.
x_l = 0.0
x_a = (I0 * x_d - Iinf * x_e + result) / (I0 - Iinf)
x_b = x_e
else:
x_a = x_d - x_l
x_b = x_e + x_l
lower_range = np.arange(x_a, x_d, x_step).tolist()
upper_range = np.arange(x_e + x_step, x_b, x_step).tolist()
x_data_extended = x_data[:].tolist()
y_data_extended = y_data[:].tolist()
unique_times = list(set(y_data))
temp_list_x = []
temp_list_y = []
for i in range(0, len(lower_range)):
for j in range(0, len(unique_times)):
temp_list_x.append(lower_range[i])
temp_list_y.append(unique_times[j])
temp_list_x.extend(x_data_extended)
temp_list_y.extend(y_data_extended)
x_data_extended = temp_list_x[:]
y_data_extended = temp_list_y[:]
temp_list_x = []
temp_list_y = []
for i in range(0, len(upper_range)):
for j in range(0, len(unique_times)):
temp_list_x.append(upper_range[i])
temp_list_y.append(unique_times[j])
x_data_extended.extend(temp_list_x)
y_data_extended.extend(temp_list_y)
z_predicted_list = func(
np.array([x_data_extended, y_data_extended]), *fittedParameters
)
z_by_time_dict = {}
z_predicted_by_time_dict = {}
z_predicted_by_time_dict_low = {}
z_predicted_by_time_dict_high = {}
for i in range(0, len(x_data)):
curr_time = y_data[i]
if curr_time in z_by_time_dict: