Publications
Munkyu Kang; Elizabeth Murray; Leyla A. Kabuli; Rikky Muller; Laura Waller
Correcting curvature in micromirror-based spatial light modulators with a microlens array Journal Article
In: Opt. Express, vol. 34, no. 8, pp. 15783–15794, 2026.
Abstract | Links | BibTeX | Tags: Deformable mirrors; Diffraction efficiency; Holographic displays; Microlens arrays; Phase modulation; Spatial light modulators
@article{Kang:26,
title = {Correcting curvature in micromirror-based spatial light modulators with a microlens array},
author = {Munkyu Kang and Elizabeth Murray and Leyla A. Kabuli and Rikky Muller and Laura Waller},
url = {https://opg.optica.org/oe/abstract.cfm?URI=oe-34-8-15783},
doi = {10.1364/OE.593116},
year = {2026},
date = {2026-04-01},
journal = {Opt. Express},
volume = {34},
number = {8},
pages = {15783--15794},
publisher = {Optica Publishing Group},
abstract = {Computer-generated holography requires high-speed spatial light modulators (SLMs) for dynamically patterning light in 3D. Piston-motion micromirror-based SLMs support high-speed ($\geq$ 10 kHz) phase modulation; however, fabricating micromirror arrays with sufficient fill factor necessary for high diffraction efficiency is challenging. In particular, the larger mirrors of high fill factor designs are susceptible to stress-induced curvature that significantly degrades optical performance. In this work, we introduce an optical compensation method using a pitch-matched microlens array (MLA) to focus light onto just the center of each mirror. Our approach thus avoids curvature-induced artifacts and improves optical fill factor to nearly 100\%, independent of the original mechanical fill factor. Through simulations and experiments on a fabricated micromirror array with bowed mirrors, we show that the Pearson correlation coefficient of the imparted phase profile is improved from 0.11 to 0.85 and the brightness of a holographically-generated single spot is enhanced by 8× with our microlens array in place. Our hybrid optical-electromechanical strategy thus provides a scalable path toward high-speed, high-fidelity wavefront control for applications such as adaptive optics, holographic displays, and optogenetics.},
keywords = {Deformable mirrors; Diffraction efficiency; Holographic displays; Microlens arrays; Phase modulation; Spatial light modulators},
pubstate = {published},
tppubtype = {article}
}
Leyla A. Kabuli; Henry Pinkard; Eric Markley; Clara S. Hung; Laura Waller
Designing lensless imaging systems to maximize information capture Journal Article
In: Optica, vol. 13, no. 2, pp. 227–235, 2026.
Abstract | Links | BibTeX | Tags: Computational imaging; Imaging systems; Neural networks; Optical imaging; Systems design; Three dimensional imaging
@article{Kabuli:26,
title = {Designing lensless imaging systems to maximize information capture},
author = {Leyla A. Kabuli and Henry Pinkard and Eric Markley and Clara S. Hung and Laura Waller},
url = {https://opg.optica.org/optica/abstract.cfm?URI=optica-13-2-227},
doi = {10.1364/OPTICA.570334},
year = {2026},
date = {2026-02-01},
journal = {Optica},
volume = {13},
number = {2},
pages = {227--235},
publisher = {Optica Publishing Group},
abstract = {Mask-based lensless imaging uses an optical encoder (e.g., a phase or amplitude mask) to capture measurements, then a computational decoding algorithm to reconstruct images. In this work, we evaluate and design lensless encoders based on the information content of their measurements using mutual information estimation. Our approach formalizes the object-dependent nature of lensless imaging and quantifies the interdependence between object sparsity, encoder multiplexing, and noise. Our analysis reveals that optimal encoder designs should tailor encoder multiplexing to object sparsity for maximum information capture, and that all optimally encoded measurements share the same level of sparsity. Using mutual information-based optimization, we design information-optimal encoders for compressive imaging of fixed object distributions. Our designs demonstrate improved downstream reconstruction performance for objects in the distribution, without requiring joint optimization with a specific reconstruction algorithm. We validate our approach experimentally by evaluating lensless imaging systems directly from captured measurements, without the need for image formation models, reconstruction algorithms, or ground truth data. Our comprehensive analysis establishes design and engineering principles for lensless imaging systems and offers a model for the study of general multiplexing systems, especially those with object-dependent performance.},
keywords = {Computational imaging; Imaging systems; Neural networks; Optical imaging; Systems design; Three dimensional imaging},
pubstate = {published},
tppubtype = {article}
}
Amit Kohli
Robust Computational Imaging Under Aberrations and Algorithmic Uncertainty PhD Thesis
EECS Department, University of California, Berkeley, 2025.
@phdthesis{Kohli:31994,
title = {Robust Computational Imaging Under Aberrations and Algorithmic Uncertainty},
author = {Amit Kohli},
year = {2025},
date = {2025-12-01},
number = {UCB/},
internal-note = {NOTE(review): number field looks truncated (bare "UCB/" prefix, cf. UCB/EECS-YYYY-NN in sibling entries) -- confirm and complete the technical report number},
school = {EECS Department, University of California, Berkeley},
abstract = {Computational imaging has transformed scientific measurement by co-designing optical hardware and reconstruction algorithms around mathematical models of image formation. However, as applications push toward increasingly extreme scales (e.g., endoscopy), more demanding specifications in resolution and field of view (e.g., whole brain imaging), or the extraction of more information from fewer measurements (e.g., snapshot spectral imaging), computational imaging’s lack of robustness becomes apparent. In such settings, conditions deviate from idealized assumptions, and computational imaging systems can fail catastrophically or produce misleading results.
Among the prominent sources of error are optical aberrations and algorithmic uncertainty. Aberrations—imperfections present in all real imaging systems—are often inadequately modeled and can be computationally irreversible, limiting what can be recovered through standard post-processing alone. Algorithmic uncertainty arises when reconstruction algorithms lack guarantees on their behavior and may produce errors that are unexpectedly large or appear plausible but are incorrect. Deep learning models are an example of such algorithms; their flexibility makes them attractive for challenging reconstruction tasks, but their blackbox nature stymies interpretability and reliability.
This dissertation develops principled methods to address these challenges across the computational imaging pipeline. First, we introduce ring deconvolution microscopy, exploiting rotational symmetry to efficiently correct spatially-varying aberrations. Second, we prove that incorporating random phase masks into optical systems dramatically reduces their sensitivity to unknown aberrations, making their effects computationally reversible. Finally, we develop statistically rigorous uncertainty quantification for deep learning-based reconstruction, providing pixel-wise confidence intervals with formal guarantees that reveal unreliable regions. Together, these contributions establish foundations for robust computational imaging
under real-world conditions.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Among the prominent sources of error are optical aberrations and algorithmic uncertainty. Aberrations—imperfections present in all real imaging systems—are often inadequately modeled and can be computationally irreversible, limiting what can be recovered through standard post-processing alone. Algorithmic uncertainty arises when reconstruction algorithms lack guarantees on their behavior and may produce errors that are unexpectedly large or appear plausible but are incorrect. Deep learning models are an example of such algorithms; their flexibility makes them attractive for challenging reconstruction tasks, but their blackbox nature stymies interpretability and reliability.
This dissertation develops principled methods to address these challenges across the computational imaging pipeline. First, we introduce ring deconvolution microscopy, exploiting rotational symmetry to efficiently correct spatially-varying aberrations. Second, we prove that incorporating random phase masks into optical systems dramatically reduces their sensitivity to unknown aberrations, making their effects computationally reversible. Finally, we develop statistically rigorous uncertainty quantification for deep learning-based reconstruction, providing pixel-wise confidence intervals with formal guarantees that reveal unreliable regions. Together, these contributions establish foundations for robust computational imaging
under real-world conditions.
Martin Zach; Kuan-Chen Shen; Ruiming Cao; Michael Unser; Laura Waller; Jonathan Dong
Perturbative Fourier ptychographic microscopy for fast quantitative phase imaging Journal Article
In: Opt. Express, vol. 33, no. 18, pp. 38984–38996, 2025.
Abstract | Links | BibTeX | Tags: Computational imaging; Illumination design; Image resolution; Microlens arrays; Phase contrast; Phase imaging
@article{Zach:25,
title = {Perturbative Fourier ptychographic microscopy for fast quantitative phase imaging},
author = {Martin Zach and Kuan-Chen Shen and Ruiming Cao and Michael Unser and Laura Waller and Jonathan Dong},
url = {https://opg.optica.org/oe/abstract.cfm?URI=oe-33-18-38984},
doi = {10.1364/OE.560811},
year = {2025},
date = {2025-09-01},
journal = {Opt. Express},
volume = {33},
number = {18},
pages = {38984--38996},
publisher = {Optica Publishing Group},
abstract = {In computational phase imaging with a microscope equipped with an array of light emitting diodes as the illumination unit, conventional Fourier ptychographic microscopy achieves high resolution and wide-field reconstructions but is constrained by a lengthy acquisition time. Conversely, differential phase contrast (DPC) offers fast imaging but is limited in resolution. Here, we introduce perturbative Fourier ptychographic microscopy (pFPM). pFPM is an extension of DPC that incorporates dark-field illumination to enable fast, high-resolution, wide-field quantitative phase imaging with few measurements. We interpret DPC as the initial iteration of a Gauss-Newton algorithm with quadratic regularization and generalize it to multiple iterations and more sophisticated regularizers. This broader framework is not restricted to bright-field measurements and allows us to overcome resolution limitations of DPC. We develop tailored dark-field illumination patterns with ring shapes, that align with the perturbative interpretation and lead to an improvement in the quality of reconstruction with respect to other common illumination schemes. Consequently, our methodology combines an enhanced phase reconstruction algorithm with a specialized illumination strategy and offers significant advantages in both imaging speed and resolution.},
keywords = {Computational imaging; Illumination design; Image resolution; Microlens arrays; Phase contrast; Phase imaging},
pubstate = {published},
tppubtype = {article}
}
Eric Markley
Data-Driven Design of High-Dimensional, Snapshot Computational Imaging Systems PhD Thesis
2025.
Abstract | Links | BibTeX | Tags:
@phdthesis{Markley2025,
title = {Data-Driven Design of High-Dimensional, Snapshot Computational Imaging Systems},
author = {Eric Markley},
url = {https://www.proquest.com/openview/c815b58777964d9d0b4dfdd4af6564dc/1?pq-origsite=gscholar&cbl=18750&diss=y},
school = {University of California, Berkeley},
internal-note = {NOTE(review): school field was missing (required for phdthesis); inferred from sibling entries in this file -- confirm exact department wording},
year = {2025},
date = {2025-08-01},
urldate = {2025-08-01},
abstract = {Modern imaging systems increasingly rely on computational methods to extract high-dimensional information from 2D optical measurements. Examples include snapshot 3D
microscopy systems that capture volumetric data in a single exposure and hyperspectral
imagers that simultaneously measure spatial and spectral information across dozens of
wavelength channels. Designing such systems is challenging because it requires jointly optimizing both the optical hardware that encodes the scene and the computational algorithms
that decode the measurements, a process complicated by the non-convex, high-dimensional
parameter spaces and computationally expensive end-to-end training requirements. In
this dissertation, we present data-driven approaches that address these challenges through
physics-based simulation and information-theoretic design principles. We first develop a
memory-efficient, end-to-end pipeline that jointly optimizes optical elements and neural
reconstruction algorithms using differentiable simulation, demonstrating this method on a
snapshot 3D fluorescence microscope that achieves improved resolution over heuristic designs.
We then present a compact snapshot hyperspectral fluorescence microscope with a custom
iterative reconstruction algorithm tailored to its physical model.
To overcome the computational limitations of end-to-end optimization and accommodate
non-differentiable reconstruction algorithms, we develop an information-theoretic optimization
framework that treats optical design as a mutual information maximization problem. This
approach, implemented through the IDEAL and IDEAL-IO methods, decouples encoder
design from specific reconstruction implementations. By directly maximizing the information
content of measurements rather than optimizing reconstruction fidelity, this framework
provides a generalizable design principle that transcends particular decoder architectures
while reducing the computational requirements in comparison to end-to-end design.
The methods developed in this dissertation demonstrate that principled, simulation-driven
design can achieve improved performance across diverse high-dimensional imaging modalities
while maintaining computational tractability.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
microscopy systems that capture volumetric data in a single exposure and hyperspectral
imagers that simultaneously measure spatial and spectral information across dozens of
wavelength channels. Designing such systems is challenging because it requires jointly optimizing both the optical hardware that encodes the scene and the computational algorithms
that decode the measurements, a process complicated by the non-convex, high-dimensional
parameter spaces and computationally expensive end-to-end training requirements. In
this dissertation, we present data-driven approaches that address these challenges through
physics-based simulation and information-theoretic design principles. We first develop a
memory-efficient, end-to-end pipeline that jointly optimizes optical elements and neural
reconstruction algorithms using differentiable simulation, demonstrating this method on a
snapshot 3D fluorescence microscope that achieves improved resolution over heuristic designs.
We then present a compact snapshot hyperspectral fluorescence microscope with a custom
iterative reconstruction algorithm tailored to its physical model.
To overcome the computational limitations of end-to-end optimization and accommodate
non-differentiable reconstruction algorithms, we develop an information-theoretic optimization
framework that treats optical design as a mutual information maximization problem. This
approach, implemented through the IDEAL and IDEAL-IO methods, decouples encoder
design from specific reconstruction implementations. By directly maximizing the information
content of measurements rather than optimizing reconstruction fidelity, this framework
provides a generalizable design principle that transcends particular decoder architectures
while reducing the computational requirements in comparison to end-to-end design.
The methods developed in this dissertation demonstrate that principled, simulation-driven
design can achieve improved performance across diverse high-dimensional imaging modalities
while maintaining computational tractability.
Ruiming Cao; Guanghan Meng; Laura Waller
Sample motion for structured illumination fluorescence microscopy Journal Article
In: Opt. Lett., vol. 50, no. 12, pp. 4074–4077, 2025.
Abstract | Links | BibTeX | Tags: Diffraction limit; Fourier transforms; Imaging systems; Spatial light modulators; Speckle patterns; Structured illumination microscopy
@article{Cao:25,
title = {Sample motion for structured illumination fluorescence microscopy},
author = {Ruiming Cao and Guanghan Meng and Laura Waller},
url = {https://opg.optica.org/ol/abstract.cfm?URI=ol-50-12-4074},
doi = {10.1364/OL.560873},
year = {2025},
date = {2025-06-01},
journal = {Opt. Lett.},
volume = {50},
number = {12},
pages = {4074--4077},
publisher = {Optica Publishing Group},
abstract = {Structured illumination microscopy (SIM) uses a set of images captured with different illumination patterns to computationally reconstruct resolution beyond the diffraction limit. Here, we propose an alternative approach using a single speckle illumination pattern and relying on inherent sample motion to encode the super-resolved information in multiple raw images. From a set of raw fluorescence images captured as the sample moves, we jointly estimate both the sample motion and the super-resolved image. We demonstrate the feasibility of the proposed method both in simulation and in experiment.},
keywords = {Diffraction limit; Fourier transforms; Imaging systems; Spatial light modulators; Speckle patterns; Structured illumination microscopy},
pubstate = {published},
tppubtype = {article}
}
Amit Kohli; Anastasios N. Angelopoulos; David McAllister; Esther Whang; Sixian You; Kyrollos Yanny; Federico M. Gasparoli; Bo-Jui Chang; Reto Fiolka; Laura Waller
Ring deconvolution microscopy: exploiting symmetry for efficient spatially varying aberration correction Journal Article
In: Nature Methods, vol. 22, no. 6, pp. 1311–1320, 2025, ISSN: 1548-7105.
Abstract | Links | BibTeX | Tags:
@article{kohli_ring_2025,
title = {Ring deconvolution microscopy: exploiting symmetry for efficient spatially varying aberration correction},
author = {Amit Kohli and Anastasios N. Angelopoulos and David McAllister and Esther Whang and Sixian You and Kyrollos Yanny and Federico M. Gasparoli and Bo-Jui Chang and Reto Fiolka and Laura Waller},
url = {https://doi.org/10.1038/s41592-025-02684-5},
doi = {10.1038/s41592-025-02684-5},
issn = {1548-7105},
year = {2025},
date = {2025-06-01},
journal = {Nature Methods},
volume = {22},
number = {6},
pages = {1311--1320},
abstract = {The most ubiquitous form of aberration correction for microscopy is deconvolution; however, deconvolution relies on the assumption that the system’s point spread function is the same across the entire field of view. This assumption is often inadequate, but space-variant deblurring techniques generally require impractical amounts of calibration and computation. We present an imaging pipeline that leverages symmetry to provide simple and fast spatially varying deblurring. Our ring deconvolution microscopy method utilizes the rotational symmetry of most microscopes and cameras, and naturally extends to sheet deconvolution in the case of lateral symmetry. We derive theory and algorithms for ring deconvolution microscopy and propose a neural network based on Seidel aberration coefficients as a fast alternative. We demonstrate improvements in speed and image quality as compared to standard deconvolution and existing spatially varying deblurring across a diverse range of microscope modalities, including miniature microscopy, multicolor fluorescence microscopy, multimode fiber micro-endoscopy and light-sheet fluorescence microscopy. Our approach enables near-isotropic, subcellular resolution in each of these applications.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Neerja Aggarwal
Computational Hyperspectral Microscopy for Bioimaging PhD Thesis
EECS Department, University of California, Berkeley, 2025.
Abstract | Links | BibTeX | Tags:
@phdthesis{Aggarwal:EECS-2025-83,
title = {Computational Hyperspectral Microscopy for Bioimaging},
author = {Neerja Aggarwal},
url = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2025/EECS-2025-83.html},
year = {2025},
date = {2025-05-01},
number = {UCB/EECS-2025-83},
school = {EECS Department, University of California, Berkeley},
abstract = {Hyperspectral imaging involves detecting the spectrum (intensity vs wavelength) of light emitted at each point in space. It has applications in biology such as fluorescence imaging of live cells and interferometry to see inside tissues. However, traditional hyperspectral systems often have to scan through this three-dimensional spatial-spectral datacube ($x, y, \lambda$) due to a 2D sensor, resulting in long acquisition times and large setups. Snapshot imaging fits the entire 3D datacube onto a 2D sensor at once but sacrifices resolution. Computational imaging involves the codesign of both optics and algorithms together to beat traditional tradeoffs. In this work, we present three imaging systems for various bioimaging applications that benefit from computational imaging to improve spectral imaging performance.
In the first application, we redesigned a traditional spectrometer using a diffuser instead of a grating to diffract light. The resulting speckle pattern was captured using an image sensor and inverted to solve for the spectrum. This compact spectrometer was developed for optical coherence tomography, an interferometry technique for imaging eyes.
In the second project for fluorescence microscopy, we used a diffuser to multiplex light onto a spectral filter array on an image sensor. We used compressed sensing to solve for more voxels in the hyperspectral data cube than pixels on the sensor. We developed a compact attachment for a traditional benchtop microscope that enables live imaging on biological samples and demonstrate high fidelity reconstructions in experiment.
In the final project, we adapted a Fourier ptychography system for spectral imaging using a filter array. Fourier ptychography uses angled illumination to scan through the spatial Fourier plane and build up a higher resolution image. By placing the filter array in the Fourier plane, we can scan the object’s spatial frequencies through each spectral filter to build up a high resolution spatio-spectral datacube. We investigated this idea via simulation and proposed an experimental setup that could be used for digital pathology.},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
In the first application, we redesigned a traditional spectrometer using a diffuser instead of a grating to diffract light. The resulting speckle pattern was captured using an image sensor and inverted to solve for the spectrum. This compact spectrometer was developed for optical coherence tomography, an interferometry technique for imaging eyes.
In the second project for fluorescence microscopy, we used a diffuser to multiplex light onto a spectral filter array on an image sensor. We used compressed sensing to solve for more voxels in the hyperspectral data cube than pixels on the sensor. We developed a compact attachment for a traditional benchtop microscope that enables live imaging on biological samples and demonstrate high fidelity reconstructions in experiment.
In the final project, we adapted a Fourier ptychography system for spectral imaging using a filter array. Fourier ptychography uses angled illumination to scan through the spatial Fourier plane and build up a higher resolution image. By placing the filter array in the Fourier plane, we can scan the object’s spatial frequencies through each spectral filter to build up a high resolution spatio-spectral datacube. We investigated this idea via simulation and proposed an experimental setup that could be used for digital pathology.
Ruiming Cao; Dekel Galor; Amit Kohli; Jacob L. Yates; Laura Waller
Noise2Image: noise-enabled static scene recovery for event cameras Journal Article
In: Optica, vol. 12, no. 1, pp. 46–55, 2025.
Abstract | Links | BibTeX | Tags: Beam splitters; Cameras; CMOS cameras; Fluorescence microscopy; Neural networks; Three dimensional reconstruction
@article{Cao:25b,
title = {Noise2Image: noise-enabled static scene recovery for event cameras},
author = {Ruiming Cao and Dekel Galor and Amit Kohli and Jacob L. Yates and Laura Waller},
url = {https://opg.optica.org/optica/abstract.cfm?URI=optica-12-1-46},
doi = {10.1364/OPTICA.538916},
year = {2025},
date = {2025-01-01},
journal = {Optica},
volume = {12},
number = {1},
pages = {46--55},
publisher = {Optica Publishing Group},
abstract = {Event cameras, also known as dynamic vision sensors, are an emerging modality for measuring fast dynamics asynchronously. Event cameras capture changes of log-intensity over time as a stream of ``events'' and generally cannot measure intensity itself; hence, they are only used for imaging dynamic scenes. However, fluctuations due to random photon arrival inevitably trigger noise events, even for static scenes. While previous efforts have been focused on filtering out these undesirable noise events to improve signal quality, we find that, in the photon-noise regime, these noise events are correlated with the static scene intensity. We analyze the noise event generation and model its relationship to illuminance. Based on this understanding, we propose a method, called Noise2Image, to leverage the illuminance-dependent noise characteristics to recover the static parts of a scene, which are otherwise invisible to event cameras. We experimentally collect a dataset of noise events on static scenes to train and validate Noise2Image. Our results show that Noise2Image can robustly recover intensity images solely from noise events, providing an approach for capturing static scenes in event cameras, without additional hardware.},
keywords = {Beam splitters; Cameras; CMOS cameras; Fluorescence microscopy; Neural networks; Three dimensional reconstruction},
pubstate = {published},
tppubtype = {article}
}
Christian Foley; Eric Markley; Kyrollos Yanny; Laura Waller; Kristina Monakhova
Spectral DefocusCam: Super-Resolved Hyperspectral Imaging Through Defocus Proceedings Article
In: 2025 IEEE International Conference on Computational Photography (ICCP), pp. 1-12, 2025.
Links | BibTeX | Tags: Photography;Superresolution;Robot vision systems;Prototypes;Cameras;System-on-chip;Spatial resolution;Image reconstruction;Hyperspectral imaging;Lenses;Computational Photography;Hyperspectral Imaging
@inproceedings{11143845,
title = {Spectral DefocusCam: Super-Resolved Hyperspectral Imaging Through Defocus},
author = {Christian Foley and Eric Markley and Kyrollos Yanny and Laura Waller and Kristina Monakhova},
doi = {10.1109/ICCP64821.2025.11143845},
year = {2025},
date = {2025-01-01},
booktitle = {2025 IEEE International Conference on Computational Photography (ICCP)},
pages = {1--12},
keywords = {Photography;Superresolution;Robot vision systems;Prototypes;Cameras;System-on-chip;Spatial resolution;Image reconstruction;Hyperspectral imaging;Lenses;Computational Photography;Hyperspectral Imaging},
pubstate = {published},
tppubtype = {inproceedings}
}
Nalini M. Singh; Tiffany Chien; Arthur R. C. McCray; Colin Ophus; Laura Waller
A Gaussian Parameterization for Direct Atomic Structure Identification in Electron Tomography Proceedings Article
In: 2025 IEEE International Conference on Computational Photography (ICCP), pp. 1-10, 2025.
Links | BibTeX | Tags: Atomic measurements;Photography;Three-dimensional displays;Inverse problems;Transmission electron microscopy;Tomography;Atoms;Robustness;Image reconstruction;Electrons;Atomic electron tomography;Gaussian splatting
@inproceedings{11143828,
title = {A Gaussian Parameterization for Direct Atomic Structure Identification in Electron Tomography},
author = {Nalini M. Singh and Tiffany Chien and Arthur R. C. McCray and Colin Ophus and Laura Waller},
doi = {10.1109/ICCP64821.2025.11143828},
year = {2025},
date = {2025-01-01},
booktitle = {2025 IEEE International Conference on Computational Photography (ICCP)},
pages = {1--10},
keywords = {Atomic measurements;Photography;Three-dimensional displays;Inverse problems;Transmission electron microscopy;Tomography;Atoms;Robustness;Image reconstruction;Electrons;Atomic electron tomography;Gaussian splatting},
pubstate = {published},
tppubtype = {inproceedings}
}
Mingxuan Cai; Dekel Galor; Amit Pal Singh Kohli; Jacob L. Yates; Laura Waller
Event2Audio: Event-Based Optical Vibration Sensing Proceedings Article
In: 2025 IEEE International Conference on Computational Photography (ICCP), pp. 1-12, 2025.
Links | BibTeX | Tags: Vibrations;Integrated optics;Visualization;Event detection;Optical distortion;Cameras;Optical sensors;Laser beams;High-speed optical techniques;Videos;Optical Vibration Sensing;Vibrometry;Event Cameras
@inproceedings{11143833,
title = {Event2Audio: Event-Based Optical Vibration Sensing},
author = {Mingxuan Cai and Dekel Galor and Amit Pal Singh Kohli and Jacob L. Yates and Laura Waller},
doi = {10.1109/ICCP64821.2025.11143833},
year = {2025},
date = {2025-01-01},
booktitle = {2025 IEEE International Conference on Computational Photography (ICCP)},
pages = {1--12},
keywords = {Vibrations;Integrated optics;Visualization;Event detection;Optical distortion;Cameras;Optical sensors;Laser beams;High-speed optical techniques;Videos;Optical Vibration Sensing;Vibrometry;Event Cameras},
pubstate = {published},
tppubtype = {inproceedings}
}
Ruiming Cao; Nikita S. Divekar; James K. Nuñez; Srigokul Upadhyayula; Laura Waller
Neural space–time model for dynamic multi-shot imaging Journal Article
In: Nature Methods, pp. 1–6, 2024, ISSN: 1548-7105, (Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: Imaging, Phase-contrast microscopy, Super-resolution microscopy
@article{cao_neural_2024,
title = {Neural space–time model for dynamic multi-shot imaging},
author = {Ruiming Cao and Nikita S. Divekar and James K. Nuñez and Srigokul Upadhyayula and Laura Waller},
url = {https://www.nature.com/articles/s41592-024-02417-0},
doi = {10.1038/s41592-024-02417-0},
issn = {1548-7105},
year = {2024},
date = {2024-09-24},
urldate = {2024-09-01},
journal = {Nature Methods},
pages = {1--6},
publisher = {Nature Publishing Group},
abstract = {Computational imaging reconstructions from multiple measurements that are captured sequentially often suffer from motion artifacts if the scene is dynamic. We propose a neural space–time model (NSTM) that jointly estimates the scene and its motion dynamics, without data priors or pre-training. Hence, we can both remove motion artifacts and resolve sample dynamics from the same set of raw measurements used for the conventional reconstruction. We demonstrate NSTM in three computational imaging systems: differential phase-contrast microscopy, three-dimensional structured illumination microscopy and rolling-shutter DiffuserCam. We show that NSTM can recover subcellular motion dynamics and thus reduce the misinterpretation of living systems caused by motion artifacts.},
keywords = {Imaging, Phase-contrast microscopy, Super-resolution microscopy},
pubstate = {published},
tppubtype = {article}
}
Tiffany Chien; Ruiming Cao; Fanglin Linda Liu; Leyla A. Kabuli; Laura Waller
Space-time reconstruction for lensless imaging using implicit neural representations Journal Article
In: Opt. Express, vol. 32, no. 20, pp. 35725–35732, 2024.
Abstract | Links | BibTeX | Tags: Computational imaging; Imaging systems; Inverse design; Machine learning; Machine vision; Neural networks
@article{Chien:24,
  title     = {Space-time reconstruction for lensless imaging using implicit neural representations},
  author    = {Tiffany Chien and Ruiming Cao and Fanglin Linda Liu and Leyla A. Kabuli and Laura Waller},
  url       = {https://opg.optica.org/oe/abstract.cfm?URI=oe-32-20-35725},
  doi       = {10.1364/OE.530480},
  year      = {2024},
  date      = {2024-09-01},
  journal   = {Opt. Express},
  volume    = {32},
  number    = {20},
  pages     = {35725--35732},
  publisher = {Optica Publishing Group},
  abstract  = {Many computational imaging inverse problems are challenged by noise, model mismatch, and other imperfections that decrease reconstruction quality. For data taken sequentially in time, instead of reconstructing each frame independently, space-time algorithms simultaneously reconstruct multiple frames, thereby taking advantage of temporal redundancy through space-time priors. This helps with denoising and provides improved reconstruction quality, but often requires significant computational and memory resources. Designing effective but flexible temporal priors is also challenging. Here, we propose using an implicit neural representation to model dynamics and act as a computationally tractable and flexible space-time prior. We demonstrate this approach on video captured with a lensless imager, DiffuserCam, and show improved reconstruction results and robustness to noise compared to frame-by-frame methods.},
  keywords  = {Computational imaging; Imaging systems; Inverse design; Machine learning; Machine vision; Neural networks},
  pubstate  = {published},
  tppubtype = {article}
}
Guanghan Meng; Dekel Galor; Laura Waller; Martin S. Banks
BiPMAP: a toolbox for predicting perceived motion artifacts on modern displays Journal Article
In: Opt. Express, vol. 32, no. 7, pp. 12181–12199, 2024.
Abstract | Links | BibTeX | Tags:
@article{Meng:24,
  title     = {BiPMAP: a toolbox for predicting perceived motion artifacts on modern displays},
  author    = {Guanghan Meng and Dekel Galor and Laura Waller and Martin S. Banks},
  url       = {https://opg.optica.org/oe/abstract.cfm?URI=oe-32-7-12181},
  doi       = {10.1364/OE.510985},
  year      = {2024},
  date      = {2024-03-01},
  urldate   = {2024-03-01},
  journal   = {Opt. Express},
  volume    = {32},
  number    = {7},
  pages     = {12181--12199},
  publisher = {Optica Publishing Group},
  abstract  = {Viewers of digital displays often experience motion artifacts (e.g., flicker, judder, edge banding, motion blur, color breakup, depth distortion) when presented with dynamic scenes. We developed an interactive software tool for display designers that predicts how a viewer perceives motion artifacts for a variety of stimulus, display, and viewing parameters: the Binocular Perceived Motion Artifact Predictor (BiPMAP). The tool enables the user to specify numerous stimulus, display, and viewing parameters. It implements a model of human spatiotemporal contrast sensitivity in order to determine which artifacts will be seen by a viewer and which will not. The tool visualizes the perceptual effects of discrete space-time sampling on the display by presenting side by side the expected perception when the stimulus is continuous compared to when the same stimulus is presented with the spatial and temporal parameters of a prototype display.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Nathan Tessema Ersaro; Cem Yalcin; Liz Murray; Leyla Kabuli; Laura Waller; Rikky Muller
Fast non-iterative algorithm for 3D point-cloud holography Journal Article
In: Opt. Express, vol. 31, no. 22, pp. 36468–36485, 2023.
Abstract | Links | BibTeX | Tags: Diode pumped lasers; Fast Fourier transforms; Image quality; Phase retrieval; Spatial light modulators; Three dimensional imaging
@article{Ersaro:23,
  title     = {Fast non-iterative algorithm for 3D point-cloud holography},
  author    = {Nathan Tessema Ersaro and Cem Yalcin and Liz Murray and Leyla Kabuli and Laura Waller and Rikky Muller},
  url       = {https://opg.optica.org/oe/abstract.cfm?URI=oe-31-22-36468},
  doi       = {10.1364/OE.498302},
  year      = {2023},
  date      = {2023-10-01},
  urldate   = {2023-10-01},
  journal   = {Opt. Express},
  volume    = {31},
  number    = {22},
  pages     = {36468--36485},
  publisher = {Optica Publishing Group},
  abstract  = {Recently developed iterative and deep learning-based approaches to computer-generated holography (CGH) have been shown to achieve high-quality photorealistic 3D images with spatial light modulators. However, such approaches remain overly cumbersome for patterning sparse collections of target points across a photoresponsive volume in applications including biological microscopy and material processing. Specifically, in addition to requiring heavy computation that cannot accommodate real-time operation in mobile or hardware-light settings, existing sampling-dependent 3D CGH methods preclude the ability to place target points with arbitrary precision, limiting accessible depths to a handful of planes. Accordingly, we present a non-iterative point cloud holography algorithm that employs fast deterministic calculations in order to efficiently allocate patches of SLM pixels to different target points in the 3D volume and spread the patterning of all points across multiple time frames. Compared to a matched-performance implementation of the iterative Gerchberg-Saxton algorithm, our algorithm’s relative computation speed advantage was found to increase with SLM pixel count, reaching >100,000x at 512 × 512 array format.},
  keywords  = {Diode pumped lasers; Fast Fourier transforms; Image quality; Phase retrieval; Spatial light modulators; Three dimensional imaging},
  pubstate  = {published},
  tppubtype = {article}
}
Stuart Sherwin
Modeling, Designing, and Measuring EUV Photomasks PhD Thesis
EECS Department, University of California, Berkeley, 2023.
Abstract | Links | BibTeX | Tags:
@phdthesis{Sherwin:EECS-2023-37,
  title     = {Modeling, Designing, and Measuring EUV Photomasks},
  author    = {Stuart Sherwin},
  url       = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2023/EECS-2023-37.html},
  year      = {2023},
  date      = {2023-05-01},
  number    = {UCB/EECS-2023-37},
  school    = {EECS Department, University of California, Berkeley},
  abstract  = {We present a selection of topics relating to modeling, designing, and measuring EUV (Extreme Ultraviolet) photomasks, with implications for high-volume nanofabrication of integrated circuits. These EUV photomasks must be accurately designed, but rigorously modeling large domains is extremely computationally intensive; we introduce an approximate Fresnel Double Scattering model which is 10,000x faster. This approximation can predict
the trend of phase vs pitch, which is critical to designing EUV phase shift masks (PSMs). We
also explore novel mask architectures to improve efficiency and contrast, such as an etched
multilayer PSM (up to 6x throughput but restrictive applicability), aperiodic multilayers
(up to +22% throughput and more general applicability), and multilayers with minimal
propagation distance at certain angles (lower throughput but higher contrast with minimized 3D effects). Finally we explore computational metrology with EUV reflectometry,
scatterometry, and imaging for probing the phase and amplitude response of an EUV mask, with experimental demonstrations at the Advanced Light Source synchrotron. We perform reflectometry experiments on 3 masks with different architectures to infer approximately 25
physical film parameters each. Another reflectometry application to contamination monitoring achieved single-picometer precision for thickness (3σ < 6pm) and sub-degree precision for phase (3σ < 0.2deg). We compare two implementations of phase scatterometry, either applying nonlinear optimization with approximate scattering, or linearizing the rigorous scattering relationship between intensity and phase; linearization is shown to generally be more accurate, but both methods have similar precision. We apply novel software and hardware for phase imaging, using PhaseLift convex phase retrieval, combined with a set of custom Zernike Phase Contrast (ZPC) zone plates. We perform hyperspectral ZPC phase imaging on 3 masks, where we see promising agreement with reflectometry in the trend of phase vs wavelength.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
the trend of phase vs pitch, which is critical to designing EUV phase shift masks (PSMs). We
also explore novel mask architectures to improve efficiency and contrast, such as an etched
multilayer PSM (up to 6x throughput but restrictive applicability), aperiodic multilayers
(up to +22% throughput and more general applicability), and multilayers with minimal
propagation distance at certain angles (lower throughput but higher contrast with minimized 3D effects). Finally we explore computational metrology with EUV reflectometry,
scatterometry, and imaging for probing the phase and amplitude response of an EUV mask, with experimental demonstrations at the Advanced Light Source synchrotron. We perform reflectometry experiments on 3 masks with different architectures to infer approximately 25
physical film parameters each. Another reflectometry application to contamination monitoring achieved single-picometer precision for thickness (3σ < 6pm) and sub-degree precision for phase (3σ < 0.2deg). We compare two implementations of phase scatterometry, either applying nonlinear optimization with approximate scattering, or linearizing the rigorous scattering relationship between intensity and phase; linearization is shown to generally be more accurate, but both methods have similar precision. We apply novel software and hardware for phase imaging, using PhaseLift convex phase retrieval, combined with a set of custom Zernike Phase Contrast (ZPC) zone plates. We perform hyperspectral ZPC phase imaging on 3 masks, where we see promising agreement with reflectometry in the trend of phase vs wavelength.
Gautam Gunjala; Antoine Wojdyla; Kenneth A. Goldberg; Zhi Qiao; Xianbo Shi; Lahsen Assoufid; Laura Waller
Data-driven modeling and control of an X-ray bimorph adaptive mirror Journal Article
In: Journal of Synchrotron Radiation, vol. 30, no. 1, 2023.
Abstract | Links | BibTeX | Tags: adaptive optics, beamline optics, x ray imaging
@article{Gunjala:tv5041,
  title     = {Data-driven modeling and control of an X-ray bimorph adaptive mirror},
  author    = {Gautam Gunjala and Antoine Wojdyla and Kenneth A. Goldberg and Zhi Qiao and Xianbo Shi and Lahsen Assoufid and Laura Waller},
  url       = {https://doi.org/10.1107/S1600577522011080},
  doi       = {10.1107/S1600577522011080},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-01-01},
  journal   = {Journal of Synchrotron Radiation},
  volume    = {30},
  number    = {1},
  abstract  = {Adaptive X-ray mirrors are being adopted on high-coherent-flux synchrotron and X-ray free-electron laser beamlines where dynamic phase control and aberration compensation are necessary to preserve wavefront quality from source to sample, yet challenging to achieve. Additional difficulties arise from the inability to continuously probe the wavefront in this context, which demands methods of control that require little to no feedback. In this work, a data-driven approach to the control of adaptive X-ray optics with piezo-bimorph actuators is demonstrated. This approach approximates the non-linear system dynamics with a discrete-time model using random mirror shapes and interferometric measurements as training data. For mirrors of this type, prior states and voltage inputs affect the shape-change trajectory, and therefore must be included in the model. Without the need for assumed physical models of the mirror's behavior, the generality of the neural network structure accommodates drift, creep and hysteresis, and enables a control algorithm that achieves shape control and stability below 2nm RMS. Using a prototype mirror and it ex situ metrology, it is shown that the accuracy of our trained model enables open-loop shape control across a diverse set of states and that the control algorithm achieves shape error magnitudes that fall within diffraction-limited performance.},
  keywords  = {adaptive optics, beamline optics, x ray imaging},
  pubstate  = {published},
  tppubtype = {article}
}
Eric Li; Stuart Sherwin; Gautam Gunjala; Laura Waller
Exceeding the limits of algorithmic self-calibrated aberration recovery in Fourier ptychography Journal Article
In: Opt. Continuum, vol. 2, no. 1, pp. 119–130, 2023.
Abstract | Links | BibTeX | Tags: Computational imaging; Image quality; Imaging systems; Optical aberrations; Phase imaging; Reconstruction algorithms
@article{Li:23,
  title     = {Exceeding the limits of algorithmic self-calibrated aberration recovery in Fourier ptychography},
  author    = {Eric Li and Stuart Sherwin and Gautam Gunjala and Laura Waller},
  url       = {https://opg.optica.org/optcon/abstract.cfm?URI=optcon-2-1-119},
  doi       = {10.1364/OPTCON.475990},
  year      = {2023},
  date      = {2023-01-01},
  journal   = {Opt. Continuum},
  volume    = {2},
  number    = {1},
  pages     = {119--130},
  publisher = {Optica Publishing Group},
  abstract  = {Fourier ptychographic microscopy is a computational imaging technique that provides quantitative phase information and high resolution over a large field-of-view. Although the technique presents numerous advantages over conventional microscopy, model mismatch due to unknown optical aberrations can significantly limit reconstruction quality. A practical way of correcting for aberrations without additional data capture is through algorithmic self-calibration, in which a pupil recovery step is embedded into the reconstruction algorithm. However, software-only aberration correction is limited in accuracy. Here, we evaluate the merits of implementing a simple, dedicated calibration procedure for applications requiring high accuracy. In simulations, we find that for a target sample reconstruction error, we can image without any aberration corrections only up to a maximum aberration magnitude of $\lambda$/40. When we use algorithmic self-calibration, we can tolerate an aberration magnitude up to $\lambda$/10 and with our proposed diffuser calibration technique, this working range is extended further to $\lambda$/3. Hence, one can trade off complexity for accuracy by using a separate calibration process, which is particularly useful for larger aberrations.},
  keywords  = {Computational imaging; Image quality; Imaging systems; Optical aberrations; Phase imaging; Reconstruction algorithms},
  pubstate  = {published},
  tppubtype = {article}
}
Joseph D. Malone; Neerja Aggarwal; Laura Waller; Audrey K. Bowden
DiffuserSpec: spectroscopy with Scotch tape Journal Article
In: Opt. Lett., vol. 48, no. 2, pp. 323–326, 2023.
Abstract | Links | BibTeX | Tags: Near infrared radiation; Optical components; Reconstruction algorithms; Speckle patterns; Spectrometers; Spectroscopy
@article{Malone:23,
  title     = {DiffuserSpec: spectroscopy with Scotch tape},
  author    = {Joseph D. Malone and Neerja Aggarwal and Laura Waller and Audrey K. Bowden},
  url       = {https://opg.optica.org/ol/abstract.cfm?URI=ol-48-2-323},
  doi       = {10.1364/OL.476472},
  year      = {2023},
  date      = {2023-01-01},
  journal   = {Opt. Lett.},
  volume    = {48},
  number    = {2},
  pages     = {323--326},
  publisher = {Optica Publishing Group},
  abstract  = {Computational spectroscopy breaks the inherent one-to-one spatial-to-spectral pixel mapping of traditional spectrometers by multiplexing spectral data over a given sensor region. Most computational spectrometers require components that are complex to design, fabricate, or both. DiffuserSpec is a simple computational spectrometer that uses the inherent spectral dispersion of commercially available diffusers to generate speckle patterns that are unique to each wavelength. Using Scotch tape as a diffuser, we demonstrate narrowband and broadband spectral reconstructions with 2-nm spectral resolution over an 85-nm bandwidth in the near-infrared, limited only by the bandwidth of the calibration dataset. We also investigate the effect of spatial sub-sampling of the 2D speckle pattern on resolution performance.},
  keywords  = {Near infrared radiation; Optical components; Reconstruction algorithms; Speckle patterns; Spectrometers; Spectroscopy},
  pubstate  = {published},
  tppubtype = {article}
}
Yi Xue; David Ren; Laura Waller
Three-dimensional bi-functional refractive index and fluorescence microscopy (BRIEF) Journal Article
In: Biomed. Opt. Express, vol. 13, no. 11, pp. 5900–5908, 2022.
Abstract | Links | BibTeX | Tags: Digital imaging; Fluorescence microscopy; Image quality; Imaging techniques; Optical imaging; Three dimensional imaging
@article{Xue:22,
  title     = {Three-dimensional bi-functional refractive index and fluorescence microscopy (BRIEF)},
  author    = {Yi Xue and David Ren and Laura Waller},
  url       = {https://opg.optica.org/boe/abstract.cfm?URI=boe-13-11-5900},
  doi       = {10.1364/BOE.456621},
  year      = {2022},
  date      = {2022-11-01},
  journal   = {Biomed. Opt. Express},
  volume    = {13},
  number    = {11},
  pages     = {5900--5908},
  publisher = {Optica Publishing Group},
  abstract  = {Fluorescence microscopy is a powerful tool for imaging biological samples with molecular specificity. In contrast, phase microscopy provides label-free measurement of the sample’s refractive index (RI), which is an intrinsic optical property that quantitatively relates to cell morphology, mass, and stiffness. Conventional imaging techniques measure either the labeled fluorescence (functional) information or the label-free RI (structural) information, though it may be valuable to have both. For example, biological tissues have heterogeneous RI distributions, causing sample-induced scattering that degrades the fluorescence image quality. When both fluorescence and 3D RI are measured, one can use the RI information to digitally correct multiple-scattering effects in the fluorescence image. Here, we develop a new computational multi-modal imaging method based on epi-mode microscopy that reconstructs both 3D fluorescence and 3D RI from a single dataset. We acquire dozens of fluorescence images, each ‘illuminated’ by a single fluorophore, then solve an inverse problem with a multiple-scattering forward model. We experimentally demonstrate our method for epi-mode 3D RI imaging and digital correction of multiple-scattering effects in fluorescence images.},
  keywords  = {Digital imaging; Fluorescence microscopy; Image quality; Imaging techniques; Optical imaging; Three dimensional imaging},
  pubstate  = {published},
  tppubtype = {article}
}
Henry Pinkard; Laura Waller
Microscopes are coming for your job Journal Article
In: Nature Methods, pp. 1–2, 2022.
@article{pinkard2022microscopes,
  title     = {Microscopes are coming for your job},
  author    = {Henry Pinkard and Laura Waller},
  url       = {https://www.nature.com/articles/s41592-022-01566-4},
  year      = {2022},
  date      = {2022-09-08},
  urldate   = {2022-01-01},
  journal   = {Nature Methods},
  pages     = {1--2},
  publisher = {Nature Publishing Group},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Linda Liu
Single-Shot 3D Microscopy: Optics and Algorithms Co-Design PhD Thesis
EECS Department, University of California, Berkeley, 2022.
Abstract | Links | BibTeX | Tags:
@phdthesis{Liu:EECS-2022-224,
  title     = {Single-Shot 3D Microscopy: Optics and Algorithms Co-Design},
  author    = {Linda Liu},
  url       = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2022/EECS-2022-224.html},
  year      = {2022},
  date      = {2022-09-01},
  number    = {UCB/EECS-2022-224},
  school    = {EECS Department, University of California, Berkeley},
  abstract  = {Computational imaging involves simultaneously designing optical hardware and reconstruction software. Such a co-design framework brings together the best of both worlds for an imaging system. The goal is to develop a high-speed, high-resolution, and large field-of-view microscope that can detect 3D fluorescence signals from single image acquisition. To achieve this goal, I propose a new method called Fourier DiffuserScope, a single-shot 3D fluorescent microscope that uses a phase mask (i.e., a diffuser with random microlenses) in the Fourier plane to encode 3D information, then computationally reconstructs the volume by solving a sparsity-constrained inverse problem.
In this dissertation, I will discuss the design principles of the Fourier DiffuserScope from three perspectives: first-principles optics, compressed sensing theory, and physics-based machine learning. First, in the heuristic design, the phase mask consists of randomly placed microlenses with varying focal lengths; the random positions provide a larger field-of-view compared to a conventional microlens array, and the diverse focal lengths improve the axial depth range. I then build an experimental system that achieves less than 3 um lateral and 4 um axial resolution over a 1000x1000x280 um^3 volume. Lastly, we use a differentiable forward model of Fourier DiffuserScope in conjunction with a differentiable reconstruction algorithm to jointly optimize both the phase mask surface profile and the reconstruction parameters. We validate our method in 2D and 3D single-shot imaging, where the optimized diffuser demonstrates improved reconstruction quality compared to previous heuristic designs.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
In this dissertation, I will discuss the design principles of the Fourier DiffuserScope from three perspectives: first-principles optics, compressed sensing theory, and physics-based machine learning. First, in the heuristic design, the phase mask consists of randomly placed microlenses with varying focal lengths; the random positions provide a larger field-of-view compared to a conventional microlens array, and the diverse focal lengths improve the axial depth range. I then build an experimental system that achieves less than 3 um lateral and 4 um axial resolution over a 1000x1000x280 um^3 volume. Lastly, we use a differentiable forward model of Fourier DiffuserScope in conjunction with a differentiable reconstruction algorithm to jointly optimize both the phase mask surface profile and the reconstruction parameters. We validate our method in 2D and 3D single-shot imaging, where the optimized diffuser demonstrates improved reconstruction quality compared to previous heuristic designs.
Kristina Monakhova
Physics-Informed Machine Learning for Computational Imaging PhD Thesis
EECS Department, University of California, Berkeley, 2022.
Abstract | Links | BibTeX | Tags:
@phdthesis{Monakhova:EECS-2022-177,
  title     = {Physics-Informed Machine Learning for Computational Imaging},
  author    = {Kristina Monakhova},
  url       = {http://www2.eecs.berkeley.edu/Pubs/TechRpts/2022/EECS-2022-177.html},
  year      = {2022},
  date      = {2022-07-01},
  urldate   = {2022-07-01},
  number    = {UCB/EECS-2022-177},
  school    = {EECS Department, University of California, Berkeley},
  abstract  = {A key aspect of many computational imaging systems, from compressive cameras to low light photography, are the algorithms used to uncover the signal from encoded or noisy measurements. Some computational cameras encode higher-dimensional information (e.g. different wavelengths of light, 3D, time) onto a 2-dimensional sensor, then use algorithms to decode and recover this high-dimensional information. Others capture measurements that are extremely noisy, or degraded, and require algorithms to extract the signal and make the images usable by people, or by higher-level downstream algorithms. In each case, the algorithms used to decode and extract information from raw measurements are critical and necessary to make computational cameras function. Over the years the predominant methods, classic methods, to recover information from computational cameras have been based on minimizing an optimization problem consisting of a data term and hand-picked prior term. More recently, deep learning has been applied to these problems, but often has no way to incorporate known optical characteristics, requires large training datasets, and results in black-box models that cannot easily be interpreted. In this dissertation, we present physics-informed machine learning for computational imaging, which is a middle ground approach that combines elements of classic methods with deep learning. We show how to incorporate knowledge of the imaging system physics into neural networks to improve image quality and performance beyond what is feasible with either classic or deep methods for several computational cameras. We show several different ways to incorporate imaging physics into neural networks, including algorithm unrolling, differentiable optical models, unsupervised methods, and through generative adversarial networks. For each of these methods, we focus on a different computational camera with unique challenges and modeling considerations. 
First, we introduce an unrolled, physics-informed network that improves the quality and reconstruction time of lensless cameras, improving these cameras and showing photorealistic image quality on a variety of scenes. Building up on this, we demonstrate a new reconstruction network that can improve the reconstruction time for compressive, single-shot 3D microscopy with spatially-varying blur by 1,600X, enabling interactive previewing of the scene. In cases where training data is hard to acquire, we show that an untrained physics-informed network can improve image quality for compressive single-shot video and hyperspectral imaging without the need for training data. Finally, we design a physics-informed noise generator that can realistically synthesize noise at extremely high-gain, low-light settings. Using this learned noise model, we show how we can push a camera past its typical limit and take photorealistic videos at starlight levels of illumination for the first time. Each case highlights how using physics-informed machine learning can improve computational cameras and push them to their limits.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
Michael L. Whittaker; David Ren; Colin Ophus; Yugang Zhang; Laura Waller; Benjamin Gilbert; Jillian F. Banfield
Ion complexation waves emerge at the curved interfaces of layered minerals Journal Article
In: Nature Communications, vol. 13, iss. 1, 2022.
Abstract | Links | BibTeX | Tags: tomography
@article{Ion2022,
  title     = {Ion complexation waves emerge at the curved interfaces of layered minerals},
  author    = {Michael L. Whittaker and David Ren and Colin Ophus and Yugang Zhang and Laura Waller and Benjamin Gilbert and Jillian F. Banfield},
  doi       = {10.1038/s41467-022-31004-0},
  year      = {2022},
  date      = {2022-06-13},
  urldate   = {2022-06-13},
  journal   = {Nature Communications},
  volume    = {13},
  issue     = {1},
  abstract  = {Visualizing hydrated interfaces is of widespread interest across the physical sciences and is a particularly acute need for layered minerals, whose properties are governed by the structure of the electric double layer (EDL) where mineral and solution meet. Here, we show that cryo electron microscopy and tomography enable direct imaging of the EDL at montmorillonite interfaces in monovalent electrolytes with ångstrom resolution over micron length scales. A learning-based multiple-scattering reconstruction method for cryo electron tomography reveals ions bound asymmetrically on opposite sides of curved, exfoliated layers. We observe conserved ion-density asymmetry across stacks of interacting layers in cryo electron microscopy that is associated with configurations of inner- and outer-sphere ion-water-mineral complexes that we term complexation waves. Coherent X-ray scattering confirms that complexation waves propagate at room-temperature via a competition between ion dehydration and charge interactions that are coupled across opposing sides of a layer, driving dynamic transitions between stacked and aggregated states via layer exfoliation.},
  keywords  = {tomography},
  pubstate  = {published},
  tppubtype = {article}
}