Compare commits

...

10 commits

81 changed files with 26139 additions and 4189 deletions

1
.gitignore vendored
View file

@ -2,3 +2,4 @@
*.lock
*.log
*.bak
flip.rs

View file

@ -4,8 +4,30 @@ version = "0.1.0"
edition = "2024"
[dependencies]
anyhow = "1.0.100"
bitflags = "2.10.0"
bumpalo = "3.19.0"
enum_dispatch = "0.3.13"
exr = "1.73.0"
half = "2.7.1"
image_rs = { package = "image", version = "0.25.8" }
indicatif = "0.18.3"
num = "0.4.3"
num-integer = "0.1.46"
num-traits = "0.2.19"
once_cell = "1.21.3"
qoi = "0.4.1"
rand = "0.9.2"
rayon = "1.11.0"
smallvec = "1.15.1"
thiserror = "2.0.17"
[features]
default = []
use_f64 = []
[lints.clippy]
excessive_precision = "allow"
approx_constant = "allow"
upper_case_acronyms = "allow"
wrong_self_convention = "allow"

View file

@ -8,14 +8,20 @@ pub use perspective::PerspectiveCamera;
pub use realistic::RealisticCamera;
pub use spherical::SphericalCamera;
use crate::core::film::Film;
use crate::core::film::{Film, FilmTrait};
use crate::core::interaction::Interaction;
use crate::core::medium::Medium;
use crate::core::pbrt::{Float, RenderingCoordinateSystem, lerp};
use crate::core::sampler::CameraSample;
use crate::geometry::{Normal3f, Point3f, Ray, RayDifferential, Vector3f, VectorLike};
use crate::utils::spectrum::{SampledSpectrum, SampledWavelengths};
use crate::geometry::{
Normal3f, Point2f, Point2i, Point3f, Ray, RayDifferential, Vector3f, VectorLike,
};
use crate::image::ImageMetadata;
use crate::spectra::{SampledSpectrum, SampledWavelengths};
use crate::utils::transform::{AnimatedTransform, Transform};
use enum_dispatch::enum_dispatch;
use std::sync::Arc;
#[derive(Debug, Clone)]
@ -24,6 +30,17 @@ pub struct CameraRay {
pub weight: SampledSpectrum,
}
#[derive(Debug, Clone)]
pub struct CameraWiSample {
wi_spec: SampledSpectrum,
wi: Vector3f,
pdf: Float,
p_raster: Point2f,
p_ref: Interaction,
p_lens: Interaction,
}
#[derive(Debug)]
pub struct CameraTransform {
render_from_camera: AnimatedTransform,
world_from_render: Transform<Float>,
@ -63,6 +80,10 @@ impl CameraTransform {
}
}
pub fn camera_from_world(&self, time: Float) -> Transform<Float> {
(self.world_from_render * self.render_from_camera.interpolate(time)).inverse()
}
pub fn render_from_camera(&self, p: Point3f, time: Float) -> Point3f {
self.render_from_camera.apply_point(p, time)
}
@ -88,6 +109,7 @@ impl CameraTransform {
}
}
#[derive(Debug)]
pub struct CameraBase {
pub camera_transform: CameraTransform,
pub shutter_open: Float,
@ -100,19 +122,35 @@ pub struct CameraBase {
pub min_dir_differential_y: Vector3f,
}
impl CameraBase {
pub fn init_metadata(&self, metadata: &mut ImageMetadata) {
let camera_from_world: Transform<Float> =
self.camera_transform.camera_from_world(self.shutter_open);
metadata.camera_from_world = Some(camera_from_world.get_matrix());
}
}
#[enum_dispatch]
pub trait CameraTrait {
fn base(&self) -> &CameraBase;
fn get_film(&self) -> Film {
self.base().film.clone()
}
fn resolution(&self) -> Point2i {
self.base().film.full_resolution()
}
fn init_metadata(&self, metadata: &mut ImageMetadata);
fn generate_ray(&self, sample: CameraSample, lambda: &SampledWavelengths) -> Option<CameraRay>;
fn generate_ray_differential(
&self,
sample: CameraSample,
lambda: &SampledWavelengths,
) -> Option<CameraRay> {
let mut central_cam_ray = match self.generate_ray(sample, lambda) {
Some(cr) => cr,
None => return None,
};
let mut central_cam_ray = self.generate_ray(sample, lambda)?;
let mut rd = RayDifferential::default();
let mut rx_found = false;
let mut ry_found = false;
@ -191,8 +229,8 @@ pub trait CameraTrait {
let n_down = Vector3f::from(n_down_z);
let tx = -(n_down.dot(y_ray.o.into())) / n_down.dot(x_ray.d);
let ty = -(n_down.dot(x_ray.o.into()) - d) / n_down.dot(y_ray.d);
let px = x_ray.evaluate(tx);
let py = y_ray.evaluate(ty);
let px = x_ray.at(tx);
let py = y_ray.at(ty);
let spp_scale = 0.125_f32.max((samples_per_pixel as Float).sqrt());
*dpdx = spp_scale
* self.base().camera_transform.render_from_camera_vector(
@ -213,45 +251,16 @@ pub trait CameraTrait {
}
}
#[enum_dispatch(CameraTrait)]
#[derive(Debug)]
pub enum Camera {
Perspective(PerspectiveCamera),
Orthographic(OrthographicCamera),
Spherical(SphericalCamera),
Realistic(RealisticCamera),
}
impl CameraTrait for Camera {
fn base(&self) -> &CameraBase {
match self {
Camera::Perspective(c) => c.base(),
Camera::Orthographic(c) => c.base(),
Camera::Spherical(c) => c.base(),
Camera::Realistic(c) => c.base(),
}
}
fn generate_ray(&self, sample: CameraSample, lambda: &SampledWavelengths) -> Option<CameraRay> {
match self {
Camera::Perspective(c) => c.generate_ray(sample, lambda),
Camera::Orthographic(c) => c.generate_ray(sample, lambda),
Camera::Spherical(c) => c.generate_ray(sample, lambda),
Camera::Realistic(c) => c.generate_ray(sample, lambda),
}
}
fn generate_ray_differential(
&self,
sample: CameraSample,
lambda: &SampledWavelengths,
) -> Option<CameraRay> {
match self {
Camera::Perspective(c) => c.generate_ray_differential(sample, lambda),
Camera::Orthographic(c) => c.generate_ray_differential(sample, lambda),
Camera::Spherical(c) => c.generate_ray_differential(sample, lambda),
Camera::Realistic(c) => c.generate_ray_differential(sample, lambda),
}
}
}
#[derive(Debug)]
pub struct LensElementInterface {
pub curvature_radius: Float,
pub thickness: Float,

View file

@ -5,10 +5,11 @@ use crate::core::sampler::CameraSample;
use crate::geometry::{
Bounds2f, Point2f, Point3f, Ray, RayDifferential, Vector2f, Vector3f, VectorLike,
};
use crate::spectra::{SampledSpectrum, SampledWavelengths};
use crate::utils::sampling::sample_uniform_disk_concentric;
use crate::utils::spectrum::{SampledSpectrum, SampledWavelengths};
use crate::utils::transform::Transform;
#[derive(Debug)]
pub struct OrthographicCamera {
pub base: CameraBase,
pub screen_from_camera: Transform<Float>,
@ -71,6 +72,11 @@ impl CameraTrait for OrthographicCamera {
fn base(&self) -> &CameraBase {
&self.base
}
fn init_metadata(&self, metadata: &mut crate::image::ImageMetadata) {
self.base.init_metadata(metadata)
}
fn generate_ray(
&self,
sample: CameraSample,
@ -95,7 +101,7 @@ impl CameraTrait for OrthographicCamera {
// Compute point on plane of focus
let ft = self.focal_distance / ray.d.z();
let p_focus = ray.evaluate(ft);
let p_focus = ray.at(ft);
// Update ray for effect of lens
ray.o = Point3f::new(p_lens.x(), p_lens.y(), 0.);
@ -115,11 +121,7 @@ impl CameraTrait for OrthographicCamera {
sample: CameraSample,
lambda: &SampledWavelengths,
) -> Option<CameraRay> {
let mut central_cam_ray = match self.generate_ray(sample, lambda) {
Some(cr) => cr,
None => return None,
};
let mut central_cam_ray = self.generate_ray(sample, lambda)?;
let mut rd = RayDifferential::default();
if self.lens_radius > 0.0 {
return self.generate_ray_differential(sample, lambda);

View file

@ -7,10 +7,11 @@ use crate::core::sampler::CameraSample;
use crate::geometry::{
Bounds2f, Point2f, Point3f, Ray, RayDifferential, Vector2f, Vector3f, VectorLike,
};
use crate::spectra::{SampledSpectrum, SampledWavelengths};
use crate::utils::sampling::sample_uniform_disk_concentric;
use crate::utils::spectrum::{SampledSpectrum, SampledWavelengths};
use crate::utils::transform::Transform;
#[derive(Debug)]
pub struct PerspectiveCamera {
pub base: CameraBase,
pub screen_from_camera: Transform<Float>,
@ -48,7 +49,7 @@ impl PerspectiveCamera {
);
let raster_from_screen = raster_from_ndc * ndc_from_screen;
let screen_from_raster = raster_from_screen.inverse();
let camera_from_raster = screen_from_camera.inverse() * screen_from_raster.clone();
let camera_from_raster = screen_from_camera.inverse() * screen_from_raster;
let dx_camera = camera_from_raster.apply_to_point(Point3f::new(1., 0., 0.))
- camera_from_raster.apply_to_point(Point3f::new(0., 0., 0.));
let dy_camera = camera_from_raster.apply_to_point(Point3f::new(0., 1., 0.))
@ -60,7 +61,7 @@ impl PerspectiveCamera {
let cos_total_width = w_corner_camera.z();
Self {
base,
screen_from_camera: screen_from_camera.clone(),
screen_from_camera: *screen_from_camera,
camera_from_raster,
raster_from_screen,
screen_from_raster,
@ -77,6 +78,11 @@ impl CameraTrait for PerspectiveCamera {
fn base(&self) -> &CameraBase {
&self.base
}
fn init_metadata(&self, metadata: &mut crate::image::ImageMetadata) {
self.base.init_metadata(metadata)
}
fn generate_ray(
&self,
sample: CameraSample,
@ -101,7 +107,7 @@ impl CameraTrait for PerspectiveCamera {
// Compute point on plane of focus
let ft = self.focal_distance / r.d.z();
let p_focus = r.evaluate(ft);
let p_focus = r.at(ft);
// Update ray for effect of lens
r.o = Point3f::new(p_lens.x(), p_lens.y(), 0.);

View file

@ -5,11 +5,12 @@ use crate::core::sampler::CameraSample;
use crate::geometry::{
Bounds2f, Normal3f, Point2f, Point2i, Point3f, Ray, Vector2i, Vector3f, VectorLike,
};
use crate::utils::image::Image;
use crate::image::Image;
use crate::spectra::{SampledSpectrum, SampledWavelengths};
use crate::utils::math::{quadratic, square};
use crate::utils::scattering::refract;
use crate::utils::spectrum::{SampledSpectrum, SampledWavelengths};
#[derive(Debug)]
pub struct RealisticCamera {
base: CameraBase,
focus_distance: Float,
@ -61,14 +62,15 @@ impl RealisticCamera {
element_interface.push(el_int);
}
let mut exit_pupil_bounds: Vec<Bounds2f> = Vec::new();
let n_samples = 64;
for i in 0..64 {
let r0 = i as Float / 64. * base.film.diagonal() / 2.;
let r1 = (i + 1) as Float / n_samples as Float * base.film.diagonal() / 2.;
exit_pupil_bounds[i] = self.bound_exit_pupil(r0, r1);
}
let half_diag = base.film.diagonal() / 2.0;
let exit_pupil_bounds: Vec<_> = (0..n_samples)
.map(|i| {
let r0 = (i as Float / n_samples as Float) * half_diag;
let r1 = ((i + 1) as Float / n_samples as Float) * half_diag;
self.bound_exit_pupil(r0, r1)
})
.collect();
Self {
base,
@ -215,7 +217,7 @@ impl RealisticCamera {
}
// Test intersection point against element aperture
let p_hit = r_lens.evaluate(t);
let p_hit = r_lens.at(t);
if square(p_hit.x()) + square(p_hit.y()) > square(element.aperture_radius) {
return None;
}
@ -296,6 +298,10 @@ impl CameraTrait for RealisticCamera {
&self.base
}
fn init_metadata(&self, metadata: &mut crate::image::ImageMetadata) {
self.base.init_metadata(metadata)
}
fn generate_ray(
&self,
sample: CameraSample,

View file

@ -3,17 +3,18 @@ use crate::core::film::FilmTrait;
use crate::core::pbrt::{Float, PI};
use crate::core::sampler::CameraSample;
use crate::geometry::{Bounds2f, Point2f, Point3f, Ray, Vector3f, spherical_direction};
use crate::spectra::{SampledSpectrum, SampledWavelengths};
use crate::utils::math::{equal_area_square_to_sphere, wrap_equal_area_square};
use crate::utils::spectrum::{SampledSpectrum, SampledWavelengths};
#[derive(PartialEq)]
#[derive(Debug, PartialEq)]
pub struct EquiRectangularMapping;
#[derive(PartialEq)]
#[derive(Debug, PartialEq)]
pub enum Mapping {
EquiRectangular(EquiRectangularMapping),
}
#[derive(Debug)]
pub struct SphericalCamera {
pub base: CameraBase,
pub screen: Bounds2f,
@ -27,6 +28,10 @@ impl CameraTrait for SphericalCamera {
&self.base
}
fn init_metadata(&self, metadata: &mut crate::image::ImageMetadata) {
self.base.init_metadata(metadata)
}
fn generate_ray(
&self,
sample: CameraSample,

910
src/core/aggregates.rs Normal file
View file

@ -0,0 +1,910 @@
use crate::core::pbrt::{Float, find_interval};
use crate::core::primitive::PrimitiveTrait;
use crate::geometry::{Bounds3f, Point3f, Ray, Vector3f};
use crate::shapes::ShapeIntersection;
use crate::utils::math::encode_morton_3;
use crate::utils::math::next_float_down;
use crate::utils::partition_slice;
use rayon::prelude::*;
use std::cmp::Ordering;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering};
/// Strategy used to choose split planes while building the BVH.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SplitMethod {
    /// Surface-area heuristic: pick the split minimizing expected traversal cost.
    SAH,
    /// Hierarchical linear BVH built from Morton codes (parallel-friendly).
    Hlbvh,
    /// Split at the spatial midpoint of the centroid bounds.
    Middle,
    /// Split so both halves receive the same number of primitives.
    EqualCounts,
}
/// One SAH bucket: how many primitives fell into it and the union of their bounds.
#[derive(Debug, Default, Clone, Copy, PartialEq)]
struct BVHSplitBucket {
    count: usize,
    bounds: Bounds3f,
}
/// Flattened BVH node, stored in depth-first order for cache-friendly traversal.
#[derive(Debug, Clone, Default)]
pub struct LinearBVHNode {
    pub bounds: Bounds3f,
    /// For a leaf: index of its first primitive in the ordered array.
    /// For an interior node: index of its *second* child (the first child
    /// is always the next node in the array — see `flatten_bvh`).
    pub primitives_offset: usize,
    /// Number of primitives in a leaf; 0 marks an interior node.
    pub n_primitives: u16,
    /// Interior node's split axis (indexes the x/y/z `dir_is_neg` table
    /// during traversal); unused for leaves.
    pub axis: u8,
    /// Padding byte; not read anywhere in this module.
    pub pad: u8,
}
/// Pairs a primitive's original index with its Morton code for sorting
/// during the HLBVH build.
#[derive(Debug, Clone, Copy, Default)]
struct MortonPrimitive {
    primitive_index: usize,
    morton_code: u32,
}
/// A contiguous run of Morton-sorted primitives forming one HLBVH treelet:
/// `start_index` is the offset of its first primitive in the sorted Morton
/// array and `n_primitives` the run length.
// Derives added for consistency with the other small POD helpers in this
// module (e.g. `MortonPrimitive`), so treelets are printable and copyable.
#[derive(Debug, Clone, Copy)]
struct LBVHTreelet {
    start_index: usize,
    n_primitives: usize,
}
/// Per-primitive build data: original index, world bounds, and cached centroid.
#[derive(Debug, Clone)]
pub struct BVHPrimitiveInfo {
    primitive_number: usize, // Index into the original primitives vector
    bounds: Bounds3f,
    /// Cached `bounds.centroid()`, queried repeatedly by the split heuristics.
    centroid: Point3f,
}
impl BVHPrimitiveInfo {
    /// Records a primitive's index and bounds, caching the bounds centroid
    /// so the split heuristics don't recompute it per comparison.
    fn new(primitive_number: usize, bounds: Bounds3f) -> Self {
        Self {
            primitive_number,
            bounds,
            centroid: bounds.centroid(),
        }
    }
}
/// Node of the temporary pointer-based tree built first, before the tree is
/// flattened into the `LinearBVHNode` array.
#[derive(Clone, Debug)]
pub enum BVHBuildNode {
    /// Leaf referencing a contiguous range of the ordered primitive array.
    Leaf {
        first_prim_offset: usize,
        n_primitives: usize,
        bounds: Bounds3f,
    },
    /// Interior node with exactly two children partitioned along `split_axis`.
    Interior {
        split_axis: u8,
        children: [Box<BVHBuildNode>; 2],
        bounds: Bounds3f,
    },
}
impl Default for BVHBuildNode {
    /// An empty leaf: zero primitives at offset zero with default bounds.
    fn default() -> Self {
        Self::new_leaf(0, 0, Bounds3f::default())
    }
}
impl BVHBuildNode {
    /// Constructs a leaf covering `n_primitives` ordered primitives starting
    /// at `first_prim_offset`.
    pub fn new_leaf(first_prim_offset: usize, n_primitives: usize, bounds: Bounds3f) -> Self {
        Self::Leaf {
            first_prim_offset,
            n_primitives,
            bounds,
        }
    }

    /// Constructs an interior node whose bounds are the union of its
    /// children's bounds.
    pub fn new_interior(axis: u8, c0: Box<BVHBuildNode>, c1: Box<BVHBuildNode>) -> Self {
        Self::Interior {
            split_axis: axis,
            // Computed before `c0`/`c1` are moved into the children array.
            bounds: c0.bounds().union(c1.bounds()),
            children: [c0, c1],
        }
    }

    /// World-space bounds of this node, whichever variant it is.
    pub fn bounds(&self) -> Bounds3f {
        match *self {
            Self::Leaf { bounds, .. } | Self::Interior { bounds, .. } => bounds,
        }
    }

    /// Split axis for interior nodes; `None` for leaves.
    pub fn split_axis(&self) -> Option<u8> {
        if let Self::Interior { split_axis, .. } = self {
            Some(*split_axis)
        } else {
            None
        }
    }
}
/// Raw-pointer view of the `ordered_prims` output buffer that multiple build
/// threads fill concurrently, each claiming a disjoint range through the
/// shared atomic `offset` cursor (see `append`).
pub struct SharedPrimitiveBuffer<'a> {
    /// Base pointer of the destination slice; written through in `append`.
    ptr: *mut Arc<dyn PrimitiveTrait>,
    /// Next free slot; advanced with `fetch_add` so claimed ranges never overlap.
    pub offset: &'a AtomicUsize,
    /// Ties the raw pointer's validity to the borrow of the underlying slice.
    _marker: std::marker::PhantomData<&'a mut [Arc<dyn PrimitiveTrait>]>,
}
// SAFETY: threads only write through `ptr` at indices claimed via the atomic
// `offset` cursor, so concurrent writes target disjoint elements. Assumes no
// thread reads the buffer until the build has finished — TODO(review) confirm
// at all call sites.
unsafe impl<'a> Sync for SharedPrimitiveBuffer<'a> {}
unsafe impl<'a> Send for SharedPrimitiveBuffer<'a> {}
impl<'a> SharedPrimitiveBuffer<'a> {
    /// Wraps `slice` so it can be filled from multiple threads; `offset` is
    /// the shared cursor tracking the next unclaimed slot.
    pub fn new(slice: &'a mut [Arc<dyn PrimitiveTrait>], offset: &'a AtomicUsize) -> Self {
        Self {
            ptr: slice.as_mut_ptr(),
            offset,
            _marker: std::marker::PhantomData,
        }
    }

    /// Copies the primitives named by `indices` into the next free region of
    /// the buffer and returns that region's start index.
    pub fn append(
        &self,
        primitives: &[Arc<dyn PrimitiveTrait>],
        indices: &[BVHPrimitiveInfo],
    ) -> usize {
        let count = indices.len();
        // Relaxed is sufficient: only uniqueness of the reservation matters,
        // not ordering against other memory operations.
        let start_index = self.offset.fetch_add(count, AtomicOrdering::Relaxed);
        // SAFETY: the fetch_add above reserves [start_index, start_index + count)
        // exclusively for this call, and the buffer is pre-initialized by the
        // caller (`vec![primitives[0].clone(); len]` in `new`). Assumes callers
        // never reserve more slots in total than the buffer holds — TODO confirm.
        unsafe {
            for (i, info) in indices.iter().enumerate() {
                let target_ptr = self.ptr.add(start_index + i);
                // Assign instead of `std::ptr::write`: the slot already holds a
                // live placeholder Arc, and `ptr::write` would skip its drop,
                // permanently leaking a refcount on `primitives[0]`.
                *target_ptr = primitives[info.primitive_number].clone();
            }
        }
        start_index
    }
}
/// Bounding-volume-hierarchy acceleration structure over a set of primitives.
pub struct BVHAggregate {
    /// Upper bound on primitives per leaf (clamped to 255 in `new`).
    max_prims_in_node: usize,
    /// Primitives reordered so each leaf references a contiguous range.
    primitives: Vec<Arc<dyn PrimitiveTrait>>,
    /// Heuristic the tree was built with.
    split_method: SplitMethod,
    /// Flattened depth-first node array produced by `flatten_bvh`.
    nodes: Vec<LinearBVHNode>,
}
impl BVHAggregate {
/// Builds a BVH over `primitives` using `split_method`.
///
/// Primitives are reordered so that each leaf's primitives are contiguous,
/// then the pointer tree is flattened into the linear `nodes` array.
pub fn new(
    mut primitives: Vec<Arc<dyn PrimitiveTrait>>,
    max_prims_in_node: usize,
    split_method: SplitMethod,
) -> Self {
    // Leaf size is stored as u16 in LinearBVHNode; cap it at 255 here.
    let max_prims_in_node = std::cmp::min(255, max_prims_in_node);
    if primitives.is_empty() {
        return Self {
            max_prims_in_node,
            primitives,
            split_method,
            nodes: Vec::new(),
        };
    }
    // Cache bounds and centroids once per primitive for the whole build.
    let mut primitive_info: Vec<BVHPrimitiveInfo> = primitives
        .iter()
        .enumerate()
        .map(|(i, p)| BVHPrimitiveInfo::new(i, p.bounds()))
        .collect();
    let ordered_prims: Vec<Arc<dyn PrimitiveTrait>>;
    let total_nodes_count: usize;
    let root: Box<BVHBuildNode>;
    match split_method {
        SplitMethod::Hlbvh => {
            let nodes_counter = AtomicUsize::new(0);
            let ordered_prims_offset = AtomicUsize::new(0);
            // Pre-fill the output buffer with placeholder clones so parallel
            // build workers can overwrite disjoint ranges in place.
            let mut local_ordered = vec![primitives[0].clone(); primitives.len()];
            let shared_buffer =
                SharedPrimitiveBuffer::new(&mut local_ordered, &ordered_prims_offset);
            root =
                Self::build_hlbvh(&primitive_info, &nodes_counter, &shared_buffer, &primitives);
            ordered_prims = local_ordered;
            total_nodes_count = nodes_counter.load(AtomicOrdering::Relaxed);
        }
        _ => {
            let nodes_counter = AtomicUsize::new(0);
            let ordered_prims_offset = AtomicUsize::new(0);
            // Same placeholder-filled output buffer as the HLBVH path.
            let mut local_ordered = vec![primitives[0].clone(); primitives.len()];
            let shared_buffer =
                SharedPrimitiveBuffer::new(&mut local_ordered, &ordered_prims_offset);
            root = Self::build_recursive(
                &mut primitive_info,
                &nodes_counter,
                &shared_buffer,
                &primitives,
                max_prims_in_node,
                split_method,
            );
            ordered_prims = local_ordered;
            total_nodes_count = nodes_counter.load(AtomicOrdering::Relaxed);
        }
    };
    // Adopt the leaf-contiguous ordering produced by the build.
    primitives = ordered_prims;
    // Flatten the pointer tree into a depth-first linear array.
    let mut nodes = vec![LinearBVHNode::default(); total_nodes_count];
    let mut offset = 0;
    Self::flatten_bvh(&root, &mut nodes, &mut offset);
    Self {
        max_prims_in_node,
        primitives,
        split_method,
        nodes,
    }
}
/// Writes `node`'s subtree into `nodes` in depth-first order starting at
/// `*offset`, returning the index this node was written to.
///
/// Layout invariant relied on by traversal: an interior node's first child
/// sits at `index + 1`; only the second child's index is stored
/// (in `primitives_offset`).
fn flatten_bvh(node: &BVHBuildNode, nodes: &mut [LinearBVHNode], offset: &mut usize) -> usize {
    let local_offset = *offset;
    *offset += 1;
    match node {
        BVHBuildNode::Leaf {
            first_prim_offset,
            n_primitives,
            bounds,
        } => {
            let linear_node = &mut nodes[local_offset];
            linear_node.bounds = *bounds;
            linear_node.n_primitives = *n_primitives as u16;
            linear_node.primitives_offset = *first_prim_offset;
            linear_node.axis = 0; // Irrelevant for leaves
        }
        BVHBuildNode::Interior {
            split_axis,
            children,
            bounds,
        } => {
            nodes[local_offset].bounds = *bounds;
            nodes[local_offset].axis = *split_axis;
            // n_primitives == 0 is what marks this node as interior.
            nodes[local_offset].n_primitives = 0;
            // First child lands directly after this node, so its return
            // value can be ignored; only the second child's offset is kept.
            Self::flatten_bvh(&children[0], nodes, offset);
            let second_child_offset = Self::flatten_bvh(&children[1], nodes, offset);
            nodes[local_offset].primitives_offset = second_child_offset;
        }
    }
    local_offset
}
/// Builds the full HLBVH: Morton-sorts the primitive centroids, emits an
/// LBVH per treelet in parallel, then merges the treelet roots with an
/// SAH build (`build_upper_sah`).
pub fn build_hlbvh(
    bvh_primitives: &[BVHPrimitiveInfo],
    total_nodes: &AtomicUsize,
    ordered_prims: &SharedPrimitiveBuffer,
    original_primitives: &[Arc<dyn PrimitiveTrait>],
) -> Box<BVHBuildNode> {
    let bounds = bvh_primitives
        .iter()
        .fold(Bounds3f::default(), |b, p| b.union(p.bounds));
    // Quantize each centroid to a 2^10 grid inside `bounds` and interleave
    // the coordinates into a 30-bit Morton code.
    let mut morton_prims: Vec<MortonPrimitive> = bvh_primitives
        .par_iter()
        .map(|prim| {
            const MORTON_BITS: i32 = 10;
            const MORTON_SCALE: i32 = 1 << MORTON_BITS;
            let centroid_offset = bounds.offset(&prim.centroid);
            let offset = centroid_offset * (MORTON_SCALE as Float);
            MortonPrimitive {
                primitive_index: prim.primitive_number,
                morton_code: encode_morton_3(offset.x(), offset.y(), offset.z()),
            }
        })
        .collect();
    morton_prims.par_sort_unstable_by_key(|p| p.morton_code);
    // Primitives whose top 12 Morton bits agree belong to the same treelet.
    const TREELET_MASK: u32 = 0b00111111111111000000000000000000;
    let mut split_indices: Vec<usize> = morton_prims
        .par_windows(2) // Iterates over overlapping pairs [i, i+1]
        .enumerate()
        .filter_map(|(i, w)| {
            let m1 = w[0].morton_code & TREELET_MASK;
            let m2 = w[1].morton_code & TREELET_MASK;
            // If mask changes, the split is at index i + 1
            if m1 != m2 { Some(i + 1) } else { None }
        })
        .collect();
    // Bracket the split points with 0 and len to form [start, end) ranges.
    let mut boundaries = Vec::with_capacity(split_indices.len() + 2);
    boundaries.push(0);
    boundaries.append(&mut split_indices);
    boundaries.push(morton_prims.len());
    let treelets_to_build: Vec<LBVHTreelet> = boundaries
        .windows(2)
        .map(|w| LBVHTreelet {
            start_index: w[0],
            n_primitives: w[1] - w[0],
        })
        .collect();
    // Emit each treelet's LBVH in parallel. Bit splitting starts just below
    // the 12 bits already consumed by the treelet grouping (29 - 12).
    let treelet_roots: Vec<Box<BVHBuildNode>> = treelets_to_build
        .par_iter()
        .map(|tr| {
            let mut nodes_created = 0;
            const FIRST_BIT_INDEX: i32 = 29 - 12;
            let root = Self::emit_lbvh(
                bvh_primitives,
                &morton_prims[tr.start_index..tr.start_index + tr.n_primitives],
                &mut nodes_created,
                ordered_prims,
                original_primitives,
                FIRST_BIT_INDEX,
                4,
            );
            // Add thread-local count to global atomic
            total_nodes.fetch_add(nodes_created, AtomicOrdering::Relaxed);
            root
        })
        .collect();
    let mut contiguous_nodes: Vec<BVHBuildNode> = treelet_roots
        .into_iter()
        .map(|node_box| *node_box)
        .collect();
    Self::build_upper_sah(&mut contiguous_nodes, total_nodes)
}
/// Builds an LBVH subtree over `morton_prims` by splitting on successive
/// Morton-code bits. `total_nodes` is a thread-local counter folded into
/// the global atomic by the caller.
fn emit_lbvh(
    bvh_primitives: &[BVHPrimitiveInfo],
    morton_prims: &[MortonPrimitive],
    total_nodes: &mut usize,
    ordered_prims: &SharedPrimitiveBuffer,
    original_primitives: &[Arc<dyn PrimitiveTrait>],
    bit_index: i32,
    max_prims_in_node: usize,
) -> Box<BVHBuildNode> {
    let n_primitives = morton_prims.len();
    // Out of split bits, or few enough primitives: emit a leaf.
    if bit_index == -1 || n_primitives <= max_prims_in_node {
        *total_nodes += 1;
        // Calculate bounds while collecting indices
        let mut bounds = Bounds3f::default();
        let mut indices = Vec::with_capacity(n_primitives);
        for mp in morton_prims {
            // NOTE(review): indexes `bvh_primitives` by the original primitive
            // number — assumes the info slice is in original primitive order,
            // which holds for the full slice built in `new`; confirm if other
            // callers pass a sub-slice.
            let info = &bvh_primitives[mp.primitive_index];
            bounds = bounds.union(info.bounds);
            indices.push(info.clone());
        }
        let first_prim_offset = ordered_prims.append(original_primitives, &indices);
        return Box::new(BVHBuildNode::new_leaf(
            first_prim_offset,
            n_primitives,
            bounds,
        ));
    }
    let mask = 1 << bit_index;
    let first_code = morton_prims[0].morton_code;
    // Codes are sorted, so primitives sharing the current bit value are
    // contiguous; binary-search for where the bit flips.
    let last_match_index = find_interval(n_primitives, |index| {
        let current_code = morton_prims[index].morton_code;
        (current_code & mask) == (first_code & mask)
    });
    let split_offset = (last_match_index + 1) as usize;
    // This bit doesn't split the range at all: try the next lower bit.
    if split_offset >= n_primitives {
        return Self::emit_lbvh(
            bvh_primitives,
            morton_prims,
            total_nodes,
            ordered_prims,
            original_primitives,
            bit_index - 1,
            max_prims_in_node,
        );
    }
    let (left_morton, right_morton) = morton_prims.split_at(split_offset);
    *total_nodes += 1;
    let child0 = Self::emit_lbvh(
        bvh_primitives,
        left_morton,
        total_nodes,
        ordered_prims,
        original_primitives,
        bit_index - 1,
        max_prims_in_node,
    );
    let child1 = Self::emit_lbvh(
        bvh_primitives,
        right_morton,
        total_nodes,
        ordered_prims,
        original_primitives,
        bit_index - 1,
        max_prims_in_node,
    );
    // Axis derived from the bit position — assumes `encode_morton_3`
    // interleaves the axes so bit_index % 3 selects x/y/z; TODO confirm.
    let axis = (bit_index % 3) as u8;
    Box::new(BVHBuildNode::new_interior(axis, child0, child1))
}
/// Merges the HLBVH treelet roots into one tree using a full SAH split at
/// each level, counting created interior nodes in `total_nodes`.
fn build_upper_sah(nodes: &mut [BVHBuildNode], total_nodes: &AtomicUsize) -> Box<BVHBuildNode> {
    let n_nodes = nodes.len();
    if n_nodes == 1 {
        return Box::new(nodes[0].clone());
    }
    total_nodes.fetch_add(1, AtomicOrdering::Relaxed);
    let bounds = nodes
        .iter()
        .fold(Bounds3f::default(), |b, node| b.union(node.bounds()));
    let centroid_bounds = nodes.iter().fold(Bounds3f::default(), |b, node| {
        b.union_point(node.bounds().centroid())
    });
    let dim = centroid_bounds.max_dimension();
    // All centroids coincide along the widest axis: SAH can't discriminate,
    // so just split the node list in half.
    if centroid_bounds.p_max[dim] == centroid_bounds.p_min[dim] {
        let mid = n_nodes / 2;
        let (left_part, right_part) = nodes.split_at_mut(mid);
        return Box::new(BVHBuildNode::new_interior(
            dim as u8,
            Self::build_upper_sah(left_part, total_nodes),
            Self::build_upper_sah(right_part, total_nodes),
        ));
    }
    const N_BUCKETS: usize = 12;
    #[derive(Copy, Clone, Default)]
    struct Bucket {
        count: usize,
        bounds: Bounds3f,
    }
    let mut buckets = [Bucket::default(); N_BUCKETS];
    // Maps a node to its SAH bucket by centroid position along `dim`.
    let get_bucket_idx = |node: &BVHBuildNode| -> usize {
        let offset = centroid_bounds.offset(&node.bounds().centroid())[dim];
        let mut b = (N_BUCKETS as Float * offset) as usize;
        if b == N_BUCKETS {
            b = N_BUCKETS - 1;
        }
        b
    };
    // Initialize _Bucket_ for HLBVH SAH partition buckets
    for node in nodes.iter() {
        let b = get_bucket_idx(node);
        buckets[b].count += 1;
        buckets[b].bounds = buckets[b].bounds.union(node.bounds());
    }
    // Forward pass: prefix counts and surface areas of the "left" side.
    let mut cost = [0.0; N_BUCKETS - 1];
    let mut left_area = [0.0; N_BUCKETS];
    let mut left_count = [0; N_BUCKETS];
    let mut b_left = Bounds3f::default();
    let mut c_left = 0;
    for i in 0..N_BUCKETS {
        b_left = b_left.union(buckets[i].bounds);
        c_left += buckets[i].count;
        left_area[i] = b_left.surface_area();
        left_count[i] = c_left;
    }
    // Backward pass: accumulate the "right" side and finish each split's cost.
    let mut b_right = Bounds3f::default();
    let mut c_right = 0;
    let inv_total_sa = 1.0 / bounds.surface_area();
    for i in (0..N_BUCKETS - 1).rev() {
        b_right = b_right.union(buckets[i + 1].bounds);
        c_right += buckets[i + 1].count;
        let count_left = left_count[i];
        let sa_left = left_area[i];
        let sa_right = b_right.surface_area();
        cost[i] = 0.125
            + (count_left as Float * sa_left + c_right as Float * sa_right) * inv_total_sa;
    }
    // Find bucket to split at that minimizes SAH metric
    let mut min_cost = cost[0];
    let mut min_cost_split_bucket = 0;
    for (i, &c) in cost.iter().enumerate().skip(1) {
        if c < min_cost {
            min_cost = c;
            min_cost_split_bucket = i;
        }
    }
    // In-place partition: nodes whose bucket is <= the chosen split go left.
    let mut mid = {
        let mut left = 0;
        for i in 0..n_nodes {
            let b = get_bucket_idx(&nodes[i]);
            if b <= min_cost_split_bucket {
                nodes.swap(left, i);
                left += 1;
            }
        }
        left
    };
    // Degenerate partition (everything on one side): fall back to a median
    // split so the recursion always makes progress. The interior node is
    // then built once below instead of duplicating the construction per arm.
    if mid == 0 || mid == n_nodes {
        mid = n_nodes / 2;
        // Partially sort so the median is in the middle and elements are
        // partitioned around it.
        nodes.select_nth_unstable_by(mid, |a, b| {
            a.bounds().centroid()[dim]
                .partial_cmp(&b.bounds().centroid()[dim])
                .unwrap_or(std::cmp::Ordering::Equal)
        });
    }
    let (left_part, right_part) = nodes.split_at_mut(mid);
    Box::new(BVHBuildNode::new_interior(
        dim as u8,
        Self::build_upper_sah(left_part, total_nodes),
        Self::build_upper_sah(right_part, total_nodes),
    ))
}
/// Recursively builds a BVH subtree over `bvh_primitives`, appending each
/// leaf's primitives to `ordered_prims` and counting nodes in `total_nodes`.
///
/// `split_method` selects the partitioning heuristic; the HLBVH path is
/// handled by `build_hlbvh` before this is ever called. Large subtrees
/// recurse in parallel via rayon.
fn build_recursive(
    bvh_primitives: &mut [BVHPrimitiveInfo],
    total_nodes: &AtomicUsize,
    ordered_prims: &SharedPrimitiveBuffer,
    original_primitives: &[Arc<dyn PrimitiveTrait>],
    max_prims_in_node: usize,
    split_method: SplitMethod,
) -> Box<BVHBuildNode> {
    total_nodes.fetch_add(1, AtomicOrdering::Relaxed);
    let bounds = bvh_primitives
        .iter()
        .fold(Bounds3f::default(), |b, p| b.union(p.bounds));
    let n_primitives = bvh_primitives.len();
    // Degenerate bounds, a single primitive, or a small enough set: leaf.
    if bounds.surface_area() == 0.0 || n_primitives == 1 || n_primitives <= max_prims_in_node {
        let first_prim_offset = ordered_prims.append(original_primitives, bvh_primitives);
        return Box::new(BVHBuildNode::new_leaf(
            first_prim_offset,
            n_primitives,
            bounds,
        ));
    }
    let centroid_bounds = bvh_primitives.iter().fold(Bounds3f::default(), |b, p| {
        b.union_point(p.bounds.centroid())
    });
    let dim = centroid_bounds.max_dimension();
    // All centroids coincide along the widest axis: no split can separate them.
    if centroid_bounds.p_max[dim] == centroid_bounds.p_min[dim] {
        let first_prim_offset = ordered_prims.append(original_primitives, bvh_primitives);
        return Box::new(BVHBuildNode::new_leaf(
            first_prim_offset,
            n_primitives,
            bounds,
        ));
    }
    let mut mid: usize;
    match split_method {
        SplitMethod::Middle => {
            // Partition around the spatial midpoint of the centroid bounds.
            let pmid = (centroid_bounds.p_min[dim] + centroid_bounds.p_max[dim]) / 2.;
            mid = partition_slice(bvh_primitives, |p| p.centroid[dim] < pmid);
            // Degenerate partition (everything on one side): fall back to a
            // median split instead of recursing on an empty half.
            if mid == 0 || mid == n_primitives {
                mid = n_primitives / 2;
                bvh_primitives.select_nth_unstable_by(mid, |a, b| {
                    a.centroid[dim].partial_cmp(&b.centroid[dim]).unwrap()
                });
            }
        }
        SplitMethod::EqualCounts => {
            mid = n_primitives / 2;
            bvh_primitives.select_nth_unstable_by(mid, |a, b| {
                a.centroid[dim].partial_cmp(&b.centroid[dim]).unwrap()
            });
        }
        // SAH is the default; Hlbvh never reaches this function (see `new`).
        _ => {
            if n_primitives < 2 {
                // Too few primitives for bucketing. (Unreachable in practice:
                // the n_primitives == 1 case already returned a leaf above.)
                mid = n_primitives / 2;
                bvh_primitives.select_nth_unstable_by(mid, |a, b| {
                    a.centroid[dim]
                        .partial_cmp(&b.centroid[dim])
                        .unwrap_or(Ordering::Equal)
                });
            } else {
                // Bin primitives into buckets along `dim` by centroid.
                const N_BUCKETS: usize = 12;
                let mut buckets = [BVHSplitBucket::default(); N_BUCKETS];
                for prim in bvh_primitives.iter() {
                    let mut b = (N_BUCKETS as Float
                        * centroid_bounds.offset(&prim.centroid)[dim])
                        as usize;
                    if b == N_BUCKETS {
                        b = N_BUCKETS - 1;
                    }
                    buckets[b].count += 1;
                    buckets[b].bounds = buckets[b].bounds.union(prim.bounds);
                }
                // Forward scan: cost contribution of everything below each split.
                const N_SPLITS: usize = N_BUCKETS - 1;
                let mut costs = [0.0 as Float; N_SPLITS];
                let mut count_below = 0;
                let mut bound_below = Bounds3f::default();
                for i in 0..N_SPLITS {
                    bound_below = bound_below.union(buckets[i].bounds);
                    count_below += buckets[i].count;
                    costs[i] += count_below as Float * bound_below.surface_area();
                }
                // Backward scan: add the contribution of everything above.
                let mut count_above = 0;
                let mut bound_above = Bounds3f::default();
                for i in (0..N_SPLITS).rev() {
                    bound_above = bound_above.union(buckets[i + 1].bounds);
                    count_above += buckets[i + 1].count;
                    costs[i] += count_above as Float * bound_above.surface_area();
                }
                // Pick the split with the minimum SAH cost.
                let mut min_cost = Float::INFINITY;
                let mut min_cost_split_bucket = 0;
                for (i, &cost) in costs.iter().enumerate() {
                    if cost < min_cost {
                        min_cost = cost;
                        min_cost_split_bucket = i;
                    }
                }
                // Compare the best split against simply making one leaf.
                let leaf_cost = n_primitives as Float;
                min_cost = 0.5 + min_cost / bounds.surface_area();
                if n_primitives > max_prims_in_node || min_cost < leaf_cost {
                    mid = partition_slice(bvh_primitives, |bp| {
                        let mut b = (N_BUCKETS as Float
                            * centroid_bounds.offset(&bp.centroid)[dim])
                            as usize;
                        if b == N_BUCKETS {
                            b = N_BUCKETS - 1;
                        }
                        b <= min_cost_split_bucket
                    });
                } else {
                    // A leaf is cheaper than any split.
                    let first_prim_offset =
                        ordered_prims.append(original_primitives, bvh_primitives);
                    return Box::new(BVHBuildNode::new_leaf(
                        first_prim_offset,
                        n_primitives,
                        bounds,
                    ));
                }
            }
        }
    };
    let (left_prims, right_prims) = bvh_primitives.split_at_mut(mid);
    let axis = dim as u8;
    // Recurse in parallel only when the subtree is large enough to amortize
    // rayon's scheduling overhead.
    if n_primitives > 128 * 1024 {
        let (child0, child1) = rayon::join(
            || {
                Self::build_recursive(
                    left_prims,
                    total_nodes,
                    ordered_prims,
                    original_primitives,
                    max_prims_in_node,
                    split_method,
                )
            },
            || {
                Self::build_recursive(
                    right_prims,
                    total_nodes,
                    ordered_prims,
                    original_primitives,
                    max_prims_in_node,
                    split_method,
                )
            },
        );
        Box::new(BVHBuildNode::new_interior(axis, child0, child1))
    } else {
        let child0 = Self::build_recursive(
            left_prims,
            total_nodes,
            ordered_prims,
            original_primitives,
            max_prims_in_node,
            split_method,
        );
        let child1 = Self::build_recursive(
            right_prims,
            total_nodes,
            ordered_prims,
            original_primitives,
            max_prims_in_node,
            split_method,
        );
        Box::new(BVHBuildNode::new_interior(axis, child0, child1))
    }
}
/// Returns the closest intersection of `r` with any primitive, or `None`.
///
/// `t_max` optionally bounds the search; the bound shrinks to the closest
/// hit found so far, culling later nodes and primitives against it.
pub fn intersect(&self, r: &Ray, t_max: Option<Float>) -> Option<ShapeIntersection> {
    if self.nodes.is_empty() {
        return None;
    }
    let mut best_si: Option<ShapeIntersection> = None;
    let mut hit_t = t_max.unwrap_or(Float::INFINITY);
    // Precompute reciprocal direction and sign bits for the slab tests.
    let inv_dir = Vector3f::new(1.0 / r.d.x(), 1.0 / r.d.y(), 1.0 / r.d.z());
    let dir_is_neg = [
        if inv_dir.x() < 0.0 { 1 } else { 0 },
        if inv_dir.y() < 0.0 { 1 } else { 0 },
        if inv_dir.z() < 0.0 { 1 } else { 0 },
    ];
    // Explicit fixed-depth traversal stack instead of recursion.
    let mut to_visit_offset = 0;
    let mut current_node_index = 0;
    let mut nodes_to_visit = [0usize; 64];
    loop {
        let node = &self.nodes[current_node_index];
        // Check ray against BVH node bounds using the current closest hit_t
        if node
            .bounds
            .intersect_p(r.o, hit_t, inv_dir, &dir_is_neg)
            .is_some()
        {
            if node.n_primitives > 0 {
                // Intersect ray with all primitives in this leaf
                for i in 0..node.n_primitives {
                    let prim_idx = node.primitives_offset + i as usize;
                    let prim = &self.primitives[prim_idx];
                    if let Some(si) = prim.intersect(r, Some(hit_t)) {
                        // Tighten the bound so later tests are cheaper.
                        hit_t = si.t_hit();
                        best_si = Some(si);
                    }
                }
                if to_visit_offset == 0 {
                    break;
                }
                to_visit_offset -= 1;
                current_node_index = nodes_to_visit[to_visit_offset];
            } else {
                // Check the sign of the ray direction against the split axis
                if dir_is_neg[node.axis as usize] == 1 {
                    // Ray is negative (Right -> Left).
                    // Near child is Second Child (stored in primitives_offset).
                    // Far child is First Child (current + 1).
                    // Push Far
                    nodes_to_visit[to_visit_offset] = current_node_index + 1;
                    to_visit_offset += 1;
                    // Visit Near immediately
                    current_node_index = node.primitives_offset;
                } else {
                    // Ray is positive (Left -> Right).
                    // Push Far
                    nodes_to_visit[to_visit_offset] = node.primitives_offset;
                    to_visit_offset += 1;
                    current_node_index += 1;
                }
            }
        } else {
            // The ray missed the AABB of this node. Pop stack to try the next node.
            if to_visit_offset == 0 {
                break;
            }
            to_visit_offset -= 1;
            current_node_index = nodes_to_visit[to_visit_offset];
        }
    }
    best_si
}
/// Predicate form of `intersect`: returns `true` on the first hit within
/// `t_max` without computing the closest intersection (occlusion test).
fn intersect_p(&self, r: &Ray, t_max: Option<Float>) -> bool {
    if self.nodes.is_empty() {
        return false;
    }
    // Unlike `intersect`, the bound never shrinks — any hit suffices.
    let t_max = t_max.unwrap_or(Float::INFINITY);
    let inv_dir = Vector3f::new(1.0 / r.d.x(), 1.0 / r.d.y(), 1.0 / r.d.z());
    let dir_is_neg = [
        if inv_dir.x() < 0.0 { 1 } else { 0 },
        if inv_dir.y() < 0.0 { 1 } else { 0 },
        if inv_dir.z() < 0.0 { 1 } else { 0 },
    ];
    // Explicit fixed-depth traversal stack instead of recursion.
    let mut to_visit_offset = 0;
    let mut current_node_index = 0;
    let mut nodes_to_visit = [0usize; 64];
    loop {
        let node = &self.nodes[current_node_index];
        // Check AABB
        if node
            .bounds
            .intersect_p(r.o, t_max, inv_dir, &dir_is_neg)
            .is_some()
        {
            if node.n_primitives > 0 {
                for i in 0..node.n_primitives {
                    let prim_idx = node.primitives_offset + i as usize;
                    let prim = &self.primitives[prim_idx];
                    if prim.intersect_p(r, Some(t_max)) {
                        // Early out: one occluder is enough.
                        return true;
                    }
                }
                // No intersection in this leaf, try next node in stack
                if to_visit_offset == 0 {
                    break;
                }
                to_visit_offset -= 1;
                current_node_index = nodes_to_visit[to_visit_offset];
            } else {
                // Standard front-to-back traversal order helps find an occlusion
                // closer to the origin faster, potentially saving work.
                if dir_is_neg[node.axis as usize] == 1 {
                    nodes_to_visit[to_visit_offset] = current_node_index + 1;
                    to_visit_offset += 1;
                    current_node_index = node.primitives_offset;
                } else {
                    nodes_to_visit[to_visit_offset] = node.primitives_offset;
                    to_visit_offset += 1;
                    current_node_index += 1;
                }
            }
        } else {
            // Node's AABB missed: pop the stack or finish.
            if to_visit_offset == 0 {
                break;
            }
            to_visit_offset -= 1;
            current_node_index = nodes_to_visit[to_visit_offset];
        }
    }
    false
}
}

327
src/core/bssrdf.rs Normal file
View file

@ -0,0 +1,327 @@
use crate::core::bxdf::BSDF;
use crate::core::interaction::{InteractionData, ShadingGeometry, SurfaceInteraction};
use crate::core::pbrt::{Float, PI};
use crate::geometry::{Frame, Normal3f, Point2f, Point3f, Point3fi, Vector3f};
use crate::shapes::Shape;
use crate::spectra::{N_SPECTRUM_SAMPLES, SampledSpectrum};
use crate::utils::math::{catmull_rom_weights, square};
use crate::utils::sampling::sample_catmull_rom_2d;
use enum_dispatch::enum_dispatch;
use std::sync::Arc;
/// Result of turning a BSSRDF probe intersection into a usable sample:
/// the spatial profile value, its sampling PDF, and a BSDF that models the
/// directional term at the sampled exit point.
#[derive(Debug)]
pub struct BSSRDFSample<'a> {
    // Spatial term Sp evaluated at the sampled point.
    pub sp: SampledSpectrum,
    // Per-wavelength PDF of having sampled that point.
    pub pdf: SampledSpectrum,
    // Directional term, represented as a BSDF borrowing table data for 'a.
    pub sw: BSDF<'a>,
    // Outgoing direction associated with the sample.
    pub wo: Vector3f,
}
/// Minimal snapshot of a `SurfaceInteraction` retaining only the geometric
/// data a BSSRDF needs at a probe hit: position (with error interval),
/// geometric and shading normals, and both sets of partial derivatives.
#[derive(Clone, Debug)]
pub struct SubsurfaceInteraction {
    // Interaction point with floating-point error bounds.
    pi: Point3fi,
    // Geometric normal.
    n: Normal3f,
    // Shading normal.
    ns: Normal3f,
    // Geometric partial derivatives of position.
    dpdu: Vector3f,
    dpdv: Vector3f,
    // Shading partial derivatives of position.
    dpdus: Vector3f,
    dpdvs: Vector3f,
}
impl SubsurfaceInteraction {
pub fn new(si: &SurfaceInteraction) -> Self {
Self {
pi: si.common.pi,
n: si.common.n,
dpdu: si.dpdu,
dpdv: si.dpdv,
ns: si.shading.n,
dpdus: si.shading.dpdu,
dpdvs: si.shading.dpdv,
}
}
pub fn p(&self) -> Point3f {
self.pi.into()
}
}
impl From<SurfaceInteraction> for SubsurfaceInteraction {
fn from(si: SurfaceInteraction) -> SubsurfaceInteraction {
SubsurfaceInteraction {
pi: si.common.pi,
n: si.common.n,
ns: si.shading.n,
dpdu: si.dpdu,
dpdv: si.dpdv,
dpdus: si.shading.dpdu,
dpdvs: si.shading.dpdv,
}
}
}
/// Rebuilds a full `SurfaceInteraction` from the subset of fields a
/// `SubsurfaceInteraction` retains. Everything the snapshot did not capture
/// (uv, normal derivatives, screen-space differentials, material, lights,
/// media, outgoing direction, time) is zeroed or defaulted.
impl From<&SubsurfaceInteraction> for SurfaceInteraction {
    fn from(ssi: &SubsurfaceInteraction) -> SurfaceInteraction {
        SurfaceInteraction {
            common: InteractionData {
                pi: ssi.pi,
                n: ssi.n,
                // Outgoing direction and time are unknown here; neutral values.
                wo: Vector3f::zero(),
                time: 0.,
                medium_interface: None,
                medium: None,
            },
            uv: Point2f::zero(),
            dpdu: ssi.dpdu,
            dpdv: ssi.dpdv,
            dndu: Normal3f::zero(),
            dndv: Normal3f::zero(),
            shading: ShadingGeometry {
                n: ssi.ns,
                dpdu: ssi.dpdus,
                dpdv: ssi.dpdvs,
                dndu: Normal3f::zero(),
                dndv: Normal3f::zero(),
            },
            face_index: 0,
            area_light: None,
            material: None,
            dpdx: Vector3f::zero(),
            dpdy: Vector3f::zero(),
            dudx: 0.,
            dvdx: 0.,
            dudy: 0.,
            dvdy: 0.,
            // NOTE(review): placeholder default shape — confirm no caller of
            // this conversion relies on the real shape being present.
            shape: Shape::default().into(),
        }
    }
}
/// Tabulated subsurface scattering profile, indexed by single-scattering
/// albedo (`rho_samples`) and optical radius (`radius_samples`).
/// `profile` and `profile_cdf` are flattened row-major tables of size
/// `rho_samples.len() * radius_samples.len()` (see `eval_profile`).
#[derive(Clone, Debug)]
pub struct BSSRDFTable {
    // Albedo sample positions (first table dimension).
    rho_samples: Vec<Float>,
    // Optical-radius sample positions (second table dimension).
    radius_samples: Vec<Float>,
    // Profile values, row-major: [rho][radius].
    profile: Vec<Float>,
    // Effective albedo per rho sample.
    rho_eff: Vec<Float>,
    // Per-row CDF over radius, used for importance sampling.
    profile_cdf: Vec<Float>,
}
impl BSSRDFTable {
    /// Creates a table with all entries zero-initialized, sized for
    /// `n_rho_samples` albedo rows and `n_radius_samples` radius columns.
    ///
    /// The vectors must have their advertised *length* (not merely capacity):
    /// `eval_profile` and the table-filling code index into them directly, so
    /// `Vec::with_capacity` (length 0) would panic on first access. We use
    /// `vec![0.; n]`, mirroring pbrt's value-initialized arrays.
    pub fn new(n_rho_samples: usize, n_radius_samples: usize) -> Self {
        Self {
            rho_samples: vec![0.; n_rho_samples],
            radius_samples: vec![0.; n_radius_samples],
            profile: vec![0.; n_radius_samples * n_rho_samples],
            rho_eff: vec![0.; n_rho_samples],
            profile_cdf: vec![0.; n_radius_samples * n_rho_samples],
        }
    }

    /// Returns the profile entry at (`rho_index`, `radius_index`).
    ///
    /// # Panics
    /// Panics if either index is out of the table's range.
    pub fn eval_profile(&self, rho_index: usize, radius_index: usize) -> Float {
        assert!(rho_index < self.rho_samples.len());
        assert!(radius_index < self.radius_samples.len());
        // Row-major layout: one row of radius samples per rho sample.
        self.profile[rho_index * self.radius_samples.len() + radius_index]
    }
}
/// Endpoints of a probe ray segment; the integrator intersects this segment
/// with the geometry to find the actual exit point of a BSSRDF sample.
#[derive(Clone, Default, Debug)]
pub struct BSSRDFProbeSegment {
    // Segment start point.
    pub p0: Point3f,
    // Segment end point.
    pub p1: Point3f,
}
/// Interface implemented by all BSSRDF variants; dispatched statically
/// through the `BSSRDF` enum via `enum_dispatch`.
#[enum_dispatch]
pub trait BSSRDFTrait: Send + Sync + std::fmt::Debug {
    // Samples a probe segment from two uniform random values, or None when
    // no valid radius can be sampled.
    fn sample_sp(&self, u1: Float, u2: Point2f) -> Option<BSSRDFProbeSegment>;
    // Converts a probe-ray intersection into a full BSSRDF sample.
    fn probe_intersection_to_sample(&self, si: &SubsurfaceInteraction) -> BSSRDFSample<'_>;
}
/// Closed set of BSSRDF implementations; `enum_dispatch` generates the
/// `BSSRDFTrait` forwarding impl so calls avoid dynamic dispatch.
#[enum_dispatch(BSSRDFTrait)]
#[derive(Debug, Clone)]
pub enum BSSRDF<'a> {
    Tabulated(TabulatedBSSRDF<'a>),
}
/// BSSRDF whose radial profile is interpolated from a precomputed
/// `BSSRDFTable` (borrowed for `'a`), following pbrt's tabulated model.
#[derive(Clone, Debug)]
pub struct TabulatedBSSRDF<'a> {
    // Exit point on the surface.
    po: Point3f,
    // Outgoing direction at po.
    wo: Vector3f,
    // Shading normal at po.
    ns: Normal3f,
    // Relative index of refraction across the boundary.
    eta: Float,
    // Extinction coefficient sigma_a + sigma_s.
    sigma_t: SampledSpectrum,
    // Single-scattering albedo sigma_s / sigma_t.
    rho: SampledSpectrum,
    // Shared profile table.
    table: &'a BSSRDFTable,
}
impl<'a> TabulatedBSSRDF<'a> {
    /// Builds a tabulated BSSRDF at exit point `po` with outgoing direction
    /// `wo`, shading normal `ns`, relative IOR `eta`, and medium coefficients
    /// `sigma_a` / `sigma_s`, interpolating profiles from `table`.
    pub fn new(
        po: Point3f,
        wo: Vector3f,
        ns: Normal3f,
        eta: Float,
        sigma_a: &SampledSpectrum,
        sigma_s: &SampledSpectrum,
        table: &'a BSSRDFTable,
    ) -> Self {
        // Extinction and single-scattering albedo; safe_div guards channels
        // where sigma_t is zero.
        let sigma_t = *sigma_a + *sigma_s;
        let rho = SampledSpectrum::safe_div(sigma_s, &sigma_t);
        Self {
            po,
            wo,
            ns,
            eta,
            table,
            sigma_t,
            rho,
        }
    }

    /// Spatial term Sp(po, pi): the profile is radially symmetric, so it
    /// depends only on the distance between the two surface points.
    pub fn sp(&self, pi: Point3f) -> SampledSpectrum {
        self.sr(self.po.distance(pi))
    }

    /// Evaluates the radial profile Sr(r) per wavelength by spline-based
    /// interpolation of the table in (albedo, optical radius).
    pub fn sr(&self, r: Float) -> SampledSpectrum {
        let mut sr_spectrum = SampledSpectrum::new(0.);
        for i in 0..N_SPECTRUM_SAMPLES {
            // Convert r into the unitless optical radius for this wavelength.
            let r_optical = r * self.sigma_t[i];
            // Catmull-Rom weights along both table axes; a wavelength whose
            // albedo or radius falls outside the table contributes zero.
            let (rho_offset, rho_weights) =
                match catmull_rom_weights(&self.table.rho_samples, self.rho[i]) {
                    Some(res) => res,
                    None => continue,
                };
            let (radius_offset, radius_weights) =
                match catmull_rom_weights(&self.table.radius_samples, r_optical) {
                    Some(res) => res,
                    None => continue,
                };
            // Weighted sum over the 4x4 spline support.
            let mut sr = 0.;
            for (j, rho_weight) in rho_weights.iter().enumerate() {
                for (k, radius_weight) in radius_weights.iter().enumerate() {
                    let weight = rho_weight * radius_weight;
                    if weight != 0. {
                        sr += weight * self.table.eval_profile(rho_offset + j, radius_offset + k);
                    }
                }
            }
            // Cancel marginal PDF factor from the tabulated profile.
            if r_optical != 0. {
                sr /= 2. * PI * r_optical;
            }
            sr_spectrum[i] = sr;
        }
        // Transform the unitless value back into world-space units.
        sr_spectrum *= self.sigma_t * self.sigma_t;
        SampledSpectrum::clamp_zero(&sr_spectrum)
    }

    /// Importance-samples a radius from the profile of the hero wavelength
    /// (channel 0). Returns `None` when that channel does not scatter.
    pub fn sample_sr(&self, u: Float) -> Option<Float> {
        if self.sigma_t[0] == 0. {
            return None;
        }
        let (ret, _, _) = sample_catmull_rom_2d(
            &self.table.rho_samples,
            &self.table.radius_samples,
            &self.table.profile,
            &self.table.profile_cdf,
            self.rho[0],
            u,
        );
        // Convert the sampled optical radius back to world units.
        Some(ret / self.sigma_t[0])
    }

    /// Per-wavelength PDF of `sample_sr` returning radius `r`.
    pub fn pdf_sr(&self, r: Float) -> SampledSpectrum {
        let mut pdf = SampledSpectrum::new(0.);
        for i in 0..N_SPECTRUM_SAMPLES {
            let r_optical = r * self.sigma_t[i];
            let (rho_offset, rho_weights) =
                match catmull_rom_weights(&self.table.rho_samples, self.rho[i]) {
                    Some(res) => res,
                    None => continue,
                };
            let (radius_offset, radius_weights) =
                match catmull_rom_weights(&self.table.radius_samples, r_optical) {
                    Some(res) => res,
                    None => continue,
                };
            let mut sr = 0.;
            let mut rho_eff = 0.;
            for (j, rho_weight) in rho_weights.iter().enumerate() {
                if *rho_weight != 0. {
                    // Accumulate effective albedo (the normalization of the
                    // profile) alongside the interpolated profile value.
                    rho_eff += self.table.rho_eff[rho_offset + j] * rho_weight;
                    for (k, radius_weight) in radius_weights.iter().enumerate() {
                        if *radius_weight != 0. {
                            sr += self.table.eval_profile(rho_offset + j, radius_offset + k)
                                * rho_weight
                                * radius_weight;
                        }
                    }
                }
            }
            // Cancel marginal PDF factor from the tabulated profile.
            if r_optical != 0. {
                sr /= 2. * PI * r_optical;
            }
            pdf[i] = sr * square(self.sigma_t[i]) / rho_eff;
        }
        SampledSpectrum::clamp_zero(&pdf)
    }

    /// PDF of `sample_sp` having produced the point `pi` with normal `ni`,
    /// accounting for all three projection axes it might have used.
    pub fn pdf_sp(&self, pi: Point3f, ni: Normal3f) -> SampledSpectrum {
        // Express the offset to pi and the normal ni in a frame aligned with
        // the shading normal at po.
        let d = pi - self.po;
        let f = Frame::from_z(self.ns.into());
        let d_local = f.to_local(d);
        let n_local = f.to_local(ni.into());
        // Radius of pi projected onto the plane perpendicular to each axis.
        // (Fixed: sum of squares — previously the parentheses nested one
        // square inside the other.)
        let r_proj = [
            (square(d_local.y()) + square(d_local.z())).sqrt(),
            (square(d_local.z()) + square(d_local.x())).sqrt(),
            (square(d_local.x()) + square(d_local.y())).sqrt(),
        ];
        // Probabilities with which sample_sp picks each axis frame:
        // x and y with 0.25 each, z with the remaining 0.5 (must match the
        // u1 thresholds in sample_sp).
        let axis_prob: [Float; 3] = [0.25, 0.25, 0.5];
        let mut pdf = SampledSpectrum::new(0.);
        for axis in 0..3 {
            // Weight the radial PDF by the projection factor |n_local| and
            // the axis selection probability; these scale the PDF value, not
            // the radius at which it is evaluated.
            let weight = n_local[axis].abs() * axis_prob[axis];
            pdf += self.pdf_sr(r_proj[axis]) * SampledSpectrum::new(weight);
        }
        pdf
    }
}
impl<'a> BSSRDFTrait for TabulatedBSSRDF<'a> {
    /// Samples a probe segment: picks a projection frame with probabilities
    /// (0.25, 0.25, 0.5), samples a radius and azimuth from the profile, and
    /// returns a chord through the sampling sphere for the integrator to
    /// intersect against the geometry.
    fn sample_sp(&self, u1: Float, u2: Point2f) -> Option<BSSRDFProbeSegment> {
        // Axis-frame selection thresholds must stay in sync with the
        // axis_prob table used by pdf_sp.
        let f = if u1 < 0.25 {
            Frame::from_x(self.ns.into())
        } else if u1 < 0.5 {
            Frame::from_y(self.ns.into())
        } else {
            Frame::from_z(self.ns.into())
        };
        // Sample radius and azimuth for the probe.
        let r = self.sample_sr(u2[0])?;
        let phi = 2. * PI * u2[1];
        // Radius beyond which the profile is considered negligible.
        let r_max = self.sample_sr(0.999)?;
        if r >= r_max {
            // Fixed: without this rejection the chord length below takes the
            // square root of a negative number and yields NaN endpoints.
            return None;
        }
        // Chord of the sphere of radius r_max at distance r from its axis.
        let l = 2. * (square(r_max) - square(r)).sqrt();
        let p_start = self.po + r * (f.x * phi.cos() + f.y * phi.sin()) - l * f.z / 2.;
        let p_target = p_start + l * f.z;
        Some(BSSRDFProbeSegment {
            p0: p_start,
            p1: p_target,
        })
    }

    /// Converts a probe intersection into a full BSSRDF sample.
    /// Not implemented yet.
    fn probe_intersection_to_sample(&self, _si: &SubsurfaceInteraction) -> BSSRDFSample<'_> {
        todo!()
    }
}

File diff suppressed because it is too large Load diff

View file

@ -5,19 +5,19 @@ use crate::geometry::{
Bounds2f, Bounds2fi, Bounds2i, Normal3f, Point2f, Point2i, Point3f, Vector2f, Vector2fi,
Vector2i, Vector3f,
};
use crate::utils::color::{RGB, SRGBEncoding, Triplet, XYZ, white_balance};
use crate::utils::colorspace::RGBColorspace;
use crate::utils::containers::Array2D;
use crate::utils::image::{
Image, ImageChannelDesc, ImageChannelValues, ImageMetadata, PixelFormat,
use crate::image::{Image, ImageChannelDesc, ImageChannelValues, ImageMetadata, PixelFormat};
use crate::spectra::sampled::{LAMBDA_MAX, LAMBDA_MIN};
use crate::spectra::{
ConstantSpectrum, DenselySampledSpectrum, N_SPECTRUM_SAMPLES, SampledSpectrum,
SampledWavelengths, Spectrum, SpectrumTrait, cie_x, cie_y, cie_z,
};
use crate::utils::AtomicFloat;
use crate::utils::color::{MatrixMulColor, RGB, SRGB, Triplet, XYZ, white_balance};
use crate::utils::colorspace::RGBColorSpace;
use crate::utils::containers::Array2D;
use crate::utils::math::SquareMatrix;
use crate::utils::math::linear_least_squares;
use crate::utils::sampling::VarianceEstimator;
use crate::utils::spectrum::{
ConstantSpectrum, DenselySampledSpectrum, LAMBDA_MAX, LAMBDA_MIN, N_SPECTRUM_SAMPLES,
SampledSpectrum, SampledWavelengths, Spectrum, inner_product, spectra,
};
use crate::utils::transform::AnimatedTransform;
use rayon::prelude::*;
use std::sync::{Arc, atomic::AtomicUsize, atomic::Ordering};
@ -26,27 +26,37 @@ use once_cell::sync::Lazy;
use std::error::Error;
use std::sync::Mutex;
#[derive(Clone, Debug)]
pub struct RGBFilm {
pub base: FilmBase,
pub max_component_value: Float,
pub write_fp16: bool,
pub filter_integral: Float,
pub output_rgbf_from_sensor_rgb: SquareMatrix<Float, 3>,
pub pixels: Array2D<RGBPixel>,
pub pixels: Arc<Array2D<RGBPixel>>,
}
#[derive(Debug)]
pub struct RGBPixel {
rgb_sum: [f64; 3],
weight_sum: f64,
rgb_splat: Mutex<[f64; 3]>,
rgb_sum: [AtomicFloat; 3],
weight_sum: AtomicFloat,
rgb_splat: [AtomicFloat; 3],
}
impl Default for RGBPixel {
fn default() -> Self {
Self {
rgb_sum: [0., 0., 0.],
weight_sum: 0.,
rgb_splat: Mutex::new([0., 0., 0.]),
rgb_sum: [
AtomicFloat::new(0.0),
AtomicFloat::new(0.0),
AtomicFloat::new(0.0),
],
weight_sum: AtomicFloat::new(0.0),
rgb_splat: [
AtomicFloat::new(0.0),
AtomicFloat::new(0.0),
AtomicFloat::new(0.0),
],
}
}
}
@ -54,11 +64,10 @@ impl Default for RGBPixel {
impl RGBFilm {
pub fn new(
base: FilmBase,
colorspace: RGBColorspace,
colorspace: &RGBColorSpace,
max_component_value: Float,
write_fp16: bool,
) -> Self {
let pixels = Array2D::new(base.pixel_bounds);
let filter_integral = base.filter.integral();
let sensor_matrix = base
.sensor
@ -66,50 +75,66 @@ impl RGBFilm {
.expect("Sensor must exist")
.xyz_from_sensor_rgb;
let output_rgbf_from_sensor_rgb = colorspace.rgb_from_xyz * sensor_matrix;
let width = base.pixel_bounds.p_max.x() - base.pixel_bounds.p_min.x();
let height = base.pixel_bounds.p_max.y() - base.pixel_bounds.p_min.y();
let count = (width * height) as usize;
let mut pixel_vec = Vec::with_capacity(count);
for _ in 0..count {
pixel_vec.push(RGBPixel::default());
}
let pixels_array = Array2D::new(base.pixel_bounds);
Self {
base,
max_component_value,
write_fp16,
filter_integral,
output_rgbf_from_sensor_rgb,
pixels,
pixels: Arc::new(pixels_array),
}
}
}
#[derive(Clone, Debug)]
pub struct GBufferBFilm {
pub base: FilmBase,
output_from_render: AnimatedTransform,
apply_inverse: bool,
pixels: Array2D<GBufferPixel>,
colorspace: RGBColorspace,
pixels: Arc<Array2D<GBufferPixel>>,
colorspace: RGBColorSpace,
max_component_value: Float,
write_fp16: bool,
filter_integral: Float,
output_rgbf_from_sensor_rgb: SquareMatrix<Float, 3>,
}
#[derive(Debug, Default)]
struct GBufferPixel {
rgb_sum: [f64; 3],
weight_sum: f64,
g_bugger_weight_sum: f64,
rgb_splat: Mutex<[f64; 3]>,
p_sum: Point3f,
dz_dx_sum: Float,
dz_dy_sum: Float,
n_sum: Normal3f,
ns_sum: Normal3f,
uv_sum: Point2f,
rgb_albedo_sum: [f64; 3],
rgb_variance: VarianceEstimator,
pub rgb_sum: [AtomicFloat; 3],
pub weight_sum: AtomicFloat,
pub g_bugger_weight_sum: AtomicFloat,
pub rgb_splat: [AtomicFloat; 3],
pub p_sum: Point3f,
pub dz_dx_sum: AtomicFloat,
pub dz_dy_sum: Float,
pub n_sum: Normal3f,
pub ns_sum: Normal3f,
pub uv_sum: Point2f,
pub rgb_albedo_sum: [AtomicFloat; 3],
pub rgb_variance: VarianceEstimator,
}
#[derive(Clone, Debug)]
pub struct SpectralFilm {
pub base: FilmBase,
output_from_render: AnimatedTransform,
pixels: Array2D<SpectralPixel>,
}
#[derive(Clone, Debug)]
struct SpectralPixel;
const N_SWATCH_REFLECTANCES: usize = 24;
@ -131,13 +156,13 @@ impl PixelSensor {
r: &Spectrum,
g: &Spectrum,
b: &Spectrum,
output_colorspace: RGBColorspace,
output_colorspace: RGBColorSpace,
sensor_illum: &Spectrum,
imaging_ratio: Float,
) -> Result<Self, Box<dyn Error>> {
let r_bar = DenselySampledSpectrum::from_spectrum(&r, LAMBDA_MIN, LAMBDA_MAX);
let g_bar = DenselySampledSpectrum::from_spectrum(&g, LAMBDA_MIN, LAMBDA_MAX);
let b_bar = DenselySampledSpectrum::from_spectrum(&b, LAMBDA_MIN, LAMBDA_MAX);
let r_bar = DenselySampledSpectrum::from_spectrum(r, LAMBDA_MIN, LAMBDA_MAX);
let g_bar = DenselySampledSpectrum::from_spectrum(g, LAMBDA_MIN, LAMBDA_MAX);
let b_bar = DenselySampledSpectrum::from_spectrum(b, LAMBDA_MIN, LAMBDA_MAX);
let mut rgb_camera = [[0.; 3]; N_SWATCH_REFLECTANCES];
for i in 0..N_SWATCH_REFLECTANCES {
@ -154,16 +179,16 @@ impl PixelSensor {
}
let mut xyz_output = [[0.; 3]; N_SWATCH_REFLECTANCES];
let sensor_white_g = inner_product(sensor_illum, &Spectrum::DenselySampled(g_bar.clone()));
let sensor_white_y = inner_product(sensor_illum, &spectra::Y);
let sensor_white_g = sensor_illum.inner_product(&Spectrum::DenselySampled(g_bar.clone()));
let sensor_white_y = sensor_illum.inner_product(cie_y());
for i in 0..N_SWATCH_REFLECTANCES {
let s = SWATCH_REFLECTANCES[i].clone();
let xyz = Self::project_reflectance::<XYZ>(
&s,
&output_colorspace.illuminant,
&spectra::X,
&spectra::Y,
&spectra::Z,
cie_x(),
cie_y(),
cie_z(),
) * (sensor_white_y / sensor_white_g);
for c in 0..3 {
xyz_output[i][c] = xyz[c];
@ -182,13 +207,13 @@ impl PixelSensor {
}
pub fn new_with_white_balance(
output_colorspace: RGBColorspace,
output_colorspace: RGBColorSpace,
sensor_illum: Option<Spectrum>,
imaging_ratio: Float,
) -> Self {
let r_bar = DenselySampledSpectrum::from_spectrum(&spectra::X, LAMBDA_MIN, LAMBDA_MAX);
let g_bar = DenselySampledSpectrum::from_spectrum(&spectra::Y, LAMBDA_MIN, LAMBDA_MAX);
let b_bar = DenselySampledSpectrum::from_spectrum(&spectra::Z, LAMBDA_MIN, LAMBDA_MAX);
let r_bar = DenselySampledSpectrum::from_spectrum(cie_x(), LAMBDA_MIN, LAMBDA_MAX);
let g_bar = DenselySampledSpectrum::from_spectrum(cie_y(), LAMBDA_MIN, LAMBDA_MAX);
let b_bar = DenselySampledSpectrum::from_spectrum(cie_z(), LAMBDA_MIN, LAMBDA_MAX);
let xyz_from_sensor_rgb: SquareMatrix<Float, 3>;
if let Some(illum) = sensor_illum {
@ -220,13 +245,13 @@ impl PixelSensor {
for lambda_ind in LAMBDA_MIN..=LAMBDA_MAX {
let lambda = lambda_ind as Float;
let illum_val = illum.sample_at(lambda);
let illum_val = illum.evaluate(lambda);
g_integral += b2.sample_at(lambda) * illum_val;
let refl_illum = refl.sample_at(lambda) * illum_val;
result[0] += b1.sample_at(lambda) * refl_illum;
result[1] += b2.sample_at(lambda) * refl_illum;
result[2] += b3.sample_at(lambda) * refl_illum;
g_integral += b2.evaluate(lambda) * illum_val;
let refl_illum = refl.evaluate(lambda) * illum_val;
result[0] += b1.evaluate(lambda) * refl_illum;
result[1] += b2.evaluate(lambda) * refl_illum;
result[2] += b3.evaluate(lambda) * refl_illum;
}
if g_integral > 0. {
@ -240,7 +265,7 @@ impl PixelSensor {
}
pub fn to_sensor_rgb(&self, l: SampledSpectrum, lambda: &SampledWavelengths) -> RGB {
let l_norm = l.safe_div(lambda.pdf());
let l_norm = SampledSpectrum::safe_div(&l, &lambda.pdf());
self.imaging_ratio
* RGB::new(
(self.r_bar.sample(lambda) * l_norm).average(),
@ -264,13 +289,14 @@ pub struct VisibleSurface {
impl VisibleSurface {
pub fn new(
_si: SurfaceInteraction,
albedo: SampledSpectrum,
_lambda: SampledWavelengths,
_si: &SurfaceInteraction,
albedo: &SampledSpectrum,
_lambda: &SampledWavelengths,
) -> Self {
let mut vs = VisibleSurface::default();
vs.albedo = albedo;
vs
VisibleSurface {
albedo: *albedo,
..Default::default()
}
}
}
@ -290,7 +316,7 @@ impl Default for VisibleSurface {
}
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct FilmBase {
pub full_resolution: Point2i,
pub pixel_bounds: Bounds2i,
@ -352,7 +378,7 @@ pub trait FilmTrait: Sync {
PixelFormat::F32
};
let channel_names = vec!["R".to_string(), "G".to_string(), "B".to_string()];
let channel_names = &["R", "G", "B"];
let pixel_bounds = self.base().pixel_bounds;
let resolution = Point2i::from(pixel_bounds.diagonal());
@ -392,7 +418,7 @@ pub trait FilmTrait: Sync {
})
.collect();
let mut image = Image::new(format, resolution, channel_names, Arc::new(SRGBEncoding));
let mut image = Image::new(format, resolution, channel_names, SRGB);
let rgb_desc = ImageChannelDesc::new(&[0, 1, 2]);
for (iy, row_data) in processed_rows.into_iter().enumerate() {
@ -461,6 +487,7 @@ pub trait FilmTrait: Sync {
}
}
#[derive(Clone, Debug)]
pub enum Film {
RGB(RGBFilm),
GBuffer(GBufferBFilm),
@ -493,11 +520,11 @@ impl FilmTrait for RGBFilm {
rgb *= self.max_component_value / m;
}
let pixel = &mut self.pixels[p_film];
let pixel = &self.pixels[p_film];
for c in 0..3 {
pixel.rgb_sum[c] += (weight * rgb[c]) as f64;
pixel.rgb_sum[c].add((weight * rgb[c]) as f64);
}
pixel.weight_sum += weight as f64;
pixel.weight_sum.add(weight as f64);
}
fn add_splat(&mut self, p: Point2f, l: SampledSpectrum, lambda: &SampledWavelengths) {
@ -527,8 +554,7 @@ impl FilmTrait for RGBFilm {
if wt != 0. {
let pixel = &self.pixels[*pi];
for i in 0..3 {
let mut rgb_splat = pixel.rgb_splat.lock().unwrap();
rgb_splat[i] += wt as f64 * rgb_splat[i];
pixel.rgb_splat[i].add((wt * rgb[i]) as f64);
}
}
}
@ -537,26 +563,27 @@ impl FilmTrait for RGBFilm {
fn get_pixel_rgb(&self, p: Point2i, splat_scale: Option<Float>) -> RGB {
let pixel = &self.pixels[p];
let mut rgb = RGB::new(
pixel.rgb_sum[0] as Float,
pixel.rgb_sum[1] as Float,
pixel.rgb_sum[2] as Float,
pixel.rgb_sum[0].load() as Float,
pixel.rgb_sum[1].load() as Float,
pixel.rgb_sum[2].load() as Float,
);
let weight_sum = pixel.weight_sum;
let weight_sum = pixel.weight_sum.load();
if weight_sum != 0. {
rgb /= weight_sum as Float
}
let rgb_splat = pixel.rgb_splat.lock().unwrap();
if let Some(splat) = splat_scale {
for c in 0..3 {
rgb[c] += splat * rgb_splat[c] as Float / self.filter_integral;
let splat_val = pixel.rgb_splat[c].load();
rgb[c] += splat * splat_val as Float / self.filter_integral;
}
} else {
for c in 0..3 {
rgb[c] += rgb_splat[c] as Float / self.filter_integral;
let splat_val = pixel.rgb_splat[c].load();
rgb[c] += splat_val as Float / self.filter_integral;
}
}
self.output_rgbf_from_sensor_rgb * rgb
self.output_rgbf_from_sensor_rgb.mul_rgb(rgb)
}
fn to_output_rgb(&self, l: SampledSpectrum, lambda: &SampledWavelengths) -> RGB {
@ -564,7 +591,7 @@ impl FilmTrait for RGBFilm {
.get_pixel_sensor()
.expect("Sensor must exist")
.to_sensor_rgb(l, lambda);
self.output_rgbf_from_sensor_rgb * sensor_rgb
self.output_rgbf_from_sensor_rgb.mul_rgb(sensor_rgb)
}
fn uses_visible_surface(&self) -> bool {
@ -615,8 +642,7 @@ impl FilmTrait for GBufferBFilm {
if wt != 0. {
let pixel = &self.pixels[*pi];
for i in 0..3 {
let mut rgb_splat = pixel.rgb_splat.lock().unwrap();
rgb_splat[i] += wt as f64 * rgb_splat[i];
pixel.rgb_splat[i].add((wt * rgb[i]) as f64);
}
}
}
@ -627,32 +653,33 @@ impl FilmTrait for GBufferBFilm {
.get_pixel_sensor()
.expect("Sensor must exist")
.to_sensor_rgb(l, lambda);
self.output_rgbf_from_sensor_rgb * sensor_rgb
self.output_rgbf_from_sensor_rgb.mul_rgb(sensor_rgb)
}
fn get_pixel_rgb(&self, p: Point2i, splat_scale: Option<Float>) -> RGB {
let pixel = &self.pixels[p];
let mut rgb = RGB::new(
pixel.rgb_sum[0] as Float,
pixel.rgb_sum[1] as Float,
pixel.rgb_sum[2] as Float,
pixel.rgb_sum[0].load() as Float,
pixel.rgb_sum[1].load() as Float,
pixel.rgb_sum[2].load() as Float,
);
let weight_sum = pixel.weight_sum;
let weight_sum = pixel.weight_sum.load();
if weight_sum != 0. {
rgb /= weight_sum as Float
}
let rgb_splat = pixel.rgb_splat.lock().unwrap();
if let Some(splat) = splat_scale {
for c in 0..3 {
rgb[c] += splat * rgb_splat[c] as Float / self.filter_integral;
let splat_val = pixel.rgb_splat[c].load();
rgb[c] += splat * splat_val as Float / self.filter_integral;
}
} else {
for c in 0..3 {
rgb[c] += rgb_splat[c] as Float / self.filter_integral;
let splat_val = pixel.rgb_splat[c].load();
rgb[c] += splat_val as Float / self.filter_integral;
}
}
self.output_rgbf_from_sensor_rgb * rgb
self.output_rgbf_from_sensor_rgb.mul_rgb(rgb)
}
fn uses_visible_surface(&self) -> bool {

View file

@ -1,17 +1,19 @@
use crate::core::pbrt::{Float, lerp};
use crate::core::sampler::PiecewiseConstant2D;
use crate::geometry::{Bounds2f, Bounds2i, Point2f, Point2i, Vector2f};
use crate::utils::containers::Array2D;
use crate::utils::math::{gaussian, gaussian_integral, sample_tent, windowed_sinc};
use crate::utils::sampling::PiecewiseConstant2D;
use enum_dispatch::enum_dispatch;
use rand::Rng;
use std::hash::Hash;
pub struct FilterSample {
p: Point2f,
weight: Float,
pub p: Point2f,
pub weight: Float,
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct FilterSampler {
domain: Bounds2f,
distrib: PiecewiseConstant2D,
@ -19,7 +21,7 @@ pub struct FilterSampler {
}
impl FilterSampler {
pub fn new<F>(radius: Vector2f, resolution: Point2i, evaluate_fn: F) -> Self
pub fn new<F>(radius: Vector2f, func: F) -> Self
where
F: Fn(Point2f) -> Float,
{
@ -27,15 +29,18 @@ impl FilterSampler {
Point2f::new(-radius.x(), -radius.y()),
Point2f::new(radius.x(), radius.y()),
);
let array_bounds = Bounds2i::from_points(Point2i::new(0, 0), resolution);
let mut f = Array2D::new(array_bounds);
for j in 0..resolution.y() {
for i in 0..resolution.x() {
let nx = (32.0 * radius.x()) as usize;
let ny = (32.0 * radius.y()) as usize;
let mut f = Array2D::new_with_dims(nx, ny);
for y in 0..f.y_size() {
for x in 0..f.x_size() {
let p = domain.lerp(Point2f::new(
(i as Float + 0.5) / resolution.x() as Float,
(j as Float + 0.5) / resolution.y() as Float,
(x as Float + 0.5) / f.x_size() as Float,
(y as Float + 0.5) / f.y_size() as Float,
));
f[Point2i::new(i, j)] = evaluate_fn(p);
f[(x as i32, y as i32)] = func(p);
}
}
let distrib = PiecewiseConstant2D::new_with_bounds(&f, domain);
@ -53,6 +58,7 @@ impl FilterSampler {
}
}
#[enum_dispatch]
pub trait FilterTrait {
fn radius(&self) -> Vector2f;
fn evaluate(&self, p: Point2f) -> Float;
@ -60,7 +66,8 @@ pub trait FilterTrait {
fn sample(&self, u: Point2f) -> FilterSample;
}
#[derive(Debug)]
#[enum_dispatch(FilterTrait)]
#[derive(Clone, Debug)]
pub enum Filter {
Box(BoxFilter),
Gaussian(GaussianFilter),
@ -69,49 +76,7 @@ pub enum Filter {
Triangle(TriangleFilter),
}
impl FilterTrait for Filter {
fn radius(&self) -> Vector2f {
match self {
Filter::Box(c) => c.radius(),
Filter::Gaussian(c) => c.radius(),
Filter::Mitchell(c) => c.radius(),
Filter::LanczosSinc(c) => c.radius(),
Filter::Triangle(c) => c.radius(),
}
}
fn evaluate(&self, p: Point2f) -> Float {
match self {
Filter::Box(c) => c.evaluate(p),
Filter::Gaussian(c) => c.evaluate(p),
Filter::Mitchell(c) => c.evaluate(p),
Filter::LanczosSinc(c) => c.evaluate(p),
Filter::Triangle(c) => c.evaluate(p),
}
}
fn integral(&self) -> Float {
match self {
Filter::Box(c) => c.integral(),
Filter::Gaussian(c) => c.integral(),
Filter::Mitchell(c) => c.integral(),
Filter::LanczosSinc(c) => c.integral(),
Filter::Triangle(c) => c.integral(),
}
}
fn sample(&self, u: Point2f) -> FilterSample {
match self {
Filter::Box(c) => c.sample(u),
Filter::Gaussian(c) => c.sample(u),
Filter::Mitchell(c) => c.sample(u),
Filter::LanczosSinc(c) => c.sample(u),
Filter::Triangle(c) => c.sample(u),
}
}
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct BoxFilter {
pub radius: Vector2f,
}
@ -148,7 +113,7 @@ impl FilterTrait for BoxFilter {
}
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct GaussianFilter {
pub radius: Vector2f,
pub sigma: Float,
@ -162,14 +127,12 @@ impl GaussianFilter {
let exp_x = gaussian(radius.x(), 0., sigma);
let exp_y = gaussian(radius.y(), 0., sigma);
let sampler = FilterSampler::new(
radius,
Point2i::new((32.0 * radius.x()) as i32, (32.0 * radius.y()) as i32),
|p: Point2f| {
(gaussian(p.x(), 0., sigma) - exp_x).max(0.)
* (gaussian(p.y(), 0., sigma) - exp_y).max(0.)
},
);
let sampler = FilterSampler::new(radius, move |p: Point2f| {
let gx = (gaussian(p.x(), 0., sigma) - exp_x).max(0.0);
let gy = (gaussian(p.y(), 0., sigma) - exp_y).max(0.0);
gx * gy
});
Self {
radius,
sigma,
@ -202,7 +165,7 @@ impl FilterTrait for GaussianFilter {
}
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct MitchellFilter {
pub radius: Vector2f,
pub b: Float,
@ -212,30 +175,12 @@ pub struct MitchellFilter {
impl MitchellFilter {
pub fn new(radius: Vector2f, b: Float, c: Float) -> Self {
let sampler = FilterSampler::new(
radius,
Point2i::new((32.0 * radius.x()) as i32, (32.0 * radius.y()) as i32),
move |p: Point2f| {
let mitchell_1d = |x: Float| {
let x = x.abs();
if x <= 1.0 {
((12.0 - 9.0 * b - 6.0 * c) * x.powi(3)
+ (-18.0 + 12.0 * b + 6.0 * c) * x.powi(2)
+ (6.0 - 2.0 * b))
* (1.0 / 6.0)
} else if x <= 2.0 {
((-b - 6.0 * c) * x.powi(3)
+ (6.0 * b + 30.0 * c) * x.powi(2)
+ (-12.0 * b - 48.0 * c) * x
+ (8.0 * b + 24.0 * c))
* (1.0 / 6.0)
} else {
0.0
}
};
mitchell_1d(2.0 * p.x() / radius.x()) * mitchell_1d(2.0 * p.y() / radius.y())
},
);
let sampler = FilterSampler::new(radius, move |p: Point2f| {
let nx = 2.0 * p.x() / radius.x();
let ny = 2.0 * p.y() / radius.y();
Self::mitchell_1d_eval(b, c, nx) * Self::mitchell_1d_eval(b, c, ny)
});
Self {
radius,
b,
@ -244,23 +189,27 @@ impl MitchellFilter {
}
}
fn mitchell_1d(&self, x: Float) -> Float {
fn mitchell_1d_eval(b: Float, c: Float, x: Float) -> Float {
let x = x.abs();
if x <= 1.0 {
((12.0 - 9.0 * self.b - 6.0 * self.c) * x.powi(3)
+ (-18.0 + 12.0 * self.b + 6.0 * self.c) * x.powi(2)
+ (6.0 - 2.0 * self.b))
((12.0 - 9.0 * b - 6.0 * c) * x.powi(3)
+ (-18.0 + 12.0 * b + 6.0 * c) * x.powi(2)
+ (6.0 - 2.0 * b))
* (1.0 / 6.0)
} else if x <= 2.0 {
((-self.b - 6.0 * self.c) * x.powi(3)
+ (6.0 * self.b + 30.0 * self.c) * x.powi(2)
+ (-12.0 * self.b - 48.0 * self.c) * x
+ (8.0 * self.b + 24.0 * self.c))
((-b - 6.0 * c) * x.powi(3)
+ (6.0 * b + 30.0 * c) * x.powi(2)
+ (-12.0 * b - 48.0 * c) * x
+ (8.0 * b + 24.0 * c))
* (1.0 / 6.0)
} else {
0.0
}
}
fn mitchell_1d(&self, x: Float) -> Float {
Self::mitchell_1d_eval(self.b, self.c, x)
}
}
impl FilterTrait for MitchellFilter {
@ -282,7 +231,7 @@ impl FilterTrait for MitchellFilter {
}
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct LanczosSincFilter {
pub radius: Vector2f,
pub tau: Float,
@ -291,13 +240,10 @@ pub struct LanczosSincFilter {
impl LanczosSincFilter {
pub fn new(radius: Vector2f, tau: Float) -> Self {
let sampler = FilterSampler::new(
radius,
Point2i::new((32.0 * radius.x()) as i32, (32.0 * radius.y()) as i32),
move |p: Point2f| {
windowed_sinc(p.x(), radius.x(), tau) * windowed_sinc(p.y(), radius.y(), tau)
},
);
let sampler = FilterSampler::new(radius, move |p: Point2f| {
windowed_sinc(p.x(), radius.x(), tau) * windowed_sinc(p.y(), radius.y(), tau)
});
Self {
radius,
tau,
@ -344,7 +290,7 @@ impl FilterTrait for LanczosSincFilter {
}
}
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct TriangleFilter {
pub radius: Vector2f,
}

344
src/core/geometry/bounds.rs Normal file
View file

@ -0,0 +1,344 @@
use super::{Float, NumFloat};
use super::{Point, Point2f, Point3, Point3f, Vector, Vector2, Vector2f, Vector3, Vector3f};
use crate::core::geometry::traits::{Sqrt, VectorLike};
use crate::core::geometry::{max, min};
use crate::utils::interval::Interval;
use crate::utils::math::lerp;
use num_traits::{Bounded, Num};
use std::mem;
use std::ops::{Add, Div, DivAssign, Mul, Sub};
// AABB BOUNDING BOXES
/// Axis-aligned bounding box in N dimensions, stored as its minimum and
/// maximum corners. An "empty" box is represented by p_min > p_max on some
/// axis (see `Default` and `is_empty`).
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Bounds<T, const N: usize> {
    pub p_min: Point<T, N>,
    pub p_max: Point<T, N>,
}
/// Iterating a bounds reference yields its two corners, `p_min` then `p_max`.
impl<'a, T, const N: usize> IntoIterator for &'a Bounds<T, N> {
    type Item = &'a Point<T, N>;
    type IntoIter = std::array::IntoIter<&'a Point<T, N>, 2>;
    fn into_iter(self) -> Self::IntoIter {
        [&self.p_min, &self.p_max].into_iter()
    }
}
impl<T, const N: usize> Bounds<T, N>
where
    T: Num + PartialOrd + Copy,
{
    /// Bounds containing the single point `p` (zero-extent box).
    pub fn from_point(p: Point<T, N>) -> Self {
        Self { p_min: p, p_max: p }
    }

    /// Bounds spanning two points; each axis is ordered independently, so
    /// the arguments need not be the min/max corners themselves.
    pub fn from_points(p1: Point<T, N>, p2: Point<T, N>) -> Self {
        let mut p_min_arr = [T::zero(); N];
        let mut p_max_arr = [T::zero(); N];
        for i in 0..N {
            if p1[i] < p2[i] {
                p_min_arr[i] = p1[i];
                p_max_arr[i] = p2[i];
            } else {
                p_min_arr[i] = p2[i];
                p_max_arr[i] = p1[i];
            }
        }
        Self {
            p_min: Point(p_min_arr),
            p_max: Point(p_max_arr),
        }
    }

    /// Smallest bounds containing both `self` and the point `p`.
    pub fn union_point(self, p: Point<T, N>) -> Self {
        let mut p_min = self.p_min;
        let mut p_max = self.p_max;
        for i in 0..N {
            p_min[i] = min(p_min[i], p[i]);
            p_max[i] = max(p_max[i], p[i]);
        }
        Self { p_min, p_max }
    }

    /// Smallest bounds containing both boxes.
    pub fn union(self, b2: Self) -> Self {
        let mut p_min = self.p_min;
        let mut p_max = self.p_max;
        for i in 0..N {
            p_min[i] = min(p_min[i], b2.p_min[i]);
            p_max[i] = max(p_max[i], b2.p_max[i]);
        }
        Self { p_min, p_max }
    }

    /// Vector from the minimum corner to the maximum corner.
    pub fn diagonal(&self) -> Vector<T, N> {
        self.p_max - self.p_min
    }

    /// Center point of the box.
    pub fn centroid(&self) -> Point<T, N> {
        let two = T::one() + T::one();
        self.p_min + (self.diagonal() / two)
    }

    /// Product of the extents over all N axes.
    pub fn volume(&self) -> T {
        let d = self.diagonal();
        d.0.iter().fold(T::one(), |acc, &val| acc * val)
    }

    /// Box grown by `delta` on every side (shrinks if `delta` is negative).
    pub fn expand(&self, delta: T) -> Self {
        let mut p_min = self.p_min;
        let mut p_max = self.p_max;
        p_min = p_min - Vector::fill(delta);
        p_max = p_max + Vector::fill(delta);
        Self { p_min, p_max }
    }

    /// Component-wise interpolation between the corners: `t = 0` gives
    /// `p_min`, `t = 1` gives `p_max`.
    pub fn lerp(&self, t: Point<T, N>) -> Point<T, N> {
        let mut results_arr = [T::zero(); N];
        for i in 0..N {
            results_arr[i] = lerp(t[i], self.p_min[i], self.p_max[i])
        }
        Point(results_arr)
    }

    /// Index of the axis with the largest extent (e.g. for BVH split axes).
    pub fn max_dimension(&self) -> usize
    where
        Point<T, N>: Sub<Output = Vector<T, N>>,
    {
        let d = self.diagonal();
        let mut max_dim = 0;
        let mut max_span = d[0];
        for i in 1..N {
            if d[i] > max_span {
                max_span = d[i];
                max_dim = i;
            }
        }
        max_dim
    }

    /// Position of `p` relative to the box, each coordinate normalized to
    /// [0, 1] for points inside; axes with no extent are left unnormalized.
    pub fn offset(&self, p: &Point<T, N>) -> Vector<T, N>
    where
        Point<T, N>: Sub<Output = Vector<T, N>>,
        Vector<T, N>: DivAssign<T>,
    {
        let mut o = *p - self.p_min;
        let d = self.diagonal();
        for i in 0..N {
            if d[i] > T::zero() {
                o[i] = o[i] / d[i];
            }
        }
        o
    }

    /// The `corner_index`-th of the 2^N corners; bit `i` of the index picks
    /// `p_max` (bit set) or `p_min` (bit clear) on axis `i`.
    pub fn corner(&self, corner_index: usize) -> Point<T, N> {
        Point(std::array::from_fn(|i| {
            if (corner_index >> i) & 1 == 1 {
                self.p_max[i]
            } else {
                self.p_min[i]
            }
        }))
    }

    /// True when the boxes share any region; touching faces count as overlap.
    pub fn overlaps(&self, rhs: &Self) -> bool {
        for i in 0..N {
            if self.p_max[i] < rhs.p_min[i] || self.p_min[i] > rhs.p_max[i] {
                return false;
            }
        }
        true
    }

    /// True when `p` lies inside the box, all boundaries inclusive.
    pub fn contains(&self, p: Point<T, N>) -> bool {
        (0..N).all(|i| p[i] >= self.p_min[i] && p[i] <= self.p_max[i])
    }

    /// Like `contains` but with exclusive upper boundaries (useful for
    /// integer pixel bounds).
    pub fn contains_exclusive(&self, p: Point<T, N>) -> bool {
        (0..N).all(|i| p[i] >= self.p_min[i] && p[i] < self.p_max[i])
    }

    /// True when some axis has zero or negative extent.
    pub fn is_empty(&self) -> bool {
        (0..N).any(|i| self.p_min[i] >= self.p_max[i])
    }

    /// True when min strictly exceeds max on some axis (an inverted box,
    /// such as the `Default` value).
    pub fn is_degenerate(&self) -> bool {
        (0..N).any(|i| self.p_min[i] > self.p_max[i])
    }
}
/// The default bounds are deliberately inverted (min at `T::max_value`,
/// max at `T::min_value`): an empty box that acts as the identity for
/// `union`, since any real point or box replaces both corners.
impl<T, const N: usize> Default for Bounds<T, N>
where
    T: Bounded + Copy,
{
    fn default() -> Self {
        Self {
            p_min: Point([T::max_value(); N]),
            p_max: Point([T::min_value(); N]),
        }
    }
}
pub type Bounds2<T> = Bounds<T, 2>;
pub type Bounds2f = Bounds2<Float>;
pub type Bounds2i = Bounds2<i32>;
pub type Bounds2fi = Bounds2<Interval>;
pub type Bounds3<T> = Bounds<T, 3>;
pub type Bounds3i = Bounds3<i32>;
pub type Bounds3f = Bounds3<Float>;
pub type Bounds3fi = Bounds3<Interval>;
impl<T> Bounds3<T>
where
    T: Num + PartialOrd + Copy + Default,
{
    /// Total surface area of the box: twice the sum of the three distinct
    /// face areas.
    pub fn surface_area(&self) -> T {
        let extent = self.diagonal();
        let half_area =
            extent.x() * extent.y() + extent.x() * extent.z() + extent.y() * extent.z();
        (T::one() + T::one()) * half_area
    }
}
impl<T> Bounds3<T>
where
    T: NumFloat + PartialOrd + Copy + Default + Sqrt,
{
    /// Center and radius of a sphere enclosing the box. For a degenerate
    /// (inverted/empty) box the computed center falls outside the box and a
    /// zero radius is reported.
    pub fn bounding_sphere(&self) -> (Point3<T>, T) {
        let two = T::one() + T::one();
        let center = self.p_min + self.diagonal() / two;
        let radius = if self.contains(center) {
            center.distance(self.p_max)
        } else {
            T::zero()
        };
        (center, radius)
    }

    /// Ray/box slab test: returns the parametric overlap `(t0, t1)` of the
    /// ray `o + t*d` with the box, clipped to `[0, t_max]`, or `None` on a
    /// miss. Relies on IEEE semantics: a zero direction component produces
    /// infinite slab distances that compare correctly.
    pub fn intersect(&self, o: Point3<T>, d: Vector3<T>, t_max: T) -> Option<(T, T)> {
        let mut t0 = T::zero();
        let mut t1 = t_max;
        for i in 0..3 {
            let inv_ray_dir = T::one() / d[i];
            let mut t_near = (self.p_min[i] - o[i]) * inv_ray_dir;
            let mut t_far = (self.p_max[i] - o[i]) * inv_ray_dir;
            // Order the slab distances so t_near <= t_far.
            if t_near > t_far {
                mem::swap(&mut t_near, &mut t_far);
            }
            t0 = if t_near > t0 { t_near } else { t0 };
            t1 = if t_far < t1 { t_far } else { t1 };
            if t0 > t1 {
                return None;
            }
        }
        Some((t0, t1))
    }

    /// Backward-compatible misspelled alias of [`Bounds3::intersect`], kept
    /// so existing callers continue to compile; prefer `intersect`.
    pub fn insersect(&self, o: Point3<T>, d: Vector3<T>, t_max: T) -> Option<(T, T)> {
        self.intersect(o, d, t_max)
    }
}
impl<T> Bounds2<T>
where
    T: Num + Copy + Default,
{
    /// Area of the 2-D box: width times height of its diagonal.
    pub fn area(&self) -> T {
        let extent: Vector2<T> = self.p_max - self.p_min;
        extent.x() * extent.y()
    }
}
impl Bounds3f {
    /// Optimized slab test used by BVH traversal: the caller precomputes the
    /// reciprocal direction and per-axis sign indices so the per-node work is
    /// branch-light. Returns the `(t_min, t_max)` interval when the ray
    /// segment `(0, ray_t_max)` overlaps the box, `None` otherwise.
    #[inline(always)]
    pub fn intersect_p(
        &self,
        o: Point3f,
        ray_t_max: Float,
        inv_dir: Vector3f,
        dir_is_neg: &[usize; 3],
    ) -> Option<(Float, Float)> {
        // dir_is_neg[i] picks the near slab plane for axis i.
        let bounds = [&self.p_min, &self.p_max];
        // Check X
        let mut t_min = (bounds[dir_is_neg[0]].x() - o.x()) * inv_dir.x();
        let mut t_max = (bounds[1 - dir_is_neg[0]].x() - o.x()) * inv_dir.x();
        // Check Y
        let ty_min = (bounds[dir_is_neg[1]].y() - o.y()) * inv_dir.y();
        let ty_max = (bounds[1 - dir_is_neg[1]].y() - o.y()) * inv_dir.y();
        if t_min > ty_max || ty_min > t_max {
            return None;
        }
        if ty_min > t_min {
            t_min = ty_min;
        }
        if ty_max < t_max {
            t_max = ty_max;
        }
        // Check Z
        let tz_min = (bounds[dir_is_neg[2]].z() - o.z()) * inv_dir.z();
        let tz_max = (bounds[1 - dir_is_neg[2]].z() - o.z()) * inv_dir.z();
        if t_min > tz_max || tz_min > t_max {
            return None;
        }
        if tz_min > t_min {
            t_min = tz_min;
        }
        if tz_max < t_max {
            t_max = tz_max;
        }
        if (t_min < ray_t_max) && (t_max > 0.0) {
            Some((t_min, t_max))
        } else {
            None
        }
    }
    /// Convenience wrapper that derives `inv_dir` / `dir_is_neg` from `d`
    /// and reports only whether the ray hits the box.
    pub fn intersect_with_inverse(&self, o: Point3f, d: Vector3f, ray_t_max: Float) -> bool {
        let inv_dir = Vector3::new(1.0 / d.x(), 1.0 / d.y(), 1.0 / d.z());
        let dir_is_neg: [usize; 3] = [
            (d.x() < 0.0) as usize,
            (d.y() < 0.0) as usize,
            (d.z() < 0.0) as usize,
        ];
        // Delegate to intersect_p rather than duplicating the slab test.
        self.intersect_p(o, ray_t_max, inv_dir, &dir_is_neg).is_some()
    }
}

116
src/core/geometry/cone.rs Normal file
View file

@ -0,0 +1,116 @@
use super::{Bounds3f, Float, PI, Point3f, Vector3f, VectorLike};
use crate::utils::math::{degrees, safe_acos, safe_asin, safe_sqrt, square};
use crate::utils::transform::Transform;
/// A cone of directions: an axis `w` plus the cosine of the cone's spread
/// angle. Used to conservatively bound sets of directions.
#[derive(Debug, Clone)]
pub struct DirectionCone {
    // Central axis; the constructors normalize it.
    pub w: Vector3f,
    // Cosine of the spread angle; +infinity is the "empty cone" sentinel.
    pub cos_theta: Float,
}
impl Default for DirectionCone {
    /// The empty cone: `cos_theta == +infinity` is the sentinel that
    /// `is_empty` checks for.
    fn default() -> Self {
        Self {
            w: Vector3f::zero(),
            cos_theta: Float::INFINITY,
        }
    }
}
impl DirectionCone {
    /// Builds a cone about `w` whose spread angle has cosine `cos_theta`.
    pub fn new(w: Vector3f, cos_theta: Float) -> Self {
        Self {
            w: w.normalize(),
            cos_theta,
        }
    }
    /// A degenerate cone containing only the single direction `w`.
    pub fn new_from_vector(w: Vector3f) -> Self {
        Self::new(w, 1.0)
    }
    /// True for the sentinel "no directions" cone (see `Default`).
    pub fn is_empty(&self) -> bool {
        self.cos_theta == Float::INFINITY
    }
    /// The cone that contains every direction on the sphere.
    pub fn entire_sphere() -> Self {
        Self::new(Vector3f::new(0., 0., 1.), -1.)
    }
    /// Returns the direction in the cone closest to `wt`: `wt` itself when it
    /// already lies inside, otherwise the nearest direction on the boundary.
    pub fn closest_vector_income(&self, wt: Vector3f) -> Vector3f {
        let wp = wt.normalize();
        let w = self.w;
        if wp.dot(w) > self.cos_theta {
            return wp;
        }
        // Rotate w toward wp by the spread angle; the negated sine pairs with
        // the negated perpendicular component below.
        let sin_theta = -safe_sqrt(1. - self.cos_theta * self.cos_theta);
        let a = wp.cross(w);
        // Fixed: each component is w_i * (dot over the other two coordinates)
        // minus wp_i * (sum of squares of the other two w coordinates). The
        // original nested `square` incorrectly and also pulled the second
        // term inside the multiplication by w_i.
        self.cos_theta * w
            + sin_theta / a.norm()
                * Vector3f::new(
                    w.x() * (wp.y() * w.y() + wp.z() * w.z())
                        - wp.x() * (square(w.y()) + square(w.z())),
                    w.y() * (wp.x() * w.x() + wp.z() * w.z())
                        - wp.y() * (square(w.x()) + square(w.z())),
                    w.z() * (wp.x() * w.x() + wp.y() * w.y())
                        - wp.z() * (square(w.x()) + square(w.y())),
                )
    }
    /// Reports whether direction `w` lies inside cone `d`.
    pub fn inside(d: &DirectionCone, w: Vector3f) -> bool {
        !d.is_empty() && d.w.dot(w.normalize()) > d.cos_theta
    }
    /// The cone of directions from `p` toward any point of bounds `b`.
    pub fn bound_subtended_directions(b: &Bounds3f, p: Point3f) -> DirectionCone {
        let (p_center, radius) = b.bounding_sphere();
        // Inside the bounding sphere, any direction may reach the bounds.
        if p.distance_squared(p_center) < square(radius) {
            return DirectionCone::entire_sphere();
        }
        let w = (p_center - p).normalize();
        let sin2_theta_max = square(radius) / p_center.distance_squared(p);
        let cos_theta_max = safe_sqrt(1. - sin2_theta_max);
        DirectionCone::new(w, cos_theta_max)
    }
    /// Smallest cone containing both `a` and `b`.
    pub fn union(a: &DirectionCone, b: &DirectionCone) -> DirectionCone {
        if a.is_empty() {
            return b.clone();
        }
        if b.is_empty() {
            return a.clone();
        }
        // Handle the cases where one cone is inside the other
        let theta_a = safe_acos(a.cos_theta);
        let theta_b = safe_acos(b.cos_theta);
        let theta_d = a.w.angle_between(b.w);
        // Fixed: b fits inside a when theta_d + theta_b <= theta_a (the
        // original compared against the wrong spread angle in both tests).
        if (theta_d + theta_b).min(PI) <= theta_a {
            return a.clone();
        }
        if (theta_d + theta_a).min(PI) <= theta_b {
            return b.clone();
        }
        // Compute the spread angle of the merged cone, $\theta_o$
        let theta_o = (theta_a + theta_d + theta_b) / 2.;
        if theta_o >= PI {
            return DirectionCone::entire_sphere();
        }
        // Find the merged cone's axis and return cone union
        let theta_r = theta_o - theta_a;
        let wr = a.w.cross(b.w);
        // Fixed: the axes are parallel exactly when the cross product is
        // zero; the original `>= 0.` was always true and made every
        // nontrivial union collapse to the entire sphere.
        if wr.norm_squared() == 0. {
            return DirectionCone::entire_sphere();
        }
        let w = Transform::rotate_around_axis(degrees(theta_r), wr).apply_to_vector(a.w);
        DirectionCone::new(w, theta_o.cos())
    }
}

122
src/core/geometry/mod.rs Normal file
View file

@ -0,0 +1,122 @@
pub mod bounds;
pub mod cone;
pub mod primitives;
pub mod ray;
pub mod traits;
pub use self::bounds::{Bounds, Bounds2f, Bounds2fi, Bounds2i, Bounds3f, Bounds3fi, Bounds3i};
pub use self::cone::DirectionCone;
pub use self::primitives::{
Frame, Normal, Normal3f, Point, Point2f, Point2fi, Point2i, Point3, Point3f, Point3fi, Point3i,
Vector, Vector2, Vector2f, Vector2fi, Vector2i, Vector3, Vector3f, Vector3fi, Vector3i,
};
pub use self::ray::{Ray, RayDifferential};
pub use self::traits::{Lerp, Sqrt, Tuple, VectorLike};
use crate::core::pbrt::{Float, PI, clamp_t};
use crate::utils::math::square;
use num_traits::Float as NumFloat;
/// Returns the smaller of `a` and `b` (returns `b` when incomparable,
/// matching the original comparison direction).
#[inline]
pub fn min<T: PartialOrd>(a: T, b: T) -> T {
    match a < b {
        true => a,
        false => b,
    }
}
/// Returns the larger of `a` and `b` (returns `b` when incomparable).
#[inline]
pub fn max<T: PartialOrd>(a: T, b: T) -> T {
    match a > b {
        true => a,
        false => b,
    }
}
// Shading-space spherical helpers: directions are expressed in a frame where
// the surface normal is +z, so cos(theta) is simply the z component.
#[inline]
pub fn cos_theta(w: Vector3f) -> Float {
    w.z()
}
/// |cos(theta)| for shading-space direction `w`.
#[inline]
pub fn abs_cos_theta(w: Vector3f) -> Float {
    w.z().abs()
}
/// cos^2(theta) for shading-space direction `w`.
#[inline]
pub fn cos2_theta(w: Vector3f) -> Float {
    square(w.z())
}
#[inline]
pub fn sin2_theta(w: Vector3f) -> Float {
0_f32.max(1. - cos2_theta(w))
}
/// sin(theta) for shading-space direction `w`.
#[inline]
pub fn sin_theta(w: Vector3f) -> Float {
    sin2_theta(w).sqrt()
}
/// tan(theta); note this is +/-infinity when w is in the tangent plane
/// is approached (cos_theta -> 0).
#[inline]
pub fn tan_theta(w: Vector3f) -> Float {
    sin_theta(w) / cos_theta(w)
}
/// tan^2(theta), computed from the squared terms directly.
#[inline]
pub fn tan2_theta(w: Vector3f) -> Float {
    sin2_theta(w) / cos2_theta(w)
}
/// cos(phi) of the azimuthal angle in shading space; returns 1 for
/// directions along +/-z where phi is undefined.
#[inline]
pub fn cos_phi(w: Vector3f) -> Float {
    let sin_theta = sin_theta(w);
    if sin_theta == 0. {
        1.
    } else {
        // Clamp against rounding error pushing the ratio outside [-1, 1].
        clamp_t(w.x() / sin_theta, -1., 1.)
    }
}
/// sin(phi) of the azimuthal angle in shading space; returns 0 for
/// directions along +/-z where phi is undefined.
#[inline]
pub fn sin_phi(w: Vector3f) -> Float {
    let sin_theta = sin_theta(w);
    if sin_theta == 0. {
        0.
    } else {
        clamp_t(w.y() / sin_theta, -1., 1.)
    }
}
/// True when the two shading-space directions lie on the same side of the
/// surface (their z components have the same sign).
pub fn same_hemisphere(w: Vector3f, wp: Vector3f) -> bool {
    w.z() * wp.z() > 0.
}
/// Converts spherical coordinates (given sin/cos of theta and phi) to a
/// direction vector in the standard z-up convention.
pub fn spherical_direction(sin_theta: Float, cos_theta: Float, phi: Float) -> Vector3f {
    Vector3f::new(sin_theta * phi.cos(), sin_theta * phi.sin(), cos_theta)
}
/// Solid angle of the spherical triangle with unit-vector vertices a, b, c
/// (Van Oosterom-Strackee / atan2 formulation).
pub fn spherical_triangle_area(a: Vector3f, b: Vector3f, c: Vector3f) -> Float {
    (2.0 * (a.dot(b.cross(c))).atan2(1.0 + a.dot(b) + a.dot(c) + b.dot(c))).abs()
}
/// Solid angle of the spherical quadrilateral with unit-vector vertices
/// a, b, c, d, via the spherical excess of its four interior angles.
pub fn spherical_quad_area(a: Vector3f, b: Vector3f, c: Vector3f, d: Vector3f) -> Float {
    let mut axb = a.cross(b);
    let mut bxc = b.cross(c);
    let mut cxd = c.cross(d);
    let mut dxa = d.cross(a);
    // Degenerate edge (two coincident/antipodal vertices): no area.
    if axb.norm_squared() == 0.
        || bxc.norm_squared() == 0.
        || cxd.norm_squared() == 0.
        || dxa.norm_squared() == 0.
    {
        return 0.;
    }
    axb = axb.normalize();
    bxc = bxc.normalize();
    cxd = cxd.normalize();
    dxa = dxa.normalize();
    // Interior angles between consecutive great-circle planes.
    let alpha = dxa.angle_between(-axb);
    let beta = axb.angle_between(-bxc);
    let gamma = bxc.angle_between(-cxd);
    let delta = cxd.angle_between(-dxa);
    // Spherical excess: angle sum minus 2*pi for a quadrilateral.
    (alpha + beta + gamma + delta - 2. * PI).abs()
}
/// Polar angle of a (nominally unit) direction; the clamp guards acos
/// against rounding error pushing |z| above 1.
pub fn spherical_theta(v: Vector3f) -> Float {
    clamp_t(v.z(), -1.0, 1.0).acos()
}
/// Azimuthal angle of a direction, remapped from atan2's (-pi, pi] range
/// into [0, 2*pi).
pub fn spherical_phi(v: Vector3f) -> Float {
    let p = v.y().atan2(v.x());
    if p < 0.0 { p + 2.0 * PI } else { p }
}

View file

@ -0,0 +1,880 @@
use super::traits::{Sqrt, Tuple, VectorLike};
use super::{Float, NumFloat, PI, clamp_t};
use crate::utils::interval::Interval;
use crate::utils::math::{difference_of_products, quadratic, safe_asin};
use num_traits::{AsPrimitive, FloatConst, Num, Signed, Zero};
use std::hash::{Hash, Hasher};
use std::iter::Sum;
use std::ops::{
Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign,
};
// N-dimensional displacement
// N-dimensional displacement between locations; closed under addition and
// scalar scaling.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct Vector<T, const N: usize>(pub [T; N]);
// N-dimensional location; subtracting two Points yields a Vector.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct Point<T, const N: usize>(pub [T; N]);
// N-dimensional surface normal, kept as a distinct type from Vector so the
// type system can enforce normal-specific handling (e.g. in transforms).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct Normal<T, const N: usize>(pub [T; N]);
/// Implements the shared tuple plumbing for a `[T; N]` wrapper type:
/// the `Tuple` trait, `Default`, `zero`, f32 helpers (`floor`, `average`),
/// component-wise `min`/`max`, indexing, negation, `fill`, and `cast`.
#[macro_export]
macro_rules! impl_tuple_core {
    ($Struct:ident) => {
        impl<T: Copy, const N: usize> Tuple<T, N> for $Struct<T, N> {
            #[inline]
            fn data(&self) -> &[T; N] {
                &self.0
            }
            #[inline]
            fn data_mut(&mut self) -> &mut [T; N] {
                &mut self.0
            }
            #[inline]
            fn from_array(arr: [T; N]) -> Self {
                Self(arr)
            }
        }
        impl<T: Default + Copy, const N: usize> Default for $Struct<T, N> {
            fn default() -> Self {
                Self([T::default(); N])
            }
        }
        impl<T, const N: usize> $Struct<T, N>
        where
            T: Zero + Copy,
        {
            #[inline]
            pub fn zero() -> Self {
                Self([T::zero(); N])
            }
        }
        // f32-only conveniences. NOTE(review): these are tied to f32 rather
        // than the Float alias — confirm intent with the `use_f64` feature.
        impl<const N: usize> $Struct<f32, N> {
            #[inline]
            pub fn floor(&self) -> $Struct<i32, N> {
                $Struct(self.0.map(|v| v.floor() as i32))
            }
            #[inline]
            pub fn average(&self) -> f32 {
                let sum: f32 = self.0.iter().sum();
                sum / (N as f32)
            }
        }
        impl<T, const N: usize> $Struct<T, N>
        where
            T: Copy + PartialOrd,
        {
            // Component-wise minimum of the two tuples.
            #[inline]
            pub fn min(&self, other: Self) -> Self {
                let mut out = self.0;
                for i in 0..N {
                    if other.0[i] < out[i] {
                        out[i] = other.0[i];
                    }
                }
                Self(out)
            }
            // Component-wise maximum of the two tuples.
            #[inline]
            pub fn max(&self, other: Self) -> Self {
                let mut out = self.0;
                for i in 0..N {
                    if other.0[i] > out[i] {
                        out[i] = other.0[i]
                    }
                }
                Self(out)
            }
            // Largest single component (shadows the Tuple trait default).
            #[inline]
            pub fn max_component_value(&self) -> T {
                let mut m = self.0[0];
                for i in 1..N {
                    if self.0[i] > m {
                        m = self.0[i];
                    }
                }
                m
            }
        }
        impl<T, const N: usize> $Struct<T, N>
        where
            T: Copy,
        {
            // Tuple with every component set to `value`.
            #[inline]
            pub fn fill(value: T) -> Self {
                Self([value; N])
            }
            // Lossy component-wise conversion via `as` semantics.
            #[inline]
            pub fn cast<U>(&self) -> $Struct<U, N>
            where
                U: 'static + Copy,
                T: 'static + Copy + AsPrimitive<U>,
            {
                $Struct(self.0.map(|c| c.as_()))
            }
        }
        impl<T, const N: usize> Index<usize> for $Struct<T, N> {
            type Output = T;
            #[inline]
            fn index(&self, index: usize) -> &Self::Output {
                &self.0[index]
            }
        }
        impl<T, const N: usize> IndexMut<usize> for $Struct<T, N> {
            #[inline]
            fn index_mut(&mut self, index: usize) -> &mut Self::Output {
                &mut self.0[index]
            }
        }
        impl<T, const N: usize> Neg for $Struct<T, N>
        where
            T: Neg<Output = T> + Copy,
        {
            type Output = Self;
            fn neg(self) -> Self::Output {
                Self(self.0.map(|c| -c))
            }
        }
    };
}
/// Implements scalar multiply/divide (and their assign forms) for a tuple
/// type, plus `Float * tuple` on the left-hand side.
#[macro_export]
macro_rules! impl_scalar_ops {
    ($Struct:ident) => {
        impl<T, const N: usize> Mul<T> for $Struct<T, N>
        where
            T: Mul<Output = T> + Copy,
        {
            type Output = Self;
            fn mul(self, rhs: T) -> Self::Output {
                let mut result = self.0;
                for i in 0..N {
                    result[i] = result[i] * rhs;
                }
                Self(result)
            }
        }
        // Left-scalar form: `Float * v` delegates to `v * Float`.
        impl<const N: usize> Mul<$Struct<Float, N>> for Float {
            type Output = $Struct<Float, N>;
            fn mul(self, rhs: $Struct<Float, N>) -> Self::Output {
                rhs * self
            }
        }
        impl<T, const N: usize> MulAssign<T> for $Struct<T, N>
        where
            T: MulAssign + Copy,
        {
            fn mul_assign(&mut self, rhs: T) {
                for i in 0..N {
                    self.0[i] *= rhs;
                }
            }
        }
        impl<T, const N: usize> Div<T> for $Struct<T, N>
        where
            T: Div<Output = T> + Copy,
        {
            type Output = Self;
            fn div(self, rhs: T) -> Self::Output {
                let mut result = self.0;
                for i in 0..N {
                    result[i] = result[i] / rhs;
                }
                Self(result)
            }
        }
        impl<T, const N: usize> DivAssign<T> for $Struct<T, N>
        where
            T: DivAssign + Copy,
        {
            fn div_assign(&mut self, rhs: T) {
                for i in 0..N {
                    self.0[i] /= rhs;
                }
            }
        }
    };
}
/// Implements a component-wise binary operator between two tuple types,
/// parameterized over the result type (e.g. Point - Point = Vector).
#[macro_export]
macro_rules! impl_op {
    ($Op:ident, $op:ident, $Lhs:ident, $Rhs:ident, $Output:ident) => {
        impl<T, const N: usize> $Op<$Rhs<T, N>> for $Lhs<T, N>
        where
            T: $Op<Output = T> + Copy,
        {
            type Output = $Output<T, N>;
            fn $op(self, rhs: $Rhs<T, N>) -> Self::Output {
                let mut result = self.0;
                for i in 0..N {
                    result[i] = $Op::$op(self.0[i], rhs.0[i]);
                }
                $Output(result)
            }
        }
    };
}
/// Implements the in-place (`+=`-style) counterpart of `impl_op!`.
#[macro_export]
macro_rules! impl_op_assign {
    ($OpAssign:ident, $op_assign:ident, $Lhs:ident, $Rhs:ident) => {
        impl<T, const N: usize> $OpAssign<$Rhs<T, N>> for $Lhs<T, N>
        where
            T: $OpAssign + Copy,
        {
            fn $op_assign(&mut self, rhs: $Rhs<T, N>) {
                for i in 0..N {
                    $OpAssign::$op_assign(&mut self.0[i], rhs.0[i]);
                }
            }
        }
    };
}
/// Implements the `VectorLike` trait for a tuple type by providing the dot
/// product; norms, normalization, etc. come from the trait's defaults.
#[macro_export]
macro_rules! impl_float_vector_ops {
    ($Struct:ident) => {
        impl<T, const N: usize> VectorLike for $Struct<T, N>
        where
            T: Copy
                + Zero
                + Add<Output = T>
                + Mul<Output = T>
                + Sub<Output = T>
                + Div<Output = T>
                + Sqrt,
        {
            type Scalar = T;
            fn dot(self, rhs: Self) -> T {
                let mut sum = T::zero();
                for i in 0..N {
                    sum = sum + self[i] * rhs[i];
                }
                sum
            }
        }
    };
}
// Component-wise absolute value for signed scalar types.
macro_rules! impl_abs {
    ($Struct:ident) => {
        impl<T, const N: usize> $Struct<T, N>
        where
            T: Signed + Copy,
        {
            pub fn abs(self) -> Self {
                let mut result = self.0;
                for i in 0..N {
                    result[i] = result[i].abs();
                }
                Self(result)
            }
        }
    };
}
// Named x/y(/z) accessors for the 2- and 3-dimensional specializations.
macro_rules! impl_accessors {
    ($Struct:ident) => {
        impl<T: Copy> $Struct<T, 2> {
            pub fn x(&self) -> T {
                self.0[0]
            }
            pub fn y(&self) -> T {
                self.0[1]
            }
        }
        impl<T: Copy> $Struct<T, 3> {
            pub fn x(&self) -> T {
                self.0[0]
            }
            pub fn y(&self) -> T {
                self.0[1]
            }
            pub fn z(&self) -> T {
                self.0[2]
            }
        }
    };
}
// Instantiate the shared tuple machinery for all three wrapper types.
impl_tuple_core!(Vector);
impl_tuple_core!(Point);
impl_tuple_core!(Normal);
// Scalar ops: Points deliberately get no scalar multiply/divide.
impl_scalar_ops!(Vector);
impl_scalar_ops!(Normal);
// Addition
impl_op!(Add, add, Vector, Vector, Vector);
impl_op!(Add, add, Point, Vector, Point);
impl_op!(Add, add, Vector, Point, Point);
impl_op!(Add, add, Normal, Normal, Normal);
// Subtraction (Point - Point yields a displacement Vector)
impl_op!(Sub, sub, Vector, Vector, Vector);
impl_op!(Sub, sub, Point, Vector, Point);
impl_op!(Sub, sub, Point, Point, Vector);
impl_op!(Sub, sub, Normal, Normal, Normal);
// AddAssign
impl_op_assign!(AddAssign, add_assign, Vector, Vector);
impl_op_assign!(AddAssign, add_assign, Point, Vector);
impl_op_assign!(AddAssign, add_assign, Normal, Normal);
// SubAssign
impl_op_assign!(SubAssign, sub_assign, Vector, Vector);
impl_op_assign!(SubAssign, sub_assign, Point, Vector);
impl_op_assign!(SubAssign, sub_assign, Normal, Normal);
// Dot product / VectorLike defaults for the direction-like types.
impl_float_vector_ops!(Vector);
impl_float_vector_ops!(Normal);
impl_abs!(Vector);
impl_abs!(Normal);
impl_abs!(Point);
impl_accessors!(Vector);
impl_accessors!(Point);
impl_accessors!(Normal);
// Zero-cost reinterpretations between the tuple wrappers: each conversion
// just moves the underlying [T; N] into the other newtype.
impl<T: Copy, const N: usize> From<Vector<T, N>> for Normal<T, N> {
    fn from(v: Vector<T, N>) -> Self {
        Self(v.0)
    }
}
impl<T: Copy, const N: usize> From<Normal<T, N>> for Vector<T, N> {
    fn from(n: Normal<T, N>) -> Self {
        Self(n.0)
    }
}
impl<T: Copy, const N: usize> From<Vector<T, N>> for Point<T, N> {
    fn from(v: Vector<T, N>) -> Self {
        Self(v.0)
    }
}
impl<T: Copy, const N: usize> From<Point<T, N>> for Vector<T, N> {
    fn from(n: Point<T, N>) -> Self {
        Self(n.0)
    }
}
impl<T, const N: usize> Point<T, N>
where
    T: NumFloat + Sqrt,
{
    /// Euclidean distance between the two points.
    pub fn distance(self, other: Self) -> T {
        let separation = self - other;
        separation.norm()
    }
    /// Squared Euclidean distance; avoids the square root when only
    /// comparisons are needed.
    pub fn distance_squared(self, other: Self) -> T {
        let separation = self - other;
        separation.norm_squared()
    }
}
impl Point2f {
    /// Inverts bilinear interpolation: given `p` inside the quad with
    /// vertices `vert` (ordered so that a/b and d/c form the two edges, as
    /// below), returns the (u, v) parameters that interpolate to `p`.
    pub fn invert_bilinear(p: Point2f, vert: &[Point2f]) -> Point2f {
        // Note the vertex reordering: vert[3]/vert[2] are swapped into c/d.
        let a = vert[0];
        let b = vert[1];
        let c = vert[3];
        let d = vert[2];
        let e = b - a;
        let f = d - a;
        let g = (a - b) + (c - d);
        let h = p - a;
        // 2-D cross product, computed with an error-reduced FMA formulation.
        let cross2d = |a: Vector2f, b: Vector2f| difference_of_products(a.x(), b.y(), a.y(), b.x());
        // Coefficients of the quadratic k2*v^2 + k1*v + k0 = 0 in v.
        let k2 = cross2d(g, f);
        let k1 = cross2d(e, f) + cross2d(h, g);
        let k0 = cross2d(h, e);
        // if edges are parallel, this is a linear equation
        if k2.abs() < 0.001 {
            // Pick whichever axis avoids a near-zero denominator for u.
            if (e.x() * k1 - g.x() * k0).abs() < 1e-5 {
                return Point2f::new(
                    (h.y() * k1 + f.y() * k0) / (e.y() * k1 - g.y() * k0),
                    -k0 / k1,
                );
            } else {
                return Point2f::new(
                    (h.x() * k1 + f.x() * k0) / (e.x() * k1 - g.x() * k0),
                    -k0 / k1,
                );
            }
        }
        // Otherwise solve the quadratic and pick the root with (u, v) in
        // [0, 1]^2, falling back to the second root.
        if let Some((v0, v1)) = quadratic(k2, k1, k0) {
            let u = (h.x() - f.x() * v0) / (e.x() + g.x() * v0);
            if !(0.0..=1.).contains(&u) || !(0.0..=1.0).contains(&v0) {
                return Point2f::new((h.x() - f.x() * v1) / (e.x() + g.x() * v1), v1);
            }
            Point2f::new(u, v0)
        } else {
            // No real roots: degenerate input; return the origin.
            Point2f::zero()
        }
    }
}
// Utility aliases and functions
// Float/i32/Interval specializations for 2 and 3 dimensions; the `fi`
// variants carry rounding-error intervals per coordinate.
pub type Point2<T> = Point<T, 2>;
pub type Point2f = Point2<Float>;
pub type Point2i = Point2<i32>;
pub type Point2fi = Point2<Interval>;
pub type Point3<T> = Point<T, 3>;
pub type Point3f = Point3<Float>;
pub type Point3i = Point3<i32>;
pub type Point3fi = Point3<Interval>;
pub type Vector2<T> = Vector<T, 2>;
pub type Vector2f = Vector2<Float>;
pub type Vector2i = Vector2<i32>;
pub type Vector2fi = Vector2<Interval>;
pub type Vector3<T> = Vector<T, 3>;
pub type Vector3f = Vector3<Float>;
pub type Vector3i = Vector3<i32>;
pub type Vector3fi = Vector3<Interval>;
pub type Normal3<T> = Normal<T, 3>;
pub type Normal3f = Normal3<Float>;
pub type Normal3i = Normal3<i32>;
// Arity-specific constructors for the 2- and 3-dimensional specializations.
impl<T: Copy> Vector2<T> {
    pub fn new(x: T, y: T) -> Self {
        Self([x, y])
    }
}
impl<T: Copy> Point2<T> {
    pub fn new(x: T, y: T) -> Self {
        Self([x, y])
    }
}
impl<T: Copy> Vector3<T> {
    pub fn new(x: T, y: T, z: T) -> Self {
        Self([x, y, z])
    }
}
impl<T: Copy> Point3<T> {
    pub fn new(x: T, y: T, z: T) -> Self {
        Self([x, y, z])
    }
}
impl<T: Copy> Normal3<T> {
    pub fn new(x: T, y: T, z: T) -> Self {
        Self([x, y, z])
    }
}
// Vector operations
impl<T> Vector3<T>
where
    T: Num + Copy + Neg<Output = T>,
{
    /// Standard 3-D cross product (right-handed).
    pub fn cross(self, rhs: Self) -> Self {
        Self([
            self[1] * rhs[2] - self[2] * rhs[1],
            self[2] * rhs[0] - self[0] * rhs[2],
            self[0] * rhs[1] - self[1] * rhs[0],
        ])
    }
}
impl<T> Normal3<T>
where
    T: Num + Copy + Neg<Output = T>,
{
    /// Cross product of two normals, yielding a Normal of the same type.
    pub fn cross(self, rhs: Self) -> Self {
        Self([
            self[1] * rhs[2] - self[2] * rhs[1],
            self[2] * rhs[0] - self[0] * rhs[2],
            self[0] * rhs[1] - self[1] * rhs[0],
        ])
    }
}
impl<T> Vector3<T>
where
    T: Num + NumFloat + Copy + Neg<Output = T>,
{
    /// Builds two vectors that, together with `self` (assumed normalized),
    /// form a basis; picks the larger components to avoid cancellation.
    pub fn coordinate_system(&self) -> (Self, Self)
    where
        T: NumFloat,
    {
        let v2 = if self[0].abs() > self[1].abs() {
            Self::new(-self[2], T::zero(), self[0]) / (self[0] * self[0] + self[2] * self[2]).sqrt()
        } else {
            Self::new(T::zero(), self[2], -self[1]) / (self[1] * self[1] + self[2] * self[2]).sqrt()
        };
        (v2, self.cross(v2))
    }
    /// Branchless orthonormal-basis construction (Duff et al. 2017), the
    /// formulation used by pbrt's CoordinateSystem(). Assumes `self` is
    /// normalized.
    pub fn coordinate_system_from_cpp(&self) -> (Self, Self) {
        // Fixed: we need +/-1 carrying z's sign. In Rust `a.copysign(b)`
        // takes the MAGNITUDE from `a` and the SIGN from `b`, so the
        // original `self.z().copysign(T::one())` computed |z| and produced
        // a wrong basis for z < 0.
        let sign = T::one().copysign(self.z());
        let a = -T::one() / (sign + self.z());
        let b = self.x() * self.y() * a;
        let v2 = Self::new(
            T::one() + sign * self.x().powi(2) * a,
            sign * b,
            -sign * self.x(),
        );
        let v3 = Self::new(b, sign + self.y().powi(2) * a, -self.y());
        (v2, v3)
    }
}
impl<T> Normal3<T>
where
    T: Num + NumFloat + Copy + Neg<Output = T>,
{
    /// Builds two normals that, together with `self` (assumed normalized),
    /// form a basis; picks the larger components to avoid cancellation.
    pub fn coordinate_system(&self) -> (Self, Self)
    where
        T: NumFloat,
    {
        let v2 = if self[0].abs() > self[1].abs() {
            Self::new(-self[2], T::zero(), self[0]) / (self[0] * self[0] + self[2] * self[2]).sqrt()
        } else {
            Self::new(T::zero(), self[2], -self[1]) / (self[1] * self[1] + self[2] * self[2]).sqrt()
        };
        (v2, self.cross(v2))
    }
    /// Branchless orthonormal-basis construction (Duff et al. 2017).
    /// Assumes `self` is normalized.
    pub fn coordinate_system_from_cpp(&self) -> (Self, Self) {
        // Fixed (same as the Vector3 version): `a.copysign(b)` takes the
        // magnitude from `a` and the sign from `b`; the original computed
        // |z| instead of sign(z), breaking the basis for z < 0.
        let sign = T::one().copysign(self.z());
        let a = -T::one() / (sign + self.z());
        let b = self.x() * self.y() * a;
        let v2 = Self::new(
            T::one() + sign * self.x().powi(2) * a,
            sign * b,
            -sign * self.x(),
        );
        let v3 = Self::new(b, sign + self.y().powi(2) * a, -self.y());
        (v2, v3)
    }
}
// Bit-pattern hashing for float tuples so they can key hash maps.
// NOTE(review): derived PartialEq treats 0.0 == -0.0 but their bit patterns
// hash differently — confirm callers never rely on Hash/Eq consistency for
// signed zeros or NaN payloads.
impl<const N: usize> Hash for Vector<Float, N> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        for item in self.0.iter() {
            item.to_bits().hash(state);
        }
    }
}
impl<const N: usize> Hash for Point<Float, N> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        for item in self.0.iter() {
            item.to_bits().hash(state);
        }
    }
}
impl<const N: usize> Hash for Normal<Float, N> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        for item in self.0.iter() {
            item.to_bits().hash(state);
        }
    }
}
// INTERVAL STUFF
impl<const N: usize> Point<Interval, N> {
    /// Wraps an exact point: every coordinate becomes a zero-width interval.
    pub fn new_from_point(p: Point<Float, N>) -> Self {
        Self(std::array::from_fn(|i| Interval::new(p[i])))
    }
    /// Wraps a point together with a per-coordinate absolute error bound.
    pub fn new_with_error(p: Point<Float, N>, e: Vector<Float, N>) -> Self {
        Self(std::array::from_fn(|i| {
            Interval::new_from_value_and_error(p[i], e[i])
        }))
    }
    /// Half-width of each coordinate interval, i.e. the error bound.
    pub fn error(&self) -> Vector<Float, N> {
        Vector(std::array::from_fn(|i| self[i].width() / 2.0))
    }
    /// Collapses each interval to its midpoint, yielding an ordinary point.
    pub fn midpoint(&self) -> Point<Float, N> {
        Point(std::array::from_fn(|i| self[i].midpoint()))
    }
    /// True when every coordinate interval has zero width.
    pub fn is_exact(&self) -> bool {
        !self.0.iter().any(|interval| interval.width() != 0.0)
    }
}
impl<const N: usize> Vector<Interval, N> {
    /// Wraps an exact vector: every component becomes a zero-width interval.
    pub fn new_from_vector(v: Vector<Float, N>) -> Self {
        Self(std::array::from_fn(|i| Interval::new(v[i])))
    }
    /// Wraps a vector together with a per-component absolute error bound.
    pub fn new_with_error(v: Vector<Float, N>, e: Vector<Float, N>) -> Self {
        Self(std::array::from_fn(|i| {
            Interval::new_from_value_and_error(v[i], e[i])
        }))
    }
    /// Half-width of each component interval, i.e. the error bound.
    pub fn error(&self) -> Vector<Float, N> {
        Vector(std::array::from_fn(|i| self[i].width() / 2.0))
    }
    /// Collapses each interval to its midpoint, yielding an ordinary vector.
    pub fn midpoint(&self) -> Vector<Float, N> {
        Vector(std::array::from_fn(|i| self[i].midpoint()))
    }
    /// True when every component interval has zero width.
    pub fn is_exact(&self) -> bool {
        !self.0.iter().any(|interval| interval.width() != 0.0)
    }
}
impl<const N: usize> From<Point<Interval, N>> for Point<Float, N> {
    /// Collapses interval coordinates to their midpoints.
    fn from(pi: Point<Interval, N>) -> Self {
        Point(pi.0.map(|coord| coord.midpoint()))
    }
}
impl<const N: usize> From<Vector<Interval, N>> for Vector<Float, N> {
    /// Collapses interval components to their midpoints.
    fn from(pi: Vector<Interval, N>) -> Self {
        Vector(pi.0.map(|comp| comp.midpoint()))
    }
}
impl<const N: usize> From<Vector<Float, N>> for Vector<Interval, N> {
    /// Promotes an exact vector to zero-width intervals.
    fn from(v: Vector<Float, N>) -> Self {
        Self(v.0.map(Interval::new))
    }
}
impl<const N: usize> Mul<Vector<Interval, N>> for Interval {
    type Output = Vector<Interval, N>;
    /// Scalar-on-the-left multiply; delegates to the commutative vector op.
    fn mul(self, rhs: Vector<Interval, N>) -> Self::Output {
        rhs * self
    }
}
impl<const N: usize> Div<Vector<Interval, N>> for Interval {
    type Output = Vector<Interval, N>;
    /// Component-wise `self / rhs[i]` (scalar numerator over each component).
    fn div(self, rhs: Vector<Interval, N>) -> Self::Output {
        Vector(std::array::from_fn(|i| self / rhs[i]))
    }
}
// Widening conversions from integer tuples.
// NOTE(review): the Vector conversion targets f32 while the Point conversion
// targets the Float alias — under the `use_f64` feature these diverge;
// confirm whether Vector should also use Float.
impl<const N: usize> From<Vector<i32, N>> for Vector<f32, N> {
    fn from(v: Vector<i32, N>) -> Self {
        Self(v.0.map(|c| c as f32))
    }
}
impl<const N: usize> From<Point<i32, N>> for Point<Float, N> {
    fn from(p: Point<i32, N>) -> Self {
        Point(p.0.map(|c| c as Float))
    }
}
impl<T> Normal3<T>
where
    T: Num + PartialOrd + Copy + Neg<Output = T> + Sqrt,
{
    /// Flips the normal, if needed, so it lies in the same hemisphere as `v`.
    pub fn face_forward(self, v: Vector3<T>) -> Self {
        let opposes = Vector3::<T>::from(self).dot(v) < T::zero();
        if opposes { -self } else { self }
    }
}
/// Compact 32-bit encoding of a unit direction via octahedral mapping:
/// two 16-bit fixed-point coordinates on the unfolded octahedron.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
#[repr(C)]
pub struct OctahedralVector {
    x: u16,
    y: u16,
}
impl OctahedralVector {
    /// Encodes a direction (assumed nonzero; expected normalized) into two
    /// 16-bit octahedral coordinates.
    pub fn new(mut v: Vector3f) -> Self {
        // Project onto the octahedron |x|+|y|+|z| = 1.
        v /= v.x().abs() + v.y().abs() + v.z().abs();
        let (x_enc, y_enc) = if v.z() >= 0.0 {
            (Self::encode(v.x()), Self::encode(v.y()))
        } else {
            // Lower hemisphere: fold the octahedron's bottom faces outward.
            (
                Self::encode((1.0 - v.y().abs()) * Self::sign(v.x())),
                Self::encode((1.0 - v.x().abs()) * Self::sign(v.y())),
            )
        };
        Self { x: x_enc, y: y_enc }
    }
    /// Decodes back to a normalized direction.
    pub fn to_vector(self) -> Vector3f {
        let mut v = Vector3f::default();
        // Map [0, 65535] back to [-1, 1]
        v[0] = -1.0 + 2.0 * (self.x as Float / 65535.0);
        v[1] = -1.0 + 2.0 * (self.y as Float / 65535.0);
        v[2] = 1.0 - (v.x().abs() + v.y().abs());
        if v.z() < 0.0 {
            // Undo the lower-hemisphere fold; stash x before overwriting it.
            let xo = v.x();
            v[0] = (1.0 - v.y().abs()) * Self::sign(xo);
            v[1] = (1.0 - xo.abs()) * Self::sign(v.y());
        }
        v.normalize()
    }
    /// +/-1 carrying the sign of `v` (magnitude from the literal 1.0).
    #[inline]
    pub fn sign(v: Float) -> Float {
        1.0.copysign(v)
    }
    /// Maps f in [-1, 1] to fixed-point [0, 65535] with rounding.
    #[inline]
    pub fn encode(f: Float) -> u16 {
        (clamp_t((f + 1.0) / 2.0, 0.0, 1.0) * 65535.0).round() as u16
    }
}
// From conversions mirroring new()/to_vector() for ergonomic use.
impl From<Vector3f> for OctahedralVector {
    fn from(v: Vector3f) -> Self {
        Self::new(v)
    }
}
impl From<OctahedralVector> for Vector3f {
    fn from(ov: OctahedralVector) -> Self {
        ov.to_vector()
    }
}
/// An orthonormal coordinate frame given by its three axis vectors, used to
/// convert directions between world and local (frame) space.
#[derive(Copy, Clone, Debug, Default, PartialEq)]
pub struct Frame {
    pub x: Vector3f,
    pub y: Vector3f,
    pub z: Vector3f,
}
impl Frame {
    /// Frame from x and z axes with y = z × x.
    /// NOTE(review): assumes x and z are orthonormal — confirm at call sites.
    pub fn new(x: Vector3f, z: Vector3f) -> Self {
        Self {
            x,
            y: z.cross(x),
            z,
        }
    }
    /// Frame whose x axis is (normalized) `x`; the other two axes come from
    /// `coordinate_system`.
    pub fn from_x(x: Vector3f) -> Self {
        let (y, z) = x.normalize().coordinate_system();
        Self {
            x: x.normalize(),
            y,
            z,
        }
    }
    /// Same as `new`; kept as an explicitly named variant.
    pub fn from_xz(x: Vector3f, z: Vector3f) -> Self {
        Self {
            x,
            y: z.cross(x),
            z,
        }
    }
    /// Frame from x and y axes with z = x × y.
    pub fn from_xy(x: Vector3f, y: Vector3f) -> Self {
        Self {
            x,
            y,
            z: x.cross(y),
        }
    }
    /// Frame whose y axis is (normalized) `y`.
    /// NOTE(review): the (z, x) assignment order determines handedness —
    /// verify it matches the conventions of from_x/from_z.
    pub fn from_y(y: Vector3f) -> Self {
        let (z, x) = y.normalize().coordinate_system();
        Self {
            x,
            y: y.normalize(),
            z,
        }
    }
    /// Frame whose z axis is (normalized) `z`.
    pub fn from_z(z: Vector3f) -> Self {
        let (x, y) = z.normalize().coordinate_system();
        Self {
            x,
            y,
            z: z.normalize(),
        }
    }
    /// World-to-frame: project onto each axis.
    pub fn to_local(&self, v: Vector3f) -> Vector3f {
        Vector3f::new(v.dot(self.x), v.dot(self.y), v.dot(self.z))
    }
    /// World-to-frame for normals (same projection, Normal-typed result).
    pub fn to_local_normal(&self, n: Normal3f) -> Normal3f {
        let n: Vector3f = n.into();
        Normal3f::new(n.dot(self.x), n.dot(self.y), n.dot(self.z))
    }
    /// Frame-to-world: linear combination of the axes.
    pub fn from_local(&self, v: Vector3f) -> Vector3f {
        self.x * v.x() + self.y * v.y() + self.z * v.z()
    }
    /// Frame-to-world for normals.
    pub fn from_local_normal(&self, v: Normal3f) -> Normal3f {
        Normal3f::from(self.x * v.x() + self.y * v.y() + self.z * v.z())
    }
}

119
src/core/geometry/ray.rs Normal file
View file

@ -0,0 +1,119 @@
use super::{Normal3f, Point3f, Point3fi, Vector3f, VectorLike};
use crate::core::medium::Medium;
use crate::core::pbrt::Float;
use crate::utils::math::{next_float_down, next_float_up};
use std::sync::Arc;
/// A ray: origin, direction, time, and the medium it travels in.
#[derive(Clone, Debug)]
pub struct Ray {
    pub o: Point3f,
    pub d: Vector3f,
    pub medium: Option<Arc<Medium>>,
    pub time: Float,
    // We do this instead of creating a trait for Rayable or some gnarly thing like that
    // (i.e. differentials are carried inline and are None for ordinary rays).
    pub differential: Option<RayDifferential>,
}
impl Default for Ray {
    /// A degenerate ray at the origin with zero direction, time 0, no medium,
    /// and no differentials.
    fn default() -> Self {
        Self {
            o: Point3f::zero(),
            d: Vector3f::zero(),
            medium: None,
            time: 0.0,
            differential: None,
        }
    }
}
impl Ray {
pub fn new(o: Point3f, d: Vector3f, time: Option<Float>, medium: Option<Arc<Medium>>) -> Self {
Self {
o,
d,
time: time.unwrap_or_else(|| Self::default().time),
medium,
..Self::default()
}
}
pub fn at(&self, t: Float) -> Point3f {
self.o + self.d * t
}
pub fn offset_origin(p: &Point3fi, n: &Normal3f, w: &Vector3f) -> Point3f {
let d: Float = Vector3f::from(n.abs()).dot(p.error());
let normal: Vector3f = Vector3f::from(*n);
let mut offset = p.midpoint();
if w.dot(normal) < 0.0 {
offset -= normal * d;
} else {
offset += normal * d;
}
for i in 0..3 {
if n[i] > 0.0 {
offset[i] = next_float_up(offset[i]);
} else if n[i] < 0.0 {
offset[i] = next_float_down(offset[i]);
}
}
offset
}
pub fn spawn(pi: &Point3fi, n: &Normal3f, time: Float, d: Vector3f) -> Ray {
let origin = Self::offset_origin(pi, n, &d);
Ray {
o: origin,
d,
time,
medium: None,
differential: None,
}
}
pub fn spawn_to_point(p_from: &Point3fi, n: &Normal3f, time: Float, p_to: Point3f) -> Ray {
let d = p_to - p_from.midpoint();
Self::spawn(p_from, n, time, d)
}
pub fn spawn_to_interaction(
p_from: &Point3fi,
n_from: &Normal3f,
time: Float,
p_to: &Point3fi,
n_to: &Normal3f,
) -> Ray {
let dir_for_offset = p_to.midpoint() - p_from.midpoint();
let pf = Self::offset_origin(p_from, n_from, &dir_for_offset);
let pt = Self::offset_origin(p_to, n_to, &(pf - p_to.midpoint()));
let d = pt - pf;
Ray {
o: pf,
d,
time,
medium: None,
differential: None,
}
}
pub fn scale_differentials(&mut self, s: Float) {
if let Some(differential) = &mut self.differential {
differential.rx_origin = self.o + (differential.rx_origin - self.o) * s;
differential.ry_origin = self.o + (differential.ry_origin - self.o) * s;
differential.rx_direction = self.d + (differential.rx_direction - self.d) * s;
differential.ry_direction = self.d + (differential.ry_direction - self.d) * s;
}
}
}
/// Auxiliary rays offset one pixel in x and y, used to estimate texture
/// filter footprints.
#[derive(Debug, Default, Copy, Clone)]
pub struct RayDifferential {
    pub rx_origin: Point3f,
    pub ry_origin: Point3f,
    pub rx_direction: Vector3f,
    pub ry_direction: Vector3f,
}

183
src/core/geometry/traits.rs Normal file
View file

@ -0,0 +1,183 @@
use crate::core::pbrt::Float;
use crate::utils::interval::Interval;
use crate::utils::math::{next_float_down, next_float_up};
use num_integer::Roots;
use num_traits::{Float as NumFloat, FloatConst, Num, One, Signed, Zero};
use std::ops::{Add, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub};
/// Shared interface for fixed-size `[T; N]` wrappers (Vector/Point/Normal):
/// raw array access plus component-wise utilities.
pub trait Tuple<T, const N: usize>:
    Sized + Copy + Index<usize, Output = T> + IndexMut<usize>
{
    fn data(&self) -> &[T; N];
    fn data_mut(&mut self) -> &mut [T; N];
    fn from_array(arr: [T; N]) -> Self;
    /// Reorders components: result[i] = self[p[i]].
    #[inline]
    fn permute(&self, p: [usize; N]) -> Self
    where
        T: Copy,
    {
        let new_data = p.map(|index| self[index]);
        Self::from_array(new_data)
    }
    /// Largest component value. Panics on a zero-length tuple.
    fn max_component_value(&self) -> T
    where
        T: PartialOrd + Copy,
    {
        self.data()
            .iter()
            .copied()
            .reduce(|a, b| if a > b { a } else { b })
            .expect("Cannot get max component of a zero-length tuple")
    }
    /// Smallest component value. Panics on a zero-length tuple.
    fn min_component_value(&self) -> T
    where
        T: PartialOrd + Copy,
    {
        self.data()
            .iter()
            .copied()
            .reduce(|a, b| if a < b { a } else { b })
            .expect("Cannot get min component of a zero-length tuple")
    }
    /// Index of the largest component (0 for an empty tuple).
    /// NOTE(review): `partial_cmp(..).unwrap()` panics if a component is NaN.
    fn max_component_index(&self) -> usize
    where
        T: PartialOrd,
    {
        self.data()
            .iter()
            .enumerate()
            .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap())
            .map(|(index, _)| index)
            .unwrap_or(0)
    }
    /// Index of the smallest component (0 for an empty tuple).
    /// NOTE(review): `partial_cmp(..).unwrap()` panics if a component is NaN.
    fn min_component_index(&self) -> usize
    where
        T: PartialOrd,
    {
        self.data()
            .iter()
            .enumerate()
            .min_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap())
            .map(|(index, _)| index)
            .unwrap_or(0)
    }
}
/// Direction-like behavior built on top of a dot product: norms,
/// normalization, Gram-Schmidt, and angles.
pub trait VectorLike:
    Sized
    + Copy
    + Add<Output = Self>
    + Sub<Output = Self>
    + Div<Self::Scalar, Output = Self>
    + Mul<Self::Scalar, Output = Self>
{
    type Scalar: Copy + Zero + Add<Output = Self::Scalar> + Mul<Output = Self::Scalar> + Sqrt;
    fn dot(self, rhs: Self) -> Self::Scalar;
    /// Squared length, i.e. self . self.
    fn norm_squared(self) -> Self::Scalar {
        self.dot(self)
    }
    /// |self . rhs|.
    fn abs_dot(self, rhs: Self) -> Self::Scalar
    where
        Self::Scalar: Signed,
    {
        self.dot(rhs).abs()
    }
    /// Removes the component of `self` along `rhs`.
    /// NOTE(review): assumes `rhs` is normalized (no division by |rhs|^2) —
    /// confirm at call sites.
    fn gram_schmidt(self, rhs: Self) -> Self {
        self - rhs * self.dot(rhs)
    }
    /// Euclidean length.
    fn norm(&self) -> Self::Scalar {
        self.norm_squared().sqrt()
    }
    /// Unit-length copy; returns `self` unchanged when the length is zero.
    fn normalize(self) -> Self
    where
        Self::Scalar: NumFloat,
    {
        let n = self.norm();
        if n.is_zero() { self } else { self / n }
    }
    /// Angle between the two directions in radians; the dot product is
    /// clamped to [-1, 1] to keep acos well-defined under rounding.
    fn angle_between(self, rhs: Self) -> Self::Scalar
    where
        Self::Scalar: NumFloat,
    {
        let dot_product = self.normalize().dot(rhs.normalize());
        let clamped_dot = dot_product
            .min(Self::Scalar::one())
            .max(-Self::Scalar::one());
        clamped_dot.acos()
    }
}
/// Square root abstraction so generic tuple code works over floats,
/// integers (integer square root), and error intervals alike.
pub trait Sqrt {
    fn sqrt(self) -> Self;
}
// Fixed: implementing for the `Float` alias alongside f64 creates
// conflicting impls when the `use_f64` feature makes Float = f64.
// Implementing for the concrete f32 and f64 covers Float either way.
impl Sqrt for f32 {
    fn sqrt(self) -> Self {
        // Resolves to the inherent f32::sqrt, not a recursive trait call.
        self.sqrt()
    }
}
impl Sqrt for f64 {
    fn sqrt(self) -> Self {
        self.sqrt()
    }
}
impl Sqrt for i32 {
    fn sqrt(self) -> Self {
        self.isqrt()
    }
}
impl Sqrt for u32 {
    fn sqrt(self) -> Self {
        self.isqrt()
    }
}
impl Sqrt for Interval {
    /// Conservative interval square root: endpoints are rounded outward by
    /// one ulp, and negative endpoints clamp to zero rather than producing
    /// NaN.
    fn sqrt(self) -> Self {
        let low = if self.low < 0.0 {
            0.0
        } else {
            next_float_down(self.low.sqrt())
        };
        let high = if self.high < 0.0 {
            0.0
        } else {
            next_float_up(self.high.sqrt())
        };
        Self { low, high }
    }
}
/// Linear interpolation between two values by a factor (Float by default).
pub trait Lerp<Factor = Float>: Sized + Copy {
    fn lerp(t: Factor, a: Self, b: Self) -> Self;
}
// Blanket impl: anything whose difference can be scaled by the factor and
// added back interpolates as a + (b - a) * t (exact at t = 0, not
// necessarily at t = 1 under floating-point rounding).
impl<T, F, Diff> Lerp<F> for T
where
    T: Copy + Sub<Output = Diff> + Add<Diff, Output = T>,
    Diff: Mul<F, Output = Diff>,
    F: Copy,
{
    #[inline(always)]
    fn lerp(t: F, a: Self, b: Self) -> Self {
        a + (b - a) * t
    }
}

View file

@ -1,13 +0,0 @@
pub struct BPDTIntegrator;
pub struct MLTIntegrator;
pub struct SPPMIntegrator;
pub struct SamplerIntegrator;
pub enum Integrator {
BPDT(BPDTIntegrator),
MLT(MLTIntegrator),
SPPM(SPPMIntegrator),
Sampler(SamplerIntegrator),
}
impl Integrator {}

View file

@ -1,11 +1,25 @@
use crate::core::bxdf::BSDF;
use crate::core::material::MaterialTrait;
use crate::core::medium::{Medium, MediumInterface};
use crate::core::pbrt::Float;
use crate::geometry::{Normal3f, Point2f, Point3f, Point3fi, Ray, Vector3f, VectorLike};
use crate::lights::Light;
use crate::shapes::ShapeTrait;
use crate::camera::{Camera, CameraTrait};
use crate::core::bssrdf::BSSRDF;
use crate::core::bxdf::{BSDF, BxDFFlags, DiffuseBxDF};
use crate::core::material::{
Material, MaterialEvalContext, MaterialTrait, NormalBumpEvalContext, bump_map, normal_map,
};
use crate::core::medium::{Medium, MediumInterface, PhaseFunction};
use crate::core::options::get_options;
use crate::core::pbrt::{Float, clamp_t};
use crate::core::sampler::{Sampler, SamplerTrait};
use crate::core::texture::{FloatTexture, UniversalTextureEvaluator};
use crate::geometry::{
Normal3f, Point2f, Point3f, Point3fi, Ray, RayDifferential, Vector3f, VectorLike,
};
use crate::image::Image;
use crate::lights::{Light, LightTrait};
use crate::shapes::Shape;
use crate::spectra::{SampledSpectrum, SampledWavelengths};
use crate::utils::math::{difference_of_products, square};
use bumpalo::Bump;
use enum_dispatch::enum_dispatch;
use std::any::Any;
use std::sync::Arc;
@ -15,14 +29,14 @@ pub struct InteractionData {
pub n: Normal3f,
pub time: Float,
pub wo: Vector3f,
pub medium_interface: Option<Arc<MediumInterface>>,
pub medium_interface: Option<MediumInterface>,
pub medium: Option<Arc<Medium>>,
}
pub trait Interaction: Send + Sync {
#[enum_dispatch]
pub trait InteractionTrait: Send + Sync + std::fmt::Debug {
fn get_common(&self) -> &InteractionData;
fn get_common_mut(&mut self) -> &mut InteractionData;
fn as_any(&self) -> &dyn Any;
fn p(&self) -> Point3f {
self.get_common().pi.into()
@ -42,31 +56,49 @@ pub trait Interaction: Send + Sync {
}
fn is_surface_interaction(&self) -> bool {
self.as_any().is::<SurfaceInteraction>()
false
}
fn is_medium_interaction(&self) -> bool {
self.as_any().is::<MediumInteraction>()
false
}
fn get_medium(&self, w: Vector3f) -> Option<Arc<Medium>>;
fn spawn_ray(&self, d: Vector3f) -> Ray;
fn spawn_ray_to_point(&self, p2: Point3f) -> Ray {
let origin = self.p();
let direction = p2 - origin;
Ray {
o: origin,
d: direction,
time: self.time(),
medium: self.get_medium(direction),
differential: None,
fn get_medium(&self, w: Vector3f) -> Option<Arc<Medium>> {
let data = self.get_common();
if let Some(mi) = &data.medium_interface {
if w.dot(data.n.into()) > 0.0 {
mi.outside.clone()
} else {
mi.inside.clone()
}
} else {
data.medium.clone()
}
}
fn spawn_ray_to_interaction(&self, other: &dyn Interaction) -> Ray {
self.spawn_ray_to_point(other.p())
fn spawn_ray(&self, d: Vector3f) -> Ray {
let data = self.get_common();
let mut ray = Ray::spawn(&data.pi, &data.n, data.time, d);
ray.medium = self.get_medium(d);
ray
}
fn spawn_ray_to_point(&self, p2: Point3f) -> Ray {
let data = self.get_common();
let mut ray = Ray::spawn_to_point(&data.pi, &data.n, data.time, p2);
ray.medium = self.get_medium(ray.d);
ray
}
fn spawn_ray_to_interaction(&self, other: &dyn InteractionTrait) -> Ray {
let data = self.get_common();
let other_data = other.get_common();
let mut ray =
Ray::spawn_to_interaction(&data.pi, &data.n, data.time, &other_data.pi, &other_data.n);
ray.medium = self.get_medium(ray.d);
ray
}
fn offset_ray_vector(&self, w: Vector3f) -> Point3f {
@ -78,6 +110,67 @@ pub trait Interaction: Send + Sync {
}
}
/// Sum type over the concrete interaction kinds; `enum_dispatch` generates the
/// `InteractionTrait` forwarding to whichever variant is active.
#[enum_dispatch(InteractionTrait)]
#[derive(Debug, Clone)]
pub enum Interaction {
    Surface(SurfaceInteraction),
    Medium(MediumInteraction),
    Simple(SimpleInteraction),
}
impl Interaction {
    /// Overwrites the media on either side of the boundary this interaction
    /// sits on.
    pub fn set_medium_interface(&mut self, mi: Option<MediumInterface>) {
        match self {
            Interaction::Surface(si) => si.common.medium_interface = mi,
            Interaction::Simple(si) => si.common.medium_interface = mi,
            // Medium interactions live inside a volume, not on a boundary, so
            // there is no interface to record.
            Interaction::Medium(_) => {}
        }
    }
}
/// Minimal interaction carrying only the shared `InteractionData` — used for
/// points that are neither surface hits nor in-medium scattering events.
#[derive(Debug, Clone)]
pub struct SimpleInteraction {
    pub common: InteractionData,
}
impl SimpleInteraction {
    /// Interaction at `pi` with a zero normal and outgoing direction.
    /// NOTE(review): uses `Normal3f::default()`/`Vector3f::default()` where
    /// `new_interface` uses `zero()` — presumably identical; confirm.
    pub fn new(pi: Point3fi, time: Float, medium_interface: Option<MediumInterface>) -> Self {
        Self {
            common: InteractionData {
                pi,
                time,
                medium_interface,
                n: Normal3f::default(),
                wo: Vector3f::default(),
                medium: None,
            },
        }
    }
    /// Interaction marking a medium boundary at point `p` (time fixed at 0).
    pub fn new_interface(p: Point3f, medium_interface: Option<MediumInterface>) -> Self {
        Self {
            common: InteractionData {
                pi: Point3fi::new_from_point(p),
                n: Normal3f::zero(),
                wo: Vector3f::zero(),
                time: 0.0,
                medium: None,
                medium_interface,
            },
        }
    }
}
impl InteractionTrait for SimpleInteraction {
    fn get_common(&self) -> &InteractionData {
        &self.common
    }
    fn get_common_mut(&mut self) -> &mut InteractionData {
        &mut self.common
    }
}
#[derive(Default, Clone, Debug)]
pub struct ShadingGeometry {
pub n: Normal3f,
@ -87,7 +180,7 @@ pub struct ShadingGeometry {
pub dndv: Normal3f,
}
#[derive(Default, Debug, Clone)]
#[derive(Debug, Default, Clone)]
pub struct SurfaceInteraction {
pub common: InteractionData,
pub uv: Point2f,
@ -96,47 +189,304 @@ pub struct SurfaceInteraction {
pub dndu: Normal3f,
pub dndv: Normal3f,
pub shading: ShadingGeometry,
pub medium_interface: Option<Arc<MediumInterface>>,
pub face_index: usize,
pub area_light: Option<Arc<Light>>,
pub material: Option<Arc<dyn MaterialTrait>>,
pub material: Option<Arc<Material>>,
pub dpdx: Vector3f,
pub dpdy: Vector3f,
pub dudx: Float,
pub dvdx: Float,
pub dudy: Float,
pub dvdy: Float,
pub shape: Option<Arc<dyn ShapeTrait>>,
pub bsdf: Option<BSDF>,
pub shape: Arc<Shape>,
}
impl SurfaceInteraction {
pub fn set_intersection_properties(
&mut self,
mtl: Arc<dyn MaterialTrait>,
area: Arc<Light>,
prim_medium_interface: Option<Arc<MediumInterface>>,
ray_medium: Arc<Medium>,
) {
self.material = Some(mtl);
self.area_light = Some(area);
if prim_medium_interface.as_ref().map_or(false, |mi| mi.is_medium_transition()) {
self.common.medium_interface = prim_medium_interface;
pub fn le(&self, w: Vector3f, lambda: &SampledWavelengths) -> SampledSpectrum {
if let Some(area_light) = &self.area_light {
area_light.l(self.p(), self.n(), self.uv, w, lambda)
} else {
self.common.medium = Some(ray_medium);
SampledSpectrum::new(0.)
}
}
pub fn compute_differentials(&mut self, r: &Ray, camera: &Camera, samples_per_pixel: i32) {
let computed = if let Some(diff) = &r.differential {
let dot_rx = self.common.n.dot(diff.rx_direction.into());
let dot_ry = self.common.n.dot(diff.ry_direction.into());
if dot_rx != 0.0 && dot_ry != 0.0 {
// Estimate screen-space change in p using ray differentials>
let p_as_vec = Normal3f::new(self.p().x(), self.p().y(), self.p().z());
let d = -self.common.n.dot(p_as_vec);
// Compute t for x-auxiliary ray
let rx_origin_vec =
Normal3f::new(diff.rx_origin.x(), diff.rx_origin.y(), diff.rx_origin.z());
let tx = (-self.common.n.dot(rx_origin_vec) - d) / dot_rx;
// Compute intersection point px
let px = diff.rx_origin + diff.rx_direction * tx;
// Compute t for y-auxiliary ray
let ry_origin_vec =
Normal3f::new(diff.ry_origin.x(), diff.ry_origin.y(), diff.ry_origin.z());
let ty = (-self.common.n.dot(ry_origin_vec) - d) / dot_ry;
let py = diff.ry_origin + diff.ry_direction * ty;
self.dpdx = px - self.p();
self.dpdy = py - self.p();
true
} else {
false
}
} else {
false
};
if !computed {
camera.approximate_dp_dxy(
self.p(),
self.n(),
self.time(),
samples_per_pixel,
&mut self.dpdx,
&mut self.dpdy,
);
}
let ata00 = self.dpdu.dot(self.dpdu);
let ata01 = self.dpdu.dot(self.dpdv);
let ata11 = self.dpdv.dot(self.dpdv);
let mut inv_det = 1. / difference_of_products(ata00, ata11, ata01, ata01);
inv_det = if inv_det.is_finite() { inv_det } else { 0. };
let atb0x = self.dpdu.dot(self.dpdx);
let atb1x = self.dpdv.dot(self.dpdx);
let atb0y = self.dpdu.dot(self.dpdy);
let atb1y = self.dpdv.dot(self.dpdy);
// Compute u and v derivatives in x and y
self.dudx = difference_of_products(ata11, atb0x, ata01, atb1x) * inv_det;
self.dvdx = difference_of_products(ata00, atb1x, ata01, atb0x) * inv_det;
self.dudy = difference_of_products(ata11, atb0y, ata01, atb1y) * inv_det;
self.dvdy = difference_of_products(ata00, atb1y, ata01, atb0y) * inv_det;
// Clamp derivatives
self.dudx = if self.dudx.is_finite() {
clamp_t(self.dudx, -1e8, 1e8)
} else {
0.
};
self.dvdx = if self.dvdx.is_finite() {
clamp_t(self.dvdx, -1e8, 1e8)
} else {
0.
};
self.dudy = if self.dudy.is_finite() {
clamp_t(self.dudy, -1e8, 1e8)
} else {
0.
};
self.dvdy = if self.dvdy.is_finite() {
clamp_t(self.dvdy, -1e8, 1e8)
} else {
0.
};
}
pub fn skip_intersection(&self, ray: &mut Ray, t: Float) {
let new_ray = Ray::spawn(&self.pi(), &self.n(), ray.time, ray.d);
ray.o = new_ray.o;
// Skipping other variables, since they should not change when passing through surface
if let Some(diff) = &mut ray.differential {
diff.rx_origin += diff.rx_direction * t;
diff.ry_origin += diff.ry_direction * t;
}
}
    /// Resolves this hit's material to a BSDF, allocated in `scratch`.
    ///
    /// Steps: (1) compute texture-filtering differentials; (2) resolve any
    /// `Material::Mix` chain to a concrete material; (3) apply bump/normal
    /// mapping if the material carries either; (4) build the BxDF, optionally
    /// replaced by a matching diffuse BxDF when `force_diffuse` is set.
    /// Returns `None` for material-less hits (e.g. pure interfaces).
    pub fn get_bsdf<'a>(
        &mut self,
        r: &Ray,
        lambda: &SampledWavelengths,
        camera: &Camera,
        scratch: &'a Bump,
        sampler: &mut Sampler,
    ) -> Option<BSDF<'a>> {
        self.compute_differentials(r, camera, sampler.samples_per_pixel() as i32);
        let material = {
            let root_mat = self.material.as_deref()?;
            let mut active_mat: &Material = root_mat;
            let tex_eval = UniversalTextureEvaluator;
            // Walk nested Mix materials until a concrete material is chosen.
            while let Material::Mix(mix) = active_mat {
                // We need a context to evaluate the 'amount' texture
                let ctx = MaterialEvalContext::from(&*self);
                active_mat = mix.choose_material(&tex_eval, &ctx);
            }
            active_mat.clone()
        };
        let ctx = MaterialEvalContext::from(&*self);
        let tex_eval = UniversalTextureEvaluator;
        let displacement = material.get_displacement();
        let normal_map = material.get_normal_map();
        if displacement.is_some() || normal_map.is_some() {
            // Perturbs the shading geometry before the BxDF is built.
            self.compute_bump_geometry(&tex_eval, displacement, normal_map);
        }
        let mut bsdf = material.get_bxdf(&tex_eval, &ctx, lambda, scratch);
        if get_options().force_diffuse {
            // Debug/visualization mode: swap in a Lambertian with the same
            // hemispherical reflectance as the real BSDF.
            let r = bsdf.rho_wo(self.common.wo, &[sampler.get1d()], &[sampler.get2d()]);
            let diff_bxdf = scratch.alloc(DiffuseBxDF::new(r));
            bsdf = BSDF::new(self.shading.n, self.shading.dpdu, Some(diff_bxdf));
        }
        Some(bsdf)
    }
    /// Resolves this hit's material to its BSSRDF, if it has subsurface
    /// scattering; `None` otherwise or when the hit carries no material.
    ///
    /// NOTE(review): the Mix-resolution loop duplicates the one in `get_bsdf`;
    /// consider extracting a shared helper when both methods are next touched.
    pub fn get_bssrdf(
        &self,
        _ray: &Ray,
        lambda: &SampledWavelengths,
        _camera: &Camera,
        _scratch: &Bump,
    ) -> Option<BSSRDF<'_>> {
        let material = {
            let root_mat = self.material.as_deref()?;
            let mut active_mat: &Material = root_mat;
            let tex_eval = UniversalTextureEvaluator;
            // Walk nested Mix materials until a concrete material is chosen.
            while let Material::Mix(mix) = active_mat {
                // We need a context to evaluate the 'amount' texture
                let ctx = MaterialEvalContext::from(self);
                active_mat = mix.choose_material(&tex_eval, &ctx);
            }
            active_mat.clone()
        };
        let ctx = MaterialEvalContext::from(self);
        let tex_eval = UniversalTextureEvaluator;
        material.get_bssrdf(&tex_eval, &ctx, lambda)
    }
    /// Recomputes the shading tangents from displacement ("bump") or normal
    /// mapping and installs the perturbed frame via `set_shading_geometry`.
    fn compute_bump_geometry(
        &mut self,
        tex_eval: &UniversalTextureEvaluator,
        displacement: Option<FloatTexture>,
        normal_image: Option<&Image>,
    ) {
        let ctx = NormalBumpEvalContext::from(&*self);
        // Displacement takes precedence over a normal map when both exist.
        let (dpdu, dpdv) = if let Some(disp) = displacement {
            bump_map(tex_eval, &disp, &ctx)
        } else if let Some(map) = normal_image {
            normal_map(map, &ctx)
        } else {
            (self.shading.dpdu, self.shading.dpdv)
        };
        // Flip the new shading normal into the hemisphere of the geometric one.
        let mut ns = Normal3f::from(dpdu.cross(dpdv).normalize());
        if ns.dot(self.n()) < 0.0 {
            ns = -ns;
        }
        self.set_shading_geometry(ns, dpdu, dpdv, self.shading.dndu, self.shading.dndv, false);
    }
    /// Spawns the scattered ray in direction `wi` and, for specular reflection
    /// or transmission, propagates the incoming ray's differentials through
    /// the scattering event (first-order expansion of the reflect/refract
    /// mapping). Differentials are dropped when they blow up numerically.
    pub fn spawn_ray_with_differentials(
        &self,
        ray_i: &Ray,
        wi: Vector3f,
        flags: BxDFFlags,
        eta: Float,
    ) -> Ray {
        let mut rd = self.spawn_ray(wi);
        if let Some(diff_i) = &ray_i.differential {
            let mut n = self.shading.n;
            // Screen-space derivatives of the shading normal via the chain rule.
            let mut dndx = self.shading.dndu * self.dudx + self.shading.dndv * self.dvdx;
            let mut dndy = self.shading.dndu * self.dudy + self.shading.dndv * self.dvdy;
            // wo = -ray direction, so d(wo) = -d(direction).
            let dwodx = -diff_i.rx_direction - self.wo();
            let dwody = -diff_i.ry_direction - self.wo();
            let new_diff_rx_origin = self.p() + self.dpdx;
            let new_diff_ry_origin = self.p() + self.dpdy;
            let mut new_diff_rx_dir = Vector3f::default();
            let mut new_diff_ry_dir = Vector3f::default();
            let mut valid_differentials = false;
            if flags.contains(BxDFFlags::SPECULAR_REFLECTION) {
                valid_differentials = true;
                // Differentiate wi = -wo + 2 (wo . n) n with respect to x / y.
                let d_wo_dot_n_dx = dwodx.dot(n.into()) + self.wo().dot(dndx.into());
                let d_wo_dot_n_dy = dwody.dot(n.into()) + self.wo().dot(dndy.into());
                let wo_dot_n = self.wo().dot(n.into());
                new_diff_rx_dir = wi - dwodx
                    + (Vector3f::from(dndx) * wo_dot_n + Vector3f::from(n) * d_wo_dot_n_dx) * 2.0;
                new_diff_ry_dir = wi - dwody
                    + (Vector3f::from(dndy) * wo_dot_n + Vector3f::from(n) * d_wo_dot_n_dy) * 2.0;
            } else if flags.contains(BxDFFlags::SPECULAR_TRANSMISSION) {
                valid_differentials = true;
                // Orient the normal (and its derivatives) toward wo.
                if self.wo().dot(n.into()) < 0.0 {
                    n = -n;
                    dndx = -dndx;
                    dndy = -dndy;
                }
                // Differentiate the refraction mapping wi = -wo/eta + mu n.
                let d_wo_dot_n_dx = dwodx.dot(n.into()) + self.wo().dot(dndx.into());
                let d_wo_dot_n_dy = dwody.dot(n.into()) + self.wo().dot(dndy.into());
                let wo_dot_n = self.wo().dot(n.into());
                let wi_dot_n = wi.dot(Vector3f::from(n));
                let abs_wi_dot_n = wi.abs_dot(n.into());
                let mu = wo_dot_n / eta - abs_wi_dot_n;
                let f_eta = 1.0 / eta;
                let f_eta2 = 1.0 / square(eta);
                let term = f_eta + (f_eta2 * wo_dot_n / wi_dot_n);
                let dmudx = d_wo_dot_n_dx * term;
                let dmudy = d_wo_dot_n_dy * term;
                new_diff_rx_dir =
                    wi - dwodx * eta + (Vector3f::from(dndx) * mu + Vector3f::from(n) * dmudx);
                new_diff_ry_dir =
                    wi - dwody * eta + (Vector3f::from(dndy) * mu + Vector3f::from(n) * dmudy);
            }
            if valid_differentials {
                // Discard exploding differentials rather than propagate junk.
                let threshold = 1e16;
                if new_diff_rx_dir.norm_squared() > threshold
                    || new_diff_ry_dir.norm_squared() > threshold
                    || Vector3f::from(new_diff_rx_origin).norm_squared() > threshold
                    || Vector3f::from(new_diff_ry_origin).norm_squared() > threshold
                {
                    rd.differential = None;
                } else {
                    rd.differential = Some(RayDifferential {
                        rx_origin: new_diff_rx_origin,
                        ry_origin: new_diff_ry_origin,
                        rx_direction: new_diff_rx_dir,
                        ry_direction: new_diff_ry_dir,
                    });
                }
            }
        }
        rd
    }
}
pub struct PhaseFunction;
pub struct MediumInteraction {
pub common: InteractionData,
pub medium: Arc<Medium>,
pub phase: PhaseFunction,
}
impl Interaction for SurfaceInteraction {
impl InteractionTrait for SurfaceInteraction {
fn get_common(&self) -> &InteractionData {
&self.common
}
@ -145,12 +495,8 @@ impl Interaction for SurfaceInteraction {
&mut self.common
}
fn as_any(&self) -> &dyn Any {
self
}
fn get_medium(&self, w: Vector3f) -> Option<Arc<Medium>> {
self.medium_interface.as_ref().and_then(|interface| {
self.common.medium_interface.as_ref().and_then(|interface| {
if self.n().dot(w.into()) > 0.0 {
interface.outside.clone()
} else {
@ -159,34 +505,8 @@ impl Interaction for SurfaceInteraction {
})
}
fn spawn_ray(&self, d: Vector3f) -> Ray {
let mut ray = Ray::spawn(&self.pi(), &self.n(), self.time(), d);
ray.medium = self.get_medium(d);
ray
}
fn spawn_ray_to_point(&self, p2: Point3f) -> Ray {
let mut ray = Ray::spawn_to_point(&self.pi(), &self.n(), self.time(), p2);
ray.medium = self.get_medium(ray.d);
ray
}
fn spawn_ray_to_interaction(&self, other: &dyn Interaction) -> Ray {
// Check if the other interaction is a surface to use the robust spawn method
if let Some(si_to) = other.as_any().downcast_ref::<SurfaceInteraction>() {
let mut ray = Ray::spawn_to_interaction(
&self.pi(),
&self.n(),
self.time(),
&si_to.pi(),
&si_to.n(),
);
ray.medium = self.get_medium(ray.d);
ray
} else {
// Fallback for non-surface interactions
self.spawn_ray_to_point(other.p())
}
fn is_surface_interaction(&self) -> bool {
true
}
}
@ -211,7 +531,14 @@ impl SurfaceInteraction {
}
Self {
common: InteractionData { pi, n, time, wo, medium_interface: None, medium: None },
common: InteractionData {
pi,
n,
time,
wo,
medium_interface: None,
medium: None,
},
uv,
dpdu,
dpdv,
@ -224,7 +551,6 @@ impl SurfaceInteraction {
dndu,
dndv,
},
medium_interface: None,
material: None,
face_index: 0,
area_light: None,
@ -234,8 +560,7 @@ impl SurfaceInteraction {
dudy: 0.0,
dvdx: 0.0,
dvdy: 0.0,
shape: None,
bsdf: None,
shape: Arc::new(Shape::default()),
}
}
@ -271,26 +596,91 @@ impl SurfaceInteraction {
}
self.shading.dpdu = dpdus;
self.shading.dpdv = dpdvs;
self.shading.dndu = dndus.into();
self.shading.dndv = dndvs.into();
self.shading.dndu = dndus;
self.shading.dndv = dndvs;
}
pub fn new_simple(pi: Point3fi, n: Normal3f, uv: Point2f) -> Self {
let mut si = Self::default();
si.common = InteractionData {
pi,
n,
time: 0.,
wo: Vector3f::zero(),
medium_interface: None,
medium: None,
};
si.uv = uv;
si
Self {
common: InteractionData {
pi,
n,
time: 0.,
wo: Vector3f::zero(),
medium_interface: None,
medium: None,
},
uv,
..Default::default()
}
}
pub fn new_minimal(pi: Point3fi, uv: Point2f) -> Self {
Self {
common: InteractionData {
pi,
..Default::default()
},
uv,
..Default::default()
}
}
pub fn set_intersection_properties(
&mut self,
mtl: Arc<Material>,
area: Arc<Light>,
prim_medium_interface: Option<MediumInterface>,
ray_medium: Arc<Medium>,
) {
self.material = Some(mtl);
self.area_light = Some(area);
if prim_medium_interface
.as_ref()
.is_some_and(|mi| mi.is_medium_transition())
{
self.common.medium_interface = prim_medium_interface;
} else {
self.common.medium = Some(ray_medium);
}
}
}
impl Interaction for MediumInteraction {
/// A scattering event inside a participating medium (no surface involved).
#[derive(Clone, Debug)]
pub struct MediumInteraction {
    pub common: InteractionData,
    pub medium: Arc<Medium>,
    pub phase: PhaseFunction,
}
impl MediumInteraction {
    /// Interaction at `p` inside `medium` with the given phase function.
    /// `wo` is normalized before being stored.
    pub fn new(
        p: Point3f,
        wo: Vector3f,
        time: Float,
        medium: Arc<Medium>,
        phase: PhaseFunction,
    ) -> Self {
        Self {
            common: InteractionData {
                pi: Point3fi::new_from_point(p),
                n: Normal3f::default(),
                time,
                wo: wo.normalize(),
                medium_interface: None,
                // Also mirrored into the common data so generic medium lookup works.
                medium: Some(medium.clone()),
            },
            medium,
            phase,
        }
    }
}
impl InteractionTrait for MediumInteraction {
fn is_medium_interaction(&self) -> bool {
true
}
fn get_common(&self) -> &InteractionData {
&self.common
}
@ -298,22 +688,4 @@ impl Interaction for MediumInteraction {
fn get_common_mut(&mut self) -> &mut InteractionData {
&mut self.common
}
fn as_any(&self) -> &dyn Any {
self
}
fn get_medium(&self, _w: Vector3f) -> Option<Arc<Medium>> {
Some(self.medium.clone())
}
fn spawn_ray(&self, d: Vector3f) -> Ray {
Ray {
o: self.p(),
d,
time: self.time(),
medium: Some(self.medium.clone()),
differential: None,
}
}
}

View file

@ -1,27 +1,176 @@
#[derive(Clone, Debug)]
pub struct CoatedDiffuseMaterial;
#[derive(Clone, Debug)]
pub struct CoatedConductorMaterial;
#[derive(Clone, Debug)]
pub struct ConductorMaterial;
#[derive(Clone, Debug)]
pub struct DielectricMaterial;
#[derive(Clone, Debug)]
pub struct DiffuseMaterial;
#[derive(Clone, Debug)]
pub struct DiffuseTransmissionMaterial;
#[derive(Clone, Debug)]
pub struct HairMaterial;
#[derive(Clone, Debug)]
pub struct MeasuredMaterial;
#[derive(Clone, Debug)]
pub struct SubsurfaceMaterial;
#[derive(Clone, Debug)]
pub struct ThinDielectricMaterial;
#[derive(Clone, Debug)]
pub struct MixMaterial;
use bumpalo::Bump;
use enum_dispatch::enum_dispatch;
use std::ops::Deref;
use std::sync::Arc;
use crate::core::bssrdf::BSSRDF;
use crate::core::bxdf::{BSDF, DielectricBxDF, DiffuseBxDF};
use crate::core::interaction::InteractionTrait;
use crate::core::interaction::{Interaction, ShadingGeometry, SurfaceInteraction};
use crate::core::pbrt::Float;
use crate::core::texture::{FloatTexture, SpectrumTexture, TextureEvalContext, TextureEvaluator};
use crate::geometry::{Frame, Normal3f, Point2f, Point3f, Vector2f, Vector3f, VectorLike};
use crate::image::{Image, WrapMode, WrapMode2D};
use crate::spectra::{SampledWavelengths, Spectrum, SpectrumTrait};
use crate::utils::hash::hash_float;
use crate::utils::scattering::TrowbridgeReitzDistribution;
/// Everything a material needs to evaluate its textures and build a BSDF at a
/// shading point: the texture context plus the outgoing direction and the
/// shading frame (normal and u-tangent).
#[derive(Clone, Debug)]
pub struct MaterialEvalContext {
    pub texture: TextureEvalContext,
    pub wo: Vector3f,
    pub ns: Normal3f,
    pub dpdus: Vector3f,
}
/// Deref to the embedded texture context so a `&MaterialEvalContext` can be
/// used wherever a `&TextureEvalContext` is expected.
/// NOTE(review): `Deref` on a non-pointer type is normally discouraged; here
/// it is a deliberate context-embedding convention — keep it consistent.
impl Deref for MaterialEvalContext {
    type Target = TextureEvalContext;
    fn deref(&self) -> &Self::Target {
        &self.texture
    }
}
impl From<&SurfaceInteraction> for MaterialEvalContext {
    /// Captures the shading normal/tangent and outgoing direction of a hit.
    fn from(si: &SurfaceInteraction) -> Self {
        Self {
            texture: TextureEvalContext::from(si),
            wo: si.common.wo,
            ns: si.shading.n,
            dpdus: si.shading.dpdu,
        }
    }
}
/// Geometry snapshot used by `normal_map` / `bump_map`: position, UV, the
/// current shading frame, and the screen-space derivatives needed to choose
/// finite-difference step sizes.
#[derive(Clone, Debug, Default)]
pub struct NormalBumpEvalContext {
    p: Point3f,
    uv: Point2f,
    n: Normal3f,
    shading: ShadingGeometry,
    dpdx: Vector3f,
    dpdy: Vector3f,
    // Screen-space (u, v) derivatives; copied from the interaction when built
    // via `From<&SurfaceInteraction>` below.
    dudx: Float,
    dudy: Float,
    dvdx: Float,
    dvdy: Float,
    face_index: usize,
}
impl From<&SurfaceInteraction> for NormalBumpEvalContext {
    fn from(si: &SurfaceInteraction) -> Self {
        Self {
            p: si.p(),
            uv: si.uv,
            n: si.n(),
            shading: si.shading.clone(),
            dudx: si.dudx,
            dudy: si.dudy,
            dvdx: si.dvdx,
            dvdy: si.dvdy,
            dpdx: si.dpdx,
            dpdy: si.dpdy,
            face_index: si.face_index,
        }
    }
}
impl From<&NormalBumpEvalContext> for TextureEvalContext {
    /// Projects the bump context down to a plain texture-evaluation context.
    fn from(ctx: &NormalBumpEvalContext) -> Self {
        Self {
            p: ctx.p,
            uv: ctx.uv,
            n: ctx.n,
            dpdx: ctx.dpdx,
            dpdy: ctx.dpdy,
            dudx: ctx.dudx,
            dudy: ctx.dudy,
            dvdx: ctx.dvdx,
            dvdy: ctx.dvdy,
            face_index: ctx.face_index,
        }
    }
}
/// Applies a tangent-space normal map and returns the perturbed shading
/// tangents `(dpdu, dpdv)`, preserving the original tangent lengths.
pub fn normal_map(normal_map: &Image, ctx: &NormalBumpEvalContext) -> (Vector3f, Vector3f) {
    let wrap = WrapMode2D::from(WrapMode::Repeat);
    // Flip v: image rows run top-to-bottom while uv runs bottom-to-top.
    let uv = Point2f::new(ctx.uv[0], 1. - ctx.uv[1]);
    let r = normal_map.bilerp_channel_with_wrap(uv, 0, wrap);
    let g = normal_map.bilerp_channel_with_wrap(uv, 1, wrap);
    let b = normal_map.bilerp_channel_with_wrap(uv, 2, wrap);
    // Remap [0, 1] texel values to a [-1, 1] tangent-space normal.
    let mut ns = Vector3f::new(2.0 * r - 1.0, 2.0 * g - 1.0, 2.0 * b - 1.0);
    ns = ns.normalize();
    // Transform the mapped normal from tangent space into the shading frame.
    let frame = Frame::from_xz(ctx.shading.dpdu.normalize(), Vector3f::from(ctx.shading.n));
    ns = frame.from_local(ns);
    let ulen = ctx.shading.dpdu.norm();
    let vlen = ctx.shading.dpdv.norm();
    // Rebuild dpdu perpendicular to the mapped normal.
    let dpdu = ctx.shading.dpdu.gram_schmidt(ns).normalize() * ulen;
    // BUG FIX: dpdv must be perpendicular to both ns and the new dpdu.
    // The previous `ctx.shading.dpdu.cross(dpdu)` crossed two nearly parallel
    // vectors, producing a near-zero, numerically unstable direction.
    let dpdv = ns.cross(dpdu).normalize() * vlen;
    (dpdu, dpdv)
}
/// Bump mapping: differentiates the scalar `displacement` texture by forward
/// differences in u and v and offsets the shading tangents accordingly,
/// returning the perturbed `(dpdu, dpdv)`.
pub fn bump_map<T: TextureEvaluator>(
    tex_eval: &T,
    displacement: &FloatTexture,
    ctx: &NormalBumpEvalContext,
) -> (Vector3f, Vector3f) {
    debug_assert!(tex_eval.can_evaluate(&[displacement], &[]));
    // Step sizes from the screen-space footprint, with a small fallback when
    // no differentials are available (e.g. secondary rays).
    let mut du = 0.5 * (ctx.dudx.abs() + ctx.dudy.abs());
    if du == 0.0 {
        du = 0.0005;
    }
    let mut dv = 0.5 * (ctx.dvdx.abs() + ctx.dvdy.abs());
    if dv == 0.0 {
        dv = 0.0005;
    }
    // Evaluate the displacement at points shifted along u and along v.
    let mut shifted_ctx = TextureEvalContext::from(ctx);
    shifted_ctx.p = ctx.p + ctx.shading.dpdu * du;
    shifted_ctx.uv = ctx.uv + Vector2f::new(du, 0.0);
    let u_displace = tex_eval.evaluate_float(displacement, &shifted_ctx);
    shifted_ctx.p = ctx.p + ctx.shading.dpdv * dv;
    shifted_ctx.uv = ctx.uv + Vector2f::new(0.0, dv);
    let v_displace = tex_eval.evaluate_float(displacement, &shifted_ctx);
    let center_ctx = TextureEvalContext::from(ctx);
    let displace = tex_eval.evaluate_float(displacement, &center_ctx);
    // Forward differences of the displacement function.
    let d_displace_du = (u_displace - displace) / du;
    let d_displace_dv = (v_displace - displace) / dv;
    // Offset tangents: dp'/du = dp/du + (dd/du) n + d (dn/du).
    let n_vec = Vector3f::from(ctx.shading.n);
    let dndu_vec = Vector3f::from(ctx.shading.dndu);
    let dndv_vec = Vector3f::from(ctx.shading.dndv);
    let dpdu = ctx.shading.dpdu + n_vec * d_displace_du + dndu_vec * displace;
    let dpdv = ctx.shading.dpdv + n_vec * d_displace_dv + dndv_vec * displace;
    (dpdu, dpdv)
}
/// Interface all materials implement; dispatched statically over the
/// `Material` enum via `enum_dispatch`.
#[enum_dispatch]
pub trait MaterialTrait: Send + Sync + std::fmt::Debug {
    /// Builds the material's BSDF for the shading point, allocating any BxDF
    /// storage from `scratch`.
    fn get_bxdf<'a, T: TextureEvaluator>(
        &self,
        tex_eval: &T,
        ctx: &MaterialEvalContext,
        lambda: &SampledWavelengths,
        scratch: &'a Bump,
    ) -> BSDF<'a>;
    /// Returns the material's BSSRDF, or `None` if it has no subsurface term.
    fn get_bssrdf<'a, T: TextureEvaluator>(
        &self,
        tex_eval: &T,
        ctx: &MaterialEvalContext,
        lambda: &SampledWavelengths,
    ) -> Option<BSSRDF<'a>>;
    /// Whether `tex_eval` can evaluate every texture this material references.
    fn can_evaluate_textures(&self, tex_eval: &dyn TextureEvaluator) -> bool;
    /// Optional tangent-space normal map image.
    fn get_normal_map(&self) -> Option<&Image>;
    /// Optional scalar displacement ("bump") texture.
    fn get_displacement(&self) -> Option<FloatTexture>;
    /// Whether the material scatters at the surface at all.
    fn has_surface_scattering(&self) -> bool;
}
#[derive(Clone, Debug)]
#[enum_dispatch(MaterialTrait)]
pub enum Material {
CoatedDiffuse(CoatedDiffuseMaterial),
CoatedConductor(CoatedConductorMaterial),
@ -36,6 +185,458 @@ pub enum Material {
Mix(MixMaterial),
}
impl Material {}
#[derive(Clone, Debug)]
pub struct CoatedDiffuseMaterial;
impl MaterialTrait for CoatedDiffuseMaterial {
fn get_bxdf<'a, T: TextureEvaluator>(
&self,
_tex_eval: &T,
_ctx: &MaterialEvalContext,
_lambda: &SampledWavelengths,
_scratch: &'a Bump,
) -> BSDF<'a> {
todo!()
}
fn get_bssrdf<'a, T>(
&self,
_tex_eval: &T,
_ctx: &MaterialEvalContext,
_lambda: &SampledWavelengths,
) -> Option<BSSRDF<'a>> {
todo!()
}
pub trait MaterialTrait: Send + Sync + std::fmt::Debug {}
fn can_evaluate_textures(&self, _tex_eval: &dyn TextureEvaluator) -> bool {
todo!()
}
fn get_normal_map(&self) -> Option<&Image> {
todo!()
}
fn get_displacement(&self) -> Option<FloatTexture> {
todo!()
}
fn has_surface_scattering(&self) -> bool {
todo!()
}
}
/// Placeholder: coated-conductor material; every trait method is still `todo!()`.
#[derive(Clone, Debug)]
pub struct CoatedConductorMaterial;
impl MaterialTrait for CoatedConductorMaterial {
    fn get_bxdf<'a, T: TextureEvaluator>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
        _scratch: &'a Bump,
    ) -> BSDF<'a> {
        todo!()
    }
    fn get_bssrdf<'a, T>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
    ) -> Option<BSSRDF<'a>> {
        todo!()
    }
    fn can_evaluate_textures(&self, _tex_eval: &dyn TextureEvaluator) -> bool {
        todo!()
    }
    fn get_normal_map(&self) -> Option<&Image> {
        todo!()
    }
    fn get_displacement(&self) -> Option<FloatTexture> {
        todo!()
    }
    fn has_surface_scattering(&self) -> bool {
        todo!()
    }
}
/// Placeholder: conductor (metal) material; every trait method is still `todo!()`.
#[derive(Clone, Debug)]
pub struct ConductorMaterial;
impl MaterialTrait for ConductorMaterial {
    fn get_bxdf<'a, T: TextureEvaluator>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
        _scratch: &'a Bump,
    ) -> BSDF<'a> {
        todo!()
    }
    fn get_bssrdf<'a, T>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
    ) -> Option<BSSRDF<'a>> {
        todo!()
    }
    fn can_evaluate_textures(&self, _tex_eval: &dyn TextureEvaluator) -> bool {
        todo!()
    }
    fn get_normal_map(&self) -> Option<&Image> {
        todo!()
    }
    fn get_displacement(&self) -> Option<FloatTexture> {
        todo!()
    }
    fn has_surface_scattering(&self) -> bool {
        todo!()
    }
}
/// Smooth or rough dielectric (glass-like) material with a possibly
/// wavelength-dependent index of refraction.
#[derive(Clone, Debug)]
pub struct DielectricMaterial {
    normal_map: Option<Arc<Image>>,
    displacement: FloatTexture,
    u_roughness: FloatTexture,
    v_roughness: FloatTexture,
    remap_roughness: bool,
    eta: Spectrum,
}
impl MaterialTrait for DielectricMaterial {
    fn get_bxdf<'a, T: TextureEvaluator>(
        &self,
        tex_eval: &T,
        ctx: &MaterialEvalContext,
        lambda: &SampledWavelengths,
        scratch: &'a Bump,
    ) -> BSDF<'a> {
        // Sample eta at the hero wavelength; a dispersive (non-constant) eta
        // forces single-wavelength transport for the rest of the path.
        // NOTE(review): `terminate_secondary` is called through `&lambda`,
        // which implies interior mutability in SampledWavelengths — confirm.
        let mut sampled_eta = self.eta.evaluate(lambda[0]);
        if !self.eta.is_constant() {
            lambda.terminate_secondary();
        }
        // eta == 0 would be non-physical; treat it as vacuum.
        if sampled_eta == 0.0 {
            sampled_eta = 1.0;
        }
        let mut u_rough = tex_eval.evaluate_float(&self.u_roughness, ctx);
        let mut v_rough = tex_eval.evaluate_float(&self.v_roughness, ctx);
        // Optionally remap perceptual [0,1] roughness to microfacet alpha.
        if self.remap_roughness {
            u_rough = TrowbridgeReitzDistribution::roughness_to_alpha(u_rough);
            v_rough = TrowbridgeReitzDistribution::roughness_to_alpha(v_rough);
        }
        let distrib = TrowbridgeReitzDistribution::new(u_rough, v_rough);
        let bxdf = scratch.alloc(DielectricBxDF::new(sampled_eta, distrib));
        BSDF::new(ctx.ns, ctx.dpdus, Some(bxdf))
    }
    fn get_bssrdf<'a, T>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
    ) -> Option<BSSRDF<'a>> {
        // Dielectrics carry no subsurface scattering term.
        None
    }
    fn can_evaluate_textures(&self, tex_eval: &dyn TextureEvaluator) -> bool {
        tex_eval.can_evaluate(&[&self.u_roughness, &self.v_roughness], &[])
    }
    fn get_normal_map(&self) -> Option<&Image> {
        self.normal_map.as_deref()
    }
    fn get_displacement(&self) -> Option<FloatTexture> {
        Some(self.displacement.clone())
    }
    fn has_surface_scattering(&self) -> bool {
        false
    }
}
#[derive(Clone, Debug)]
pub struct DiffuseMaterial {
normal_map: Option<Arc<Image>>,
displacement: FloatTexture,
reflectance: SpectrumTexture,
}
impl MaterialTrait for DiffuseMaterial {
    /// Evaluates the reflectance texture and builds a Lambertian BSDF, with
    /// the BxDF allocated from `scratch`.
    fn get_bxdf<'a, T: TextureEvaluator>(
        &self,
        tex_eval: &T,
        ctx: &MaterialEvalContext,
        lambda: &SampledWavelengths,
        scratch: &'a Bump,
    ) -> BSDF<'a> {
        let r = tex_eval.evaluate_spectrum(&self.reflectance, ctx, lambda);
        let bxdf = scratch.alloc(DiffuseBxDF::new(r));
        BSDF::new(ctx.ns, ctx.dpdus, Some(bxdf))
    }
    fn get_bssrdf<'a, T>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
    ) -> Option<BSSRDF<'a>> {
        // Diffuse surfaces have no subsurface scattering. Returning `None`
        // (instead of the previous `todo!()`) keeps
        // `SurfaceInteraction::get_bssrdf` from panicking whenever it resolves
        // to a diffuse material, matching `DielectricMaterial`'s behavior.
        None
    }
    fn can_evaluate_textures(&self, tex_eval: &dyn TextureEvaluator) -> bool {
        tex_eval.can_evaluate(&[], &[&self.reflectance])
    }
    fn get_normal_map(&self) -> Option<&Image> {
        self.normal_map.as_deref()
    }
    fn get_displacement(&self) -> Option<FloatTexture> {
        Some(self.displacement.clone())
    }
    fn has_surface_scattering(&self) -> bool {
        false
    }
}
/// Placeholder: diffuse-transmission material; every trait method is still `todo!()`.
#[derive(Clone, Debug)]
pub struct DiffuseTransmissionMaterial;
impl MaterialTrait for DiffuseTransmissionMaterial {
    fn get_bxdf<'a, T: TextureEvaluator>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
        _scratch: &'a Bump,
    ) -> BSDF<'a> {
        todo!()
    }
    fn get_bssrdf<'a, T>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
    ) -> Option<BSSRDF<'a>> {
        todo!()
    }
    fn can_evaluate_textures(&self, _tex_eval: &dyn TextureEvaluator) -> bool {
        todo!()
    }
    fn get_normal_map(&self) -> Option<&Image> {
        todo!()
    }
    fn get_displacement(&self) -> Option<FloatTexture> {
        todo!()
    }
    fn has_surface_scattering(&self) -> bool {
        todo!()
    }
}
/// Placeholder: hair material; every trait method is still `todo!()`.
#[derive(Clone, Debug)]
pub struct HairMaterial;
impl MaterialTrait for HairMaterial {
    fn get_bxdf<'a, T: TextureEvaluator>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
        _scratch: &'a Bump,
    ) -> BSDF<'a> {
        todo!()
    }
    fn get_bssrdf<'a, T>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
    ) -> Option<BSSRDF<'a>> {
        todo!()
    }
    fn can_evaluate_textures(&self, _tex_eval: &dyn TextureEvaluator) -> bool {
        todo!()
    }
    fn get_normal_map(&self) -> Option<&Image> {
        todo!()
    }
    fn get_displacement(&self) -> Option<FloatTexture> {
        todo!()
    }
    fn has_surface_scattering(&self) -> bool {
        todo!()
    }
}
/// Placeholder: measured (data-driven BRDF) material; every trait method is still `todo!()`.
#[derive(Clone, Debug)]
pub struct MeasuredMaterial;
impl MaterialTrait for MeasuredMaterial {
    fn get_bxdf<'a, T: TextureEvaluator>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
        _scratch: &'a Bump,
    ) -> BSDF<'a> {
        todo!()
    }
    fn get_bssrdf<'a, T>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
    ) -> Option<BSSRDF<'a>> {
        todo!()
    }
    fn can_evaluate_textures(&self, _tex_eval: &dyn TextureEvaluator) -> bool {
        todo!()
    }
    fn get_normal_map(&self) -> Option<&Image> {
        todo!()
    }
    fn get_displacement(&self) -> Option<FloatTexture> {
        todo!()
    }
    fn has_surface_scattering(&self) -> bool {
        todo!()
    }
}
/// Placeholder: subsurface-scattering material; every trait method is still `todo!()`.
#[derive(Clone, Debug)]
pub struct SubsurfaceMaterial;
impl MaterialTrait for SubsurfaceMaterial {
    fn get_bxdf<'a, T: TextureEvaluator>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
        _scratch: &'a Bump,
    ) -> BSDF<'a> {
        todo!()
    }
    fn get_bssrdf<'a, T>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
    ) -> Option<BSSRDF<'a>> {
        todo!()
    }
    fn can_evaluate_textures(&self, _tex_eval: &dyn TextureEvaluator) -> bool {
        todo!()
    }
    fn get_normal_map(&self) -> Option<&Image> {
        todo!()
    }
    fn get_displacement(&self) -> Option<FloatTexture> {
        todo!()
    }
    fn has_surface_scattering(&self) -> bool {
        todo!()
    }
}
/// Placeholder: thin-dielectric material; every trait method is still `todo!()`.
#[derive(Clone, Debug)]
pub struct ThinDielectricMaterial;
impl MaterialTrait for ThinDielectricMaterial {
    fn get_bxdf<'a, T: TextureEvaluator>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
        _scratch: &'a Bump,
    ) -> BSDF<'a> {
        todo!()
    }
    fn get_bssrdf<'a, T>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
    ) -> Option<BSSRDF<'a>> {
        todo!()
    }
    fn can_evaluate_textures(&self, _tex_eval: &dyn TextureEvaluator) -> bool {
        todo!()
    }
    fn get_normal_map(&self) -> Option<&Image> {
        todo!()
    }
    fn get_displacement(&self) -> Option<FloatTexture> {
        todo!()
    }
    fn has_surface_scattering(&self) -> bool {
        todo!()
    }
}
/// Stochastic blend of two materials, driven by the scalar `amount` texture.
#[derive(Clone, Debug)]
pub struct MixMaterial {
    pub amount: FloatTexture,
    pub materials: [Box<Material>; 2],
}
impl MixMaterial {
    /// Picks one of the two materials for this shading point.
    ///
    /// `amount <= 0` always selects `materials[0]` and `amount >= 1` always
    /// selects `materials[1]`; in between, a deterministic hash of the shading
    /// position and outgoing direction stands in for a random sample, so the
    /// same point always resolves to the same material (no cross-frame noise).
    pub fn choose_material<T: TextureEvaluator>(
        &self,
        tex_eval: &T,
        ctx: &MaterialEvalContext,
    ) -> &Material {
        let amt = tex_eval.evaluate_float(&self.amount, ctx);
        if amt <= 0.0 {
            return &self.materials[0];
        }
        if amt >= 1.0 {
            return &self.materials[1];
        }
        let u = hash_float(&(ctx.p, ctx.wo));
        if amt < u {
            &self.materials[0]
        } else {
            &self.materials[1]
        }
    }
}
impl MaterialTrait for MixMaterial {
    /// Resolves the mix to one constituent and delegates BSDF construction.
    fn get_bxdf<'a, T: TextureEvaluator>(
        &self,
        tex_eval: &T,
        ctx: &MaterialEvalContext,
        lambda: &SampledWavelengths,
        scratch: &'a Bump,
    ) -> BSDF<'a> {
        let chosen_mat = self.choose_material(tex_eval, ctx);
        chosen_mat.get_bxdf(tex_eval, ctx, lambda, scratch)
    }
    fn get_bssrdf<'a, T>(
        &self,
        _tex_eval: &T,
        _ctx: &MaterialEvalContext,
        _lambda: &SampledWavelengths,
    ) -> Option<BSSRDF<'a>> {
        None
    }
    fn can_evaluate_textures(&self, tex_eval: &dyn TextureEvaluator) -> bool {
        // Only the 'amount' texture is evaluated before the mix is resolved;
        // the constituents are checked through their own materials.
        tex_eval.can_evaluate(&[&self.amount], &[])
    }
    fn get_normal_map(&self) -> Option<&Image> {
        None
    }
    fn get_displacement(&self) -> Option<FloatTexture> {
        // Displacement is not supported on a Mix directly: callers resolve the
        // mix via `choose_material` first and then query the chosen material.
        None
    }
    fn has_surface_scattering(&self) -> bool {
        false
    }
}

View file

@ -1,17 +1,404 @@
use bumpalo::Bump;
use enum_dispatch::enum_dispatch;
use std::sync::Arc;
use crate::core::pbrt::{Float, INV_4_PI, PI, clamp_t};
use crate::geometry::{
Bounds3f, Frame, Point2f, Point3f, Point3i, Ray, Vector3f, VectorLike, spherical_direction,
};
use crate::spectra::{
BlackbodySpectrum, DenselySampledSpectrum, LAMBDA_MAX, LAMBDA_MIN, RGBIlluminantSpectrum,
RGBUnboundedSpectrum, SampledSpectrum, SampledWavelengths, Spectrum, SpectrumTrait,
};
use crate::utils::containers::SampledGrid;
use crate::utils::math::square;
use crate::utils::rng::Rng;
use crate::utils::transform::Transform;
/// Result of sampling a phase function: the function value `p`, the sampled
/// incident direction `wi`, and the PDF of having sampled that direction.
#[derive(Debug, Clone, Copy)]
pub struct PhaseFunctionSample {
    pub p: Float,
    pub wi: Vector3f,
    pub pdf: Float,
}
impl PhaseFunctionSample {
    /// Bundles a phase-function value, direction, and PDF.
    pub fn new(p: Float, wi: Vector3f, pdf: Float) -> Self {
        PhaseFunctionSample { p, wi, pdf }
    }
}
/// Interface for volumetric phase functions.
#[enum_dispatch]
pub trait PhaseFunctionTrait {
    /// Value of the phase function for directions `wo`/`wi`.
    fn p(&self, wo: Vector3f, wi: Vector3f) -> Float;
    /// Samples an incident direction given outgoing `wo` and 2D uniform `u`.
    fn sample_p(&self, wo: Vector3f, u: Point2f) -> Option<PhaseFunctionSample>;
    /// PDF that `sample_p` would return `wi` for the given `wo`.
    fn pdf(&self, wo: Vector3f, wi: Vector3f) -> Float;
}
#[enum_dispatch(PhaseFunctionTrait)]
#[derive(Debug, Clone)]
pub struct HomogeneousMedium;
#[derive(Debug, Clone)]
pub struct GridMedium;
#[derive(Debug, Clone)]
pub struct RGBGridMedium;
#[derive(Debug, Clone)]
pub struct CloudMedium;
#[derive(Debug, Clone)]
pub struct NanoVDBMedium;
// Closed set of phase-function implementations.
// NOTE(review): `HGPhaseFunction` values are converted with `.into()` below
// (e.g. `self.phase.into()`), which relies on enum_dispatch-generated From
// impls — but no `#[enum_dispatch(PhaseFunctionTrait)]` attribute is attached
// to this enum here (one appears a few lines up, detached, on a struct).
// Confirm the attribute belongs on this enum.
pub enum PhaseFunction {
    HenyeyGreenstein(HGPhaseFunction),
}
/// Henyey-Greenstein phase function.
#[derive(Debug, Clone, Copy)]
pub struct HGPhaseFunction {
    // Asymmetry parameter: negative = back-scattering, positive = forward.
    g: Float,
}
impl HGPhaseFunction {
    /// Creates a phase function with asymmetry parameter `g`.
    pub fn new(g: Float) -> Self {
        HGPhaseFunction { g }
    }
    /// Evaluates HG for the given cosine of the scattering angle:
    /// (1 - g^2) / (4π (1 + g^2 + 2 g cosθ)^{3/2}).
    fn phase_hg(&self, cos_theta: Float) -> Float {
        let g2 = square(self.g);
        let denom = 1.0 + g2 + 2.0 * self.g * cos_theta;
        INV_4_PI * (1.0 - g2) / (denom * denom.sqrt())
    }
}
impl PhaseFunctionTrait for HGPhaseFunction {
    /// Evaluates the phase function; the negated dot product converts between
    /// the "both directions point away" convention and HG's scattering angle.
    fn p(&self, wo: Vector3f, wi: Vector3f) -> Float {
        self.phase_hg(-(wo.dot(wi)))
    }
    /// Importance-samples the HG distribution exactly, so the returned PDF
    /// equals the phase-function value.
    fn sample_p(&self, wo: Vector3f, u: Point2f) -> Option<PhaseFunctionSample> {
        // Near-isotropic case: invert the |g|->0 limit directly to avoid the
        // 1/(2g) division blowing up.
        let cos_theta = if self.g.abs() < 1e-3 {
            1.0 - 2.0 * u[0]
        } else {
            let square_term = (1.0 - square(self.g)) / (1.0 + self.g * (1.0 - 2.0 * u[0]));
            -(1.0 + square(self.g) - square(square_term)) / (2.0 * self.g)
        };
        // max(0) guards against cos_theta slightly outside [-1, 1] from
        // floating-point error.
        let sin_theta = (1.0 - square(cos_theta)).max(0.0).sqrt();
        let phi = 2.0 * PI * u[1];
        // Build the sampled direction in a frame whose z axis is wo.
        let w_frame = Frame::from_z(wo);
        let wi = w_frame.from_local(spherical_direction(sin_theta, cos_theta, phi));
        let p_val = self.phase_hg(cos_theta);
        Some(PhaseFunctionSample::new(p_val, wi, p_val))
    }
    /// PDF equals the function value because sampling is exact.
    fn pdf(&self, wo: Vector3f, wi: Vector3f) -> Float {
        self.p(wo, wi)
    }
}
/// Low-resolution grid of per-voxel majorant (maximum) densities used to
/// bound a medium's extinction along a ray.
#[derive(Debug, Clone)]
pub struct MajorantGrid {
    /// Medium-space bounds the grid covers.
    pub bounds: Bounds3f,
    /// Voxel resolution along each axis.
    pub res: Point3i,
    /// Per-voxel majorants, x-major layout (x varies fastest).
    pub voxels: Vec<Float>,
}
impl MajorantGrid {
    /// Creates a grid of `res.x * res.y * res.z` voxels, all initialized to 0.
    ///
    /// BUG FIX: this previously used `Vec::with_capacity`, which reserves
    /// space but leaves `len() == 0`; every later `set`/`lookup` then failed
    /// its `idx < len` check, so the grid silently stayed empty. The vector
    /// must actually be filled.
    pub fn new(bounds: Bounds3f, res: Point3i) -> Self {
        let n_voxels = (res.x() * res.y() * res.z()) as usize;
        Self {
            bounds,
            res,
            voxels: vec![0.0; n_voxels],
        }
    }
    /// Returns the majorant stored for voxel (x, y, z), or 0 when the index
    /// is out of range.
    pub fn lookup(&self, x: i32, y: i32, z: i32) -> Float {
        let idx = z * self.res.x() * self.res.y() + y * self.res.x() + x;
        if idx >= 0 && (idx as usize) < self.voxels.len() {
            self.voxels[idx as usize]
        } else {
            0.0
        }
    }
    /// Stores `v` for voxel (x, y, z); out-of-range indices are ignored.
    pub fn set(&mut self, x: i32, y: i32, z: i32, v: Float) {
        let idx = z * self.res.x() * self.res.y() + y * self.res.x() + x;
        if idx >= 0 && (idx as usize) < self.voxels.len() {
            self.voxels[idx as usize] = v;
        }
    }
    /// Bounds of voxel (x, y, z) in normalized [0,1]^3 grid coordinates.
    pub fn voxel_bounds(&self, x: i32, y: i32, z: i32) -> Bounds3f {
        let p0 = Point3f::new(
            x as Float / self.res.x() as Float,
            y as Float / self.res.y() as Float,
            z as Float / self.res.z() as Float,
        );
        let p1 = Point3f::new(
            (x + 1) as Float / self.res.x() as Float,
            (y + 1) as Float / self.res.y() as Float,
            (z + 1) as Float / self.res.z() as Float,
        );
        Bounds3f::from_points(p0, p1)
    }
}
/// One parametric span [t_min, t_max] of a ray over which a single constant
/// majorant extinction applies.
#[derive(Clone, Copy, Debug)]
pub struct RayMajorantSegment {
    t_min: Float,
    t_max: Float,
    // Majorant extinction coefficient valid over the whole span.
    sigma_maj: SampledSpectrum,
}
/// Concrete majorant-segment iterators returned by `MediumTrait::sample_ray`.
/// `Void` represents a ray that missed the medium entirely (yields nothing).
pub enum RayMajorantIterator {
    Homogeneous(HomogeneousMajorantIterator),
    DDA(DDAMajorantIterator),
    // Grid(GridMajorantIterator<'a>),
    Void,
}
impl Iterator for RayMajorantIterator {
type Item = RayMajorantSegment;
fn next(&mut self) -> Option<Self::Item> {
match self {
RayMajorantIterator::Homogeneous(iter) => iter.next(),
RayMajorantIterator::DDA(iter) => iter.next(),
RayMajorantIterator::Void => None,
}
}
}
/// Majorant iterator for a homogeneous medium: a single segment covering the
/// entire ray, yielded exactly once.
pub struct HomogeneousMajorantIterator {
    // Set after the single segment has been returned.
    called: bool,
    seg: RayMajorantSegment,
}
impl HomogeneousMajorantIterator {
    /// Builds a one-shot iterator covering [t_min, t_max] with a constant
    /// majorant.
    pub fn new(t_min: Float, t_max: Float, sigma_maj: SampledSpectrum) -> Self {
        let seg = RayMajorantSegment {
            t_min,
            t_max,
            sigma_maj,
        };
        Self { called: false, seg }
    }
}
impl Iterator for HomogeneousMajorantIterator {
    type Item = RayMajorantSegment;
    /// Yields the stored segment exactly once, then `None` forever.
    fn next(&mut self) -> Option<Self::Item> {
        if self.called {
            None
        } else {
            self.called = true;
            Some(self.seg)
        }
    }
}
/// 3D digital-differential-analyzer walk over a `MajorantGrid`: yields one
/// majorant segment per voxel the ray crosses between `t_min` and `t_max`.
pub struct DDAMajorantIterator {
    // Base extinction; per-voxel majorant = sigma_t * stored density scale.
    sigma_t: SampledSpectrum,
    // Parametric start of the segment currently being emitted (advances).
    t_min: Float,
    t_max: Float,
    // Owned copy of the grid. NOTE(review): cloning the whole grid per ray is
    // costly; a borrow would likely do — confirm lifetime constraints.
    grid: MajorantGrid,
    // Parametric t of the next voxel-boundary crossing, per axis.
    next_crossing_t: [Float; 3],
    // Parametric distance between successive crossings, per axis.
    delta_t: [Float; 3],
    // Voxel index increment (+1 or -1) per axis.
    step: [i32; 3],
    // Voxel index at which the walk exits the grid, per axis.
    voxel_limit: [i32; 3],
    // Current voxel coordinates.
    voxel: [i32; 3],
}
impl DDAMajorantIterator {
    /// Sets up the DDA state for walking `grid` along `ray` over
    /// [t_min, t_max]. Positions are mapped into the grid's normalized
    /// [0,1]^3 coordinates via `bounds.offset`; the ray is assumed to already
    /// be in the medium's space.
    pub fn new(
        ray: &Ray,
        t_min: Float,
        t_max: Float,
        grid: &MajorantGrid,
        sigma_t: &SampledSpectrum,
    ) -> Self {
        let mut iter = Self {
            t_min,
            t_max,
            sigma_t: *sigma_t,
            grid: grid.clone(),
            next_crossing_t: [0.0; 3],
            delta_t: [0.0; 3],
            step: [0; 3],
            voxel_limit: [0; 3],
            voxel: [0; 3],
        };
        // Rescale the direction so one unit of grid space spans the bounds.
        let diag = grid.bounds.diagonal();
        let mut ray_grid_d = Vector3f::new(
            ray.d.x() / diag.x(),
            ray.d.y() / diag.y(),
            ray.d.z() / diag.z(),
        );
        // Entry point in normalized grid coordinates.
        let p_grid_start = grid.bounds.offset(&ray.at(t_min));
        let grid_intersect = Vector3f::from(p_grid_start);
        for axis in 0..3 {
            // Starting voxel on this axis, clamped into the grid.
            iter.voxel[axis] = clamp_t(
                (grid_intersect[axis] * grid.res[axis] as Float) as i32,
                0,
                grid.res[axis] - 1,
            );
            // Distance in t between boundary crossings; +inf when the ray is
            // parallel to this axis, which the min-selection handles benignly.
            iter.delta_t[axis] = 1.0 / (ray_grid_d[axis].abs() * grid.res[axis] as Float);
            // Normalize -0.0 so the sign test takes the non-negative branch.
            if ray_grid_d[axis] == -0.0 {
                ray_grid_d[axis] = 0.0;
            }
            if ray_grid_d[axis] >= 0.0 {
                // Marching toward +axis: next boundary is the voxel's far face.
                let next_voxel_pos = (iter.voxel[axis] + 1) as Float / grid.res[axis] as Float;
                iter.next_crossing_t[axis] =
                    t_min + (next_voxel_pos - grid_intersect[axis]) / ray_grid_d[axis];
                iter.step[axis] = 1;
                iter.voxel_limit[axis] = grid.res[axis];
            } else {
                // Marching toward -axis: next boundary is the voxel's near face.
                let next_voxel_pos = (iter.voxel[axis]) as Float / grid.res[axis] as Float;
                iter.next_crossing_t[axis] =
                    t_min + (next_voxel_pos - grid_intersect[axis]) / ray_grid_d[axis];
                iter.step[axis] = -1;
                iter.voxel_limit[axis] = -1;
            }
        }
        iter
    }
}
impl Iterator for DDAMajorantIterator {
    type Item = RayMajorantSegment;
    /// Emits the majorant segment for the current voxel, then advances the
    /// DDA state to the next voxel the ray enters.
    fn next(&mut self) -> Option<Self::Item> {
        if self.t_min >= self.t_max {
            return None;
        }
        // Find stepAxis for stepping to next voxel and exit point tVoxelExit
        // (branch-free selection of the axis with the smallest next-crossing
        // t; lookup table as in pbrt).
        let d0 = self.next_crossing_t[0];
        let d1 = self.next_crossing_t[1];
        let d2 = self.next_crossing_t[2];
        let bits = ((d0 < d1) as usize) << 2 | ((d0 < d2) as usize) << 1 | ((d1 < d2) as usize);
        const CMP_TO_AXIS: [usize; 8] = [2, 1, 2, 1, 2, 2, 0, 0];
        let step_axis = CMP_TO_AXIS[bits];
        let t_voxel_exit = self.t_max.min(self.next_crossing_t[step_axis]);
        // Get maxDensity for current voxel and initialize RayMajorantSegment, seg
        let density_scale = self
            .grid
            .lookup(self.voxel[0], self.voxel[1], self.voxel[2]);
        let sigma_maj = self.sigma_t * density_scale;
        let seg = RayMajorantSegment {
            t_min: self.t_min,
            t_max: t_voxel_exit,
            sigma_maj,
        };
        // Advance to next voxel in maximum density grid
        self.t_min = t_voxel_exit;
        // Setting t_min = t_max makes the following call return None.
        if self.next_crossing_t[step_axis] > self.t_max {
            self.t_min = self.t_max;
        }
        self.voxel[step_axis] += self.step[step_axis];
        // Reached the boundary voxel on this axis: the walk is finished.
        if self.voxel[step_axis] == self.voxel_limit[step_axis] {
            self.t_min = self.t_max;
        }
        // Increment the crossing time for this axis
        self.next_crossing_t[step_axis] += self.delta_t[step_axis];
        Some(seg)
    }
}
/// Local scattering coefficients of a medium at one point, as returned by
/// `MediumTrait::sample_point`.
pub struct MediumProperties {
    pub sigma_a: SampledSpectrum,
    pub sigma_s: SampledSpectrum,
    pub phase: PhaseFunction,
    pub le: SampledSpectrum,
}
impl MediumProperties {
    /// Extinction (attenuation) coefficient σ_t = σ_a + σ_s.
    ///
    /// BUG FIX: this previously *multiplied* the two coefficients; extinction
    /// is the sum of absorption and out-scattering, not their product.
    pub fn sigma_t(&self) -> SampledSpectrum {
        self.sigma_a + self.sigma_s
    }
}
#[enum_dispatch]
pub trait MediumTrait: Send + Sync + std::fmt::Debug {
fn is_emissive(&self) -> bool;
fn sample_point(&self, p: Point3f, lambda: &SampledWavelengths) -> MediumProperties;
fn sample_ray(
&self,
ray: &Ray,
t_max: Float,
lambda: &SampledWavelengths,
buf: &Bump,
) -> RayMajorantIterator;
fn sample_t_maj<F>(
&self,
mut ray: Ray,
mut t_max: Float,
u: Float,
rng: &mut Rng,
lambda: &SampledWavelengths,
mut callback: F,
) -> SampledSpectrum
where
F: FnMut(Point3f, MediumProperties, SampledSpectrum, SampledSpectrum, &mut Rng) -> bool,
{
let len = ray.d.norm();
t_max *= len;
ray.d /= len;
let buf = Bump::new();
let mut iter = self.sample_ray(&ray, t_max, lambda, &buf);
let mut t_maj = SampledSpectrum::new(1.0);
while let Some(seg) = iter.next() {
if seg.sigma_maj[0] == 0. {
let dt = seg.t_max - seg.t_min;
let dt = if dt.is_infinite() { Float::MAX } else { dt };
t_maj *= (-dt * seg.sigma_maj).exp();
continue;
}
let mut t_min = seg.t_min;
loop {
let dist = -(1. - u.ln()) / seg.sigma_maj[0];
let t = t_min + dist;
if t < seg.t_max {
t_maj *= (-(t - t_min) * seg.sigma_maj).exp();
let p = ray.at(t);
let mp = self.sample_point(p, lambda);
if !callback(p, mp, seg.sigma_maj, t_maj, rng) {
return SampledSpectrum::new(1.);
}
t_maj = SampledSpectrum::new(1.);
t_min = t;
} else {
let dt = seg.t_max - t_min;
let dt = if dt.is_infinite() { Float::MAX } else { dt };
t_maj *= (-dt * seg.sigma_maj).exp();
break;
}
}
}
SampledSpectrum::new(1.)
}
}
#[derive(Debug, Clone)]
#[enum_dispatch(MediumTrait)]
pub enum Medium {
Homogeneous(HomogeneousMedium),
Grid(GridMedium),
@ -20,6 +407,391 @@ pub enum Medium {
NanoVDB(NanoVDBMedium),
}
/// Medium with spatially constant absorption, scattering, and emission.
#[derive(Debug, Clone)]
pub struct HomogeneousMedium {
    // Scale factors are baked into these spectra at construction time.
    sigma_a_spec: DenselySampledSpectrum,
    sigma_s_spec: DenselySampledSpectrum,
    le_spec: DenselySampledSpectrum,
    phase: HGPhaseFunction,
}
impl HomogeneousMedium {
    /// Builds a homogeneous medium. The cross-section spectra are densely
    /// resampled over the full wavelength range and the user-supplied scale
    /// factors are baked directly into the tables.
    pub fn new(
        sigma_a: Spectrum,
        sigma_s: Spectrum,
        sigma_scale: Float,
        le: Spectrum,
        le_scale: Float,
        g: Float,
    ) -> Self {
        let mut absorption =
            DenselySampledSpectrum::from_spectrum(&sigma_a, LAMBDA_MIN, LAMBDA_MAX);
        absorption.scale(sigma_scale);
        let mut scattering =
            DenselySampledSpectrum::from_spectrum(&sigma_s, LAMBDA_MIN, LAMBDA_MAX);
        scattering.scale(sigma_scale);
        let mut emission = DenselySampledSpectrum::from_spectrum(&le, LAMBDA_MIN, LAMBDA_MAX);
        emission.scale(le_scale);
        Self {
            sigma_a_spec: absorption,
            sigma_s_spec: scattering,
            le_spec: emission,
            phase: HGPhaseFunction::new(g),
        }
    }
}
impl MediumTrait for HomogeneousMedium {
    /// Emissive iff the emission spectrum is positive anywhere.
    fn is_emissive(&self) -> bool {
        self.le_spec.max_value() > 0.
    }
    /// Coefficients are position-independent, so the point is ignored.
    fn sample_point(&self, _p: Point3f, lambda: &SampledWavelengths) -> MediumProperties {
        MediumProperties {
            sigma_a: self.sigma_a_spec.sample(lambda),
            sigma_s: self.sigma_s_spec.sample(lambda),
            phase: self.phase.into(),
            le: self.le_spec.sample(lambda),
        }
    }
    /// A single segment with the constant majorant σ_a + σ_s covers the
    /// entire ray.
    fn sample_ray(
        &self,
        _ray: &Ray,
        t_max: Float,
        lambda: &SampledWavelengths,
        _scratch: &Bump,
    ) -> RayMajorantIterator {
        let sigma_maj = self.sigma_a_spec.sample(lambda) + self.sigma_s_spec.sample(lambda);
        RayMajorantIterator::Homogeneous(HomogeneousMajorantIterator::new(0.0, t_max, sigma_maj))
    }
}
/// Heterogeneous medium driven by a scalar density grid, with optional
/// blackbody emission from a temperature grid.
#[derive(Debug, Clone)]
pub struct GridMedium {
    // Medium-space bounds of the grids.
    bounds: Bounds3f,
    render_from_medium: Transform<Float>,
    // Base cross-sections; multiplied by the local density at lookup time.
    sigma_a_spec: DenselySampledSpectrum,
    sigma_s_spec: DenselySampledSpectrum,
    density_grid: SampledGrid<Float>,
    phase: HGPhaseFunction,
    // When present, emission is blackbody radiation at the local temperature.
    temperature_grid: Option<SampledGrid<Float>>,
    le_spec: DenselySampledSpectrum,
    // Spatial scale applied to whichever emission source is used.
    le_scale: SampledGrid<Float>,
    // Precomputed in `new`.
    is_emissive: bool,
    // 16^3 grid of per-cell maximum densities for delta tracking.
    majorant_grid: MajorantGrid,
}
impl GridMedium {
    /// Builds a grid medium and precomputes its 16^3 majorant grid from the
    /// per-voxel maximum of the density grid.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        bounds: &Bounds3f,
        render_from_medium: &Transform<Float>,
        sigma_a: &Spectrum,
        sigma_s: &Spectrum,
        sigma_scale: Float,
        g: Float,
        density_grid: SampledGrid<Float>,
        temperature_grid: Option<SampledGrid<Float>>,
        le: &Spectrum,
        le_scale: SampledGrid<Float>,
    ) -> Self {
        // Densely resample cross-sections and bake the scale in up front.
        let mut sigma_a_spec =
            DenselySampledSpectrum::from_spectrum(sigma_a, LAMBDA_MIN, LAMBDA_MAX);
        let mut sigma_s_spec =
            DenselySampledSpectrum::from_spectrum(sigma_s, LAMBDA_MIN, LAMBDA_MAX);
        let le_spec = DenselySampledSpectrum::from_spectrum(le, LAMBDA_MIN, LAMBDA_MAX);
        sigma_a_spec.scale(sigma_scale);
        sigma_s_spec.scale(sigma_scale);
        let mut majorant_grid = MajorantGrid::new(*bounds, Point3i::new(16, 16, 16));
        // Emissive if a temperature grid drives blackbody emission, or the
        // fixed emission spectrum is positive anywhere.
        let is_emissive = if temperature_grid.is_some() {
            true
        } else {
            le_spec.max_value() > 0.
        };
        // Fill each majorant cell with the maximum density over that cell
        // (shadowed `bounds` here is the voxel's normalized bounds).
        for z in 0..majorant_grid.res.z() {
            for y in 0..majorant_grid.res.y() {
                for x in 0..majorant_grid.res.x() {
                    let bounds = majorant_grid.voxel_bounds(x, y, z);
                    majorant_grid.set(x, y, z, density_grid.max_value(bounds));
                }
            }
        }
        Self {
            bounds: *bounds,
            render_from_medium: *render_from_medium,
            sigma_a_spec,
            sigma_s_spec,
            density_grid,
            phase: HGPhaseFunction::new(g),
            temperature_grid,
            le_spec,
            le_scale,
            is_emissive,
            majorant_grid,
        }
    }
}
impl MediumTrait for GridMedium {
    /// Precomputed in `new`.
    fn is_emissive(&self) -> bool {
        self.is_emissive
    }
    /// Looks up density-scaled coefficients at render-space point `p`.
    fn sample_point(&self, p: Point3f, lambda: &SampledWavelengths) -> MediumProperties {
        let mut sigma_a = self.sigma_a_spec.sample(lambda);
        let mut sigma_s = self.sigma_s_spec.sample(lambda);
        // Map render space -> medium space -> normalized [0,1]^3 grid coords.
        let p_transform = self.render_from_medium.apply_inverse(p);
        let p = Point3f::from(self.bounds.offset(&p_transform));
        let d = self.density_grid.lookup(p);
        sigma_a *= d;
        sigma_s *= d;
        // Emission is gated by the spatial le_scale grid; source is either
        // blackbody radiation at the local temperature or the fixed spectrum.
        let scale = if self.is_emissive {
            self.le_scale.lookup(p)
        } else {
            0.0
        };
        let le = if scale > 0.0 {
            let raw_emission = match &self.temperature_grid {
                Some(grid) => {
                    let temp = grid.lookup(p);
                    BlackbodySpectrum::new(temp).sample(lambda)
                }
                None => self.le_spec.sample(lambda),
            };
            raw_emission * scale
        } else {
            SampledSpectrum::new(0.0)
        };
        MediumProperties {
            sigma_a,
            sigma_s,
            phase: self.phase.into(),
            le,
        }
    }
    /// Clips the ray against the medium bounds (in medium space) and returns
    /// a DDA walk over the precomputed majorant grid.
    fn sample_ray(
        &self,
        ray: &Ray,
        t_max: Float,
        lambda: &SampledWavelengths,
        _buf: &Bump,
    ) -> RayMajorantIterator {
        let (local_ray, local_t_max) = self.render_from_medium.apply_inverse_ray(ray, Some(t_max));
        let inv_dir = Vector3f::new(
            1.0 / local_ray.d.x(),
            1.0 / local_ray.d.y(),
            1.0 / local_ray.d.z(),
        );
        let dir_is_neg = [
            if local_ray.d.x() < 0.0 { 1 } else { 0 },
            if local_ray.d.y() < 0.0 { 1 } else { 0 },
            if local_ray.d.z() < 0.0 { 1 } else { 0 },
        ];
        let (t_min, t_max) =
            match self
                .bounds
                .intersect_p(local_ray.o, local_t_max, inv_dir, &dir_is_neg)
            {
                Some((t0, t1)) => (t0, t1),
                None => return RayMajorantIterator::Void, // Missed the medium bounds
            };
        // Per-voxel majorant = (grid max density) * base sigma_t.
        let sigma_t = self.sigma_a_spec.sample(lambda) + self.sigma_s_spec.sample(lambda);
        let iter =
            DDAMajorantIterator::new(&local_ray, t_min, t_max, &self.majorant_grid, &sigma_t);
        RayMajorantIterator::DDA(iter)
    }
}
/// Heterogeneous medium whose absorption/scattering/emission are stored as
/// grids of RGB spectra (any grid may be absent).
#[derive(Debug, Clone)]
pub struct RGBGridMedium {
    bounds: Bounds3f,
    render_from_medium: Transform<Float>,
    le_grid: Option<SampledGrid<RGBIlluminantSpectrum>>,
    le_scale: Float,
    phase: HGPhaseFunction,
    sigma_a_grid: Option<SampledGrid<RGBUnboundedSpectrum>>,
    sigma_s_grid: Option<SampledGrid<RGBUnboundedSpectrum>>,
    sigma_scale: Float,
    // Per-cell majorants already include sigma_scale (see `new`).
    majorant_grid: MajorantGrid,
}
impl RGBGridMedium {
    /// Builds an RGB grid medium and precomputes its 16^3 majorant grid.
    ///
    /// BUG FIX: the outer loop previously iterated `0..res.x()` for the z
    /// axis; with a non-cubic majorant resolution part of the grid would stay
    /// uninitialized (or be indexed out of range).
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        bounds: &Bounds3f,
        render_from_medium: &Transform<Float>,
        g: Float,
        sigma_a_grid: Option<SampledGrid<RGBUnboundedSpectrum>>,
        sigma_s_grid: Option<SampledGrid<RGBUnboundedSpectrum>>,
        sigma_scale: Float,
        le_grid: Option<SampledGrid<RGBIlluminantSpectrum>>,
        le_scale: Float,
    ) -> Self {
        let mut majorant_grid = MajorantGrid::new(*bounds, Point3i::new(16, 16, 16));
        for z in 0..majorant_grid.res.z() {
            for y in 0..majorant_grid.res.y() {
                for x in 0..majorant_grid.res.x() {
                    let voxel_bounds = majorant_grid.voxel_bounds(x, y, z);
                    let convert = |s: &RGBUnboundedSpectrum| s.max_value();
                    // Missing grids conservatively contribute 1.0 each.
                    let max_sigma_t = sigma_a_grid
                        .as_ref()
                        .map_or(1.0, |g| g.max_value_convert(voxel_bounds, convert))
                        + sigma_s_grid
                            .as_ref()
                            .map_or(1.0, |g| g.max_value_convert(voxel_bounds, convert));
                    // sigma_scale is baked into the majorants here, so
                    // sample_ray uses a unit base sigma_t.
                    majorant_grid.set(x, y, z, sigma_scale * max_sigma_t);
                }
            }
        }
        Self {
            bounds: *bounds,
            render_from_medium: *render_from_medium,
            le_grid,
            le_scale,
            phase: HGPhaseFunction::new(g),
            sigma_a_grid,
            sigma_s_grid,
            sigma_scale,
            majorant_grid,
        }
    }
}
impl MediumTrait for RGBGridMedium {
    /// Emits light iff an emission grid exists and its scale is positive.
    fn is_emissive(&self) -> bool {
        self.le_grid.is_some() && self.le_scale > 0.
    }
    /// Looks up RGB coefficients at render-space point `p`; absent grids
    /// behave as a constant 1.
    fn sample_point(&self, p: Point3f, lambda: &SampledWavelengths) -> MediumProperties {
        // Map render space -> medium space -> normalized [0,1]^3 grid coords.
        let p_transform = self.render_from_medium.apply_inverse(p);
        let p = Point3f::from(self.bounds.offset(&p_transform));
        let convert = |s: &RGBUnboundedSpectrum| s.sample(lambda);
        let sigma_a = self.sigma_scale
            * self
                .sigma_a_grid
                .as_ref()
                .map_or(SampledSpectrum::new(1.0), |g| g.lookup_convert(p, convert));
        let sigma_s = self.sigma_scale
            * self
                .sigma_s_grid
                .as_ref()
                .map_or(SampledSpectrum::new(1.0), |g| g.lookup_convert(p, convert));
        // Emission only when the grid exists and the scale is positive.
        let le = self
            .le_grid
            .as_ref()
            .filter(|_| self.le_scale > 0.0)
            .map(|g| g.lookup_convert(p, |s| s.sample(lambda)) * self.le_scale)
            .unwrap_or_else(|| SampledSpectrum::new(0.0));
        MediumProperties {
            sigma_a,
            sigma_s,
            phase: self.phase.into(),
            le,
        }
    }
    /// Clips the ray against the medium bounds and returns a DDA walk over
    /// the majorant grid.
    fn sample_ray(
        &self,
        ray: &Ray,
        t_max: Float,
        _lambda: &SampledWavelengths,
        _buf: &Bump,
    ) -> RayMajorantIterator {
        let (local_ray, local_t_max) = self.render_from_medium.apply_inverse_ray(ray, Some(t_max));
        let inv_dir = Vector3f::new(
            1.0 / local_ray.d.x(),
            1.0 / local_ray.d.y(),
            1.0 / local_ray.d.z(),
        );
        let dir_is_neg = [
            if local_ray.d.x() < 0.0 { 1 } else { 0 },
            if local_ray.d.y() < 0.0 { 1 } else { 0 },
            if local_ray.d.z() < 0.0 { 1 } else { 0 },
        ];
        let (t_min, t_max) =
            match self
                .bounds
                .intersect_p(local_ray.o, local_t_max, inv_dir, &dir_is_neg)
            {
                Some((t0, t1)) => (t0, t1),
                None => return RayMajorantIterator::Void, // Missed the medium bounds
            };
        // Unit base sigma_t: the majorant grid already folded in
        // sigma_scale * max sigma_t per cell (see `new`).
        let sigma_t = SampledSpectrum::new(1.);
        let iter =
            DDAMajorantIterator::new(&local_ray, t_min, t_max, &self.majorant_grid, &sigma_t);
        RayMajorantIterator::DDA(iter)
    }
}
/// Procedural cloud medium. Stub: all methods are unimplemented.
#[derive(Debug, Clone)]
pub struct CloudMedium;
impl MediumTrait for CloudMedium {
    // Stub: not yet implemented.
    fn is_emissive(&self) -> bool {
        todo!()
    }
    // Stub: not yet implemented.
    fn sample_point(&self, _p: Point3f, _lambda: &SampledWavelengths) -> MediumProperties {
        todo!()
    }
    // Stub: not yet implemented.
    fn sample_ray(
        &self,
        _ray: &Ray,
        _t_max: Float,
        _lambda: &SampledWavelengths,
        _buf: &Bump,
    ) -> RayMajorantIterator {
        todo!()
    }
}
/// Medium backed by a NanoVDB volume file. Stub: all methods are
/// unimplemented.
#[derive(Debug, Clone)]
pub struct NanoVDBMedium;
impl MediumTrait for NanoVDBMedium {
    // Stub: not yet implemented.
    fn is_emissive(&self) -> bool {
        todo!()
    }
    // Stub: not yet implemented.
    fn sample_point(&self, _p: Point3f, _lambda: &SampledWavelengths) -> MediumProperties {
        todo!()
    }
    // Stub: not yet implemented.
    fn sample_ray(
        &self,
        _ray: &Ray,
        _t_max: Float,
        _lambda: &SampledWavelengths,
        _buf: &Bump,
    ) -> RayMajorantIterator {
        todo!()
    }
}
#[derive(Debug, Default, Clone)]
pub struct MediumInterface {
pub inside: Option<Arc<Medium>>,
@ -32,26 +804,10 @@ impl MediumInterface {
}
pub fn is_medium_transition(&self) -> bool {
if let Some(ref inside) = self.inside {
// self.inside == Some
if let Some(ref outside) = self.outside {
// self.outside == Some
let pi = inside as *const _ as *const usize;
let po = outside as *const _ as *const usize;
pi != po
} else {
// self.outside == None
true
}
} else {
// self.inside == None
if let Some(ref _outside) = self.outside {
// self.outside == Some
true
} else {
// self.outside == None
false
}
match (&self.inside, &self.outside) {
(Some(inside), Some(outside)) => !Arc::ptr_eq(inside, outside),
(None, None) => false,
_ => true,
}
}
}

View file

@ -1,11 +1,13 @@
pub mod aggregates;
pub mod bssrdf;
pub mod bxdf;
pub mod cie;
pub mod film;
pub mod filter;
pub mod integrator;
pub mod interaction;
pub mod material;
pub mod medium;
pub mod options;
pub mod pbrt;
pub mod primitive;
pub mod sampler;

94
src/core/options.rs Normal file
View file

@ -0,0 +1,94 @@
use crate::geometry::Point2i;
use std::ops::Deref;
use std::sync::OnceLock;
/// Coordinate system the renderer performs its computations in.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RenderingCoordinateSystem {
    // Everything expressed relative to the camera.
    Camera,
    // World axes, but with the origin moved to the camera position.
    CameraWorld,
    // Plain world space.
    World,
}
/// Core renderer options shared by all front ends.
#[derive(Debug, Clone)]
pub struct BasicPBRTOptions {
    pub seed: i32,
    pub quiet: bool,
    // The disable_* flags force deterministic sampling for debugging.
    pub disable_pixel_jitter: bool,
    pub disable_wavelength_jitter: bool,
    pub disable_texture_filtering: bool,
    pub force_diffuse: bool,
    pub use_gpu: bool,
    pub wavefront: bool,
    pub rendering_space: RenderingCoordinateSystem,
    // Pixel coordinate and sample index to start per-pixel debugging at.
    pub debug_start: Option<(Point2i, i32)>,
    pub write_partial_images: bool,
    pub mse_reference_image: Option<String>,
    pub mse_reference_output: Option<String>,
}
impl Default for BasicPBRTOptions {
    fn default() -> Self {
        Self {
            seed: 0,
            quiet: false,
            disable_pixel_jitter: false,
            disable_wavelength_jitter: false,
            disable_texture_filtering: false,
            force_diffuse: false,
            use_gpu: false,
            wavefront: false,
            rendering_space: RenderingCoordinateSystem::CameraWorld,
            // NOTE(review): defaulting to Some((0,0), 0) means debug output at
            // pixel (0,0) is active out of the box; an unset debug start
            // (None) seems more likely intended — confirm.
            debug_start: Some((Point2i::default(), 0)),
            write_partial_images: false,
            mse_reference_image: None,
            mse_reference_output: None,
        }
    }
}
/// Full option set: basic rendering options plus process-level settings.
/// Derefs to `BasicPBRTOptions` for convenient field access.
#[derive(Debug, Clone)]
pub struct PBRTOptions {
    pub basic: BasicPBRTOptions,
    // 0 = choose automatically.
    pub n_threads: usize,
    pub log_level: String,
    pub image_file: String,
}
impl Default for PBRTOptions {
fn default() -> Self {
Self {
basic: BasicPBRTOptions::default(),
n_threads: 0,
log_level: "info".to_string(),
image_file: "output.exr".to_string(),
}
}
}
// Deref to the basic options so call sites can write `opts.seed` etc.
// without going through `.basic`.
impl Deref for PBRTOptions {
    type Target = BasicPBRTOptions;
    fn deref(&self) -> &Self::Target {
        &self.basic
    }
}
// Process-wide options, set at most once at startup.
static OPTIONS: OnceLock<PBRTOptions> = OnceLock::new();
/// Installs the global options. Panics if called more than once.
pub fn init_pbrt(options: PBRTOptions) {
    OPTIONS
        .set(options)
        .expect("PBRT has already been initialized!");
}
/// Tears down global renderer state at shutdown. Stub: not yet implemented.
pub fn cleanup_pbrt() {
    todo!()
}
/// Returns the global options, or a lazily-built default set if `init_pbrt`
/// was never called. The fallback lives in its own `OnceLock` so that merely
/// reading defaults does not claim the real `OPTIONS` slot (a later
/// `init_pbrt` can still succeed).
pub fn get_options() -> &'static PBRTOptions {
    OPTIONS.get().unwrap_or_else(|| {
        static DEFAULT: OnceLock<PBRTOptions> = OnceLock::new();
        DEFAULT.get_or_init(PBRTOptions::default)
    })
}

View file

@ -1,11 +1,96 @@
use crate::geometry::{Lerp, Point2f, Vector2f, Vector3f};
use num_traits::Num;
use num_traits::{Num, PrimInt};
use std::collections::HashSet;
use std::hash::Hash;
use std::ops::{Add, Mul};
use std::sync::atomic::{AtomicU64, Ordering as SyncOrdering};
use std::sync::{Arc, Mutex};
pub type Float = f32;
pub const MACHINE_EPSILON: Float = std::f32::EPSILON * 0.5;
#[cfg(not(feature = "use_f64"))]
pub type FloatBits = u32;
#[cfg(feature = "use_f64")]
pub type FloatBits = u64;
/// Bit-level access to IEEE-754 float representations, abstracting over the
/// f32/f64 choice behind the `Float` alias.
pub trait FloatBitOps: Copy {
    type Bits;
    /// Raw bit pattern of the value.
    fn to_bits_val(self) -> Self::Bits;
    /// Unbiased (true) exponent.
    fn exponent_val(self) -> i32;
    /// Fraction field (without the implicit leading 1).
    fn significand_val(self) -> Self::Bits;
    /// Sign bit, isolated in place (top bit set for negative values).
    fn sign_bit_val(self) -> Self::Bits;
    /// Reconstructs a value from its raw bit pattern.
    fn from_bits_val(bits: Self::Bits) -> Self;
}
impl FloatBitOps for f32 {
    type Bits = u32;
    #[inline(always)]
    fn to_bits_val(self) -> u32 {
        self.to_bits()
    }
    // 8 exponent bits starting at bit 23, bias 127.
    #[inline(always)]
    fn exponent_val(self) -> i32 {
        ((self.to_bits() >> 23) & 0xFF) as i32 - 127
    }
    // Fraction field: the low 23 bits.
    #[inline(always)]
    fn significand_val(self) -> u32 {
        self.to_bits() & 0x007F_FFFF
    }
    // Sign: bit 31.
    #[inline(always)]
    fn sign_bit_val(self) -> u32 {
        self.to_bits() & 0x8000_0000
    }
    #[inline(always)]
    fn from_bits_val(bits: u32) -> Self {
        f32::from_bits(bits)
    }
}
impl FloatBitOps for f64 {
    type Bits = u64;
    #[inline(always)]
    fn to_bits_val(self) -> u64 {
        self.to_bits()
    }
    // 11 exponent bits starting at bit 52, bias 1023.
    #[inline(always)]
    fn exponent_val(self) -> i32 {
        ((self.to_bits() >> 52) & 0x7FF) as i32 - 1023
    }
    // Fraction field: the low 52 bits.
    #[inline(always)]
    fn significand_val(self) -> u64 {
        self.to_bits() & 0x000F_FFFF_FFFF_FFFF
    }
    // Sign: bit 63.
    #[inline(always)]
    fn sign_bit_val(self) -> u64 {
        self.to_bits() & 0x8000_0000_0000_0000
    }
    #[inline(always)]
    fn from_bits_val(bits: u64) -> Self {
        f64::from_bits(bits)
    }
}
// Half the distance between 1.0 and the next representable Float.
pub const MACHINE_EPSILON: Float = Float::EPSILON * 0.5;
// Offset used to keep shadow-ray origins off their originating surface.
pub const SHADOW_EPSILON: Float = 0.0001;
// Largest Float strictly below 1.0. NOTE(review): this literal and the PI
// constant below are the f32 values; under the `use_f64` feature they would
// lose precision / mismatch the Float alias — confirm.
pub const ONE_MINUS_EPSILON: Float = 0.99999994;
pub const PI: Float = std::f32::consts::PI;
@ -17,12 +102,12 @@ pub const PI_OVER_4: Float = 0.785_398_163_397_448_309_61;
pub const SQRT_2: Float = 1.414_213_562_373_095_048_80;
#[inline]
pub fn lerp<Value, Factor>(t: Factor, a: Value, b: Value) -> Value
pub fn lerp<T, F>(t: F, a: T, b: T) -> T
where
Factor: Copy + Num,
Value: Copy + Lerp<Factor>,
T: Lerp<F>,
F: Copy,
{
Value::lerp(t, a, b)
T::lerp(t, a, b)
}
pub fn linear_pdf<T>(x: T, a: T, b: T) -> T
@ -75,33 +160,40 @@ pub fn evaluate_polynomial(t: Float, coeffs: &[Float]) -> Option<Float> {
}
#[inline]
pub fn find_interval<P>(sz: usize, pred: P) -> usize
pub fn find_interval<T, P>(sz: T, pred: P) -> T
where
P: Fn(usize) -> bool,
T: PrimInt,
P: Fn(T) -> bool,
{
if sz <= 2 {
return 0;
let zero = T::zero();
let one = T::one();
let two = one + one;
if sz <= two {
return zero;
}
let mut low = 1;
let mut high = sz - 1;
let mut low = one;
let mut high = sz - one;
while low < high {
let mid = low + (high - low) / 2;
// mid = low + (high - low) / 2
let mid = low + (high - low) / two;
if pred(mid) {
low = mid + 1;
low = mid + one;
} else {
high = mid;
}
}
let result = low - 1;
clamp_t(result, 0, sz - 2)
let result = low - one;
num_traits::clamp(result, zero, sz - two)
}
#[inline]
pub fn gamma(n: i32) -> Float {
return (n as Float * MACHINE_EPSILON) / (1. - n as Float * MACHINE_EPSILON);
n as Float * MACHINE_EPSILON / (1. - n as Float * MACHINE_EPSILON)
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
@ -148,3 +240,30 @@ macro_rules! check_rare {
}
};
}
/// Thread-safe interning cache: `lookup` returns a shared `Arc` for equal
/// values, so each distinct value is stored once.
pub struct InternCache<T> {
    // Guarded set of interned values; `HashSet<Arc<T>>` hashes through the
    // Arc to the underlying T, so lookups by `&T` work via `Borrow`.
    cache: Mutex<HashSet<Arc<T>>>,
}
impl<T> InternCache<T>
where
    // The previous `Clone` bound was never used (only the Arc is cloned);
    // dropping it loosens the API without breaking any caller.
    T: Eq + Hash,
{
    /// Creates an empty cache.
    pub fn new() -> Self {
        Self {
            cache: Mutex::new(HashSet::new()),
        }
    }
    /// Returns the canonical `Arc` for `value`, inserting it on first sight.
    pub fn lookup(&self, value: T) -> Arc<T> {
        let mut set = self.cache.lock().unwrap();
        if let Some(existing) = set.get(&value) {
            return Arc::clone(existing); // cheap refcount bump, no copy of T
        }
        let interned = Arc::new(value);
        set.insert(Arc::clone(&interned));
        interned
    }
}
// Provided so the cache composes with `Default`-based construction
// (clippy::new_without_default).
impl<T> Default for InternCache<T>
where
    T: Eq + Hash,
{
    fn default() -> Self {
        Self::new()
    }
}

View file

@ -1,15 +1,19 @@
use crate::core::interaction::Interaction;
use crate::core::aggregates::LinearBVHNode;
use crate::core::interaction::{Interaction, InteractionTrait, SurfaceInteraction};
use crate::core::material::Material;
use crate::core::medium::{Medium, MediumInterface};
use crate::core::pbrt::Float;
use crate::geometry::{Bounds3f, Ray};
use crate::shapes::{ShapeIntersection, ShapeTrait};
use crate::core::material::MaterialTrait;
use crate::core::medium::MediumInterface;
use crate::lights::Light;
use crate::core::texture::{FloatTextureTrait, TextureEvalContext};
use crate::geometry::{Bounds3f, Ray};
use crate::lights::Light;
use crate::shapes::{Shape, ShapeIntersection, ShapeTrait};
use crate::utils::hash::hash_float;
use crate::utils::transform::{AnimatedTransform, Transform};
use enum_dispatch::enum_dispatch;
use std::sync::Arc;
#[enum_dispatch]
pub trait PrimitiveTrait: Send + Sync + std::fmt::Debug {
fn bounds(&self) -> Bounds3f;
fn intersect(&self, r: &Ray, t_max: Option<Float>) -> Option<ShapeIntersection>;
@ -18,34 +22,52 @@ pub trait PrimitiveTrait: Send + Sync + std::fmt::Debug {
#[derive(Debug, Clone)]
pub struct GeometricPrimitive {
shape: Arc<dyn ShapeTrait>,
material: Arc<dyn MaterialTrait>,
shape: Arc<Shape>,
material: Arc<Material>,
area_light: Arc<Light>,
medium_interface: Arc<MediumInterface>,
medium_interface: MediumInterface,
alpha: Option<Arc<dyn FloatTextureTrait>>,
}
impl PrimitiveTrait for GeometricPrimitive {
fn bounds(&self) -> Bounds3f {
self.shape.bounds()
}
fn intersect(&self, r: &Ray, t_max: Option<Float>) -> Option<ShapeIntersection> {
let mut si = self.shape.intersect(r, t_max)?;
let ctx: TextureEvalContext = si.intr().into();
let Some(ref alpha) = self.alpha else { return None };
let a = alpha.evaluate(&ctx);
if a < 1. {
let u = if a <= 0. { 1. } else { hash_float((r.o, r.d)) };
if u > a {
let r_next = si.intr().spawn_ray(r.d);
let new_t_max = t_max.map(|t| t - si.t_hit())?;
let mut si_next = self.intersect(&r_next, Some(new_t_max - si.t_hit()))?;
si_next.set_t_hit(si_next.t_hit() + si.t_hit());
return Some(si_next)
if let Some(ref alpha) = self.alpha {
let ctx = TextureEvalContext::from(&si.intr);
let a = alpha.evaluate(&ctx);
if a < 1.0 {
let u = if a <= 0.0 {
1.0
} else {
hash_float(&(r.o, r.d))
};
if u > a {
let r_next = si.intr.spawn_ray(r.d);
let new_t_max = t_max.map(|t| t - si.t_hit());
if let Some(mut si_next) = self.intersect(&r_next, new_t_max) {
si_next.set_t_hit(si_next.t_hit() + si.t_hit());
return Some(si_next);
} else {
return None;
}
}
}
}
si.intr_mut().set_intersection_properties(self.material.clone(), self.area_light.clone(), Some(self.medium_interface.clone()), r.medium.clone()?);
si.set_intersection_properties(
self.material.clone(),
self.area_light.clone(),
Some(self.medium_interface.clone()),
Some(r.medium.clone().expect("Medium not set")),
);
Some(si)
}
@ -59,16 +81,128 @@ impl PrimitiveTrait for GeometricPrimitive {
}
#[derive(Debug, Clone)]
pub struct SimplePrimitive {
shape: Arc<dyn ShapeTrait>,
material: Arc<dyn MaterialTrait>,
pub struct SimplePrimitiv {
shape: Arc<Shape>,
material: Arc<Material>,
}
pub struct TransformedPrimitive;
pub struct AnimatedPrimitive;
pub struct BVHAggregatePrimitive;
/// Primitive placed in the scene via a static object-to-render transform.
#[derive(Debug, Clone)]
pub struct TransformedPrimitive {
    primitive: Arc<dyn PrimitiveTrait>,
    render_from_primitive: Transform<Float>,
}
impl PrimitiveTrait for TransformedPrimitive {
    /// Child bounds mapped out into render space.
    fn bounds(&self) -> Bounds3f {
        self.render_from_primitive
            .apply_to_bounds(self.primitive.bounds())
    }
    /// Intersects by mapping the ray into the child's space, then mapping the
    /// resulting surface interaction back out to render space.
    fn intersect(&self, r: &Ray, t_max: Option<Float>) -> Option<ShapeIntersection> {
        let (local_ray, local_t_max) = self.render_from_primitive.apply_inverse_ray(r, t_max);
        let mut hit = self.primitive.intersect(&local_ray, Some(local_t_max))?;
        let wrapped = Interaction::Surface(hit.intr);
        match self.render_from_primitive.apply_to_interaction(&wrapped) {
            Interaction::Surface(surface) => hit.intr = surface,
            _ => panic!("TransformedPrimitive: Transform changed interaction type (impossible)"),
        }
        Some(hit)
    }
    /// Shadow-ray test; not yet implemented.
    fn intersect_p(&self, _r: &Ray, _t_max: Option<Float>) -> bool {
        todo!()
    }
}
/// Primitive placed in the scene via a time-varying transform.
#[derive(Debug, Clone)]
pub struct AnimatedPrimitive {
    primitive: Arc<dyn PrimitiveTrait>,
    render_from_primitive: AnimatedTransform,
}
impl PrimitiveTrait for AnimatedPrimitive {
    /// Child bounds swept over the full animation interval.
    fn bounds(&self) -> Bounds3f {
        self.render_from_primitive
            .motion_bounds(&self.primitive.bounds())
    }
    /// Intersects by freezing the transform at the ray's time, tracing in the
    /// child's space, and mapping the hit back out to render space.
    fn intersect(&self, r: &Ray, t_max: Option<Float>) -> Option<ShapeIntersection> {
        let interp_render_from_primitive = self.render_from_primitive.interpolate(r.time);
        let (ray, t_max) = interp_render_from_primitive.apply_inverse_ray(r, t_max);
        let mut si = self.primitive.intersect(&ray, Some(t_max))?;
        let wrapper = si.intr.into();
        let new_wrapper = interp_render_from_primitive.apply_to_interaction(&wrapper);
        if let Interaction::Surface(new_surf) = new_wrapper {
            si.intr = new_surf;
        } else {
            // BUG FIX: the message previously blamed TransformedPrimitive,
            // which would mislead anyone debugging this panic.
            panic!("AnimatedPrimitive: Interaction type changed unexpectedly!");
        }
        Some(si)
    }
    /// Shadow-ray test; not yet implemented.
    fn intersect_p(&self, _r: &Ray, _t_max: Option<Float>) -> bool {
        todo!()
    }
}
/// Bounding-volume-hierarchy aggregate over a set of primitives, stored as a
/// flattened node array for traversal.
#[derive(Debug, Clone)]
pub struct BVHAggregatePrimitive {
    max_prims_in_node: usize,
    primitives: Vec<Arc<dyn PrimitiveTrait>>,
    // Depth-first flattened tree; nodes[0] is the root.
    nodes: Vec<LinearBVHNode>,
}
impl PrimitiveTrait for BVHAggregatePrimitive {
    /// Root node bounds, or default (empty) bounds if no tree was built.
    fn bounds(&self) -> Bounds3f {
        if self.nodes.is_empty() {
            Bounds3f::default()
        } else {
            self.nodes[0].bounds
        }
    }
    /// BUG FIX: the previous body called `self.intersect(r, t_max)` — the
    /// very method being defined — so any non-empty BVH recursed
    /// unconditionally until stack overflow. The flattened-node traversal has
    /// not been written yet, so fail loudly instead of hanging.
    fn intersect(&self, _r: &Ray, _t_max: Option<Float>) -> Option<ShapeIntersection> {
        if self.nodes.is_empty() {
            return None;
        }
        todo!("BVH traversal over the LinearBVHNode array is not implemented yet")
    }
    /// Same fix as `intersect`: the old body was unconditionally recursive.
    fn intersect_p(&self, _r: &Ray, _t_max: Option<Float>) -> bool {
        if self.nodes.is_empty() {
            return false;
        }
        todo!("BVH occlusion traversal is not implemented yet")
    }
}
/// Kd-tree acceleration structure. Stub: all methods are unimplemented.
#[derive(Debug, Clone)]
pub struct KdTreeAggregate;
impl PrimitiveTrait for KdTreeAggregate {
    // Stub: not yet implemented.
    fn bounds(&self) -> Bounds3f {
        todo!()
    }
    // Stub: not yet implemented.
    fn intersect(&self, _r: &Ray, _t_max: Option<Float>) -> Option<ShapeIntersection> {
        todo!()
    }
    // Stub: not yet implemented.
    fn intersect_p(&self, _r: &Ray, _t_max: Option<Float>) -> bool {
        todo!()
    }
}
#[derive(Clone, Debug)]
#[enum_dispatch(PrimitiveTrait)]
pub enum Primitive {
Geometric(GeometricPrimitive),
Transformed(TransformedPrimitive),
@ -76,28 +210,3 @@ pub enum Primitive {
BVH(BVHAggregatePrimitive),
KdTree(KdTreeAggregate),
}
// `PrimitiveTrait` dispatch for `Primitive` is generated by
// `#[enum_dispatch]`; the hand-written match previously kept here as
// commented-out code was dead and has been removed.
impl Primitive {}
// struct GeometricPrimitive {
// shape: Shape,
// material: Material,
// area_light: Light,
// medium_interface: MediumInterface,
// alpha: Texture<f64>,
// }
//
//
// impl GeometricPrimitive {
// fn new(shape: Shape, material: Material, medium_interface: MediumInterface, alpha: Texture<f64>)
// }

View file

@ -1,6 +1,24 @@
use crate::core::pbrt::{Float, PI, PI_OVER_2, PI_OVER_4, find_interval, lerp};
use std::ops::RangeFull;
use enum_dispatch::enum_dispatch;
use rand::seq::index::sample;
use crate::core::filter::FilterTrait;
use crate::core::pbrt::{
Float, ONE_MINUS_EPSILON, PI, PI_OVER_2, PI_OVER_4, clamp_t, find_interval, lerp,
};
use crate::geometry::{Bounds2f, Point2f, Point2i, Vector2f};
use crate::utils::containers::Array2D;
use crate::utils::math::{
BinaryPermuteScrambler, DigitPermutation, FastOwenScrambler, NoRandomizer, OwenScrambler,
PRIME_TABLE_SIZE, Scrambler, compute_radical_inverse_permutations, encode_morton_2,
inverse_radical_inverse, log2_int, owen_scrambled_radical_inverse, permutation_element,
radical_inverse, round_up_pow2, scrambled_radical_inverse, sobol_interval_to_index,
sobol_sample,
};
use crate::utils::rng::Rng;
use crate::utils::sobol::N_SOBOL_DIMENSIONS;
use crate::utils::{hash::*, sobol};
#[derive(Debug, Clone, Copy)]
pub struct CameraSample {
@ -21,156 +39,722 @@ impl Default for CameraSample {
}
}
#[derive(Debug, Clone)]
pub struct PiecewiseConstant1D {
pub func: Vec<Float>,
pub cdf: Vec<Float>,
pub min: Float,
pub max: Float,
pub func_integral: Float,
/// Builds a `CameraSample` for pixel `p_pixel`: a filter-importance-sampled
/// film position, a shutter time, a lens position, and the filter weight.
pub fn get_camera_sample<S, F>(sampler: &mut S, p_pixel: Point2i, filter: &F) -> CameraSample
where
    S: SamplerTrait,
    F: FilterTrait,
{
    // Importance-sample the reconstruction filter around the pixel center.
    // Sampler consumption order (pixel2d, 1d, 2d) matches the original.
    let filter_sample = filter.sample(sampler.get_pixel2d());
    let film_pos =
        Point2f::from(p_pixel) + Vector2f::from(filter_sample.p) + Vector2f::new(0.5, 0.5);
    let shutter_time = sampler.get1d();
    let lens_uv = sampler.get2d();
    CameraSample {
        p_film: film_pos,
        time: shutter_time,
        p_lens: lens_uv,
        filter_weight: filter_sample.weight,
    }
}
impl PiecewiseConstant1D {
pub fn new(f: &[Float]) -> Self {
Self::new_with_bounds(f, 0., 1.)
#[derive(Default, Debug, Clone)]
/// Simplest sampler: every dimension is an independent uniform random value
/// drawn from a pixel/seed-keyed PCG stream.
pub struct IndependentSampler {
    samples_per_pixel: usize,
    // Mixed into the per-pixel hash to decorrelate renders.
    seed: u64,
    rng: Rng,
}
impl IndependentSampler {
    /// Creates a sampler producing `samples_per_pixel` samples, seeded with `seed`.
    pub fn new(samples_per_pixel: usize, seed: u64) -> Self {
        Self {
            samples_per_pixel,
            seed,
            rng: Rng::default(),
        }
    }
}
impl SamplerTrait for IndependentSampler {
    fn samples_per_pixel(&self) -> usize {
        self.samples_per_pixel
    }
    /// Selects a deterministic RNG stream for (pixel, seed) and fast-forwards
    /// past earlier samples (64 Ki draws reserved per sample index) so results
    /// are reproducible regardless of evaluation order.
    fn start_pixel_sample(&mut self, p: Point2i, sample_index: usize, dim: Option<usize>) {
        let hash_input = [p.x() as u64, p.y() as u64, self.seed];
        let sequence_index = hash_buffer(&hash_input, 0);
        self.rng.set_sequence(sequence_index);
        self.rng
            .advance((sample_index as u64) * 65536 + (dim.unwrap_or(0) as u64));
    }
pub fn new_with_bounds(f: &[Float], min: Float, max: Float) -> Self {
assert!(max > min);
let n = f.len();
let mut func = Vec::with_capacity(n);
for &val in f {
func.push(val.abs());
    /// Next independent uniform sample in [0, 1).
    fn get1d(&mut self) -> Float {
        self.rng.uniform::<Float>()
    }
    /// Two independent uniform samples as a point in [0, 1)^2.
    fn get2d(&mut self) -> Point2f {
        Point2f::new(self.rng.uniform::<Float>(), self.rng.uniform::<Float>())
    }
    /// Pixel samples get no special treatment for an independent sampler.
    fn get_pixel2d(&mut self) -> Point2f {
        self.get2d()
    }
}
// Pixel tile size for the Halton sampler; pixel coordinates are taken modulo
// this resolution when computing per-pixel sequence offsets.
const MAX_HALTON_RESOLUTION: i32 = 128;
#[derive(Debug, Default, Clone, PartialEq, Eq)]
/// How low-discrepancy sample values are randomized/scrambled.
pub enum RandomizeStrategy {
    #[default]
    None,
    /// Per-digit permutation of the radical inverse digits.
    PermuteDigits,
    /// Hash-based approximation of Owen scrambling (cheap, good quality).
    FastOwen,
    /// Full Owen scrambling.
    Owen,
}
#[derive(Default, Debug, Clone)]
/// Low-discrepancy sampler based on the Halton sequence, with per-pixel
/// offsets computed via the Chinese remainder theorem over bases 2 and 3.
pub struct HaltonSampler {
    samples_per_pixel: usize,
    randomize: RandomizeStrategy,
    // Digit permutations used by RandomizeStrategy::PermuteDigits.
    digit_permutations: Vec<DigitPermutation>,
    // base_scales[i] = bases[i]^base_exponents[i], covering the pixel tile.
    base_scales: [u64; 2],
    base_exponents: [u64; 2],
    // Multiplicative inverses used in the CRT sequence-index reconstruction.
    mult_inverse: [u64; 2],
    // Index into the global Halton sequence for the current sample.
    halton_index: u64,
    // Next dimension to be consumed (0 and 1 are reserved for the pixel).
    dim: usize,
}
impl HaltonSampler {
pub fn new(
samples_per_pixel: usize,
full_res: Point2i,
randomize: RandomizeStrategy,
seed: u64,
) -> Self {
let digit_permutations = compute_radical_inverse_permutations(seed);
let mut base_scales = [0u64; 2];
let mut base_exponents = [0u64; 2];
let bases = [2, 3];
let res_coords = [full_res.x(), full_res.y()];
for i in 0..2 {
let base = bases[i] as u64;
let mut scale = 1u64;
let mut exp = 0u64;
let limit = std::cmp::min(res_coords[i], MAX_HALTON_RESOLUTION) as u64;
while scale < limit {
scale *= base;
exp += 1;
}
base_scales[i] = scale;
base_exponents[i] = exp;
}
let mut cdf = vec![0.; n + 1];
for i in 1..=n {
debug_assert!(func[i - 1] >= 0.);
cdf[i] = cdf[i - 1] + func[i - 1] * (max - min) / n as Float;
}
let mut mult_inverse = [0u64; 2];
let func_integral = cdf[n];
if func_integral == 0. {
for i in 1..=n {
cdf[i] = i as Float / n as Float;
}
} else {
for i in 1..=n {
cdf[i] /= func_integral;
}
}
mult_inverse[0] =
Self::multiplicative_inverse(base_scales[0] as i64, base_scales[0] as i64);
mult_inverse[1] =
Self::multiplicative_inverse(base_scales[1] as i64, base_scales[1] as i64);
Self {
func,
cdf,
func_integral,
min,
max,
samples_per_pixel,
randomize,
digit_permutations,
base_scales,
base_exponents,
mult_inverse,
halton_index: 0,
dim: 0,
}
}
pub fn integral(&self) -> Float {
self.func_integral
}
pub fn size(&self) -> usize {
self.func.len()
}
pub fn sample(&self, u: Float) -> (Float, Float, usize) {
let o = find_interval(self.cdf.len(), |idx| self.cdf[idx] <= u);
let mut du = u - self.cdf[o];
if self.cdf[o + 1] - self.cdf[o] > 0. {
du /= self.cdf[o + 1] - self.cdf[o];
}
debug_assert!(!du.is_nan());
let value = lerp((o as Float + du) / self.size() as Float, self.min, self.max);
let pdf_val = if self.func_integral > 0. {
self.func[o] / self.func_integral
fn sample_dimension(&self, dimension: usize) -> Float {
if self.randomize == RandomizeStrategy::None {
radical_inverse(dimension, self.halton_index)
} else if self.randomize == RandomizeStrategy::PermuteDigits {
scrambled_radical_inverse(
dimension,
self.halton_index,
&self.digit_permutations[dimension],
)
} else {
0.
};
(value, pdf_val, o)
owen_scrambled_radical_inverse(
dimension,
self.halton_index,
mix_bits(1 + ((dimension as u64) << 4)) as u32,
)
}
}
}
#[derive(Debug, Clone)]
pub struct PiecewiseConstant2D {
pub p_conditional_v: Vec<PiecewiseConstant1D>,
pub p_marginal: PiecewiseConstant1D,
pub domain: Bounds2f,
}
impl PiecewiseConstant2D {
pub fn new(data: &Array2D<Float>, nu: usize, nv: usize, domain: Bounds2f) -> Self {
let mut p_conditional_v = Vec::with_capacity(nv);
for v in 0..nv {
let start = v * nu;
let end = start + nu;
p_conditional_v.push(PiecewiseConstant1D::new_with_bounds(
&data.as_slice()[start..end],
domain.p_min.x(),
domain.p_max.x(),
));
    /// Multiplicative inverse of `a` modulo `n` (via the extended Euclidean
    /// algorithm), i.e. the x with a*x ≡ 1 (mod n); requires gcd(a, n) == 1.
    // NOTE(review): the constructor appears to call this with identical
    // arguments (base_scales[i], base_scales[i]); an inverse of a mod a does
    // not exist — the intended call is likely (base_scales[1-i], base_scales[i]).
    // Verify against the (diff-garbled) call site.
    fn multiplicative_inverse(a: i64, n: i64) -> u64 {
        let (x, _) = Self::extended_gcd(a as u64, n as u64);
        x.rem_euclid(n) as u64
    }
fn extended_gcd(a: u64, b: u64) -> (i64, i64) {
if b == 0 {
return (1, 0);
}
let marginal_func: Vec<Float> = p_conditional_v.iter().map(|p| p.integral()).collect();
let p_marginal = PiecewiseConstant1D::new_with_bounds(
&marginal_func,
domain.p_min.y(),
domain.p_max.y(),
let (xp, yp) = Self::extended_gcd(b, a % b);
let d = (a / b) as i64;
(yp, xp - d * yp)
}
}
impl SamplerTrait for HaltonSampler {
    fn samples_per_pixel(&self) -> usize {
        self.samples_per_pixel
    }
    /// Computes the Halton sequence index whose first two dimensions land in
    /// pixel `p` (CRT reconstruction from the base-2/base-3 offsets), then
    /// advances by `sample_index` strides.
    fn start_pixel_sample(&mut self, p: Point2i, sample_index: usize, dim: Option<usize>) {
        self.halton_index = 0;
        let sample_stride = self.base_scales[0] * self.base_scales[1];
        if sample_stride > 1 {
            // Pixel coordinates are tiled at MAX_HALTON_RESOLUTION.
            let pm_x = p.x().rem_euclid(MAX_HALTON_RESOLUTION) as u64;
            let pm_y = p.y().rem_euclid(MAX_HALTON_RESOLUTION) as u64;
            let dim_offset_x = inverse_radical_inverse(pm_x, 2, self.base_exponents[0]);
            self.halton_index = self.halton_index.wrapping_add(
                dim_offset_x
                    .wrapping_mul(sample_stride / self.base_scales[0])
                    .wrapping_mul(self.mult_inverse[0]),
            );
            let dim_offset_y = inverse_radical_inverse(pm_y, 3, self.base_exponents[1]);
            self.halton_index = self.halton_index.wrapping_add(
                dim_offset_y
                    .wrapping_mul(sample_stride / self.base_scales[1])
                    .wrapping_mul(self.mult_inverse[1]),
            );
            self.halton_index %= sample_stride;
        }
        self.halton_index = self
            .halton_index
            .wrapping_add((sample_index as u64).wrapping_mul(sample_stride));
        // Dimensions 0 and 1 are reserved for the pixel sample.
        self.dim = 2.max(dim.unwrap_or(0));
    }
    fn get1d(&mut self) -> Float {
        // BUGFIX: wrap with `>=` (dim indexes a 0-based prime table) — was `>`.
        if self.dim >= PRIME_TABLE_SIZE {
            self.dim = 2;
        }
        let dim = self.dim;
        // BUGFIX: advance the dimension so successive calls draw distinct
        // dimensions; previously every get1d reused the same one.
        self.dim += 1;
        self.sample_dimension(dim)
    }
    fn get2d(&mut self) -> Point2f {
        // BUGFIX: both consumed dimensions must be in range (was `> PRIME_TABLE_SIZE`).
        if self.dim + 1 >= PRIME_TABLE_SIZE {
            self.dim = 2;
        }
        let dim = self.dim;
        self.dim += 2;
        Point2f::new(self.sample_dimension(dim), self.sample_dimension(dim + 1))
    }
    fn get_pixel2d(&mut self) -> Point2f {
        // The fractional pixel position comes from the digits *above* the
        // per-pixel offset. Base 2 admits a bit shift; base 3 requires an
        // integer division by 3^e.
        // BUGFIX: dimension 1 previously shifted by the base-3 exponent,
        // which is only correct for powers of two.
        Point2f::new(
            radical_inverse(0, self.halton_index >> self.base_exponents[0]),
            radical_inverse(1, self.halton_index / self.base_scales[1]),
        )
    }
}
#[derive(Default, Debug, Clone)]
/// Jittered stratified sampler over an x × y grid of strata per pixel, with
/// per-dimension random permutations of the stratum order.
pub struct StratifiedSampler {
    x_pixel_samples: usize,
    y_pixel_samples: usize,
    // false => sample at stratum centers (deterministic).
    jitter: bool,
    seed: u64,
    rng: Rng,
    // Current pixel / sample / dimension state.
    pixel: Point2i,
    sample_index: usize,
    dim: usize,
}
impl StratifiedSampler {
    /// Creates a stratified sampler with an `x_pixel_samples` ×
    /// `y_pixel_samples` stratum grid; `seed` defaults to 0.
    pub fn new(
        x_pixel_samples: usize,
        y_pixel_samples: usize,
        seed: Option<u64>,
        jitter: bool,
    ) -> Self {
        Self {
            x_pixel_samples,
            y_pixel_samples,
            jitter,
            seed: seed.unwrap_or(0),
            rng: Rng::default(),
            pixel: Point2i::default(),
            sample_index: 0,
            dim: 0,
        }
    }
}
impl SamplerTrait for StratifiedSampler {
    fn samples_per_pixel(&self) -> usize {
        self.x_pixel_samples * self.y_pixel_samples
    }
    /// Records the pixel/sample, resets the dimension counter, and
    /// fast-forwards a deterministic per-pixel RNG stream.
    fn start_pixel_sample(&mut self, p: Point2i, sample_index: usize, dim: Option<usize>) {
        self.pixel = p;
        self.sample_index = sample_index;
        // BUGFIX: reset the dimension counter; it previously carried over
        // between samples, desynchronizing stratum permutations across the
        // samples of a pixel.
        self.dim = dim.unwrap_or(0);
        let hash_input = [p.x() as u64, p.y() as u64, self.seed];
        let sequence_index = hash_buffer(&hash_input, 0);
        self.rng.set_sequence(sequence_index);
        self.rng
            .advance((sample_index as u64) * 65536 + (dim.unwrap_or(0) as u64));
    }
    /// 1D stratified sample: pick this sample's stratum via a per-dimension
    /// random permutation, then jitter within it.
    fn get1d(&mut self) -> Float {
        let hash_input = [
            self.pixel.x() as u64,
            self.pixel.y() as u64,
            self.dim as u64,
            self.seed,
        ];
        let hash = hash_buffer(&hash_input, 0);
        let stratum = permutation_element(
            self.sample_index as u32,
            self.samples_per_pixel() as u32,
            hash as u32,
        );
        self.dim += 1;
        let delta = if self.jitter {
            self.rng.uniform::<Float>()
        } else {
            0.5
        };
        (stratum as Float + delta) / (self.samples_per_pixel() as Float)
    }
    /// 2D stratified sample over the x × y stratum grid.
    fn get2d(&mut self) -> Point2f {
        let hash_input = [
            self.pixel.x() as u64,
            self.pixel.y() as u64,
            self.dim as u64,
            self.seed,
        ];
        let hash = hash_buffer(&hash_input, 0);
        let stratum = permutation_element(
            self.sample_index as u32,
            self.samples_per_pixel() as u32,
            hash as u32,
        );
        self.dim += 2;
        // Map the linear stratum index to grid coordinates.
        let x = stratum % self.x_pixel_samples as u32;
        // BUGFIX: the row index divides by the grid *width* (x_pixel_samples);
        // dividing by y_pixel_samples selected the wrong stratum whenever the
        // two counts differ.
        let y = stratum / self.x_pixel_samples as u32;
        let dx = if self.jitter {
            self.rng.uniform::<Float>()
        } else {
            0.5
        };
        let dy = if self.jitter {
            self.rng.uniform::<Float>()
        } else {
            0.5
        };
        Point2f::new(
            (x as Float + dx) / self.x_pixel_samples as Float,
            (y as Float + dy) / self.y_pixel_samples as Float,
        )
    }
    fn get_pixel2d(&mut self) -> Point2f {
        self.get2d()
    }
}
#[derive(Default, Debug, Clone)]
/// Sobol'-based sampler that decorrelates dimensions by randomly permuting
/// the sample index per (pixel, dimension) — "padded" Sobol' sampling.
pub struct PaddedSobolSampler {
    samples_per_pixel: usize,
    seed: u64,
    randomize: RandomizeStrategy,
    // Current pixel / sample / dimension state.
    pixel: Point2i,
    sample_index: usize,
    dim: usize,
}
impl PaddedSobolSampler {
pub fn new(samples_per_pixel: usize, randomize: RandomizeStrategy, seed: Option<u64>) -> Self {
Self {
p_conditional_v,
p_marginal,
domain,
samples_per_pixel,
seed: seed.unwrap_or(0),
randomize,
pixel: Point2i::default(),
sample_index: 0,
dim: 0,
}
}
pub fn new_with_bounds(data: &Array2D<Float>, domain: Bounds2f) -> Self {
Self::new(data, data.x_size() as usize, data.y_size() as usize, domain)
}
pub fn new_with_data(data: &Array2D<Float>, nx: usize, ny: usize) -> Self {
Self::new(
data,
nx,
ny,
Bounds2f::from_points(Point2f::new(0., 0.), Point2f::new(1., 1.)),
)
}
pub fn resolution(&self) -> Point2i {
Point2i::new(
self.p_conditional_v[0].size() as i32,
self.p_conditional_v[1].size() as i32,
)
}
pub fn integral(&self) -> f32 {
self.p_marginal.integral()
}
pub fn sample(&self, u: Point2f) -> (Point2f, f32, Point2i) {
let (d1, pdf1, off_y) = self.p_marginal.sample(u.y());
let (d0, pdf0, off_x) = self.p_conditional_v[off_y].sample(u.x());
let pdf = pdf0 * pdf1;
let offset = Point2i::new(off_x as i32, off_y as i32);
(Point2f::new(d0, d1), pdf, offset)
}
pub fn pdf(&self, p: Point2f) -> f32 {
let p_offset = self.domain.offset(&p);
let nu = self.p_conditional_v[0].size();
let nv = self.p_marginal.size();
let iu = (p_offset.x() * nu as f32).clamp(0.0, nu as f32 - 1.0) as usize;
let iv = (p_offset.y() * nv as f32).clamp(0.0, nv as f32 - 1.0) as usize;
let integral = self.p_marginal.integral();
if integral == 0.0 {
0.0
} else {
self.p_conditional_v[iv].func[iu] / integral
    /// Evaluates Sobol' dimension `dimension` for sample index `a`, applying
    /// the configured scrambler seeded with `hash`.
    fn sample_dimension(&self, dimension: usize, a: u32, hash: u32) -> Float {
        // Fast path: no randomization requested.
        if self.randomize == RandomizeStrategy::None {
            return sobol_sample(a as u64, dimension, NoRandomizer);
        }
        match self.randomize {
            RandomizeStrategy::PermuteDigits => {
                sobol_sample(a as u64, dimension, BinaryPermuteScrambler::new(hash))
            }
            RandomizeStrategy::FastOwen => {
                sobol_sample(a as u64, dimension, FastOwenScrambler::new(hash))
            }
            RandomizeStrategy::Owen => sobol_sample(a as u64, dimension, OwenScrambler::new(hash)),
            // Handled by the early return above.
            RandomizeStrategy::None => unreachable!(),
        }
    }
}
impl SamplerTrait for PaddedSobolSampler {
    fn samples_per_pixel(&self) -> usize {
        self.samples_per_pixel
    }
    fn start_pixel_sample(&mut self, p: Point2i, sample_index: usize, dim: Option<usize>) {
        self.pixel = p;
        self.sample_index = sample_index;
        self.dim = dim.unwrap_or(0);
    }
    /// 1D sample: permute the sample index with the low hash bits, scramble
    /// Sobol' dimension 0 with the high hash bits.
    fn get1d(&mut self) -> Float {
        let hash_input = [
            self.pixel.x() as u64,
            self.pixel.y() as u64,
            self.dim as u64,
            self.seed,
        ];
        // BUGFIX: keep the full 64-bit hash. It was truncated to u32 and then
        // shifted right by 32, which is an unconditional overflow in Rust
        // (and would always yield zero): low 32 bits drive the permutation,
        // high 32 bits seed the scrambler.
        let hash = hash_buffer(&hash_input, 0);
        let index = permutation_element(
            self.sample_index as u32,
            self.samples_per_pixel as u32,
            hash as u32,
        );
        // BUGFIX: advance the dimension; it was never incremented, so every
        // 1D request reused the same padded dimension.
        self.dim += 1;
        self.sample_dimension(0, index, (hash >> 32) as u32)
    }
    /// 2D sample: same permuted index for Sobol' dimensions 0 and 1,
    /// scrambled with the two hash halves.
    fn get2d(&mut self) -> Point2f {
        let hash_input = [
            self.pixel.x() as u64,
            self.pixel.y() as u64,
            self.dim as u64,
            self.seed,
        ];
        // BUGFIX: full 64-bit hash (see get1d).
        let hash = hash_buffer(&hash_input, 0);
        let index = permutation_element(
            self.sample_index as u32,
            self.samples_per_pixel as u32,
            hash as u32,
        );
        self.dim += 2;
        Point2f::new(
            self.sample_dimension(0, index, hash as u32),
            self.sample_dimension(1, index, (hash >> 32) as u32),
        )
    }
    fn get_pixel2d(&mut self) -> Point2f {
        self.get2d()
    }
}
#[derive(Default, Debug, Clone)]
/// Global Sobol' sampler: one Sobol' sequence spans the whole image, with the
/// first two dimensions scaled to cover the film.
pub struct SobolSampler {
    samples_per_pixel: usize,
    // Power-of-two resolution covering max(image width, height).
    scale: i32,
    seed: u64,
    randomize: RandomizeStrategy,
    pixel: Point2i,
    dim: usize,
    // Global Sobol' index of the current sample.
    sobol_index: u64,
}
impl SobolSampler {
    /// Creates a global Sobol' sampler; `scale` is the smallest power of two
    /// covering the larger image extent.
    pub fn new(
        samples_per_pixel: usize,
        full_resolution: Point2i,
        randomize: RandomizeStrategy,
        seed: Option<u64>,
    ) -> Self {
        let scale = round_up_pow2(full_resolution.x().max(full_resolution.y()));
        Self {
            samples_per_pixel,
            scale,
            seed: seed.unwrap_or(0),
            randomize,
            pixel: Point2i::default(),
            dim: 0,
            sobol_index: 0,
        }
    }
    /// Evaluates Sobol' dimension `dimension` at the current global index,
    /// scrambled per (pixel, seed).
    fn sample_dimension(&self, dimension: usize) -> Float {
        if self.randomize == RandomizeStrategy::None {
            return sobol_sample(self.sobol_index, dimension, NoRandomizer);
        }
        let hash_input = [self.pixel.x() as u64, self.pixel.y() as u64, self.seed];
        let hash = hash_buffer(&hash_input, 0) as u32;
        match self.randomize {
            RandomizeStrategy::PermuteDigits => sobol_sample(
                self.sobol_index,
                dimension,
                BinaryPermuteScrambler::new(hash),
            ),
            RandomizeStrategy::FastOwen => {
                sobol_sample(self.sobol_index, dimension, FastOwenScrambler::new(hash))
            }
            RandomizeStrategy::Owen => {
                sobol_sample(self.sobol_index, dimension, OwenScrambler::new(hash))
            }
            // Handled by the early return above.
            RandomizeStrategy::None => unreachable!(),
        }
    }
}
impl SamplerTrait for SobolSampler {
    fn samples_per_pixel(&self) -> usize {
        self.samples_per_pixel
    }
    /// Finds the global Sobol' index whose first two dimensions fall in pixel
    /// `p` for the given per-pixel sample index.
    fn start_pixel_sample(&mut self, p: Point2i, sample_index: usize, dim: Option<usize>) {
        self.pixel = p;
        // Dimensions 0 and 1 are reserved for the pixel sample.
        self.dim = 2.max(dim.unwrap_or(0));
        self.sobol_index =
            sobol_interval_to_index(log2_int(self.scale as Float) as u32, sample_index as u64, p)
    }
    fn get1d(&mut self) -> Float {
        // Wrap around once the supported dimension count is exhausted.
        if self.dim >= N_SOBOL_DIMENSIONS {
            self.dim = 2;
        }
        let dim = self.dim;
        self.dim += 1;
        self.sample_dimension(dim)
    }
    fn get2d(&mut self) -> Point2f {
        if self.dim >= N_SOBOL_DIMENSIONS {
            self.dim = 2;
        }
        let u = Point2f::new(
            self.sample_dimension(self.dim),
            self.sample_dimension(self.dim + 1),
        );
        self.dim += 2;
        u
    }
    /// Pixel sample: rescale dimensions 0/1 from image extent back into the
    /// unit square within the pixel.
    fn get_pixel2d(&mut self) -> Point2f {
        let mut u = Point2f::new(
            sobol_sample(self.sobol_index, 0, NoRandomizer),
            sobol_sample(self.sobol_index, 1, NoRandomizer),
        );
        u[0] = clamp_t(
            u[0] * self.scale as Float - self.pixel[0] as Float,
            0.,
            ONE_MINUS_EPSILON,
        ) as Float;
        // BUGFIX: the lower clamp bound is 0, not 1 — clamping to
        // [1, 1-eps) pinned the y offset and inverted the bounds.
        u[1] = clamp_t(
            u[1] * self.scale as Float - self.pixel[1] as Float,
            0.,
            ONE_MINUS_EPSILON,
        ) as Float;
        u
    }
}
#[derive(Default, Debug, Clone)]
/// Blue-noise-distributing Sobol' sampler: sample indices follow a Morton
/// (Z-order) curve over pixels with hashed base-4 digit permutations.
pub struct ZSobolSampler {
    randomize: RandomizeStrategy,
    seed: u64,
    log2_samples_per_pixel: u32,
    // Total base-4 digits covering (pixel Morton code, sample index).
    n_base4_digits: u32,
    // Morton code of the current pixel combined with the sample index.
    morton_index: u64,
    dim: usize,
}
impl ZSobolSampler {
    /// Creates a Z-Sobol' sampler; `samples_per_pixel` should be a power of
    /// two (its log2 is used directly).
    pub fn new(
        samples_per_pixel: u32,
        full_resolution: Point2i,
        randomize: RandomizeStrategy,
        seed: Option<u64>,
    ) -> Self {
        let log2_samples_per_pixel = log2_int(samples_per_pixel as Float) as u32;
        let res = round_up_pow2(full_resolution.x().max(full_resolution.y()));
        let log4_samples_per_pixel = log2_samples_per_pixel.div_ceil(2);
        let n_base4_digits = log2_int(res as Float) as u32 + log4_samples_per_pixel;
        Self {
            randomize,
            seed: seed.unwrap_or(0),
            log2_samples_per_pixel,
            n_base4_digits,
            morton_index: 0,
            dim: 0,
        }
    }
    /// Randomly permutes the base-4 digits of `morton_index` (conditioned on
    /// the higher digits and current dimension) to produce the Sobol' sample
    /// index for this dimension.
    fn get_sample_index(&self) -> u64 {
        // All 24 permutations of {0,1,2,3}.
        const PERMUTATIONS: [[u8; 4]; 24] = [
            [0, 1, 2, 3],
            [0, 1, 3, 2],
            [0, 2, 1, 3],
            [0, 2, 3, 1],
            [0, 3, 2, 1],
            [0, 3, 1, 2],
            [1, 0, 2, 3],
            [1, 0, 3, 2],
            [1, 2, 0, 3],
            [1, 2, 3, 0],
            [1, 3, 2, 0],
            [1, 3, 0, 2],
            [2, 1, 0, 3],
            [2, 1, 3, 0],
            [2, 0, 1, 3],
            [2, 0, 3, 1],
            [2, 3, 0, 1],
            [2, 3, 1, 0],
            [3, 1, 2, 0],
            [3, 1, 0, 2],
            [3, 2, 1, 0],
            [3, 2, 0, 1],
            [3, 0, 2, 1],
            [3, 0, 1, 2],
        ];
        let mut sample_index = 0;
        // Odd log2 => one leftover base-2 digit handled after the loop.
        let pow2_samples = (self.log2_samples_per_pixel & 1) != 0;
        let last_digit = if pow2_samples { 1 } else { 0 };
        for i in (last_digit..self.n_base4_digits).rev() {
            let digit_shift = (2 * i) - if pow2_samples { 1 } else { 0 };
            let mut digit = (self.morton_index >> digit_shift) & 3;
            // Choose the permutation from the more-significant digits and the
            // current dimension so each prefix gets an independent shuffle.
            let higher_digits = self.morton_index >> (digit_shift + 2);
            let mix_input = higher_digits ^ (0x55555555 * self.dim as u64);
            let p = (mix_bits(mix_input) >> 24) % 24;
            digit = PERMUTATIONS[p as usize][digit as usize] as u64;
            sample_index |= digit << digit_shift;
        }
        if pow2_samples {
            let lsb = self.morton_index & 1;
            // BUGFIX: the final base-2 digit must also be randomized by XORing
            // with a hashed bit; it was previously copied through unchanged,
            // leaving the low bit of the sample index unscrambled.
            sample_index |=
                lsb ^ (mix_bits((self.morton_index >> 1) ^ (0x55555555 * self.dim as u64)) & 1);
        }
        sample_index
    }
}
impl SamplerTrait for ZSobolSampler {
    fn samples_per_pixel(&self) -> usize {
        // BUGFIX: was todo!(); the count is 2^log2_samples_per_pixel.
        1usize << self.log2_samples_per_pixel
    }
    fn start_pixel_sample(&mut self, p: Point2i, sample_index: usize, dim: Option<usize>) {
        self.dim = dim.unwrap_or(0);
        // Interleave pixel coordinates into a Morton code; low bits hold the
        // per-pixel sample index.
        self.morton_index = (encode_morton_2(p.x() as u32, p.y() as u32)
            << self.log2_samples_per_pixel)
            | sample_index as u64
    }
    fn get1d(&mut self) -> Float {
        let sample_index = self.get_sample_index();
        // Advance first, then hash — mirrors get2d, which hashes the
        // post-increment dimension.
        self.dim += 1;
        let hash_input = [self.dim as u64, self.seed];
        let hash = hash_buffer(&hash_input, 0) as u32;
        // BUGFIX: always evaluate Sobol' dimension 0 — the per-dimension
        // decorrelation comes from get_sample_index(), exactly as get2d uses
        // dimensions 0 and 1. Previously this indexed dimension `self.dim`.
        if self.randomize == RandomizeStrategy::None {
            return sobol_sample(sample_index, 0, NoRandomizer);
        }
        match self.randomize {
            RandomizeStrategy::PermuteDigits => {
                sobol_sample(sample_index, 0, BinaryPermuteScrambler::new(hash))
            }
            RandomizeStrategy::FastOwen => {
                sobol_sample(sample_index, 0, FastOwenScrambler::new(hash))
            }
            RandomizeStrategy::Owen => sobol_sample(sample_index, 0, OwenScrambler::new(hash)),
            RandomizeStrategy::None => unreachable!(),
        }
    }
    fn get2d(&mut self) -> Point2f {
        let sample_index = self.get_sample_index();
        self.dim += 2;
        let hash_input = [self.dim as u64, self.seed];
        let hash = hash_buffer(&hash_input, 0);
        // One 32-bit scrambler seed per component.
        let sample_hash = [hash as u32, (hash >> 32) as u32];
        if self.randomize == RandomizeStrategy::None {
            return Point2f::new(
                sobol_sample(sample_index, 0, NoRandomizer),
                sobol_sample(sample_index, 1, NoRandomizer),
            );
        }
        match self.randomize {
            RandomizeStrategy::PermuteDigits => Point2f::new(
                sobol_sample(sample_index, 0, BinaryPermuteScrambler::new(sample_hash[0])),
                sobol_sample(sample_index, 1, BinaryPermuteScrambler::new(sample_hash[1])),
            ),
            RandomizeStrategy::FastOwen => Point2f::new(
                sobol_sample(sample_index, 0, FastOwenScrambler::new(sample_hash[0])),
                sobol_sample(sample_index, 1, FastOwenScrambler::new(sample_hash[1])),
            ),
            RandomizeStrategy::Owen => Point2f::new(
                sobol_sample(sample_index, 0, OwenScrambler::new(sample_hash[0])),
                sobol_sample(sample_index, 1, OwenScrambler::new(sample_hash[1])),
            ),
            RandomizeStrategy::None => unreachable!(),
        }
    }
    fn get_pixel2d(&mut self) -> Point2f {
        self.get2d()
    }
}
#[derive(Default, Debug, Clone)]
/// Metropolis light transport sampler — placeholder, not yet implemented.
pub struct MLTSampler;
impl SamplerTrait for MLTSampler {
    fn samples_per_pixel(&self) -> usize {
        todo!()
    }
    fn start_pixel_sample(&mut self, _p: Point2i, _sample_index: usize, _dim: Option<usize>) {
        todo!()
    }
    fn get1d(&mut self) -> Float {
        todo!()
    }
    fn get2d(&mut self) -> Point2f {
        todo!()
    }
    fn get_pixel2d(&mut self) -> Point2f {
        todo!()
    }
}
/// Common interface for all pixel samplers.
#[enum_dispatch]
pub trait SamplerTrait {
    /// Number of samples generated per pixel.
    fn samples_per_pixel(&self) -> usize;
    /// Prepares the sampler for sample `sample_index` of pixel `p`,
    /// optionally starting at dimension `dim`.
    fn start_pixel_sample(&mut self, p: Point2i, sample_index: usize, dim: Option<usize>);
    /// Next 1D sample value in [0, 1).
    fn get1d(&mut self) -> Float;
    /// Next 2D sample value in [0, 1)^2.
    fn get2d(&mut self) -> Point2f;
    /// 2D sample used for the film-plane (pixel) position.
    fn get_pixel2d(&mut self) -> Point2f;
}
#[enum_dispatch(SamplerTrait)]
#[derive(Debug, Clone)]
/// Closed set of sampler implementations; `enum_dispatch` forwards the
/// `SamplerTrait` methods to the active variant without dynamic dispatch.
pub enum Sampler {
    Independent(IndependentSampler),
    Stratified(StratifiedSampler),
    Halton(HaltonSampler),
    PaddedSobol(PaddedSobolSampler),
    Sobol(SobolSampler),
    ZSobol(ZSobolSampler),
    MLT(MLTSampler),
}

173
src/core/scattering.rs Normal file
View file

@ -0,0 +1,173 @@
use crate::core::geometry::{
Normal3f, Point2f, Vector2f, Vector3f, VectorLike, abs_cos_theta, cos_phi, cos2_theta, sin_phi,
tan2_theta,
};
use crate::core::pbrt::{Float, PI, clamp_t};
use crate::spectra::{N_SPECTRUM_SAMPLES, SampledSpectrum};
use crate::utils::math::safe_sqrt;
use crate::utils::math::{lerp, square};
use crate::utils::sampling::sample_uniform_disk_polar;
use num::complex::Complex;
#[derive(Debug, Default, Clone, Copy)]
/// Anisotropic Trowbridge–Reitz (GGX) microfacet distribution with roughness
/// parameters `alpha_x`/`alpha_y` along the tangent directions.
pub struct TrowbridgeReitzDistribution {
    alpha_x: Float,
    alpha_y: Float,
}
impl TrowbridgeReitzDistribution {
    pub fn new(alpha_x: Float, alpha_y: Float) -> Self {
        Self { alpha_x, alpha_y }
    }
    /// Microfacet normal distribution D(wm).
    pub fn d(&self, wm: Vector3f) -> Float {
        let tan2_theta = tan2_theta(wm);
        // Grazing limit: the distribution vanishes.
        if tan2_theta.is_infinite() {
            return 0.;
        }
        let cos4_theta = square(cos2_theta(wm));
        let e =
            tan2_theta * (square(cos_phi(wm) / self.alpha_x) + square(sin_phi(wm) / self.alpha_y));
        1.0 / (PI * self.alpha_x * self.alpha_y * cos4_theta * square(1. + e))
    }
    /// Below this roughness the surface is treated as a perfect specular.
    pub fn effectively_smooth(&self) -> bool {
        self.alpha_x.max(self.alpha_y) < 1e-3
    }
    /// Smith auxiliary function Λ(w) for masking/shadowing.
    pub fn lambda(&self, w: Vector3f) -> Float {
        let tan2_theta = tan2_theta(w);
        if tan2_theta.is_infinite() {
            return 0.;
        }
        let alpha2 = square(cos_phi(w) * self.alpha_x) + square(sin_phi(w) * self.alpha_y);
        ((1. + alpha2 * tan2_theta).sqrt() - 1.) / 2.
    }
    /// Bidirectional masking-shadowing G(wo, wi) = 1 / (1 + Λ(wo) + Λ(wi)).
    pub fn g(&self, wo: Vector3f, wi: Vector3f) -> Float {
        1. / (1. + self.lambda(wo) + self.lambda(wi))
    }
    /// Monodirectional masking G1(w) = 1 / (1 + Λ(w)).
    pub fn g1(&self, w: Vector3f) -> Float {
        // BUGFIX: was 1/(1/Λ) = Λ — a typo that returned Λ itself instead of
        // the Smith masking term.
        1. / (1. + self.lambda(w))
    }
    /// Distribution of visible normals D_w(wm).
    pub fn d_from_w(&self, w: Vector3f, wm: Vector3f) -> Float {
        self.g1(w) / abs_cos_theta(w) * self.d(wm) * w.dot(wm).abs()
    }
    pub fn pdf(&self, w: Vector3f, wm: Vector3f) -> Float {
        self.d_from_w(w, wm)
    }
    /// Samples a visible microfacet normal for direction `w` (VNDF sampling):
    /// stretch to the hemisphere configuration, sample the projected disk,
    /// reproject, and unstretch.
    pub fn sample_wm(&self, w: Vector3f, u: Point2f) -> Vector3f {
        let mut wh = Vector3f::new(self.alpha_x * w.x(), self.alpha_y * w.y(), w.z()).normalize();
        if wh.z() < 0. {
            wh = -wh;
        }
        // Orthonormal basis around wh.
        let t1 = if wh.z() < 0.99999 {
            Vector3f::new(0., 0., 1.).cross(wh).normalize()
        } else {
            Vector3f::new(1., 0., 0.)
        };
        let t2 = wh.cross(t1);
        let mut p = sample_uniform_disk_polar(u);
        // Warp the disk sample to account for the visible hemisphere.
        let h = (1. - square(p.x())).sqrt();
        p[1] = lerp((1. + wh.z()) / 2., h, p.y());
        // BUGFIX: reprojection onto the hemisphere needs the square root
        // (z = sqrt(1 - x^2 - y^2)); also avoid a hard-coded f32 literal so
        // the code still compiles when `Float` is f64.
        let pz = (1. - Vector2f::from(p).norm_squared()).max(0.).sqrt();
        let nh = p.x() * t1 + p.y() * t2 + pz * wh;
        Vector3f::new(
            self.alpha_x * nh.x(),
            self.alpha_y * nh.y(),
            nh.z().max(1e-6),
        )
        .normalize()
    }
    /// Conventional perceptual-roughness-to-alpha mapping.
    pub fn roughness_to_alpha(roughness: Float) -> Float {
        roughness.sqrt()
    }
    /// Widens near-specular distributions for path regularization.
    pub fn regularize(&mut self) {
        if self.alpha_x < 0.3 {
            self.alpha_x = clamp_t(2. * self.alpha_x, 0.1, 0.3);
        }
        if self.alpha_y < 0.3 {
            self.alpha_y = clamp_t(2. * self.alpha_y, 0.1, 0.3);
        }
    }
}
/// Refracts `wi` across the interface with normal `n` and relative IOR
/// `eta_ratio` (transmitted side over incident side). Returns `None` on total
/// internal reflection, otherwise the transmitted direction and the relative
/// eta actually used (flipped when `wi` arrives from the back side).
pub fn refract(wi: Vector3f, n: Normal3f, eta_ratio: Float) -> Option<(Vector3f, Float)> {
    let mut n_interface = n;
    let mut eta = eta_ratio;
    let mut cos_theta_i = Vector3f::from(n_interface).dot(wi);
    // If wi is on the far side of the surface, flip the frame and invert eta.
    if cos_theta_i < 0.0 {
        eta = 1.0 / eta;
        cos_theta_i = -cos_theta_i;
        n_interface = -n_interface;
    }
    // BUGFIX: use an untyped `0.0` (inferred as Float) instead of a hard-coded
    // `0.0_f32`, which fails to compile when the `use_f64` feature makes
    // Float = f64.
    let sin2_theta_i = (1.0 - square(cos_theta_i)).max(0.0);
    // Snell's law in terms of squared sines.
    let sin2_theta_t = sin2_theta_i / square(eta);
    // Handle total internal reflection
    if sin2_theta_t >= 1.0 {
        return None;
    }
    let cos_theta_t = (1.0 - sin2_theta_t).sqrt();
    let wt = -wi / eta + (cos_theta_i / eta - cos_theta_t) * Vector3f::from(n_interface);
    Some((wt, eta))
}
/// Mirror-reflects `wo` about the surface normal `n`: r = 2(wo·n)n − wo.
pub fn reflect(wo: Vector3f, n: Normal3f) -> Vector3f {
    let cos_theta = wo.dot(n.into());
    -wo + Vector3f::from(2. * cos_theta * n)
}
/// Unpolarized Fresnel reflectance at a dielectric interface with relative
/// IOR `eta`; handles rays arriving from either side and total internal
/// reflection (returning 1 in that case).
pub fn fr_dielectric(cos_theta_i: Float, eta: Float) -> Float {
    let mut cos_i = clamp_t(cos_theta_i, -1., 1.);
    let mut rel_eta = eta;
    // Arriving from the back side: flip the frame and invert eta.
    if cos_i < 0. {
        rel_eta = 1. / rel_eta;
        cos_i = -cos_i;
    }
    // Snell's law in terms of squared sines.
    let sin2_i = 1. - square(cos_i);
    let sin2_t = sin2_i / square(rel_eta);
    if sin2_t >= 1. {
        // Total internal reflection.
        return 1.;
    }
    let cos_t = safe_sqrt(1. - sin2_t);
    // Parallel / perpendicular polarization amplitudes.
    let r_par = (rel_eta * cos_i - cos_t) / (rel_eta * cos_i + cos_t);
    let r_perp = (cos_i - rel_eta * cos_t) / (cos_i + rel_eta * cos_t);
    (square(r_par) + square(r_perp)) / 2.
}
/// Unpolarized Fresnel reflectance for a conductor with complex IOR `eta`.
pub fn fr_complex(cos_theta_i: Float, eta: Complex<Float>) -> Float {
    let cos_corr = clamp_t(cos_theta_i, 0., 1.);
    let sin2_theta_i = 1. - square(cos_corr);
    let sin2_theta_t: Complex<Float> = sin2_theta_i / square(eta);
    let cos_theta_t: Complex<Float> = (1. - sin2_theta_t).sqrt();
    let r_parl = (eta * cos_corr - cos_theta_t) / (eta * cos_corr + cos_theta_t);
    let r_perp = (cos_corr - eta * cos_theta_t) / (cos_corr + eta * cos_theta_t);
    // BUGFIX: the reflectance is the mean of the squared magnitudes |r|^2.
    // num's `Complex::norm()` is the magnitude |z| (unlike C++ std::norm,
    // which is |z|^2) — use `norm_sqr()`.
    (r_parl.norm_sqr() + r_perp.norm_sqr()) / 2.
}
/// Evaluates the conductor Fresnel reflectance per spectral sample, pairing
/// `eta[i]` (real IOR) with `k[i]` (absorption) as a complex IOR.
pub fn fr_complex_from_spectrum(
    cos_theta_i: Float,
    eta: SampledSpectrum,
    k: SampledSpectrum,
) -> SampledSpectrum {
    let mut result = SampledSpectrum::default();
    for i in 0..N_SPECTRUM_SAMPLES {
        result[i] = fr_complex(cos_theta_i, Complex::new(eta[i], k[i]));
    }
    result
}

View file

@ -1,41 +1,295 @@
use crate::core::interaction::{Interaction, InteractionTrait, SurfaceInteraction};
use crate::core::pbrt::Float;
use crate::geometry::{Point3f, Vector3f, Normal3f, Point2f};
use crate::core::interaction::SurfaceInteraction;
use crate::core::pbrt::{INV_2_PI, INV_PI, PI};
use crate::geometry::{Normal3f, Point2f, Point3f, Vector2f, Vector3f, VectorLike};
use crate::geometry::{spherical_phi, spherical_theta};
use crate::image::WrapMode;
use crate::spectra::{
RGBAlbedoSpectrum, RGBIlluminantSpectrum, RGBUnboundedSpectrum, SampledSpectrum, Spectrum,
};
use crate::spectra::{SampledWavelengths, SpectrumTrait};
use crate::utils::color::{ColorEncoding, RGB};
use crate::utils::math::square;
use crate::utils::mipmap::MIPMap;
use crate::utils::mipmap::{MIPMapFilterOptions, MIPMapSample};
use crate::utils::transform::Transform;
pub struct TextureEvalContext {
use std::collections::HashMap;
use std::sync::{Arc, Mutex, OnceLock};
use enum_dispatch::enum_dispatch;
use std::path::Path;
/// A 2D texture coordinate plus its screen-space partial derivatives,
/// used for filter-footprint estimation.
struct TexCoord2D {
    st: Point2f,
    dsdx: Float,
    dsdy: Float,
    dtdx: Float,
    dtdy: Float,
}
/// Maps a shading point (with differentials) to (s, t) texture coordinates.
#[enum_dispatch]
trait TextureMapping2DTrait {
    fn map(&self, ctx: &TextureEvalContext) -> TexCoord2D;
}
#[enum_dispatch(TextureMapping2DTrait)]
#[derive(Clone, Debug)]
/// Closed set of 2D texture-coordinate generation methods.
pub enum TextureMapping2D {
    UV(UVMapping),
    Spherical(SphericalMapping),
    Cylindrical(CylindricalMapping),
    Planar(PlanarMapping),
}
#[derive(Clone, Debug)]
/// Scale-and-offset mapping of the surface (u, v) parameterization:
/// s = su·u + du, t = sv·v + dv.
pub struct UVMapping {
    su: Float,
    sv: Float,
    du: Float,
    dv: Float,
}
impl Default for UVMapping {
    /// Identity mapping: unit scale, zero offset.
    fn default() -> Self {
        Self {
            su: 1.0,
            sv: 1.0,
            du: 0.0,
            dv: 0.0,
        }
    }
}
impl TextureMapping2DTrait for UVMapping {
    /// s = su·u + du, t = sv·v + dv; screen-space derivatives scale by the
    /// same factors.
    fn map(&self, ctx: &TextureEvalContext) -> TexCoord2D {
        let dsdx = self.su * ctx.dudx;
        let dsdy = self.su * ctx.dudy;
        let dtdx = self.sv * ctx.dvdx;
        let dtdy = self.sv * ctx.dvdy;
        // BUGFIX: the t offset is additive (`+ self.dv`), matching the s
        // component; it was erroneously multiplied (`* self.dv`), which zeroes
        // t for the default dv = 0.
        let st = Point2f::new(self.su * ctx.uv[0] + self.du, self.sv * ctx.uv[1] + self.dv);
        TexCoord2D {
            st,
            dsdx,
            dsdy,
            dtdx,
            dtdy,
        }
    }
}
#[derive(Clone, Debug)]
/// Projects points onto a sphere around the texture-space origin;
/// (s, t) are normalized spherical angles.
pub struct SphericalMapping {
    texture_from_render: Transform<Float>,
}
impl SphericalMapping {
    pub fn new(texture_from_render: &Transform<Float>) -> Self {
        Self {
            texture_from_render: *texture_from_render,
        }
    }
}
impl TextureMapping2DTrait for SphericalMapping {
    /// s = θ/π, t = φ/2π of the point's direction from the texture-space
    /// origin; derivatives come from the analytic gradients of (s, t).
    fn map(&self, ctx: &TextureEvalContext) -> TexCoord2D {
        let pt = self.texture_from_render.apply_to_point(ctx.p);
        let x2y2 = square(pt.x()) + square(pt.y());
        let sqrtx2y2 = x2y2.sqrt();
        // Gradient of s (azimuth) with respect to position.
        let dsdp = Vector3f::new(-pt.y(), pt.x(), 0.) / (2. * PI * x2y2);
        // Gradient of t (polar angle) with respect to position.
        // BUGFIX: the denominator is π·(x² + y² + z²) — the squared distance
        // to the origin — not π·(x² + y²)·z².
        let dtdp = 1. / (PI * (x2y2 + square(pt.z())))
            * Vector3f::new(
                pt.x() * pt.z() / sqrtx2y2,
                pt.y() * pt.z() / sqrtx2y2,
                -sqrtx2y2,
            );
        let dpdx = self.texture_from_render.apply_to_vector(ctx.dpdx);
        let dpdy = self.texture_from_render.apply_to_vector(ctx.dpdy);
        // Chain rule: screen-space derivatives of (s, t).
        let dsdx = dsdp.dot(dpdx);
        let dsdy = dsdp.dot(dpdy);
        let dtdx = dtdp.dot(dpdx);
        let dtdy = dtdp.dot(dpdy);
        let vec = (pt - Point3f::default()).normalize();
        let st = Point2f::new(spherical_theta(vec) * INV_PI, spherical_phi(vec) * INV_2_PI);
        TexCoord2D {
            st,
            dsdx,
            dsdy,
            dtdx,
            dtdy,
        }
    }
}
#[derive(Clone, Debug)]
/// Projects points onto a cylinder around the texture-space z axis;
/// s is the normalized azimuth and t is the height z.
pub struct CylindricalMapping {
    texture_from_render: Transform<Float>,
}
impl CylindricalMapping {
    pub fn new(texture_from_render: &Transform<Float>) -> Self {
        Self {
            texture_from_render: *texture_from_render,
        }
    }
}
impl TextureMapping2DTrait for CylindricalMapping {
    /// s = (π + atan2(y, x)) / 2π ∈ [0, 1), t = z; derivatives via the
    /// analytic gradients.
    fn map(&self, ctx: &TextureEvalContext) -> TexCoord2D {
        let pt = self.texture_from_render.apply_to_point(ctx.p);
        let x2y2 = square(pt.x()) + square(pt.y());
        // Gradient of the azimuthal coordinate.
        let dsdp = Vector3f::new(-pt.y(), pt.x(), 0.) / (2. * PI * x2y2);
        // t is just z, so its gradient is constant.
        let dtdp = Vector3f::new(1., 0., 0.);
        let dpdx = self.texture_from_render.apply_to_vector(ctx.dpdx);
        let dpdy = self.texture_from_render.apply_to_vector(ctx.dpdy);
        let dsdx = dsdp.dot(dpdx);
        let dsdy = dsdp.dot(dpdy);
        let dtdx = dtdp.dot(dpdx);
        let dtdy = dtdp.dot(dpdy);
        // BUGFIX: the azimuth is offset by π (`PI + atan2`) so s lands in
        // [0, 1); it was erroneously multiplied (`PI * atan2`).
        let st = Point2f::new((PI + pt.y().atan2(pt.x())) * INV_2_PI, pt.z());
        TexCoord2D {
            st,
            dsdx,
            dsdy,
            dtdx,
            dtdy,
        }
    }
}
#[derive(Clone, Debug)]
/// Planar projection: s and t are affine functions of position along the
/// axes `vs` and `vt` with offsets `ds`/`dt`.
pub struct PlanarMapping {
    texture_from_render: Transform<Float>,
    vs: Vector3f,
    vt: Vector3f,
    ds: Float,
    dt: Float,
}
impl PlanarMapping {
    pub fn new(
        texture_from_render: &Transform<Float>,
        vs: Vector3f,
        vt: Vector3f,
        ds: Float,
        dt: Float,
    ) -> Self {
        Self {
            texture_from_render: *texture_from_render,
            vs,
            vt,
            ds,
            dt,
        }
    }
}
impl TextureMapping2DTrait for PlanarMapping {
    /// s = ds + p·vs, t = dt + p·vt; derivatives are the projections of the
    /// position differentials onto the plane axes.
    fn map(&self, ctx: &TextureEvalContext) -> TexCoord2D {
        let vec: Vector3f = self.texture_from_render.apply_to_point(ctx.p).into();
        let dpdx = self.texture_from_render.apply_to_vector(ctx.dpdx);
        let dpdy = self.texture_from_render.apply_to_vector(ctx.dpdy);
        let dsdx = self.vs.dot(dpdx);
        let dsdy = self.vs.dot(dpdy);
        let dtdx = self.vt.dot(dpdx);
        let dtdy = self.vt.dot(dpdy);
        let st = Point2f::new(self.ds + vec.dot(self.vs), self.dt + vec.dot(self.vt));
        TexCoord2D {
            st,
            dsdx,
            dsdy,
            dtdx,
            dtdy,
        }
    }
}
pub struct TexCoord3D {
p: Point3f,
dpdx: Vector3f,
dpdx: Vector3f,
dpdy: Vector3f,
n: Normal3f,
uv: Point2f,
// All 0
dudx: Float,
dudy: Float,
dvdx: Float,
dvdy: Float,
face_index: usize,
}
/// Maps a shading point (with differentials) to 3D texture-space coordinates.
pub trait TextureMapping3DTrait {
    fn map(&self, ctx: &TextureEvalContext) -> TexCoord3D;
}
#[derive(Clone, Debug)]
#[enum_dispatch(TextureMapping3DTrait)]
/// Closed set of 3D texture-coordinate generation methods.
pub enum TextureMapping3D {
    PointTransform(PointTransformMapping),
}
#[derive(Clone, Debug)]
/// 3D mapping that simply transforms render-space points into texture space.
pub struct PointTransformMapping {
    texture_from_render: Transform<Float>,
}
impl PointTransformMapping {
    pub fn new(texture_from_render: &Transform<Float>) -> Self {
        Self {
            texture_from_render: *texture_from_render,
        }
    }
}
impl TextureMapping3DTrait for PointTransformMapping {
    /// Transforms the point and its differentials into texture space.
    fn map(&self, ctx: &TextureEvalContext) -> TexCoord3D {
        TexCoord3D {
            p: self.texture_from_render.apply_to_point(ctx.p),
            dpdx: self.texture_from_render.apply_to_vector(ctx.dpdx),
            dpdy: self.texture_from_render.apply_to_vector(ctx.dpdy),
        }
    }
}
#[derive(Clone, Default, Debug)]
/// Everything a texture needs to evaluate itself at a shading point: the
/// position, its screen-space differentials, the surface normal, and the
/// (u, v) parameterization with its differentials.
pub struct TextureEvalContext {
    pub p: Point3f,
    pub dpdx: Vector3f,
    pub dpdy: Vector3f,
    pub n: Normal3f,
    pub uv: Point2f,
    pub dudx: Float,
    pub dudy: Float,
    pub dvdx: Float,
    pub dvdy: Float,
    pub face_index: usize,
}
impl TextureEvalContext {
pub fn new(p: Point3f,
dpdx: Vector3f,
dpdy: Vector3f,
n: Normal3f,
uv: Point2f,
dudx: Float,
dudy: Float,
dvdx: Float,
dvdy: Float,
face_index: usize,
#[allow(clippy::too_many_arguments)]
pub fn new(
p: Point3f,
dpdx: Vector3f,
dpdy: Vector3f,
n: Normal3f,
uv: Point2f,
dudx: Float,
dudy: Float,
dvdx: Float,
dvdy: Float,
face_index: usize,
) -> Self {
Self {p, dpdx, dpdy, n, uv, dudx, dudy, dvdx, dvdy , face_index }
Self {
p,
dpdx,
dpdy,
n,
uv,
dudx,
dudy,
dvdx,
dvdy,
face_index,
}
}
}
impl From<&SurfaceInteraction> for TextureEvalContext {
fn from(si: &SurfaceInteraction) -> Self {
Self {
p: si.common.pi.into(),
p: si.p(),
dpdx: si.dpdx,
dpdy: si.dpdy,
n: si.common.n,
@ -49,18 +303,62 @@ impl From<&SurfaceInteraction> for TextureEvalContext {
}
}
impl From<&Interaction> for TextureEvalContext {
    fn from(intr: &Interaction) -> Self {
        match intr {
            // Surface interactions carry full differential information.
            Interaction::Surface(si) => TextureEvalContext::from(si),
            // Medium and simple interactions only provide a position;
            // all derivatives, uv, and normal stay at their defaults (zero).
            Interaction::Medium(mi) => TextureEvalContext {
                p: mi.p(),
                ..Default::default()
            },
            Interaction::Simple(si) => TextureEvalContext {
                p: si.p(),
                ..Default::default()
            },
        }
    }
}
/// Interface for textures that evaluate to a single `Float`.
#[enum_dispatch]
pub trait FloatTextureTrait: Send + Sync + std::fmt::Debug {
    // Default body is a stub so not-yet-implemented textures can exist;
    // concrete textures override it.
    fn evaluate(&self, _ctx: &TextureEvalContext) -> Float {
        todo!()
    }
}
// Legacy unit struct kept so residual references elsewhere still resolve;
// no longer a variant of `FloatTexture`.
#[derive(Debug, Clone)]
pub struct FloatImageTexture;

/// Dispatch enum over every scalar texture type.
///
/// Diff-residue cleanup: the `#[enum_dispatch]`/`#[derive]` attributes had a
/// stray struct declaration wedged between them and the enum; the
/// `GPUFLoatPtex` variant name typo is fixed to `GPUFloatPtex`.
#[enum_dispatch(FloatTextureTrait)]
#[derive(Debug, Clone)]
pub enum FloatTexture {
    FloatConstant(FloatConstantTexture),
    GPUFloatImage(GPUFloatImageTexture),
    FloatMix(FloatMixTexture),
    FloatDirectionMix(FloatDirectionMixTexture),
    FloatScaled(FloatScaledTexture),
    FloatBilerp(FloatBilerpTexture),
    FloatCheckerboard(FloatCheckerboardTexture),
    FloatDots(FloatDotsTexture),
    FBm(FBmTexture),
    FloatPtex(FloatPtexTexture),
    GPUFloatPtex(GPUFloatPtexTexture),
    Windy(WindyTexture),
    Wrinkled(WrinkledTexture),
}
impl FloatTextureTrait for FloatImageTexture {
#[derive(Debug, Clone)]
pub struct FloatConstantTexture {
value: Float,
}
impl FloatConstantTexture {
pub fn new(value: Float) -> Self {
Self { value }
}
}
impl FloatTextureTrait for FloatConstantTexture {
fn evaluate(&self, _ctx: &TextureEvalContext) -> Float {
todo!();
self.value
}
}
@ -69,20 +367,56 @@ pub struct GPUFloatImageTexture;
impl FloatTextureTrait for GPUFloatImageTexture {}
/// Linear blend of two textures: `(1 - amount) * tex1 + amount * tex2`.
///
/// Diff-residue cleanup: the old unit-struct declaration and its empty
/// trait impl were interleaved with the new definition.
#[derive(Debug, Clone)]
pub struct FloatMixTexture {
    tex1: Box<FloatTexture>,
    tex2: Box<FloatTexture>,
    amount: Box<FloatTexture>,
}
impl FloatMixTexture {
    pub fn new(
        tex1: Box<FloatTexture>,
        tex2: Box<FloatTexture>,
        amount: Box<FloatTexture>,
    ) -> Self {
        Self { tex1, tex2, amount }
    }
}
impl FloatTextureTrait for FloatMixTexture {
    fn evaluate(&self, ctx: &TextureEvalContext) -> Float {
        let amt = self.amount.evaluate(ctx);
        let mut t1 = 0.;
        let mut t2 = 0.;
        // Skip evaluating whichever input has zero weight.
        if amt != 1. {
            t1 = self.tex1.evaluate(ctx);
        }
        if amt != 0. {
            t2 = self.tex2.evaluate(ctx);
        }
        (1. - amt) * t1 + amt * t2
    }
}
// Stub: relies on the trait's default todo!() evaluate.
#[derive(Debug, Clone)]
pub struct FloatDirectionMixTexture;
impl FloatTextureTrait for FloatDirectionMixTexture {}
/// Texture scaled by another (scalar) texture.
///
/// Fixes: the original returned `self.tex.evaluate(ctx)` without applying
/// the scale — compare `SpectrumScaledTexture::evaluate` below, which
/// multiplies. Also drops old unit-struct diff residue that was fused in.
#[derive(Debug, Clone)]
pub struct FloatScaledTexture {
    tex: Box<FloatTexture>,
    scale: Box<FloatTexture>,
}
impl FloatTextureTrait for FloatScaledTexture {
    fn evaluate(&self, ctx: &TextureEvalContext) -> Float {
        let sc = self.scale.evaluate(ctx);
        // Short-circuit: no need to evaluate the base texture at all.
        if sc == 0. {
            return 0.;
        }
        sc * self.tex.evaluate(ctx)
    }
}
#[derive(Debug, Clone)]
pub struct FloatBilerpTexture;
@ -105,8 +439,8 @@ pub struct FloatPtexTexture;
impl FloatTextureTrait for FloatPtexTexture {}
#[derive(Debug, Clone)]
pub struct GPUFloatPtex;
impl FloatTextureTrait for GPUFloatPtex {}
pub struct GPUFloatPtexTexture;
impl FloatTextureTrait for GPUFloatPtexTexture {}
#[derive(Debug, Clone)]
pub struct WindyTexture;
@ -116,60 +450,15 @@ impl FloatTextureTrait for WindyTexture {}
pub struct WrinkledTexture;
impl FloatTextureTrait for WrinkledTexture {}
#[derive(Debug, Clone)]
pub enum FloatTexture {
FloatImage(FloatImageTexture),
GPUFloatImage(GPUFloatImageTexture),
FloatMix(FloatMixTexture),
FloatDirectionMix(FloatDirectionMixTexture),
FloatScaled(FloatScaledTexture),
FloatConstant(FloatConstantTexture),
FloatBilerp(FloatBilerpTexture),
FloatCheckerboard(FloatCheckerboardTexture),
FloatDots(FloatDotsTexture),
FBm(FBmTexture),
FloatPtex(FloatPtexTexture),
GPUFloatPtex(GPUFloatPtex),
Windy(WindyTexture),
Wrinkled(WrinkledTexture),
}
impl FloatTextureTrait for FloatTexture {
fn evaluate(&self, ctx: &TextureEvalContext) -> Float {
match self {
FloatTexture::FloatImage(texture) => texture.evaluate(ctx),
FloatTexture::GPUFloatImage(texture) => texture.evaluate(ctx),
FloatTexture::FloatMix(texture) => texture.evaluate(ctx),
FloatTexture::FloatDirectionMix(texture) => texture.evaluate(ctx),
FloatTexture::FloatScaled(texture) => texture.evaluate(ctx),
FloatTexture::FloatConstant(texture) => texture.evaluate(ctx),
FloatTexture::FloatBilerp(texture) => texture.evaluate(ctx),
FloatTexture::FloatCheckerboard(texture) => texture.evaluate(ctx),
FloatTexture::FloatDots(texture) => texture.evaluate(ctx),
FloatTexture::FBm(texture) => texture.evaluate(ctx),
FloatTexture::FloatPtex(texture) => texture.evaluate(ctx),
FloatTexture::GPUFloatPtex(texture) => texture.evaluate(ctx),
FloatTexture::Windy(texture) => texture.evaluate(ctx),
FloatTexture::Wrinkled(texture) => texture.evaluate(ctx),
}
/// Interface for textures that evaluate to a `SampledSpectrum` at the
/// given wavelengths.
#[enum_dispatch]
pub trait SpectrumTextureTrait: Send + Sync + std::fmt::Debug {
    // Default body is a stub; concrete textures override it.
    fn evaluate(&self, _ctx: &TextureEvalContext, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
}
pub struct RGBConstantTexture;
pub struct RGBReflectanceConstantTexture;
pub struct SpectrumConstantTexture;
pub struct SpectrumBilerpTexture;
pub struct SpectrumCheckerboardTexture;
pub struct SpectrumImageTexture;
pub struct GPUSpectrumImageTexture;
pub struct MarbleTexture;
pub struct SpectrumMixTexture;
pub struct SpectrumDirectionMixTexture;
pub struct SpectrumDotsTexture;
pub struct SpectrumPtexTexture;
pub struct GPUSpectrumPtexTexture;
pub struct SpectrumScaledTexture;
#[derive(Clone, Debug)]
#[enum_dispatch(SpectrumTextureTrait)]
pub enum SpectrumTexture {
RGBConstant(RGBConstantTexture),
RGBReflectanceConstant(RGBReflectanceConstantTexture),
@ -187,23 +476,319 @@ pub enum SpectrumTexture {
SpectrumScaled(SpectrumScaledTexture),
}
impl SpectrumTexture {
// pub fn evaluate(&self, ctx: TextureEvalContext) -> f32 {
// match self {
// SpectrumTexture::FloatImage(texture) => texture.evaluate(ctx),
// SpectrumTexture::GPUFloatImage(texture) => texture.evaluate(ctx),
// SpectrumTexture::FloatMix(texture) => texture.evaluate(ctx),
// SpectrumTexture::FloatDirectionMix(texture) => texture.evaluate(ctx),
// SpectrumTexture::FloatScaled(texture) => texture.evaluate(ctx),
// SpectrumTexture::FloatConstant(texture) => texture.evaluate(ctx),
// SpectrumTexture::FloatBilerp(texture) => texture.evaluate(ctx),
// SpectrumTexture::FloatCheckerboard(texture) => texture.evaluate(ctx),
// SpectrumTexture::FloatDots(texture) => texture.evaluate(ctx),
// SpectrumTexture::FBm(texture) => texture.evaluate(ctx),
// SpectrumTexture::FloatPtex(texture) => texture.evaluate(ctx),
// SpectrumTexture::GPUFloatPtex(texture) => texture.evaluate(ctx),
// SpectrumTexture::Windy(texture) => texture.evaluate(ctx),
// SpectrumTexture::Wrinkled(texture) => texture.evaluate(ctx),
// }
// }
// Stub: not yet implemented.
#[derive(Clone, Debug)]
pub struct RGBConstantTexture;
impl SpectrumTextureTrait for RGBConstantTexture {
    fn evaluate(&self, _ctx: &TextureEvalContext, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
}
// Stub: not yet implemented.
#[derive(Clone, Debug)]
pub struct RGBReflectanceConstantTexture;
impl SpectrumTextureTrait for RGBReflectanceConstantTexture {
    fn evaluate(&self, _ctx: &TextureEvalContext, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
}
/// Texture that samples the same spectrum everywhere.
#[derive(Clone, Debug)]
pub struct SpectrumConstantTexture {
    value: Spectrum,
}
impl SpectrumConstantTexture {
    /// Creates a constant spectrum texture. Added for parity with
    /// `FloatConstantTexture::new`; the private field was otherwise
    /// unconstructible from outside this module.
    pub fn new(value: Spectrum) -> Self {
        Self { value }
    }
}
impl SpectrumTextureTrait for SpectrumConstantTexture {
    fn evaluate(&self, _ctx: &TextureEvalContext, lambda: &SampledWavelengths) -> SampledSpectrum {
        self.value.sample(lambda)
    }
}
// Stub: not yet implemented.
#[derive(Clone, Debug)]
pub struct SpectrumBilerpTexture;
impl SpectrumTextureTrait for SpectrumBilerpTexture {
    fn evaluate(&self, _ctx: &TextureEvalContext, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
}
// Stub: not yet implemented.
#[derive(Clone, Debug)]
pub struct SpectrumCheckerboardTexture;
impl SpectrumTextureTrait for SpectrumCheckerboardTexture {
    fn evaluate(&self, _ctx: &TextureEvalContext, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
}
/// How an RGB image should be interpreted when converted to a spectrum
/// (see `SpectrumImageTexture::evaluate`).
///
/// Added `Copy`, `PartialEq`, `Eq`: a fieldless enum is trivially copyable
/// and comparable, which lets callers test the type without matching.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SpectrumType {
    Illuminant,
    Albedo,
    Unbounded,
}
/// Image-backed spectrum texture; `spectrum_type` controls how RGB texels
/// are promoted to spectra.
#[derive(Clone, Debug)]
pub struct SpectrumImageTexture {
    base: ImageTextureBase,
    spectrum_type: SpectrumType,
}
impl SpectrumImageTexture {
    /// Loads (or fetches from cache, via `ImageTextureBase::new`) the image
    /// and wraps it together with its spectrum interpretation.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        mapping: TextureMapping2D,
        filename: String,
        filter_options: MIPMapFilterOptions,
        wrap_mode: WrapMode,
        scale: Float,
        invert: bool,
        encoding: ColorEncoding,
        spectrum_type: SpectrumType,
    ) -> Self {
        Self {
            base: ImageTextureBase::new(
                mapping,
                filename,
                filter_options,
                wrap_mode,
                scale,
                invert,
                encoding,
            ),
            spectrum_type,
        }
    }
}
impl SpectrumTextureTrait for SpectrumImageTexture {
    fn evaluate(&self, ctx: &TextureEvalContext, lambda: &SampledWavelengths) -> SampledSpectrum {
        let mut c = self.base.mapping.map(ctx);
        // Flip t: image rows are stored with y increasing downward.
        c.st[1] = 1. - c.st[1];
        // Screen-space (s, t) derivative vectors for filter footprint.
        let dst0 = Vector2f::new(c.dsdx, c.dtdx);
        let dst1 = Vector2f::new(c.dsdy, c.dtdy);
        // Filtered lookup, scaled, then clamped to non-negative.
        // NOTE(review): `self.base.invert` is not applied here — confirm
        // where inversion is meant to take effect.
        let rgb_unclamp = self.base.scale * self.base.mipmap.filter::<RGB>(c.st, dst0, dst1);
        let rgb = RGB::clamp_zero(rgb_unclamp);
        if let Some(cs) = self.base.mipmap.get_rgb_colorspace() {
            // Promote RGB to a full spectrum per the declared semantics.
            match self.spectrum_type {
                SpectrumType::Unbounded => {
                    return RGBUnboundedSpectrum::new(&cs, rgb).sample(lambda);
                }
                SpectrumType::Albedo => {
                    return RGBAlbedoSpectrum::new(&cs, rgb).sample(lambda);
                }
                _ => return RGBIlluminantSpectrum::new(&cs, rgb).sample(lambda),
            }
        }
        // No color space: the image must be grayscale.
        assert!(rgb[0] == rgb[1] && rgb[1] == rgb[2]);
        SampledSpectrum::new(rgb[0])
    }
}
// Stub: not yet implemented.
#[derive(Clone, Debug)]
pub struct GPUSpectrumImageTexture;
impl SpectrumTextureTrait for GPUSpectrumImageTexture {
    fn evaluate(&self, _ctx: &TextureEvalContext, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
}
// Stub: not yet implemented.
#[derive(Clone, Debug)]
pub struct MarbleTexture;
impl SpectrumTextureTrait for MarbleTexture {
    fn evaluate(&self, _ctx: &TextureEvalContext, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
}
// Stub: not yet implemented.
#[derive(Clone, Debug)]
pub struct SpectrumMixTexture;
impl SpectrumTextureTrait for SpectrumMixTexture {
    fn evaluate(&self, _ctx: &TextureEvalContext, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
}
/// Blends two spectrum textures by how closely the shading normal aligns
/// with a fixed direction: `|n . dir| * tex1 + (1 - |n . dir|) * tex2`.
#[derive(Clone, Debug)]
pub struct SpectrumDirectionMixTexture {
    tex1: Box<SpectrumTexture>,
    tex2: Box<SpectrumTexture>,
    dir: Vector3f,
}
impl SpectrumTextureTrait for SpectrumDirectionMixTexture {
    fn evaluate(&self, ctx: &TextureEvalContext, lambda: &SampledWavelengths) -> SampledSpectrum {
        let weight = ctx.n.abs_dot(self.dir.into());
        // Only evaluate an input when its blend weight is nonzero.
        let first = if weight == 0. {
            SampledSpectrum::default()
        } else {
            self.tex1.evaluate(ctx, lambda)
        };
        let second = if weight == 1. {
            SampledSpectrum::default()
        } else {
            self.tex2.evaluate(ctx, lambda)
        };
        weight * first + (1. - weight) * second
    }
}
// Stub: not yet implemented.
#[derive(Clone, Debug)]
pub struct SpectrumDotsTexture;
impl SpectrumTextureTrait for SpectrumDotsTexture {
    fn evaluate(&self, _ctx: &TextureEvalContext, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
}
// Stub: not yet implemented.
#[derive(Clone, Debug)]
pub struct SpectrumPtexTexture;
impl SpectrumTextureTrait for SpectrumPtexTexture {
    fn evaluate(&self, _ctx: &TextureEvalContext, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
}
// Stub: not yet implemented.
#[derive(Clone, Debug)]
pub struct GPUSpectrumPtexTexture;
impl SpectrumTextureTrait for GPUSpectrumPtexTexture {
    fn evaluate(&self, _ctx: &TextureEvalContext, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
}
/// Spectrum texture multiplied by a scalar texture.
#[derive(Clone, Debug)]
pub struct SpectrumScaledTexture {
    tex: Box<SpectrumTexture>,
    scale: Box<FloatTexture>,
}
impl SpectrumTextureTrait for SpectrumScaledTexture {
    /// Evaluates the scale first; a zero scale skips the (potentially
    /// expensive) base-texture lookup entirely.
    fn evaluate(&self, ctx: &TextureEvalContext, lambda: &SampledWavelengths) -> SampledSpectrum {
        match self.scale.evaluate(ctx) {
            s if s == 0. => SampledSpectrum::new(0.),
            s => self.tex.evaluate(ctx, lambda) * s,
        }
    }
}
/// Cache key for loaded MIP maps: the file plus every option that affects
/// how its texels are decoded/filtered.
#[derive(Debug, Hash, PartialEq, Eq, Clone)]
struct TexInfo {
    filename: String,
    filter_options: MIPMapFilterOptions,
    wrap_mode: WrapMode,
    encoding: ColorEncoding,
}
// Process-wide cache so the same image file is only loaded once.
static TEXTURE_CACHE: OnceLock<Mutex<HashMap<TexInfo, Arc<MIPMap>>>> = OnceLock::new();
fn get_texture_cache() -> &'static Mutex<HashMap<TexInfo, Arc<MIPMap>>> {
    TEXTURE_CACHE.get_or_init(|| Mutex::new(HashMap::new()))
}
/// State shared by all image-backed textures (float and spectrum).
#[derive(Clone, Debug)]
pub struct ImageTextureBase {
    mapping: TextureMapping2D,
    filename: String,
    // Linear scale applied to looked-up texel values.
    scale: Float,
    // NOTE(review): `invert` is stored but not applied in the lookups
    // visible in this file — confirm where it takes effect.
    invert: bool,
    mipmap: Arc<MIPMap>,
}
impl ImageTextureBase {
    /// Loads (or fetches from the process-wide cache) the MIP map for
    /// `filename` and bundles it with the mapping/scale parameters.
    ///
    /// Panics if the file cannot be loaded (`expect` below).
    pub fn new(
        mapping: TextureMapping2D,
        filename: String,
        filter_options: MIPMapFilterOptions,
        wrap_mode: WrapMode,
        scale: Float,
        invert: bool,
        encoding: ColorEncoding,
    ) -> Self {
        let tex_info = TexInfo {
            filename: filename.clone(),
            filter_options,
            wrap_mode,
            encoding,
        };
        let cache_mutex = get_texture_cache();
        {
            // Fast path: reuse an already-loaded MIP map.
            let cache = cache_mutex.lock().unwrap();
            if let Some(mipmap) = cache.get(&tex_info) {
                return Self {
                    mapping,
                    filename,
                    scale,
                    invert,
                    mipmap: mipmap.clone(),
                };
            }
        }
        // Slow path: load with the lock released so other threads are not
        // blocked during disk I/O. Two threads may race to load the same
        // file; `entry(..).or_insert` below keeps whichever Arc landed
        // first, so all users converge on one copy.
        let path = Path::new(&filename);
        let mipmap_raw = MIPMap::create_from_file(path, filter_options, wrap_mode, encoding)
            .expect("Failed to create MIPMap from file");
        let mipmap_arc = Arc::new(mipmap_raw);
        {
            let mut cache = cache_mutex.lock().unwrap();
            let stored_mipmap = cache.entry(tex_info).or_insert(mipmap_arc);
            Self {
                mapping,
                filename,
                scale,
                invert,
                mipmap: stored_mipmap.clone(),
            }
        }
    }
    /// Drops every cached MIP map (e.g. between renders).
    pub fn clear_cache() {
        let mut cache = get_texture_cache().lock().unwrap();
        cache.clear();
    }
    /// Folds an additional factor into this texture's scale.
    pub fn multiply_scale(&mut self, s: Float) {
        self.scale *= s;
    }
}
/// Abstraction over texture evaluation so material code can be generic
/// over evaluators with different capabilities (e.g. GPU-restricted sets).
pub trait TextureEvaluator: Send + Sync {
    fn evaluate_float(&self, tex: &FloatTexture, ctx: &TextureEvalContext) -> Float;
    fn evaluate_spectrum(
        &self,
        tex: &SpectrumTexture,
        ctx: &TextureEvalContext,
        lambda: &SampledWavelengths,
    ) -> SampledSpectrum;
    // Whether this evaluator can handle every one of the given textures.
    fn can_evaluate(&self, _ftex: &[&FloatTexture], _stex: &[&SpectrumTexture]) -> bool;
}
/// Evaluator that handles every texture type by direct dispatch.
#[derive(Copy, Clone, Default)]
pub struct UniversalTextureEvaluator;
impl TextureEvaluator for UniversalTextureEvaluator {
    fn evaluate_float(&self, tex: &FloatTexture, ctx: &TextureEvalContext) -> Float {
        tex.evaluate(ctx)
    }
    fn evaluate_spectrum(
        &self,
        tex: &SpectrumTexture,
        ctx: &TextureEvalContext,
        lambda: &SampledWavelengths,
    ) -> SampledSpectrum {
        tex.evaluate(ctx, lambda)
    }
    fn can_evaluate(
        &self,
        _float_textures: &[&FloatTexture],
        _spectrum_textures: &[&SpectrumTexture],
    ) -> bool {
        true
    }
}

View file

@ -77,6 +77,11 @@ where
self.p_max - self.p_min
}
pub fn centroid(&self) -> Point<T, N> {
let two = T::one() + T::one();
self.p_min + (self.diagonal() / two)
}
pub fn volume(&self) -> T {
let d = self.diagonal();
d.0.iter().fold(T::one(), |acc, &val| acc * val)
@ -132,15 +137,13 @@ where
}
pub fn corner(&self, corner_index: usize) -> Point<T, N> {
let mut p_arr = [self.p_min[0]; N];
for i in 0..N {
p_arr[i] = if ((corner_index >> i) & 1) == 1 {
Point(std::array::from_fn(|i| {
if (corner_index >> i) & 1 == 1 {
self.p_max[i]
} else {
self.p_min[i]
}
}
Point(p_arr)
}))
}
pub fn overlaps(&self, rhs: &Self) -> bool {
@ -248,6 +251,55 @@ where
}
impl Bounds3f {
/// Ray–box slab test with precomputed reciprocal direction.
/// `dir_is_neg[i]` selects which slab plane is "near" along axis i
/// (0 = p_min, 1 = p_max), so the near/far planes can be indexed without
/// branches. Returns the parametric (t_min, t_max) range, or None if the
/// ray misses the box or the overlap lies outside (0, ray_t_max).
#[inline(always)]
pub fn intersect_p(
    &self,
    o: Point3f,
    ray_t_max: Float,
    inv_dir: Vector3f,
    dir_is_neg: &[usize; 3],
) -> Option<(Float, Float)> {
    let bounds = [&self.p_min, &self.p_max];
    // Check X
    let mut t_min = (bounds[dir_is_neg[0]].x() - o.x()) * inv_dir.x();
    let mut t_max = (bounds[1 - dir_is_neg[0]].x() - o.x()) * inv_dir.x();
    // Check Y
    let ty_min = (bounds[dir_is_neg[1]].y() - o.y()) * inv_dir.y();
    let ty_max = (bounds[1 - dir_is_neg[1]].y() - o.y()) * inv_dir.y();
    if t_min > ty_max || ty_min > t_max {
        return None;
    }
    // Narrow the interval to the intersection of the X and Y slabs.
    if ty_min > t_min {
        t_min = ty_min;
    }
    if ty_max < t_max {
        t_max = ty_max;
    }
    // Check Z
    let tz_min = (bounds[dir_is_neg[2]].z() - o.z()) * inv_dir.z();
    let tz_max = (bounds[1 - dir_is_neg[2]].z() - o.z()) * inv_dir.z();
    if t_min > tz_max || tz_min > t_max {
        return None;
    }
    if tz_min > t_min {
        t_min = tz_min;
    }
    if tz_max < t_max {
        t_max = tz_max;
    }
    // Accept only overlaps intersecting the valid ray segment (0, ray_t_max).
    if (t_min < ray_t_max) && (t_max > 0.0) {
        Some((t_min, t_max))
    } else {
        None
    }
}
pub fn intersect_with_inverse(&self, o: Point3f, d: Vector3f, ray_t_max: Float) -> bool {
let inv_dir = Vector3::new(1.0 / d.x(), 1.0 / d.y(), 1.0 / d.z());
let dir_is_neg: [usize; 3] = [

View file

@ -4,8 +4,8 @@ use crate::utils::transform::Transform;
#[derive(Debug, Clone)]
pub struct DirectionCone {
w: Vector3f,
cos_theta: Float,
pub w: Vector3f,
pub cos_theta: Float,
}
impl Default for DirectionCone {
@ -60,57 +60,57 @@ impl DirectionCone {
- wp.z() * (square(w.x() + square(w.y())))),
)
}
}
pub fn inside(d: &DirectionCone, w: Vector3f) -> bool {
!d.is_empty() && d.w.dot(w.normalize()) > d.cos_theta
}
pub fn bound_subtended_directions(b: &Bounds3f, p: Point3f) -> DirectionCone {
let (p_center, radius) = b.bounding_sphere();
if p.distance_squared(p_center) < square(radius) {
return DirectionCone::entire_sphere();
}
let w = (p_center - p).normalize();
let sin2_theta_max = square(radius) / p_center.distance_squared(p);
let cos_theta_max = safe_sqrt(1. - sin2_theta_max);
DirectionCone::new(w, cos_theta_max)
}
pub fn union(a: &DirectionCone, b: &DirectionCone) -> DirectionCone {
if a.is_empty() {
return b.clone();
}
if b.is_empty() {
return a.clone();
}
// Handle the cases where one cone is inside the other
let theta_a = safe_acos(a.cos_theta);
let theta_b = safe_acos(b.cos_theta);
let theta_d = a.w.angle_between(b.w);
if (theta_d + theta_b).min(PI) <= theta_b {
return a.clone();
}
if (theta_d + theta_a).min(PI) <= theta_a {
return b.clone();
}
// Compute the spread angle of the merged cone, $\theta_o$
let theta_o = (theta_a + theta_d + theta_b) / 2.;
if theta_o >= PI {
return DirectionCone::entire_sphere();
}
// Find the merged cone's axis and return cone union
let theta_r = theta_o - theta_a;
let wr = a.w.cross(b.w);
if wr.norm_squared() >= 0. {
return DirectionCone::entire_sphere();
}
let w = Transform::rotate_around_axis(degrees(theta_r), wr).apply_to_vector(a.w);
DirectionCone::new(w, theta_o.cos())
pub fn inside(d: &DirectionCone, w: Vector3f) -> bool {
!d.is_empty() && d.w.dot(w.normalize()) > d.cos_theta
}
pub fn bound_subtended_directions(b: &Bounds3f, p: Point3f) -> DirectionCone {
let (p_center, radius) = b.bounding_sphere();
if p.distance_squared(p_center) < square(radius) {
return DirectionCone::entire_sphere();
}
let w = (p_center - p).normalize();
let sin2_theta_max = square(radius) / p_center.distance_squared(p);
let cos_theta_max = safe_sqrt(1. - sin2_theta_max);
DirectionCone::new(w, cos_theta_max)
}
/// Returns the smallest cone bounding both input cones (pbrt's
/// DirectionCone Union).
///
/// Fixes two defects: the containment tests compared against the wrong
/// cone's spread angle (`theta_d + theta_b <= theta_b` can only hold at
/// theta_d == 0; it must compare against `theta_a`, and vice versa), and
/// the degenerate-axis check used `>= 0.` (always true for a squared
/// norm — every union collapsed to the entire sphere) instead of `== 0.`.
pub fn union(a: &DirectionCone, b: &DirectionCone) -> DirectionCone {
    // An empty cone contributes nothing.
    if a.is_empty() {
        return b.clone();
    }
    if b.is_empty() {
        return a.clone();
    }
    // Handle the cases where one cone is inside the other
    let theta_a = safe_acos(a.cos_theta);
    let theta_b = safe_acos(b.cos_theta);
    let theta_d = a.w.angle_between(b.w);
    if (theta_d + theta_b).min(PI) <= theta_a {
        return a.clone();
    }
    if (theta_d + theta_a).min(PI) <= theta_b {
        return b.clone();
    }
    // Compute the spread angle of the merged cone, $\theta_o$
    let theta_o = (theta_a + theta_d + theta_b) / 2.;
    if theta_o >= PI {
        return DirectionCone::entire_sphere();
    }
    // Find the merged cone's axis and return cone union
    let theta_r = theta_o - theta_a;
    let wr = a.w.cross(b.w);
    // Parallel axes: no well-defined rotation axis; be conservative.
    if wr.norm_squared() == 0. {
        return DirectionCone::entire_sphere();
    }
    let w = Transform::rotate_around_axis(degrees(theta_r), wr).apply_to_vector(a.w);
    DirectionCone::new(w, theta_o.cos())
}
}

View file

@ -58,23 +58,21 @@ pub fn tan2_theta(w: Vector3f) -> Float {
/// Cosine of the spherical azimuth angle of `w` (pbrt CosPhi).
/// Returns 1 for directions on the pole (sin_theta == 0), where phi is
/// undefined; otherwise clamps x/sin_theta into [-1, 1] to guard
/// against rounding.
///
/// Diff-residue cleanup: old `let result = …; result` and the new
/// expression form were interleaved into invalid syntax.
#[inline]
pub fn cos_phi(w: Vector3f) -> Float {
    let sin_theta = sin_theta(w);
    if sin_theta == 0. {
        1.
    } else {
        clamp_t(w.x() / sin_theta, -1., 1.)
    }
}
/// Sine of the spherical azimuth angle of `w` (pbrt SinPhi).
/// Returns 0 on the pole (sin_theta == 0); otherwise clamps
/// y/sin_theta into [-1, 1].
///
/// Diff-residue cleanup: old and new bodies were interleaved.
#[inline]
pub fn sin_phi(w: Vector3f) -> Float {
    let sin_theta = sin_theta(w);
    if sin_theta == 0. {
        0.
    } else {
        clamp_t(w.y() / sin_theta, -1., 1.)
    }
}
pub fn same_hemisphere(w: Vector3f, wp: Vector3f) -> bool {
@ -85,7 +83,7 @@ pub fn spherical_direction(sin_theta: Float, cos_theta: Float, phi: Float) -> Ve
Vector3f::new(sin_theta * phi.cos(), sin_theta * phi.sin(), cos_theta)
}
pub fn spherical_triangle_area<T: NumFloat>(a: Vector3f, b: Vector3f, c: Vector3f) -> Float {
pub fn spherical_triangle_area(a: Vector3f, b: Vector3f, c: Vector3f) -> Float {
(2.0 * (a.dot(b.cross(c))).atan2(1.0 + a.dot(b) + a.dot(c) + b.dot(c))).abs()
}
@ -114,11 +112,11 @@ pub fn spherical_quad_area(a: Vector3f, b: Vector3f, c: Vector3f, d: Vector3f) -
(alpha + beta + gamma + delta - 2. * PI).abs()
}
pub fn spherical_theta<T: NumFloat>(v: Vector3f) -> Float {
pub fn spherical_theta(v: Vector3f) -> Float {
clamp_t(v.z(), -1.0, 1.0).acos()
}
pub fn spherical_phi<T: NumFloat>(v: Vector3f) -> Float {
pub fn spherical_phi(v: Vector3f) -> Float {
let p = v.y().atan2(v.x());
if p < 0.0 { p + 2.0 * PI } else { p }
}

View file

@ -1,5 +1,5 @@
use super::traits::{Lerp, Sqrt, Tuple, VectorLike};
use super::{Float, NumFloat, PI};
use super::traits::{Sqrt, Tuple, VectorLike};
use super::{Float, NumFloat, PI, clamp_t};
use crate::utils::interval::Interval;
use crate::utils::math::{difference_of_products, quadratic, safe_asin};
use num_traits::{AsPrimitive, FloatConst, Num, Signed, Zero};
@ -39,18 +39,6 @@ macro_rules! impl_tuple_core {
}
}
impl<T, Factor, const N: usize> Lerp<Factor> for $Struct<T, N>
where
Factor: Copy + Num,
T: Copy + Mul<Factor, Output = T> + Add<Output = T>,
{
#[inline]
fn lerp(t: Factor, a: Self, b: Self) -> Self {
let result = std::array::from_fn(|i| a[i] * (Factor::one() - t) + b[i] * t);
Self::from_array(result)
}
}
impl<T: Default + Copy, const N: usize> Default for $Struct<T, N> {
fn default() -> Self {
Self([T::default(); N])
@ -67,12 +55,55 @@ macro_rules! impl_tuple_core {
}
}
impl<const N: usize> $Struct<f32, N> {
#[inline]
pub fn floor(&self) -> $Struct<i32, N> {
$Struct(self.0.map(|v| v.floor() as i32))
}
#[inline]
pub fn average(&self) -> f32 {
let sum: f32 = self.0.iter().sum();
sum / (N as f32)
}
}
impl<T, const N: usize> $Struct<T, N>
where
T: Copy + PartialOrd,
{
#[inline]
pub fn min(&self, other: Self) -> Self {
let mut out = self.0;
for i in 0..N {
if other.0[i] < out[i] {
out[i] = other.0[i];
}
}
Self(out)
}
#[inline]
pub fn max(&self, other: Self) -> Self {
let mut out = self.0;
for i in 0..N {
if other.0[i] > out[i] {
out[i] = other.0[i]
}
}
Self(out)
}
#[inline]
pub fn max_component_value(&self) -> T {
let mut m = self.0[0];
for i in 1..N {
if self.0[i] > m {
m = self.0[i];
}
}
m
}
}
impl<T, const N: usize> $Struct<T, N>
@ -241,7 +272,6 @@ macro_rules! impl_float_vector_ops {
};
}
macro_rules! impl_abs {
($Struct:ident) => {
impl<T, const N: usize> $Struct<T, N>
@ -391,12 +421,12 @@ impl Point2f {
if let Some((v0, v1)) = quadratic(k2, k1, k0) {
let u = (h.x() - f.x() * v0) / (e.x() + g.x() * v0);
if u < 0. || u > 1. || v0 < 0. || v0 > 1. {
if !(0.0..=1.).contains(&u) || !(0.0..=1.0).contains(&v0) {
return Point2f::new((h.x() - f.x() * v1) / (e.x() + g.x() * v1), v1);
}
return Point2f::new(u, v0);
Point2f::new(u, v0)
} else {
return Point2f::zero();
Point2f::zero()
}
}
}
@ -538,7 +568,7 @@ where
/// Hash by bit pattern: Float has no Hash impl (NaN/-0.0 issues), so each
/// component is hashed via `to_bits`.
///
/// Diff-residue cleanup: the hash statement appeared twice (old + new line
/// of a whitespace-only diff), which would have hashed every component
/// twice.
impl<const N: usize> Hash for Vector<Float, N> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        for item in self.0.iter() {
            item.to_bits().hash(state);
        }
    }
}
@ -546,7 +576,7 @@ impl<const N: usize> Hash for Vector<Float, N> {
/// Hash by bit pattern, mirroring the Vector impl.
/// Diff-residue cleanup: removes the duplicated hash statement.
impl<const N: usize> Hash for Point<Float, N> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        for item in self.0.iter() {
            item.to_bits().hash(state);
        }
    }
}
@ -554,7 +584,7 @@ impl<const N: usize> Hash for Point<Float, N> {
/// Hash by bit pattern, mirroring the Vector impl.
/// Diff-residue cleanup: removes the duplicated hash statement.
impl<const N: usize> Hash for Normal<Float, N> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        for item in self.0.iter() {
            item.to_bits().hash(state);
        }
    }
}
@ -656,6 +686,16 @@ impl<const N: usize> From<Vector<Interval, N>> for Vector<Float, N> {
}
}
/// Widens an exact Float vector into an interval vector with zero-width
/// intervals per component.
impl<const N: usize> From<Vector<Float, N>> for Vector<Interval, N> {
    fn from(v: Vector<Float, N>) -> Self {
        Self(std::array::from_fn(|i| Interval::new(v[i])))
    }
}
impl<const N: usize> Mul<Vector<Interval, N>> for Interval {
type Output = Vector<Interval, N>;
fn mul(self, rhs: Vector<Interval, N>) -> Self::Output {
@ -691,7 +731,7 @@ where
T: Num + PartialOrd + Copy + Neg<Output = T> + Sqrt,
{
pub fn face_forward(self, v: Vector3<T>) -> Self {
if Vector3::<T>::from(self).dot(v.into()) < T::zero() {
if Vector3::<T>::from(self).dot(v) < T::zero() {
-self
} else {
self
@ -699,6 +739,69 @@ where
}
}
/// Unit direction compressed to 2x16 bits via octahedral encoding
/// (pbrt OctahedralVector).
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
#[repr(C)]
pub struct OctahedralVector {
    x: u16,
    y: u16,
}
impl OctahedralVector {
    /// Encodes a direction. Assumes `v` is non-zero; NOTE(review): callers
    /// presumably pass unit vectors — confirm.
    pub fn new(mut v: Vector3f) -> Self {
        // Project onto the octahedron |x| + |y| + |z| = 1.
        v /= v.x().abs() + v.y().abs() + v.z().abs();
        let (x_enc, y_enc) = if v.z() >= 0.0 {
            (Self::encode(v.x()), Self::encode(v.y()))
        } else {
            // Lower hemisphere: fold the bottom faces outward over the
            // upper ones so the whole sphere maps to the unit square.
            (
                Self::encode((1.0 - v.y().abs()) * Self::sign(v.x())),
                Self::encode((1.0 - v.x().abs()) * Self::sign(v.y())),
            )
        };
        Self { x: x_enc, y: y_enc }
    }
    /// Decodes back to a (normalized) direction.
    pub fn to_vector(self) -> Vector3f {
        let mut v = Vector3f::default();
        // Map [0, 65535] back to [-1, 1]
        v[0] = -1.0 + 2.0 * (self.x as Float / 65535.0);
        v[1] = -1.0 + 2.0 * (self.y as Float / 65535.0);
        v[2] = 1.0 - (v.x().abs() + v.y().abs());
        if v.z() < 0.0 {
            // Undo the lower-hemisphere fold; keep the pre-fold x around
            // since v[0] is overwritten before v[1] is computed.
            let xo = v.x();
            v[0] = (1.0 - v.y().abs()) * Self::sign(xo);
            v[1] = (1.0 - xo.abs()) * Self::sign(v.y());
        }
        v.normalize()
    }
    // Returns ±1.0 carrying the sign of v (copysign, so -0.0 maps to -1.0).
    #[inline]
    pub fn sign(v: Float) -> Float {
        1.0.copysign(v)
    }
    // Maps f in [-1, 1] to a rounded u16 in [0, 65535].
    #[inline]
    pub fn encode(f: Float) -> u16 {
        (clamp_t((f + 1.0) / 2.0, 0.0, 1.0) * 65535.0).round() as u16
    }
}
impl From<Vector3f> for OctahedralVector {
    fn from(v: Vector3f) -> Self {
        Self::new(v)
    }
}
impl From<OctahedralVector> for Vector3f {
    fn from(ov: OctahedralVector) -> Self {
        ov.to_vector()
    }
}
#[derive(Copy, Clone, Debug, Default, PartialEq)]
pub struct Frame {
pub x: Vector3f,

View file

@ -37,12 +37,12 @@ impl Ray {
}
}
pub fn evaluate(&self, t: Float) -> Point3f {
pub fn at(&self, t: Float) -> Point3f {
self.o + self.d * t
}
pub fn offset_origin(p: &Point3fi, n: &Normal3f, w: &Vector3f) -> Point3f {
let d: Float = Vector3f::from(n.abs()).dot(p.error().into());
let d: Float = Vector3f::from(n.abs()).dot(p.error());
let normal: Vector3f = Vector3f::from(*n);
let mut offset = p.midpoint();

View file

@ -166,16 +166,18 @@ impl Sqrt for Interval {
}
}
pub trait Lerp<Factor: Copy + Num>: Sized + Copy {
pub trait Lerp<Factor = Float>: Sized + Copy {
fn lerp(t: Factor, a: Self, b: Self) -> Self;
}
impl<T> Lerp<T> for T
impl<T, F, Diff> Lerp<F> for T
where
T: Num + Copy + Mul<T, Output = T> + Add<T, Output = T>,
T: Copy + Sub<Output = Diff> + Add<Diff, Output = T>,
Diff: Mul<F, Output = Diff>,
F: Copy,
{
#[inline]
fn lerp(t: T, a: Self, b: Self) -> Self {
a * (T::one() - t) + b * t
#[inline(always)]
fn lerp(t: F, a: Self, b: Self) -> Self {
a + (b - a) * t
}
}

0
src/gpu/mod.rs Normal file
View file

359
src/image/io.rs Normal file
View file

@ -0,0 +1,359 @@
use crate::core::pbrt::Float;
use crate::image::{
Image, ImageAndMetadata, ImageMetadata, PixelData, PixelFormat, Point2i, WrapMode,
};
use crate::utils::color::{ColorEncoding, LINEAR, SRGB};
use crate::utils::error::ImageError;
use anyhow::{Context, Result, bail};
use exr::prelude::{read_first_rgba_layer_from_file, write_rgba_file};
use image_rs::ImageReader;
use image_rs::{DynamicImage, ImageBuffer, Rgb, Rgba};
use std::fs::File;
use std::io::{BufRead, BufReader, BufWriter, Read, Write};
use std::path::Path;
impl Image {
    /// Reads an image, dispatching on the (lowercased) file extension:
    /// EXR and PFM have dedicated readers; everything else goes through
    /// the `image` crate. `encoding` overrides the color encoding assumed
    /// for 8-bit formats (defaults to sRGB in `read_generic`).
    pub fn read(path: &Path, encoding: Option<ColorEncoding>) -> Result<ImageAndMetadata> {
        let ext = path
            .extension()
            .and_then(|s| s.to_str())
            .unwrap_or("")
            .to_lowercase();
        match ext.as_str() {
            "exr" => read_exr(path),
            "pfm" => read_pfm(path),
            _ => read_generic(path, encoding),
        }
    }
    /// Writes the image in the format chosen by `filename`'s extension
    /// (exr/png/pfm/qoi); anything else is an error. All failures are
    /// wrapped into `ImageError::Io`.
    pub fn write(&self, filename: &str, metadata: &ImageMetadata) -> Result<(), ImageError> {
        let path = Path::new(filename);
        let ext = path.extension().and_then(|s| s.to_str()).unwrap_or("");
        let res = match ext.to_lowercase().as_str() {
            "exr" => self.write_exr(path, metadata),
            "png" => self.write_png(path),
            "pfm" => self.write_pfm(path),
            "qoi" => self.write_qoi(path),
            _ => Err(anyhow::anyhow!("Unsupported write format: {}", ext)),
        };
        res.map_err(|e| ImageError::Io(std::io::Error::other(e)))
    }
/// Saves as 8-bit PNG. Pixels are converted to u8 first; the color type
/// follows the channel count (1 = gray, 3 = RGB, 4 = RGBA).
fn write_png(&self, path: &Path) -> Result<()> {
    let w = self.resolution.x() as u32;
    let h = self.resolution.y() as u32;
    // Convert whatever we have to u8 [0..255]
    let data = self.to_u8_buffer();
    // One save call; only the color type varies with the channel count.
    let color_type = match self.n_channels() {
        1 => image_rs::ColorType::L8,
        3 => image_rs::ColorType::Rgb8,
        4 => image_rs::ColorType::Rgba8,
        _ => bail!("PNG writer only supports 1, 3, or 4 channels"),
    };
    image_rs::save_buffer_with_format(
        path,
        &data,
        w,
        h,
        color_type,
        image_rs::ImageFormat::Png,
    )?;
    Ok(())
}
/// Saves as QOI (8-bit, RGB or RGBA only).
fn write_qoi(&self, path: &Path) -> Result<()> {
    let w = self.resolution.x() as u32;
    let h = self.resolution.y() as u32;
    let data = self.to_u8_buffer();
    let color_type = match self.n_channels() {
        3 => image_rs::ColorType::Rgb8,
        4 => image_rs::ColorType::Rgba8,
        _ => bail!("QOI only supports 3 or 4 channels"),
    };
    image_rs::save_buffer_with_format(
        path,
        &data,
        w,
        h,
        color_type,
        image_rs::ImageFormat::Qoi,
    )?;
    Ok(())
}
/// Saves as EXR; always emits RGBA. Gray images are replicated across
/// RGB with alpha 1; metadata is currently ignored.
fn write_exr(&self, path: &Path, _metadata: &ImageMetadata) -> Result<()> {
    // EXR requires F32
    let w = self.resolution.x() as usize;
    let h = self.resolution.y() as usize;
    let c = self.n_channels();
    write_rgba_file(path, w, h, |x, y| {
        // Helper to get float value regardless of internal storage
        let get = |ch| {
            self.get_channel_with_wrap(
                Point2i::new(x as i32, y as i32),
                ch,
                WrapMode::Clamp.into(),
            )
        };
        if c == 1 {
            let v = get(0);
            (v, v, v, 1.0)
        } else {
            // NOTE(review): a 2-channel image would fall through here and
            // read channel 3 — confirm channel counts other than 1/3/4
            // cannot reach this writer.
            if c == 3 {
                (get(0), get(1), get(2), 1.0)
            } else {
                (get(0), get(1), get(2), get(3))
            }
        }
    })
    .context("Failed to write EXR")?;
    Ok(())
}
/// Saves as PFM (Portable FloatMap, RGB only).
///
/// Fix: the header's scale sign declares the pixel byte order (negative
/// = little-endian, positive = big-endian), and it is computed from the
/// build target's endianness below — but the pixel data was always
/// written with `to_le_bytes`, producing a corrupt file on big-endian
/// targets. Native byte order (`to_ne_bytes`) matches the header on
/// every target.
fn write_pfm(&self, path: &Path) -> Result<()> {
    let file = File::create(path)?;
    let mut writer = BufWriter::new(file);
    if self.n_channels() != 3 {
        bail!("PFM writing currently only supports 3 channels (RGB)");
    }
    // Header
    writeln!(writer, "PF")?;
    writeln!(writer, "{} {}", self.resolution.x(), self.resolution.y())?;
    // Negative scale = little-endian pixel data, positive = big-endian.
    let scale = if cfg!(target_endian = "little") {
        -1.0
    } else {
        1.0
    };
    writeln!(writer, "{}", scale)?;
    // PFM scanlines run bottom-to-top; PBRT stores top-to-bottom,
    // hence the reversed row order.
    for y in (0..self.resolution.y()).rev() {
        for x in 0..self.resolution.x() {
            for c in 0..3 {
                let val =
                    self.get_channel_with_wrap(Point2i::new(x, y), c, WrapMode::Clamp.into());
                // Native order — consistent with the scale sign above.
                writer.write_all(&val.to_ne_bytes())?;
            }
        }
    }
    Ok(())
}
/// Flattens pixel storage to u8, clamping float formats to [0, 1] and
/// scaling to [0, 255] with round-to-nearest (+0.5).
/// NOTE(review): no sRGB encoding is applied here — confirm callers
/// expect a plain linear-to-u8 quantization.
fn to_u8_buffer(&self) -> Vec<u8> {
    match &self.pixels {
        PixelData::U8(data) => data.clone(),
        PixelData::F16(data) => data
            .iter()
            .map(|v| (v.to_f32().clamp(0.0, 1.0) * 255.0 + 0.5) as u8)
            .collect(),
        PixelData::F32(data) => data
            .iter()
            .map(|v| (v.clamp(0.0, 1.0) * 255.0 + 0.5) as u8)
            .collect(),
    }
}
}
/// Reads any format the `image` crate can decode. Float-native formats
/// keep F32/linear storage; everything else is converted to 8-bit
/// RGB(A) tagged with `encoding` (default sRGB).
fn read_generic(path: &Path, encoding: Option<ColorEncoding>) -> Result<ImageAndMetadata> {
    let dyn_img = ImageReader::open(path)
        .with_context(|| format!("Failed to open image: {:?}", path))?
        .decode()?;
    let w = dyn_img.width() as i32;
    let h = dyn_img.height() as i32;
    let res = Point2i::new(w, h);
    // Check if it was loaded as high precision or standard
    let image = match dyn_img {
        DynamicImage::ImageRgb32F(buf) => Image {
            format: PixelFormat::F32,
            resolution: res,
            channel_names: vec!["R".into(), "G".into(), "B".into()],
            encoding: LINEAR,
            pixels: PixelData::F32(buf.into_raw()),
        },
        DynamicImage::ImageRgba32F(buf) => Image {
            format: PixelFormat::F32,
            resolution: res,
            channel_names: vec!["R".into(), "G".into(), "B".into(), "A".into()],
            encoding: LINEAR,
            pixels: PixelData::F32(buf.into_raw()),
        },
        _ => {
            // Default to RGB8 for everything else
            if dyn_img.color().has_alpha() {
                let buf = dyn_img.to_rgba8();
                Image {
                    format: PixelFormat::U8,
                    resolution: res,
                    channel_names: vec!["R".into(), "G".into(), "B".into(), "A".into()],
                    encoding: encoding.unwrap_or(SRGB),
                    pixels: PixelData::U8(buf.into_raw()),
                }
            } else {
                let buf = dyn_img.to_rgb8();
                Image {
                    format: PixelFormat::U8,
                    resolution: res,
                    channel_names: vec!["R".into(), "G".into(), "B".into()],
                    encoding: encoding.unwrap_or(SRGB),
                    pixels: PixelData::U8(buf.into_raw()),
                }
            }
        }
    };
    // No format-specific metadata is extracted here.
    let metadata = ImageMetadata::default();
    Ok(ImageAndMetadata { image, metadata })
}
/// Loads the first RGBA layer of an OpenEXR file into a linear F32 image.
///
/// The pixel buffer is explicitly `f32` (not the crate-wide `Float` alias):
/// `PixelData::F32` always stores `f32` and the exr crate yields `f32`
/// samples, so using `Float` here broke the build under the `use_f64` feature.
fn read_exr(path: &Path) -> Result<ImageAndMetadata> {
    let image = read_first_rgba_layer_from_file(
        path,
        |resolution, _| {
            // Store the row width alongside the buffer: the pixel-setter
            // closure below only receives the pixel *position*, so it needs
            // the width to compute a flat index. (Previously it called
            // `position.width()`, which is the position's x component, and
            // scrambled the image.)
            let size = resolution.width() * resolution.height() * 4;
            (vec![0.0f32; size], resolution.width())
        },
        |storage, position, pixel| {
            let width = storage.1;
            let idx = (position.y() * width + position.x()) * 4;
            // Scatter the decoded RGBA texel into the interleaved buffer.
            storage.0[idx] = pixel.0;
            storage.0[idx + 1] = pixel.1;
            storage.0[idx + 2] = pixel.2;
            storage.0[idx + 3] = pixel.3;
        },
    )
    .with_context(|| format!("Failed to read EXR: {:?}", path))?;
    let w = image.layer_data.size.width() as i32;
    let h = image.layer_data.size.height() as i32;
    // EXR data is linear by definition; no transfer-function decode needed.
    let image = Image {
        format: PixelFormat::F32,
        resolution: Point2i::new(w, h),
        channel_names: vec!["R".into(), "G".into(), "B".into(), "A".into()],
        encoding: LINEAR,
        pixels: PixelData::F32(image.layer_data.channel_data.pixels.0),
    };
    let metadata = ImageMetadata::default();
    Ok(ImageAndMetadata { image, metadata })
}
/// Loads a PFM (Portable Float Map) file: "PF" (RGB) or "Pf" (grayscale).
///
/// Validates the header, honors the scale line (its sign declares the raster
/// byte order, its magnitude is a multiplier), and flips the bottom-to-top
/// PFM raster into our top-to-bottom layout.
fn read_pfm(path: &Path) -> Result<ImageAndMetadata> {
    let file = File::open(path)?;
    let mut reader = BufReader::new(file);
    // PFM headers are: "PF\nwidth height\nscale\n" (or "Pf" for grayscale).
    let mut header_word = String::new();
    reader.read_line(&mut header_word)?;
    let header_word = header_word.trim();
    let channels = match header_word {
        "PF" => 3,
        "Pf" => 1,
        _ => bail!("Invalid PFM header: {}", header_word),
    };
    let mut dims_line = String::new();
    reader.read_line(&mut dims_line)?;
    let dims: Vec<i32> = dims_line
        .split_whitespace()
        .map(|s| s.parse().unwrap_or(0))
        .collect();
    if dims.len() < 2 {
        bail!("Invalid PFM dimensions");
    }
    let w = dims[0];
    let h = dims[1];
    // Unparseable tokens were mapped to 0 above; reject them and any
    // non-positive size instead of silently producing an empty image.
    if w <= 0 || h <= 0 {
        bail!("Invalid PFM dimensions: {} x {}", w, h);
    }
    let mut scale_line = String::new();
    reader.read_line(&mut scale_line)?;
    let scale: f32 = scale_line.trim().parse().context("Invalid PFM scale")?;
    // Negative scale => little-endian raster; positive => big-endian.
    let file_is_little_endian = scale < 0.0;
    let abs_scale = scale.abs();
    let mut buffer = Vec::new();
    reader.read_to_end(&mut buffer)?;
    // Size arithmetic in usize so large images can't overflow i32.
    let n_values = w as usize * h as usize * channels as usize;
    let expected_bytes = n_values * 4;
    if buffer.len() < expected_bytes {
        bail!("PFM file too short");
    }
    // Explicitly f32: PixelData::F32 holds Vec<f32> regardless of whether
    // the crate-wide Float alias is f32 or f64 (use_f64 feature).
    let mut pixels = vec![0.0f32; n_values];
    let (w_us, ch_us) = (w as usize, channels as usize);
    // PFM rasters are stored bottom-to-top; flip rows while copying.
    for y in 0..h as usize {
        let src_y = h as usize - 1 - y;
        for x in 0..w_us {
            for c in 0..ch_us {
                let src_idx = ((src_y * w_us + x) * ch_us + c) * 4;
                let dst_idx = (y * w_us + x) * ch_us + c;
                let bytes: [u8; 4] = buffer[src_idx..src_idx + 4].try_into()?;
                let val = if file_is_little_endian {
                    f32::from_le_bytes(bytes)
                } else {
                    f32::from_be_bytes(bytes)
                };
                pixels[dst_idx] = val * abs_scale;
            }
        }
    }
    let names = if channels == 1 {
        vec!["Y".into()]
    } else {
        vec!["R".into(), "G".into(), "B".into()]
    };
    let image = Image {
        format: PixelFormat::F32,
        resolution: Point2i::new(w, h),
        channel_names: names,
        encoding: LINEAR,
        pixels: PixelData::F32(pixels),
    };
    let metadata = ImageMetadata::default();
    Ok(ImageAndMetadata { image, metadata })
}

118
src/image/metadata.rs Normal file
View file

@ -0,0 +1,118 @@
use crate::core::pbrt::Float;
use crate::geometry::{Bounds2i, Point2i};
use crate::utils::colorspace::RGBColorSpace;
use crate::utils::math::SquareMatrix;
use smallvec::SmallVec;
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};
/// Per-channel floating-point values for a single pixel (RGB, RGBA,
/// luminance, ...). Wraps a `SmallVec` so the common <= 4 channel case
/// stays on the stack.
#[derive(Clone, Debug, Default)]
pub struct ImageChannelValues(pub SmallVec<[Float; 4]>);
impl ImageChannelValues {
    /// Arithmetic mean of all channel values; 0.0 when there are none.
    pub fn average(&self) -> Float {
        match self.0.len() {
            0 => 0.0,
            n => self.0.iter().sum::<Float>() / n as Float,
        }
    }
    /// Largest channel value (Float::MIN when there are no channels).
    pub fn max_value(&self) -> Float {
        self.0.iter().copied().fold(Float::MIN, Float::max)
    }
}
impl From<&[Float]> for ImageChannelValues {
fn from(slice: &[Float]) -> Self {
Self(SmallVec::from_slice(slice))
}
}
impl From<Vec<Float>> for ImageChannelValues {
fn from(vec: Vec<Float>) -> Self {
Self(SmallVec::from_vec(vec))
}
}
impl<const N: usize> From<[Float; N]> for ImageChannelValues {
fn from(arr: [Float; N]) -> Self {
Self(SmallVec::from_slice(&arr))
}
}
// Deref to the inner SmallVec so indexing, iteration, and len() work
// directly on ImageChannelValues.
impl Deref for ImageChannelValues {
    type Target = SmallVec<[Float; 4]>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for ImageChannelValues {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
/// How out-of-bounds pixel coordinates are remapped during lookups.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum WrapMode {
    /// Out-of-bounds reads yield zero.
    Black,
    /// Coordinates are clamped to the nearest edge texel.
    Clamp,
    /// Coordinates wrap around (tiling).
    Repeat,
    /// Intended for octahedral environment maps.
    /// NOTE(review): the remap code currently clamps for this mode — confirm
    /// whether full octahedral reflection is still planned.
    OctahedralSphere,
}
/// Independent wrap modes for the two image axes (u, v).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct WrapMode2D {
    pub uv: [WrapMode; 2],
}
impl From<WrapMode> for WrapMode2D {
    /// Apply the same wrap mode to both axes.
    fn from(mode: WrapMode) -> Self {
        WrapMode2D { uv: [mode; 2] }
    }
}
/// Maps a requested channel ordering onto channel indices within an image:
/// `offset[i]` is the image channel that supplies requested channel `i`.
#[derive(Debug, Clone, Default)]
pub struct ImageChannelDesc {
    pub offset: Vec<usize>,
}
impl ImageChannelDesc {
    /// Builds a descriptor from explicit channel indices.
    pub fn new(offset: &[usize]) -> Self {
        ImageChannelDesc {
            offset: offset.to_vec(),
        }
    }
    /// Number of channels described.
    pub fn size(&self) -> usize {
        self.offset.len()
    }
    /// True when no channels are described.
    pub fn is_empty(&self) -> bool {
        self.offset.is_empty()
    }
    /// True when the descriptor selects channels 0..n in natural order
    /// (vacuously true when empty).
    pub fn is_identity(&self) -> bool {
        self.offset.iter().enumerate().all(|(i, &o)| o == i)
    }
}
/// Optional metadata carried alongside an image (all fields default to
/// absent); populated by readers and consumed by writers/the renderer.
#[derive(Debug, Default)]
pub struct ImageMetadata {
    /// Wall-clock render time, when recorded.
    pub render_time_seconds: Option<Float>,
    /// World-to-camera transform, when recorded.
    pub camera_from_world: Option<SquareMatrix<Float, 4>>,
    /// World-to-NDC transform, when recorded.
    pub ndc_from_world: Option<SquareMatrix<Float, 4>>,
    /// Pixel region this image covers within the full resolution.
    pub pixel_bounds: Option<Bounds2i>,
    /// Full film resolution (may exceed this image's own resolution).
    pub full_resolution: Option<Point2i>,
    /// Samples per pixel the image was rendered with.
    pub samples_per_pixel: Option<i32>,
    /// Mean squared error vs. a reference image, when computed.
    pub mse: Option<Float>,
    /// Color space of the pixel data, when known.
    pub colorspace: Option<RGBColorSpace>,
    /// Free-form single-valued string metadata.
    pub strings: HashMap<String, String>,
    /// Free-form multi-valued string metadata.
    pub string_vectors: HashMap<String, Vec<String>>,
}

444
src/image/mod.rs Normal file
View file

@ -0,0 +1,444 @@
pub mod io;
pub mod metadata;
pub mod ops;
pub mod pixel;
use crate::core::pbrt::{Float, lerp};
use crate::geometry::{Bounds2f, Point2f, Point2fi, Point2i};
use crate::utils::color::{ColorEncoding, ColorEncodingTrait, LINEAR};
use crate::utils::containers::Array2D;
use crate::utils::math::square;
use core::hash;
use half::f16;
use pixel::PixelStorage;
use rayon::prelude::*;
use smallvec::{SmallVec, smallvec};
use std::ops::{Deref, DerefMut};
pub use metadata::{ImageChannelDesc, ImageChannelValues, ImageMetadata, WrapMode, WrapMode2D};
/// Storage precision of a texel component.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum PixelFormat {
    U8,
    F16,
    F32,
}
impl PixelFormat {
    /// True for 8-bit integer storage.
    pub fn is_8bit(&self) -> bool {
        *self == PixelFormat::U8
    }
    /// True for 16-bit half-float storage.
    pub fn is_16bit(&self) -> bool {
        *self == PixelFormat::F16
    }
    /// True for 32-bit float storage.
    pub fn is_32bit(&self) -> bool {
        *self == PixelFormat::F32
    }
    /// Bytes occupied by one texel component.
    pub fn texel_bytes(&self) -> usize {
        match self {
            PixelFormat::U8 => 1,
            PixelFormat::F16 => 2,
            PixelFormat::F32 => 4,
        }
    }
}
impl std::fmt::Display for PixelFormat {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            PixelFormat::U8 => "U256",
            PixelFormat::F16 => "Half",
            PixelFormat::F32 => "Float",
        };
        write!(f, "{}", name)
    }
}
/// Interleaved pixel storage; one variant per PixelFormat. Float variants
/// hold linear values; U8 interpretation depends on the image's encoding.
#[derive(Debug, Clone)]
pub enum PixelData {
    U8(Vec<u8>),
    F16(Vec<f16>),
    F32(Vec<f32>),
}
/// An in-memory image: interleaved channel data plus the metadata needed to
/// interpret it (format, resolution, channel names, color encoding).
#[derive(Debug, Clone)]
pub struct Image {
    pub format: PixelFormat,
    pub resolution: Point2i,
    pub channel_names: Vec<String>,
    // How stored values map to linear values (relevant for U8 storage).
    pub encoding: ColorEncoding,
    pub pixels: PixelData,
}
/// Result of loading an image file: the pixels plus any metadata the format
/// carried (readers that have none return ImageMetadata::default()).
#[derive(Debug)]
pub struct ImageAndMetadata {
    pub image: Image,
    pub metadata: ImageMetadata,
}
impl Image {
    /// Allocates a zero-filled image from already-owned channel names.
    fn from_vector(
        format: PixelFormat,
        resolution: Point2i,
        channel_names: Vec<String>,
        encoding: ColorEncoding,
    ) -> Self {
        let size = (resolution.x() * resolution.y()) as usize * channel_names.len();
        let pixels = match format {
            PixelFormat::U8 => PixelData::U8(vec![0; size]),
            PixelFormat::F16 => PixelData::F16(vec![f16::ZERO; size]),
            PixelFormat::F32 => PixelData::F32(vec![0.0; size]),
        };
        Self {
            format,
            resolution,
            channel_names,
            encoding,
            pixels,
        }
    }
    /// Allocates a zero-filled image with the given format, resolution,
    /// channel names, and color encoding.
    pub fn new(
        format: PixelFormat,
        resolution: Point2i,
        channel_names: &[&str],
        encoding: ColorEncoding,
    ) -> Self {
        let owned_names = channel_names.iter().map(|s| s.to_string()).collect();
        Self::from_vector(format, resolution, owned_names, encoding)
    }
    /// Storage precision of the texels.
    pub fn format(&self) -> PixelFormat {
        self.format
    }
    /// Image resolution in pixels.
    pub fn resolution(&self) -> Point2i {
        self.resolution
    }
    /// Number of channels per pixel.
    pub fn n_channels(&self) -> usize {
        self.channel_names.len()
    }
    /// All channel names, in storage order.
    pub fn channel_names(&self) -> Vec<&str> {
        self.channel_names.iter().map(|s| s.as_str()).collect()
    }
    /// Channel names selected by `desc`, in the descriptor's order.
    pub fn channel_names_from_desc(&self, desc: &ImageChannelDesc) -> Vec<&str> {
        desc.offset
            .iter()
            .map(|&i| self.channel_names[i].as_str())
            .collect()
    }
    /// Color encoding used for U8 storage.
    pub fn encoding(&self) -> ColorEncoding {
        self.encoding
    }
    /// Flat index of pixel `p`'s first channel in the interleaved storage.
    pub fn pixel_offset(&self, p: Point2i) -> usize {
        (p.y() as usize * self.resolution.x() as usize + p.x() as usize) * self.n_channels()
    }
    /// Linear value of channel `c` at `p`, clamping out-of-bounds coords.
    pub fn get_channel(&self, p: Point2i, c: usize) -> Float {
        self.get_channel_with_wrap(p, c, WrapMode::Clamp.into())
    }
    /// Linear value of channel `c` at `p` under the given wrap mode; 0.0 if
    /// the wrap mode rejects the coordinate (WrapMode::Black).
    pub fn get_channel_with_wrap(&self, p: Point2i, c: usize, wrap: WrapMode2D) -> Float {
        let mut pp = p;
        if !self.remap_pixel_coords(&mut pp, wrap) {
            return 0.0;
        }
        let idx = self.pixel_offset(pp) + c;
        match &self.pixels {
            PixelData::U8(d) => u8::to_linear(d[idx], self.encoding),
            PixelData::F16(d) => f16::to_linear(d[idx], self.encoding),
            PixelData::F32(d) => f32::to_linear(d[idx], self.encoding),
        }
    }
    /// All channels at `p`, linearized; zeros when the wrap mode rejects `p`.
    pub fn get_channels(&self, p: Point2i, wrap: WrapMode2D) -> ImageChannelValues {
        let mut pp = p;
        if !self.remap_pixel_coords(&mut pp, wrap) {
            return ImageChannelValues(smallvec![0.0; self.n_channels()]);
        }
        let start_idx = self.pixel_offset(pp);
        let n_channels = self.n_channels();
        let mut values: SmallVec<[Float; 4]> = SmallVec::with_capacity(n_channels);
        match &self.pixels {
            PixelData::U8(data) => {
                let slice = &data[start_idx..start_idx + n_channels];
                for &v in slice {
                    values.push(u8::to_linear(v, self.encoding));
                }
            }
            PixelData::F16(data) => {
                let slice = &data[start_idx..start_idx + n_channels];
                for &v in slice {
                    values.push(f16::to_linear(v, self.encoding));
                }
            }
            PixelData::F32(data) => {
                let slice = &data[start_idx..start_idx + n_channels];
                for &v in slice {
                    values.push(f32::to_linear(v, self.encoding));
                }
            }
        }
        ImageChannelValues(values)
    }
    /// The channels selected by `desc` at `p`, linearized, in descriptor
    /// order; zeros when the wrap mode rejects `p`.
    pub fn get_channels_desc(
        &self,
        p: Point2i,
        desc: &ImageChannelDesc,
        wrap: WrapMode2D,
    ) -> ImageChannelValues {
        let mut pp = p;
        if !self.remap_pixel_coords(&mut pp, wrap) {
            return ImageChannelValues(smallvec![0.0; desc.offset.len()]);
        }
        let pixel_offset = self.pixel_offset(pp);
        let mut values: SmallVec<[Float; 4]> = SmallVec::with_capacity(desc.offset.len());
        match &self.pixels {
            PixelData::U8(data) => {
                for &channel_idx in &desc.offset {
                    let val = data[pixel_offset + channel_idx];
                    values.push(u8::to_linear(val, self.encoding));
                }
            }
            PixelData::F16(data) => {
                for &channel_idx in &desc.offset {
                    let val = data[pixel_offset + channel_idx];
                    values.push(f16::to_linear(val, self.encoding));
                }
            }
            PixelData::F32(data) => {
                for &channel_idx in &desc.offset {
                    let val = data[pixel_offset + channel_idx];
                    values.push(f32::to_linear(val, self.encoding));
                }
            }
        }
        ImageChannelValues(values)
    }
    /// All channels at `p` with clamp wrapping.
    pub fn get_channels_default(&self, p: Point2i) -> ImageChannelValues {
        self.get_channels(p, WrapMode::Clamp.into())
    }
    /// Identity descriptor covering every channel of this image.
    pub fn all_channels_desc(&self) -> ImageChannelDesc {
        ImageChannelDesc {
            offset: (0..self.n_channels()).collect(),
        }
    }
    /// Resolves channel names to a descriptor; errors if any is missing.
    pub fn get_channel_desc(
        &self,
        requested_channels: &[&str],
    ) -> Result<ImageChannelDesc, String> {
        let mut offset = Vec::with_capacity(requested_channels.len());
        for &req in requested_channels.iter() {
            match self.channel_names.iter().position(|n| n == req) {
                Some(idx) => {
                    offset.push(idx);
                }
                None => {
                    return Err(format!(
                        "Image is missing requested channel '{}'. Available channels: {:?}",
                        req, self.channel_names
                    ));
                }
            }
        }
        Ok(ImageChannelDesc { offset })
    }
    /// Stores a linear value into channel `c` at `p`, encoding as needed.
    /// NaN is replaced by 0.0. `p` must be in bounds.
    pub fn set_channel(&mut self, p: Point2i, c: usize, value: Float) {
        let val_no_nan = if value.is_nan() { 0.0 } else { value };
        let offset = self.pixel_offset(p) + c;
        match &mut self.pixels {
            PixelData::U8(data) => {
                // U8 storage goes through the inverse color encoding.
                let linear = [val_no_nan];
                self.encoding
                    .from_linear_slice(&linear, &mut data[offset..offset + 1]);
            }
            PixelData::F16(data) => data[offset] = f16::from_f32(val_no_nan),
            PixelData::F32(data) => data[offset] = val_no_nan,
        }
    }
    /// Stores `values` into the channels selected by `desc` at `p`.
    pub fn set_channels(
        &mut self,
        p: Point2i,
        desc: &ImageChannelDesc,
        values: &ImageChannelValues,
    ) {
        assert_eq!(desc.size(), values.len());
        for i in 0..desc.size() {
            self.set_channel(p, desc.offset[i], values[i]);
        }
    }
    /// Stores `values` into every channel at `p`, in storage order.
    pub fn set_channels_all(&mut self, p: Point2i, values: &ImageChannelValues) {
        self.set_channels(p, &self.all_channels_desc(), values)
    }
    /// Remaps `p` in place per the wrap mode. Returns false when the
    /// coordinate is rejected (WrapMode::Black and out of bounds).
    fn remap_pixel_coords(&self, p: &mut Point2i, wrap_mode: WrapMode2D) -> bool {
        for i in 0..2 {
            if p[i] >= 0 && p[i] < self.resolution[i] {
                continue;
            }
            match wrap_mode.uv[i] {
                WrapMode::Black => return false,
                WrapMode::Clamp => p[i] = p[i].clamp(0, self.resolution[i] - 1),
                WrapMode::Repeat => p[i] = p[i].rem_euclid(self.resolution[i]),
                WrapMode::OctahedralSphere => {
                    // NOTE(review): simplified to clamp; full octahedral
                    // reflection is not implemented here — confirm intended.
                    p[i] = p[i].clamp(0, self.resolution[i] - 1);
                }
            }
        }
        true
    }
    /// Bilinearly interpolates channel `c` at continuous coordinate
    /// `p` in [0,1]^2, with clamp wrapping.
    pub fn bilerp_channel(&self, p: Point2f, c: usize) -> Float {
        self.bilerp_channel_with_wrap(p, c, WrapMode::Clamp.into())
    }
    /// Bilinearly interpolates channel `c` at `p` in [0,1]^2 under the
    /// given wrap mode (texel centers at half-integer coordinates).
    pub fn bilerp_channel_with_wrap(&self, p: Point2f, c: usize, wrap_mode: WrapMode2D) -> Float {
        let x = p.x() * self.resolution.x() as Float - 0.5;
        let y = p.y() * self.resolution.y() as Float - 0.5;
        let xi = x.floor() as i32;
        let yi = y.floor() as i32;
        let dx = x - xi as Float;
        let dy = y - yi as Float;
        let v00 = self.get_channel_with_wrap(Point2i::new(xi, yi), c, wrap_mode);
        let v10 = self.get_channel_with_wrap(Point2i::new(xi + 1, yi), c, wrap_mode);
        let v01 = self.get_channel_with_wrap(Point2i::new(xi, yi + 1), c, wrap_mode);
        let v11 = self.get_channel_with_wrap(Point2i::new(xi + 1, yi + 1), c, wrap_mode);
        lerp(dy, lerp(dx, v00, v10), lerp(dx, v01, v11))
    }
    /// Nearest-neighbor lookup of channel `c` at continuous coordinate
    /// `p` in [0,1]^2 under the given wrap mode.
    pub fn lookup_nearest_channel_with_wrap(
        &self,
        p: Point2f,
        c: usize,
        wrap_mode: WrapMode2D,
    ) -> Float {
        // Scale to pixel space *before* truncating to an integer. The
        // previous code cast p to i32 first, which truncated the [0,1]
        // coordinate to 0 or 1 and then multiplied by the resolution,
        // sampling the wrong (usually first) texel.
        let pi = Point2i::new(
            (p.x() * self.resolution.x() as Float) as i32,
            (p.y() * self.resolution.y() as Float) as i32,
        );
        self.get_channel_with_wrap(pi, c, wrap_mode)
    }
    /// Nearest-neighbor lookup with clamp wrapping.
    pub fn lookup_nearest_channel(&self, p: Point2f, c: usize) -> Float {
        self.lookup_nearest_channel_with_wrap(p, c, WrapMode::Clamp.into())
    }
    /// Builds a 2D sampling distribution: per-pixel channel average scaled
    /// by `dxd_a` evaluated at the pixel center mapped into `domain`.
    /// Rows are processed in parallel.
    pub fn get_sampling_distribution<F>(&self, dxd_a: F, domain: Bounds2f) -> Array2D<Float>
    where
        F: Fn(Point2f) -> Float + Sync + Send,
    {
        let width = self.resolution.x();
        let height = self.resolution.y();
        let mut dist = Array2D::new_with_dims(width as usize, height as usize);
        dist.values
            .par_chunks_mut(width as usize)
            .enumerate()
            .for_each(|(y, row)| {
                let y = y as i32;
                for (x, out_val) in row.iter_mut().enumerate() {
                    let x = x as i32;
                    let value = self.get_channels_default(Point2i::new(x, y)).average();
                    let u = (x as Float + 0.5) / width as Float;
                    let v = (y as Float + 0.5) / height as Float;
                    let p = domain.lerp(Point2f::new(u, v));
                    *out_val = value * dxd_a(p);
                }
            });
        dist
    }
    /// Sampling distribution with a uniform density over the unit square.
    pub fn get_sampling_distribution_uniform(&self) -> Array2D<Float> {
        let default_domain = Bounds2f::from_points(Point2f::new(0.0, 0.0), Point2f::new(1.0, 1.0));
        self.get_sampling_distribution(|_| 1.0, default_domain)
    }
    /// Per-channel mean squared error of this image against `ref_img` over
    /// the channels in `desc`. Infinite squared errors are skipped.
    ///
    /// When `generate_mse_image` is true, also returns an F32 image holding
    /// the per-pixel squared errors.
    pub fn mse(
        &self,
        desc: ImageChannelDesc,
        ref_img: &Image,
        generate_mse_image: bool,
    ) -> (ImageChannelValues, Option<Image>) {
        let mut sum_se: Vec<f64> = vec![0.; desc.size()];
        let names_ref = self.channel_names_from_desc(&desc);
        let ref_desc = ref_img
            .get_channel_desc(&names_ref)
            .expect("Channels not found in image");
        assert_eq!(self.resolution(), ref_img.resolution());
        let width = self.resolution.x() as usize;
        let height = self.resolution.y() as usize;
        let n_channels = desc.offset.len();
        let mut mse_pixels = if generate_mse_image {
            vec![0.0f32; width * height * n_channels]
        } else {
            Vec::new()
        };
        for y in 0..self.resolution().y() {
            for x in 0..self.resolution().x() {
                let v = self.get_channels_desc(Point2i::new(x, y), &desc, WrapMode::Clamp.into());
                // Reference values must come from ref_img (the previous code
                // read them from self, so the MSE was always ~0).
                let v_ref =
                    ref_img.get_channels_desc(Point2i::new(x, y), &ref_desc, WrapMode::Clamp.into());
                for c in 0..desc.size() {
                    let se = square(v[c] as f64 - v_ref[c] as f64);
                    if se.is_infinite() {
                        continue;
                    }
                    sum_se[c] += se;
                    if generate_mse_image {
                        let idx = (y as usize * width + x as usize) * n_channels + c;
                        mse_pixels[idx] = se as f32;
                    }
                }
            }
        }
        let pixel_count = (self.resolution.x() * self.resolution.y()) as f64;
        let mse_values: SmallVec<[Float; 4]> =
            sum_se.iter().map(|&s| (s / pixel_count) as Float).collect();
        let mse_image = if generate_mse_image {
            // Move the accumulated per-pixel errors into the debug image
            // (previously the image was returned zero-filled and the
            // computed mse_pixels buffer was discarded).
            let mut img = Image::new(PixelFormat::F32, self.resolution, &names_ref, LINEAR);
            img.pixels = PixelData::F32(mse_pixels);
            Some(img)
        } else {
            None
        };
        (ImageChannelValues(mse_values), mse_image)
    }
}

414
src/image/ops.rs Normal file
View file

@ -0,0 +1,414 @@
// use rayon::prelude::*;
use crate::core::pbrt::Float;
use crate::geometry::{Bounds2i, Point2i};
use crate::image::pixel::PixelStorage;
use crate::image::{Image, PixelData, PixelFormat, WrapMode, WrapMode2D};
use crate::utils::math::windowed_sinc;
use rayon::prelude::*;
use std::sync::{Arc, Mutex};
/// Precomputed 4-tap filter for one output coordinate of a resampling pass:
/// the first contributing source pixel plus the four tap weights.
#[derive(Debug, Clone, Copy)]
pub struct ResampleWeight {
    pub first_pixel: i32,
    pub weight: [Float; 4],
}
impl Image {
    /// Flips the image vertically in place (top row swaps with bottom row).
    pub fn flip_y(&mut self) {
        let res = self.resolution;
        let nc = self.n_channels();
        match &mut self.pixels {
            PixelData::U8(d) => flip_y_kernel(d, res, nc),
            PixelData::F16(d) => flip_y_kernel(d, res, nc),
            PixelData::F32(d) => flip_y_kernel(d, res, nc),
        }
    }
    /// Returns a copy of the pixels inside `bounds` as a new image with the
    /// same format, channels, and encoding.
    /// NOTE(review): `bounds` is assumed to lie within the image — the copy
    /// kernel will panic on out-of-range slices otherwise.
    pub fn crop(&self, bounds: Bounds2i) -> Image {
        let new_res = Point2i::new(
            bounds.p_max.x() - bounds.p_min.x(),
            bounds.p_max.y() - bounds.p_min.y(),
        );
        let mut new_image = Image::from_vector(
            self.format,
            new_res,
            self.channel_names.clone(),
            self.encoding,
        );
        match (&self.pixels, &mut new_image.pixels) {
            (PixelData::U8(src), PixelData::U8(dst)) => {
                crop_kernel(src, dst, self.resolution, bounds, self.n_channels())
            }
            (PixelData::F16(src), PixelData::F16(dst)) => {
                crop_kernel(src, dst, self.resolution, bounds, self.n_channels())
            }
            (PixelData::F32(src), PixelData::F32(dst)) => {
                crop_kernel(src, dst, self.resolution, bounds, self.n_channels())
            }
            _ => panic!("Format mismatch in crop"),
        }
        new_image
    }
    /// Copies the (possibly out-of-bounds) rect `extent` into `buf` as
    /// linear floats, applying `wrap` for out-of-bounds coordinates.
    pub fn copy_rect_out(&self, extent: Bounds2i, buf: &mut [Float], wrap: WrapMode2D) {
        match &self.pixels {
            PixelData::U8(d) => copy_rect_out_kernel(d, self, extent, buf, wrap),
            PixelData::F16(d) => copy_rect_out_kernel(d, self, extent, buf, wrap),
            PixelData::F32(d) => copy_rect_out_kernel(d, self, extent, buf, wrap),
        }
    }
    /// Writes linear float values from `buf` into the rect `extent`,
    /// silently skipping any parts of the rect outside the image.
    pub fn copy_rect_in(&mut self, extent: Bounds2i, buf: &[Float]) {
        let resolution = self.resolution;
        let n_channels = self.n_channels();
        let encoding = self.encoding;
        match &mut self.pixels {
            PixelData::U8(d) => {
                copy_rect_in_kernel(d, resolution, n_channels, encoding, extent, buf)
            }
            PixelData::F16(d) => {
                copy_rect_in_kernel(d, resolution, n_channels, encoding, extent, buf)
            }
            PixelData::F32(d) => {
                copy_rect_in_kernel(d, resolution, n_channels, encoding, extent, buf)
            }
        }
    }
    /// Upsamples an F32 image to `new_res` (which must be >= the current
    /// resolution on both axes) with a separable windowed-sinc filter,
    /// processing 16x16 output tiles in parallel.
    pub fn float_resize_up(&self, new_res: Point2i, wrap_mode: WrapMode2D) -> Image {
        assert!(new_res.x() >= self.resolution.x() && new_res.y() >= self.resolution.y());
        assert!(
            matches!(self.format, PixelFormat::F32),
            "ResizeUp requires Float format"
        );
        // The output image is shared across worker threads; each tile takes
        // the lock only to write its finished block.
        let resampled_image = Arc::new(Mutex::new(Image::from_vector(
            PixelFormat::F32, // Force float output
            new_res,
            self.channel_names.clone(),
            self.encoding,
        )));
        let x_weights = resample_weights(self.resolution.x() as usize, new_res.x() as usize);
        let y_weights = resample_weights(self.resolution.y() as usize, new_res.y() as usize);
        let n_channels = self.n_channels();
        let tile_size = 16;
        let tiles = generate_tiles(new_res, tile_size);
        tiles.par_iter().for_each(|out_extent| {
            // Source extent needed by this tile: the 4-tap filters reach
            // from the first tap of the first output pixel to first+4 of
            // the last.
            let x_start = x_weights[out_extent.p_min.x() as usize].first_pixel;
            let x_end = x_weights[(out_extent.p_max.x() - 1) as usize].first_pixel + 4;
            let y_start = y_weights[out_extent.p_min.y() as usize].first_pixel;
            let y_end = y_weights[(out_extent.p_max.y() - 1) as usize].first_pixel + 4;
            let in_extent =
                Bounds2i::from_points(Point2i::new(x_start, y_start), Point2i::new(x_end, y_end));
            let mut in_buf = vec![0.0; in_extent.area() as usize * n_channels];
            self.copy_rect_out(in_extent, &mut in_buf, wrap_mode);
            let out_buf = compute_resize_tile(
                &in_buf,
                in_extent,
                *out_extent,
                n_channels,
                &x_weights,
                &y_weights,
            );
            let mut guard = resampled_image.lock().unwrap();
            guard.copy_rect_in(*out_extent, &out_buf);
        });
        // All worker threads have finished, so the Arc is unique again.
        Arc::try_unwrap(resampled_image)
            .unwrap()
            .into_inner()
            .unwrap()
    }
    /// Builds a MIP pyramid by repeated 2x box-filter downsampling until
    /// reaching 1x1; level 0 is `base` itself.
    /// NOTE(review): the `_wrap` argument is currently ignored — every level
    /// is built with clamp wrapping. Confirm whether that is intended.
    pub fn generate_pyramid(base: Image, _wrap: WrapMode) -> Vec<Image> {
        let mut levels = vec![base];
        let internal_wrap = WrapMode2D {
            uv: [WrapMode::Clamp; 2],
        };
        loop {
            let prev = levels.last().unwrap();
            let old = prev.resolution;
            if old.x() == 1 && old.y() == 1 {
                break;
            }
            // Halve each axis, never dropping below 1.
            let new_res = Point2i::new((old.x() / 2).max(1), (old.y() / 2).max(1));
            let mut next = Image::from_vector(
                prev.format,
                new_res,
                prev.channel_names.clone(),
                prev.encoding,
            );
            match &mut next.pixels {
                PixelData::U8(d) => downsample_kernel(d, new_res, prev, internal_wrap),
                PixelData::F16(d) => downsample_kernel(d, new_res, prev, internal_wrap),
                PixelData::F32(d) => downsample_kernel(d, new_res, prev, internal_wrap),
            }
            levels.push(next);
        }
        levels
    }
}
fn flip_y_kernel<T: PixelStorage>(pixels: &mut [T], res: Point2i, channels: usize) {
let w = res.x() as usize;
let h = res.y() as usize;
let stride = w * channels;
for y in 0..(h / 2) {
let bot = h - 1 - y;
for i in 0..stride {
pixels.swap(y * stride + i, bot * stride + i);
}
}
}
/// Copies the rows of `bounds` out of `src` into the dense `dst` buffer,
/// one destination row per parallel task. `bounds` must lie within `src_res`.
fn crop_kernel<T: PixelStorage>(
    src: &[T],
    dst: &mut [T],
    src_res: Point2i,
    bounds: Bounds2i,
    channels: usize,
) {
    let row_texels = (bounds.p_max.x() - bounds.p_min.x()) as usize;
    let row_len = row_texels * channels;
    dst.par_chunks_mut(row_len)
        .enumerate()
        .for_each(|(row_idx, dst_row)| {
            // Source row corresponding to this destination row.
            let src_y = bounds.p_min.y() as usize + row_idx;
            let start = (src_y * src_res.x() as usize + bounds.p_min.x() as usize) * channels;
            dst_row.copy_from_slice(&src[start..start + row_len]);
        });
}
/// Reads the rect `extent` out of `src` into `buf` as linear floats, one
/// output row per parallel task. In-bounds texels are read directly from
/// storage; out-of-bounds texels fall back to the image's wrap handling.
fn copy_rect_out_kernel<T: PixelStorage>(
    src: &[T],
    image: &Image,
    extent: Bounds2i,
    buf: &mut [Float],
    wrap: WrapMode2D,
) {
    let width = (extent.p_max.x() - extent.p_min.x()) as usize;
    let n_ch = image.n_channels();
    let encoding = image.encoding;
    let resolution = image.resolution;
    buf.par_chunks_mut(width * n_ch)
        .enumerate()
        .for_each(|(row_idx, row)| {
            let y = extent.p_min.y() + row_idx as i32;
            let y_inside = y >= 0 && y < resolution.y();
            for (col, texel) in row.chunks_mut(n_ch).enumerate() {
                let x = extent.p_min.x() + col as i32;
                if y_inside && x >= 0 && x < resolution.x() {
                    // Fast path: in bounds, decode straight from storage.
                    let base = (y as usize * resolution.x() as usize + x as usize) * n_ch;
                    for (c, out) in texel.iter_mut().enumerate() {
                        *out = T::to_linear(src[base + c], encoding);
                    }
                } else {
                    // Slow path: out of bounds; the accessor applies the
                    // wrap-mode remapping.
                    let p = Point2i::new(x, y);
                    for (c, out) in texel.iter_mut().enumerate() {
                        *out = image.get_channel_with_wrap(p, c, wrap);
                    }
                }
            }
        });
}
/// Writes linear float values from `buf` into the rect `extent` of `dst`,
/// encoding each sample for the storage type and silently skipping texels
/// that fall outside the image.
fn copy_rect_in_kernel<T: PixelStorage>(
    dst: &mut [T],
    res: Point2i,
    channels: usize,
    enc: crate::utils::color::ColorEncoding,
    extent: Bounds2i,
    buf: &[Float],
) {
    let row_len = (extent.p_max.x() - extent.p_min.x()) as usize * channels;
    let res_x = res.x() as usize;
    for (y_rel, row) in buf.chunks(row_len).enumerate() {
        let y = extent.p_min.y() + y_rel as i32;
        // Skip rows that fall outside the destination image.
        if y < 0 || y >= res.y() {
            continue;
        }
        let dst_row_start = y as usize * res_x * channels;
        for (x_pixel, texel) in row.chunks(channels).enumerate() {
            let x = extent.p_min.x() + x_pixel as i32;
            if x < 0 || x >= res.x() {
                continue;
            }
            let base = dst_row_start + x as usize * channels;
            for (c, &val) in texel.iter().enumerate() {
                dst[base + c] = T::from_linear(val, enc);
            }
        }
    }
}
/// Fills `dst` (at half resolution) by box-filtering each up-to-2x2 source
/// neighborhood of `prev`, one destination row per parallel task. Edge
/// texels average only the taps that exist.
fn downsample_kernel<T: PixelStorage>(
    dst: &mut [T],
    dst_res: Point2i,
    prev: &Image,
    wrap: WrapMode2D,
) {
    let width = dst_res.x() as usize;
    let n_ch = prev.n_channels();
    let encoding = prev.encoding;
    let src_res = prev.resolution;
    dst.par_chunks_mut(width * n_ch)
        .enumerate()
        .for_each(|(y, row)| {
            for x in 0..width {
                let (sx0, sy0) = ((x * 2) as i32, (y * 2) as i32);
                for c in 0..n_ch {
                    // Average the source taps that are inside the image.
                    let mut total = 0.0;
                    let mut n_valid = 0.0;
                    for sy in sy0..sy0 + 2 {
                        for sx in sx0..sx0 + 2 {
                            if sx < src_res.x() && sy < src_res.y() {
                                total +=
                                    prev.get_channel_with_wrap(Point2i::new(sx, sy), c, wrap);
                                n_valid += 1.0;
                            }
                        }
                    }
                    let mean = if n_valid > 0.0 { total / n_valid } else { 0.0 };
                    row[x * n_ch + c] = T::from_linear(mean, encoding);
                }
            }
        });
}
/// Precomputes, for every output coordinate, the first contributing source
/// pixel and four normalized windowed-sinc tap weights.
fn resample_weights(old_res: usize, new_res: usize) -> Vec<ResampleWeight> {
    let filter_radius = 2.0;
    let tau = 2.0;
    (0..new_res)
        .map(|i| {
            // Continuous source-space center of output pixel i.
            let center = (i as Float + 0.5) * old_res as Float / new_res as Float;
            let first_pixel = ((center - filter_radius) + 0.5).floor() as i32;
            let mut weight = [0.0; 4];
            let mut total = 0.0;
            for (j, w) in weight.iter_mut().enumerate() {
                let sample_pos = (first_pixel + j as i32) as Float + 0.5;
                *w = windowed_sinc(sample_pos - center, filter_radius, tau);
                total += *w;
            }
            // Normalize so the taps sum to one.
            let normalization = 1.0 / total;
            for w in weight.iter_mut() {
                *w *= normalization;
            }
            ResampleWeight {
                first_pixel,
                weight,
            }
        })
        .collect()
}
/// Partitions a `res`-sized image into `tile_size` x `tile_size` tiles;
/// edge tiles are clipped to the image bounds.
fn generate_tiles(res: Point2i, tile_size: i32) -> Vec<Bounds2i> {
    let tiles_x = (res.x() + tile_size - 1) / tile_size;
    let tiles_y = (res.y() + tile_size - 1) / tile_size;
    let mut tiles = Vec::with_capacity((tiles_x * tiles_y) as usize);
    for ty in 0..tiles_y {
        for tx in 0..tiles_x {
            let x0 = tx * tile_size;
            let y0 = ty * tile_size;
            tiles.push(Bounds2i::from_points(
                Point2i::new(x0, y0),
                Point2i::new((x0 + tile_size).min(res.x()), (y0 + tile_size).min(res.y())),
            ));
        }
    }
    tiles
}
/// Resamples one output tile using a separable two-pass filter: first along
/// X into an intermediate buffer, then along Y into the result. `in_buf`
/// holds the source rect `in_extent` (as filled by `copy_rect_out`), and
/// `x_w`/`y_w` are the per-output-coordinate 4-tap weights.
fn compute_resize_tile(
    in_buf: &[Float],
    in_extent: Bounds2i,
    out_extent: Bounds2i,
    n_channels: usize,
    x_w: &[ResampleWeight],
    y_w: &[ResampleWeight],
) -> Vec<Float> {
    let nx_out = (out_extent.p_max.x() - out_extent.p_min.x()) as usize;
    let ny_out = (out_extent.p_max.y() - out_extent.p_min.y()) as usize;
    let nx_in = (in_extent.p_max.x() - in_extent.p_min.x()) as usize;
    let ny_in = (in_extent.p_max.y() - in_extent.p_min.y()) as usize;
    // Intermediate buffer: source rows, output-width columns.
    let mut x_buf = vec![0.0; n_channels * ny_in * nx_out];
    // Resize X
    for y in 0..ny_in {
        for x in 0..nx_out {
            // Weights are indexed by global output coordinate; tap positions
            // are translated into in_buf-local coordinates.
            let x_global = out_extent.p_min.x() + x as i32;
            let w = &x_w[x_global as usize];
            let x_in_start = (w.first_pixel - in_extent.p_min.x()) as usize;
            let in_idx_base = (y * nx_in + x_in_start) * n_channels;
            let out_idx_base = (y * nx_out + x) * n_channels;
            for c in 0..n_channels {
                let mut val = 0.0;
                for k in 0..4 {
                    val += w.weight[k] * in_buf[in_idx_base + k * n_channels + c];
                }
                x_buf[out_idx_base + c] = val;
            }
        }
    }
    let mut out_buf = vec![0.0; n_channels * nx_out * ny_out];
    // Resize Y
    for x in 0..nx_out {
        for y in 0..ny_out {
            let y_global = out_extent.p_min.y() + y as i32;
            let w = &y_w[y_global as usize];
            let y_in_start = (w.first_pixel - in_extent.p_min.y()) as usize;
            let in_idx_base = (y_in_start * nx_out + x) * n_channels;
            let out_idx_base = (y * nx_out + x) * n_channels;
            let stride = nx_out * n_channels;
            for c in 0..n_channels {
                let mut val = 0.0;
                for k in 0..4 {
                    val += w.weight[k] * x_buf[in_idx_base + k * stride + c];
                }
                // Clamp negative lobes of the sinc filter to zero.
                out_buf[out_idx_base + c] = val.max(0.0);
            }
        }
    }
    out_buf
}

47
src/image/pixel.rs Normal file
View file

@ -0,0 +1,47 @@
use crate::core::pbrt::Float;
use crate::utils::color::{ColorEncoding, ColorEncodingTrait};
use half::f16;
/// Abstraction over the three texel storage types (u8 / f16 / f32) so image
/// algorithms can be written generically.
///
/// `from_linear` encodes a linear value into the storage representation;
/// `to_linear` decodes it back. The float impls ignore the encoding (their
/// values are stored linearly); u8 round-trips through the image's
/// `ColorEncoding`.
pub trait PixelStorage: Copy + Send + Sync + 'static + PartialEq {
    /// Encodes a linear value into this storage format.
    fn from_linear(val: Float, encoding: ColorEncoding) -> Self;
    /// Decodes a stored value back to a linear Float.
    fn to_linear(self, encoding: ColorEncoding) -> Float;
}
// f32 stores linear values directly; the encoding is unused.
impl PixelStorage for f32 {
    #[inline(always)]
    fn from_linear(val: Float, _enc: ColorEncoding) -> Self {
        val
    }
    #[inline(always)]
    fn to_linear(self, _enc: ColorEncoding) -> Float {
        self
    }
}
// f16 stores linear values at half precision; the encoding is unused.
impl PixelStorage for f16 {
    #[inline(always)]
    fn from_linear(val: Float, _enc: ColorEncoding) -> Self {
        f16::from_f32(val)
    }
    #[inline(always)]
    fn to_linear(self, _enc: ColorEncoding) -> Float {
        self.to_f32()
    }
}
// u8 applies the color encoding (e.g. sRGB) in both directions via the
// slice-based encode/decode entry points.
impl PixelStorage for u8 {
    #[inline(always)]
    fn from_linear(val: Float, enc: ColorEncoding) -> Self {
        let mut out = [0u8];
        enc.from_linear_slice(&[val], &mut out);
        out[0]
    }
    #[inline(always)]
    fn to_linear(self, enc: ColorEncoding) -> Float {
        let mut out = [0.0];
        enc.to_linear_slice(&[self], &mut out);
        out[0]
    }
}

1189
src/integrators/mod.rs Normal file

File diff suppressed because it is too large Load diff

264
src/integrators/pipeline.rs Normal file
View file

@ -0,0 +1,264 @@
use crate::core::{options::PBRTOptions, sampler::get_camera_sample};
use crate::image::{Image, ImageMetadata};
use indicatif::{ProgressBar, ProgressStyle};
use std::io::Write;
use std::path::Path;
use super::*;
/// Thin wrapper around an indicatif progress bar; hidden in quiet mode.
struct PbrtProgress {
    bar: ProgressBar,
}
impl PbrtProgress {
    /// Creates a bar sized for `total_work` units; `quiet` yields a hidden
    /// bar so call sites need no special-casing.
    fn new(total_work: u64, description: &str, quiet: bool) -> Self {
        let bar = if quiet {
            ProgressBar::hidden()
        } else {
            let visible = ProgressBar::new(total_work);
            visible.set_style(
                ProgressStyle::default_bar()
                    .template("[{elapsed_precise}] {bar:40.cyan/blue} {pos:>7}/{len:7} {msg}")
                    .unwrap()
                    .progress_chars("=>-"),
            );
            visible.set_message(description.to_string());
            visible
        };
        Self { bar }
    }
    /// Advances the bar by `amount` work units.
    fn update(&self, amount: u64) {
        self.bar.inc(amount);
    }
    /// Finishes the bar with a completion message.
    fn done(&self) {
        self.bar.finish_with_message("Done");
    }
    /// Seconds elapsed since the bar was created.
    fn elapsed_seconds(&self) -> f32 {
        self.bar.elapsed().as_secs_f32()
    }
}
/// Splits the pixel bounds into 16x16 render tiles, clipping edge tiles to
/// the bounds.
fn generate_tiles(bounds: Bounds2i) -> Vec<Bounds2i> {
    const TILE_SIZE: i32 = 16;
    let mut tiles = Vec::new();
    let mut y = bounds.p_min.y();
    while y < bounds.p_max.y() {
        let mut x = bounds.p_min.x();
        while x < bounds.p_max.x() {
            let p_max = Point2i::new(
                (x + TILE_SIZE).min(bounds.p_max.x()),
                (y + TILE_SIZE).min(bounds.p_max.y()),
            );
            tiles.push(Bounds2i::from_points(Point2i::new(x, y), p_max));
            x += TILE_SIZE;
        }
        y += TILE_SIZE;
    }
    tiles
}
/// Drives the renderer: samples every pixel in waves of increasing size,
/// updating progress, optionally tracking MSE against a reference image,
/// and writing partial/final images.
///
/// With `options.debug_start` set, traces exactly one pixel sample and
/// returns.
pub fn render<T>(
    integrator: &T,
    _base: &IntegratorBase,
    camera: &Camera,
    sampler_prototype: &Sampler,
) where
    T: RayIntegratorTrait,
{
    let options = get_options();
    // Debug mode: evaluate a single (pixel, sample) pair and exit.
    if let Some((p_pixel, sample_index)) = options.debug_start {
        let s_index = sample_index as usize;
        let scratch = Bump::new();
        let mut tile_sampler = sampler_prototype.clone();
        tile_sampler.start_pixel_sample(p_pixel, s_index, None);
        evaluate_pixel_sample(
            integrator,
            camera,
            &mut tile_sampler,
            p_pixel,
            s_index,
            &scratch,
        );
        return;
    }
    let pixel_bounds = camera.get_film().pixel_bounds();
    let spp = sampler_prototype.samples_per_pixel();
    let total_work = (pixel_bounds.area() as u64) * (spp as u64);
    let progress = PbrtProgress::new(total_work, "Rendering", options.quiet);
    // Samples are rendered in "waves": [wave_start, wave_end) per pass.
    let mut wave_start = 0;
    let mut wave_end = 1;
    let mut next_wave_size = 1;
    // Optional MSE tracking against a reference image.
    let mut reference_image: Option<Image> = None;
    let mut mse_out_file: Option<std::fs::File> = None;
    if let Some(ref_path) = &options.mse_reference_image {
        let image_and_metadata =
            Image::read(Path::new(&ref_path), None).expect("Could not load image");
        let image = image_and_metadata.image;
        let metadata = image_and_metadata.metadata;
        let resolution = image.resolution();
        let mse_pixel_bounds = metadata
            .pixel_bounds
            .unwrap_or_else(|| Bounds2i::from_points(Point2i::new(0, 0), resolution));
        if !mse_pixel_bounds.overlaps(&pixel_bounds) {
            panic!("Output pixel bounds dont fit inside the reference image");
        }
        // Crop the reference so it aligns exactly with our pixel bounds.
        let crop_p_min = Point2i::from(pixel_bounds.p_min - mse_pixel_bounds.p_min);
        let crop_p_max = Point2i::from(pixel_bounds.p_max - mse_pixel_bounds.p_min);
        let crop_bounds = Bounds2i::from_points(crop_p_min, crop_p_max);
        // crop() already returns an owned image; the old trailing .clone()
        // was a redundant full copy.
        let cropped_image = image.crop(crop_bounds);
        let cropped_resolution = cropped_image.resolution();
        let expected_res = Point2i::new(
            pixel_bounds.p_max.x() - pixel_bounds.p_min.x(),
            pixel_bounds.p_max.y() - pixel_bounds.p_min.y(),
        );
        reference_image = Some(cropped_image);
        assert_eq!(
            cropped_resolution, expected_res,
            "Cropped reference image resolution mismatch"
        );
        if let Some(out_path) = &options.mse_reference_output {
            // unwrap_or_else avoids eagerly formatting the panic message on
            // the success path (clippy::expect_fun_call).
            mse_out_file = Some(std::fs::File::create(out_path).unwrap_or_else(|_| {
                panic!("Failed to create MSE output file: {}", out_path)
            }));
        }
    }
    let tiles = generate_tiles(pixel_bounds);
    while wave_start < spp {
        // Render one wave of samples over all tiles in parallel. Each tile
        // gets its own sampler clone and scratch arena.
        tiles.par_iter().for_each(|tile_bounds| {
            let mut arena = Bump::with_capacity(65 * 1024);
            let mut sampler = sampler_prototype.clone();
            for p_pixel in tile_bounds {
                for sample_index in wave_start..wave_end {
                    sampler.start_pixel_sample(*p_pixel, sample_index, None);
                    evaluate_pixel_sample(
                        integrator,
                        camera,
                        &mut sampler,
                        *p_pixel,
                        sample_index,
                        &arena,
                    );
                    arena.reset();
                }
            }
            let work_done = (tile_bounds.area() as u64) * ((wave_end - wave_start) as u64);
            progress.update(work_done);
        });
        wave_start = wave_end;
        wave_end = (wave_end + next_wave_size).min(spp);
        // With a reference image, keep waves small so the MSE curve is
        // sampled at every wave; otherwise grow waves up to 64 samples.
        if reference_image.is_none() {
            next_wave_size = (2 * next_wave_size).min(64);
        }
        if wave_start == spp {
            progress.done();
        }
        if wave_start == spp || options.write_partial_images || reference_image.is_some() {
            let mut metadata = ImageMetadata {
                render_time_seconds: Some(progress.elapsed_seconds()),
                samples_per_pixel: Some(wave_start as i32),
                ..Default::default()
            };
            // Compute the running MSE *before* writing the image so the
            // value actually lands in the written metadata. (Previously it
            // was assigned after write_image and silently discarded.)
            if let Some(ref_img) = &reference_image {
                let splat_scale = 1.0 / (wave_start as Float);
                let film_metadata = ImageMetadata::default();
                let film_image = camera.get_film().get_image(&film_metadata, splat_scale);
                let (mse_values, _mse_debug_img) =
                    film_image.mse(film_image.all_channels_desc(), ref_img, false);
                let mse_avg = mse_values.average();
                if let Some(file) = &mut mse_out_file {
                    writeln!(file, "{}, {:.9}", wave_start, mse_avg).ok();
                    file.flush().ok();
                }
                metadata.mse = Some(mse_avg);
            }
            if wave_start == spp || options.write_partial_images {
                camera.init_metadata(&mut metadata);
                camera
                    .get_film()
                    .write_image(&metadata, 1.0 / wave_start as Float);
            }
        }
    }
}
/// Traces one camera ray for `pixel` and accumulates its radiance estimate
/// into the camera's film. Called once per (pixel, sample index) pair.
///
/// Follows pbrt-v4's `ImageTileIntegrator::EvaluatePixelSample`.
pub fn evaluate_pixel_sample<T: RayIntegratorTrait>(
    integrator: &T,
    camera: &Camera,
    sampler: &mut Sampler,
    pixel: Point2i,
    _sample_index: usize,
    scratch: &Bump,
) {
    // Sample the wavelengths for this ray path; pin the sample to 0.5 when
    // wavelength jitter is disabled so runs are reproducible.
    let mut lu = sampler.get1d();
    if get_options().disable_wavelength_jitter {
        lu = 0.5;
    }
    let lambda = camera.get_film().sample_wavelengths(lu);
    let mut film = camera.get_film();
    let filter = film.get_filter();
    let camera_sample = get_camera_sample(sampler, pixel, filter);
    if let Some(mut camera_ray) = camera.generate_ray_differential(camera_sample, &lambda) {
        // Generated ray directions are expected to be unit length.
        debug_assert!(camera_ray.ray.d.norm() > 0.999);
        debug_assert!(camera_ray.ray.d.norm() < 1.001);
        // Shrink ray differentials as sample counts grow: with n samples per
        // pixel each ray covers ~1/sqrt(n) of the pixel's footprint, clamped
        // below at 0.125. (Previously computed sqrt(n).max(0.125), i.e. the
        // reciprocal was missing, which *grew* the differentials.)
        let ray_diff_scale = (1.0 / (sampler.samples_per_pixel() as Float).sqrt()).max(0.125);
        // Scale differentials in the normal (jittered) case; the
        // disable_pixel_jitter debug option skips the scaling. The previous
        // condition was inverted.
        if !get_options().disable_pixel_jitter {
            camera_ray.ray.scale_differentials(ray_diff_scale);
        }
        let initialize_visible_surface = film.uses_visible_surface();
        let (mut l, visible_surface) = integrator.li(
            camera_ray.ray,
            &lambda,
            sampler,
            scratch,
            initialize_visible_surface,
        );
        // Apply the camera's ray weight, then discard non-finite estimates so
        // a single bad path cannot poison the film.
        l *= camera_ray.weight;
        if l.has_nans() || l.y(&lambda).is_infinite() {
            l = SampledSpectrum::new(0.);
        }
        film.add_sample(
            pixel,
            l,
            &lambda,
            visible_surface.as_ref(),
            camera_sample.filter_weight,
        );
    }
}

View file

@ -1,11 +1,13 @@
#![allow(unused_imports, dead_code)]
#![feature(float_erf)]
#![feature(f16)]
#![feature(generic_const_exprs)]
mod camera;
mod core;
mod geometry;
mod image;
mod integrators;
mod lights;
mod shapes;
mod spectra;
mod utils;

View file

@ -1,19 +1,255 @@
use super::{
DenselySampledSpectrum, LightBase, LightBounds, LightLiSample, LightSampleContext, LightTrait,
LightType, RGBIlluminantSpectrum, SampledSpectrum, SampledWavelengths, Spectrum, SpectrumTrait,
};
use crate::core::interaction::{
Interaction, InteractionTrait, SimpleInteraction, SurfaceInteraction,
};
use crate::core::medium::MediumInterface;
use crate::core::pbrt::Float;
use crate::shapes::ShapeTrait;
use crate::utils::spectrum::Spectrum;
use crate::core::pbrt::{Float, PI};
use crate::core::texture::{
FloatTexture, FloatTextureTrait, TextureEvalContext, TextureEvaluator,
UniversalTextureEvaluator,
};
use crate::geometry::{
Bounds3f, Normal3f, Point2f, Point2fi, Point2i, Point3f, Point3fi, Ray, Vector3f, VectorLike,
};
use crate::image::Image;
use crate::shapes::{Shape, ShapeSample, ShapeSampleContext, ShapeTrait};
use crate::utils::color::RGB;
use crate::utils::colorspace::RGBColorSpace;
use crate::utils::hash::hash_float;
use crate::utils::transform::Transform;
use std::sync::Arc;
pub struct DiffuseAreaLight<'a> {
pub l_emit: Spectrum,
pub shape: Arc<&'a dyn ShapeTrait>,
pub two_sided: bool,
pub area: Float,
pub flags: u8,
pub n_samples: i32,
pub medium_interface: MediumInterface,
light_to_world: Transform<Float>,
world_to_light: Transform<Float>,
/// An area light that emits diffusely from the surface of a shape,
/// optionally modulated by an RGB image over the shape's UVs.
#[derive(Clone, Debug)]
pub struct DiffuseAreaLight {
    base: LightBase,
    // Emitting geometry; `area` caches `shape.area()` from construction.
    shape: Shape,
    // Alpha mask texture. None when the alpha is a constant (a constant-zero
    // alpha downgrades the light to a non-sampled delta type in `new`).
    alpha: Option<FloatTexture>,
    area: Float,
    // When true, the shape emits from both sides of its surface.
    two_sided: bool,
    // Interned emitted radiance; multiplied by `scale` at sampling time.
    lemit: Arc<DenselySampledSpectrum>,
    scale: Float,
    // Optional emission image; requires `image_color_space` to be Some.
    image: Option<Image>,
    image_color_space: Option<Arc<RGBColorSpace>>,
}
impl DiffuseAreaLight {
    /// Builds a diffuse area light over `shape`, emitting `le * scale`.
    ///
    /// If `alpha` is a constant-zero texture the light never emits and is
    /// registered as `LightType::DeltaPosition` with no stored alpha, so it
    /// is effectively excluded from area-light sampling.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        render_from_light: Transform<Float>,
        medium_interface: MediumInterface,
        le: Spectrum,
        scale: Float,
        shape: Shape,
        alpha: FloatTexture,
        image: Option<Image>,
        image_color_space: Option<Arc<RGBColorSpace>>,
        two_sided: bool,
    ) -> Self {
        // Detect the constant-zero alpha special case up front.
        let is_constant_zero = match &alpha {
            FloatTexture::FloatConstant(tex) => tex.evaluate(&TextureEvalContext::default()) == 0.0,
            _ => false,
        };
        let (light_type, stored_alpha) = if is_constant_zero {
            (LightType::DeltaPosition, None)
        } else {
            (LightType::Area, Some(alpha))
        };
        let base = LightBase::new(light_type, &render_from_light, &medium_interface);
        let lemit = LightBase::lookup_spectrum(&le);
        // An emission image must provide identity-ordered R, G, B channels
        // and a color space to interpret them in.
        if let Some(im) = &image {
            let desc = im
                .get_channel_desc(&["R", "G", "B"])
                .expect("Image used for DiffuseAreaLight doesn't have R, G, B channels");
            assert_eq!(3, desc.size(), "Image channel description size mismatch");
            assert!(
                desc.is_identity(),
                "Image channel description is not identity"
            );
            assert!(
                image_color_space.is_some(),
                "Image provided but ColorSpace is missing"
            );
        }
        // Triangles and bilinear patches bake the transform into their
        // vertices; other shapes assume a scale-free render-from-light.
        let is_triangle_or_bilinear = matches!(shape, Shape::Triangle(_) | Shape::BilinearPatch(_));
        if render_from_light.has_scale(None) && !is_triangle_or_bilinear {
            println!(
                "Scaling detected in rendering to light space transformation! \
                 The system has numerous assumptions, implicit and explicit, \
                 that this transform will have no scale factors in it. \
                 Proceed at your own risk; your image may have errors."
            );
        }
        Self {
            base,
            area: shape.area(),
            shape,
            alpha: stored_alpha,
            two_sided,
            lemit,
            scale,
            image,
            image_color_space,
        }
    }
    /// Stochastic alpha test at `intr`: returns true when the point should
    /// be treated as cut away by the alpha mask.
    fn alpha_masked(&self, intr: &Interaction) -> bool {
        let Some(alpha_tex) = &self.alpha else {
            return false;
        };
        let ctx = TextureEvalContext::from(intr);
        let a = UniversalTextureEvaluator.evaluate_float(alpha_tex, &ctx);
        if a >= 1.0 {
            return false;
        }
        if a <= 0.0 {
            return true;
        }
        // Partial alpha: compare against a deterministic per-point hash so
        // the decision is stable across evaluations of the same point.
        hash_float(&intr.p()) > a
    }
}
impl LightTrait for DiffuseAreaLight {
    fn base(&self) -> &LightBase {
        &self.base
    }
    /// Total emitted power: Phi = (1 or 2) * pi * area * average radiance.
    fn phi(&self, lambda: SampledWavelengths) -> SampledSpectrum {
        let mut l = SampledSpectrum::new(0.);
        if let Some(image) = &self.image {
            // Average the image texels to get the mean emitted radiance.
            for y in 0..image.resolution().y() {
                for x in 0..image.resolution().x() {
                    let mut rgb = RGB::default();
                    for c in 0..3 {
                        rgb[c] = image.get_channel(Point2i::new(x, y), c);
                    }
                    l += RGBIlluminantSpectrum::new(
                        self.image_color_space.as_ref().unwrap(),
                        RGB::clamp_zero(rgb),
                    )
                    .sample(&lambda);
                }
            }
            l *= self.scale / (image.resolution().x() * image.resolution().y()) as Float;
        } else {
            l = self.lemit.sample(&lambda) * self.scale;
        }
        let two_side = if self.two_sided { 2. } else { 1. };
        PI * two_side * self.area * l
    }
    /// Samples a point on the emitting shape as seen from `ctx` and returns
    /// the incident radiance sample, or None for occluded/degenerate cases.
    fn sample_li(
        &self,
        ctx: &LightSampleContext,
        u: Point2f,
        lambda: &SampledWavelengths,
        _allow_incomplete_pdf: bool,
    ) -> Option<LightLiSample> {
        let shape_ctx = ShapeSampleContext::new(ctx.pi, ctx.n, ctx.ns, 0.0);
        let ss = self.shape.sample_from_context(&shape_ctx, u)?;
        let mut intr: SurfaceInteraction = ss.intr.as_ref().clone();
        intr.common.medium_interface = Some(self.base.medium_interface.clone());
        let p = intr.p();
        let n = intr.n();
        let uv = intr.uv;
        // Reject degenerate samples: a zero pdf, or a sampled point that
        // coincides with the reference point (normalizing the zero vector
        // below would produce NaNs).
        if ss.pdf == 0. || (p - ctx.p()).norm() == 0. {
            return None;
        }
        let generic_intr = Interaction::Surface(intr);
        if self.alpha_masked(&generic_intr) {
            return None;
        }
        let wi = (p - ctx.p()).normalize();
        let le = self.l(p, n, uv, -wi, lambda);
        if le.is_black() {
            return None;
        }
        Some(LightLiSample::new(le, wi, ss.pdf, generic_intr))
    }
    fn pdf_li(&self, ctx: &LightSampleContext, wi: Vector3f, _allow_incomplete_pdf: bool) -> Float {
        let shape_ctx = ShapeSampleContext::new(ctx.pi, ctx.n, ctx.ns, 0.);
        self.shape.pdf_from_context(&shape_ctx, wi)
    }
    /// Radiance emitted from point `p` (normal `n`, uv `uv`) toward `w`.
    fn l(
        &self,
        p: Point3f,
        n: Normal3f,
        mut uv: Point2f,
        w: Vector3f,
        lambda: &SampledWavelengths,
    ) -> SampledSpectrum {
        // A one-sided light emits nothing from its back face. (The previous
        // check was inverted and zeroed the two-sided case instead.)
        if !self.two_sided && n.dot(w.into()) < 0. {
            return SampledSpectrum::new(0.);
        }
        let intr = Interaction::Surface(SurfaceInteraction::new_minimal(
            Point3fi::new_from_point(p),
            uv,
        ));
        if self.alpha_masked(&intr) {
            return SampledSpectrum::new(0.);
        }
        if let Some(image) = &self.image {
            // Flip v to match image layout, then bilinearly sample RGB.
            let mut rgb = RGB::default();
            uv[1] = 1. - uv[1];
            for c in 0..3 {
                rgb[c] = image.bilerp_channel(uv, c);
            }
            let spec = RGBIlluminantSpectrum::new(
                self.image_color_space.as_ref().unwrap(),
                RGB::clamp_zero(rgb),
            );
            self.scale * spec.sample(lambda)
        } else {
            self.scale * self.lemit.sample(lambda)
        }
    }
    fn le(&self, _ray: &Ray, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
    fn preprocess(&mut self, _scene_bounds: &Bounds3f) {
        unimplemented!()
    }
    /// Conservative emission bounds used by light-sampling acceleration.
    fn bounds(&self) -> Option<LightBounds> {
        let mut phi = 0.;
        if let Some(image) = &self.image {
            // Average texel emission over all channels.
            for y in 0..image.resolution.y() {
                for x in 0..image.resolution.x() {
                    for c in 0..3 {
                        phi += image.get_channel(Point2i::new(x, y), c);
                    }
                }
            }
            phi /= (3 * image.resolution.x() * image.resolution.y()) as Float;
        } else {
            phi = self.lemit.max_value();
        }
        // Convert radiance to power, matching `phi()` above (pbrt-v4 applies
        // scale * area * pi here; the previous code omitted it, skewing
        // relative light importance).
        phi *= self.scale * self.area * PI;
        let nb = self.shape.normal_bounds();
        Some(LightBounds::new(
            &self.shape.bounds(),
            nb.w,
            phi,
            nb.cos_theta,
            // Emission covers the full hemisphere: theta_e = pi/2.
            (PI / 2.).cos(),
            self.two_sided,
        ))
    }
}

588
src/lights/infinite.rs Normal file
View file

@ -0,0 +1,588 @@
use crate::{
core::medium::Medium,
geometry::Frame,
spectra::RGBIlluminantSpectrum,
utils::{
color::RGB,
colorspace::RGBColorSpace,
math::equal_area_square_to_sphere,
sampling::{
AliasTable, PiecewiseConstant2D, WindowedPiecewiseConstant2D, sample_uniform_sphere,
uniform_sphere_pdf,
},
},
};
use rayon::prelude::*;
use crate::core::pbrt::clamp_t;
use crate::image::{PixelFormat, WrapMode};
use std::path::Path;
use super::{
Arc, Bounds2f, Bounds3f, DenselySampledSpectrum, Float, Image, Interaction, LightBase,
LightBounds, LightLiSample, LightSampleContext, LightTrait, LightType, MediumInterface,
Normal3f, PI, Point2f, Point2i, Point3f, Ray, SampledSpectrum, SampledWavelengths,
SimpleInteraction, Spectrum, Transform, Vector3f, VectorLike, equal_area_sphere_to_square,
square,
};
/// An infinitely distant light that emits identical radiance in every
/// direction (a constant-color environment).
#[derive(Debug, Clone)]
pub struct InfiniteUniformLight {
    base: LightBase,
    // Interned emission spectrum; scaled by `scale` when sampled.
    lemit: Arc<DenselySampledSpectrum>,
    scale: Float,
    // Scene bounding sphere, intended to be set by `preprocess`; the radius
    // places sampled "points at infinity" outside all geometry.
    scene_center: Point3f,
    scene_radius: Float,
}
impl InfiniteUniformLight {
    /// Creates a uniform infinite light emitting `le`, scaled by `scale`.
    /// The scene center/radius start at defaults until preprocessing runs.
    pub fn new(render_from_light: Transform<Float>, le: Spectrum, scale: Float) -> Self {
        let interned_le = LightBase::lookup_spectrum(&le);
        Self {
            base: LightBase::new(
                LightType::Infinite,
                &render_from_light,
                &MediumInterface::default(),
            ),
            lemit: interned_le,
            scale,
            scene_center: Point3f::default(),
            scene_radius: 0.,
        }
    }
}
impl LightTrait for InfiniteUniformLight {
    fn base(&self) -> &LightBase {
        &self.base
    }
    /// Uniformly samples a direction on the sphere toward this light.
    fn sample_li(
        &self,
        ctx: &LightSampleContext,
        u: Point2f,
        lambda: &SampledWavelengths,
        allow_incomplete_pdf: bool,
    ) -> Option<LightLiSample> {
        // A uniform environment contributes nothing beyond BSDF sampling
        // when incomplete PDFs are allowed.
        if allow_incomplete_pdf {
            return None;
        }
        let wi = sample_uniform_sphere(u);
        let pdf = uniform_sphere_pdf();
        // Place the light "point" outside all scene geometry.
        let intr_simple = SimpleInteraction::new_interface(
            ctx.p() + wi * (2. * self.scene_radius),
            Some(MediumInterface::default()),
        );
        let intr = Interaction::Simple(intr_simple);
        Some(LightLiSample::new(
            self.scale * self.lemit.sample(lambda),
            wi,
            pdf,
            intr,
        ))
    }
    /// Total power: 4 * pi^2 * r^2 * scale * Lemit.
    fn phi(&self, lambda: SampledWavelengths) -> SampledSpectrum {
        4. * PI * PI * square(self.scene_radius) * self.scale * self.lemit.sample(&lambda)
    }
    fn pdf_li(
        &self,
        _ctx: &LightSampleContext,
        _wi: Vector3f,
        allow_incomplete_pdf: bool,
    ) -> Float {
        if allow_incomplete_pdf {
            return 0.;
        }
        uniform_sphere_pdf()
    }
    fn l(
        &self,
        _p: Point3f,
        _n: Normal3f,
        _uv: Point2f,
        _w: Vector3f,
        _lambda: &SampledWavelengths,
    ) -> SampledSpectrum {
        todo!()
    }
    /// Radiance for rays that escape the scene.
    fn le(&self, _ray: &Ray, lambda: &SampledWavelengths) -> SampledSpectrum {
        self.scale * self.lemit.sample(lambda)
    }
    fn preprocess(&mut self, scene_bounds: &Bounds3f) {
        // Record the scene's bounding sphere; `sample_li` relies on the
        // radius to place sampled points. (This was `todo!()`, so any render
        // using this light panicked during scene setup.)
        let (scene_center, scene_radius) = scene_bounds.bounding_sphere();
        self.scene_center = scene_center;
        self.scene_radius = scene_radius;
    }
    fn bounds(&self) -> Option<LightBounds> {
        // Infinite lights have no finite spatial bounds, matching the other
        // infinite-light variants in this module.
        None
    }
}
/// An infinite environment light whose radiance comes from an equal-area
/// (octahedral) environment map image.
#[derive(Clone, Debug)]
pub struct InfiniteImageLight {
    base: LightBase,
    // Square equal-area environment map with R, G, B channels.
    image: Image,
    image_color_space: Arc<RGBColorSpace>,
    scale: Float,
    // Scene bounding sphere, set by `preprocess`.
    scene_radius: Float,
    scene_center: Point3f,
    // Luminance-based importance distribution over image (u, v), plus a
    // mean-subtracted variant used when incomplete PDFs are allowed.
    distrib: PiecewiseConstant2D,
    compensated_distrib: PiecewiseConstant2D,
}
impl InfiniteImageLight {
    /// Builds an image-based infinite light from a square equal-area
    /// environment map. `filename` is used only for error messages.
    pub fn new(
        render_from_light: Transform<Float>,
        image: Image,
        image_color_space: Arc<RGBColorSpace>,
        scale: Float,
        filename: String,
    ) -> Self {
        let base = LightBase::new(
            LightType::Infinite,
            &render_from_light,
            &MediumInterface::default(),
        );
        let desc = image
            .get_channel_desc(&["R", "G", "B"])
            // Fixed copy-pasted message that referred to DiffuseAreaLight.
            .expect("Image used for InfiniteImageLight doesn't have R, G, B channels");
        assert_eq!(3, desc.size());
        assert!(desc.is_identity());
        // Equal-area octahedral maps are square by construction.
        if image.resolution().x() != image.resolution().y() {
            panic!(
                "{}: image resolution ({}, {}) is non-square. It's unlikely this is an equal area environment map.",
                filename,
                image.resolution.x(),
                image.resolution.y()
            );
        }
        // Primary sampling distribution proportional to image luminance.
        let mut d = image.get_sampling_distribution_uniform();
        let domain = Bounds2f::from_points(Point2f::new(0., 0.), Point2f::new(1., 1.));
        let distrib = PiecewiseConstant2D::new_with_bounds(&d, domain);
        // Compensated distribution: subtract the mean so directions no
        // brighter than average are not sampled; fall back to uniform if
        // everything cancels.
        let slice = &mut d.values;
        let count = slice.len() as Float;
        let sum: Float = slice.iter().sum();
        let average = sum / count;
        for v in slice.iter_mut() {
            *v = (*v - average).max(0.0);
        }
        let all_zero = slice.iter().all(|&v| v == 0.0);
        if all_zero {
            for v in slice.iter_mut() {
                *v = 1.0;
            }
        }
        let compensated_distrib = PiecewiseConstant2D::new_with_bounds(&d, domain);
        Self {
            base,
            image,
            image_color_space,
            scene_center: Point3f::default(),
            scene_radius: 0.,
            scale,
            distrib,
            compensated_distrib,
        }
    }
    /// Scaled spectral radiance looked up at image coordinates `uv`.
    fn image_le(&self, uv: Point2f, lambda: &SampledWavelengths) -> SampledSpectrum {
        let mut rgb = RGB::default();
        for c in 0..3 {
            rgb[c] = self.image.lookup_nearest_channel_with_wrap(
                uv,
                c,
                WrapMode::OctahedralSphere.into(),
            );
        }
        let spec =
            RGBIlluminantSpectrum::new(self.image_color_space.as_ref(), RGB::clamp_zero(rgb));
        self.scale * spec.sample(lambda)
    }
}
impl LightTrait for InfiniteImageLight {
    fn base(&self) -> &LightBase {
        &self.base
    }
    /// Importance-samples a direction according to the image's luminance
    /// distribution (or its mean-compensated variant).
    fn sample_li(
        &self,
        ctx: &LightSampleContext,
        u: Point2f,
        lambda: &SampledWavelengths,
        allow_incomplete_pdf: bool,
    ) -> Option<LightLiSample> {
        let (uv, map_pdf, _) = if allow_incomplete_pdf {
            self.compensated_distrib.sample(u)
        } else {
            self.distrib.sample(u)
        };
        if map_pdf == 0. {
            return None;
        }
        // Convert infinite light sample point to direction
        let w_light = equal_area_square_to_sphere(uv);
        let wi = self.base.render_from_light.apply_to_vector(w_light);
        // Equal-area map: solid-angle pdf is the image pdf over 4*pi.
        let pdf = map_pdf / (4. * PI);
        // Return radiance value for infinite light direction
        let mut simple_intr = SimpleInteraction::new_interface(
            ctx.p() + wi * (2. * self.scene_radius),
            Some(MediumInterface::default()),
        );
        simple_intr.common.medium_interface = Some(self.base.medium_interface.clone());
        let intr = Interaction::Simple(simple_intr);
        Some(LightLiSample::new(self.image_le(uv, lambda), wi, pdf, intr))
    }
    /// Total power: average image radiance times 4 * pi^2 * r^2.
    fn phi(&self, lambda: SampledWavelengths) -> SampledSpectrum {
        let mut sum_l = SampledSpectrum::new(0.);
        let width = self.image.resolution.x();
        let height = self.image.resolution.y();
        for v in 0..height {
            for u in 0..width {
                let mut rgb = RGB::default();
                for c in 0..3 {
                    rgb[c] = self.image.get_channel_with_wrap(
                        Point2i::new(u, v),
                        c,
                        WrapMode::OctahedralSphere.into(),
                    );
                }
                sum_l += RGBIlluminantSpectrum::new(
                    self.image_color_space.as_ref(),
                    RGB::clamp_zero(rgb),
                )
                .sample(&lambda);
            }
        }
        4. * PI * PI * square(self.scene_radius) * self.scale * sum_l / (width * height) as Float
    }
    /// Solid-angle pdf for sampling direction `wi`; assumes `wi` is already
    /// normalized (equal-area mapping expects a unit vector).
    fn pdf_li(&self, _ctx: &LightSampleContext, wi: Vector3f, allow_incomplete_pdf: bool) -> Float {
        let w_light = self.base.render_from_light.apply_inverse_vector(wi);
        let uv = equal_area_sphere_to_square(w_light);
        let pdf = if allow_incomplete_pdf {
            self.compensated_distrib.pdf(uv)
        } else {
            self.distrib.pdf(uv)
        };
        pdf / (4. * PI)
    }
    fn l(
        &self,
        _p: Point3f,
        _n: Normal3f,
        _uv: Point2f,
        _w: Vector3f,
        _lambda: &SampledWavelengths,
    ) -> SampledSpectrum {
        todo!()
    }
    /// Radiance for rays that escape the scene: look up the environment map
    /// in the ray's direction.
    fn le(&self, ray: &Ray, lambda: &SampledWavelengths) -> SampledSpectrum {
        let w_light = self
            .base
            .render_from_light
            .apply_inverse_vector(ray.d)
            .normalize();
        let uv = equal_area_sphere_to_square(w_light);
        self.image_le(uv, lambda)
    }
    /// Records the scene's bounding sphere for point-at-infinity placement.
    fn preprocess(&mut self, scene_bounds: &Bounds3f) {
        let (scene_center, scene_radius) = scene_bounds.bounding_sphere();
        self.scene_center = scene_center;
        self.scene_radius = scene_radius;
    }
    fn bounds(&self) -> Option<LightBounds> {
        None
    }
}
/// An environment light whose emission is only visible through a planar
/// quadrilateral "portal" (e.g. a window), after pbrt-v4's
/// PortalImageInfiniteLight.
#[derive(Debug, Clone)]
pub struct InfinitePortalLight {
    pub base: LightBase,
    // Environment map rectified into the portal's directional parameterization.
    pub image: Image,
    pub image_color_space: Arc<RGBColorSpace>,
    pub scale: Float,
    // Source filename, kept for error messages.
    pub filename: String,
    // The four portal vertices and the orthonormal frame spanned by its edges.
    pub portal: [Point3f; 4],
    pub portal_frame: Frame,
    // Windowed 2D distribution over the rectified image for sampling only
    // the portion of the environment visible through the portal.
    pub distribution: WindowedPiecewiseConstant2D,
    // Scene bounding sphere, set by preprocessing.
    pub scene_center: Point3f,
    pub scene_radius: Float,
}
impl InfinitePortalLight {
    // Inherent accessor mirroring LightTrait::base.
    fn base(&self) -> &LightBase {
        &self.base
    }
    /// Builds a portal light from a square equal-area environment map and
    /// the portal's four vertices (which must form a planar rectangle).
    pub fn new(
        render_from_light: Transform<Float>,
        equal_area_image: &Image,
        image_color_space: Arc<RGBColorSpace>,
        scale: Float,
        filename: String,
        points: Vec<Point3f>,
    ) -> Self {
        let base = LightBase::new(
            LightType::Infinite,
            &render_from_light,
            &MediumInterface::default(),
        );
        let desc = equal_area_image
            .get_channel_desc(&["R", "G", "B"])
            .unwrap_or_else(|_| {
                panic!(
                    "{}: image used for PortalImageInfiniteLight doesn't have R, G, B channels.",
                    filename
                )
            });
        assert_eq!(3, desc.offset.len());
        let src_res = equal_area_image.resolution;
        // Equal-area octahedral maps are square by construction.
        if src_res.x() != src_res.y() {
            panic!(
                "{}: image resolution ({}, {}) is non-square. It's unlikely this is an equal area environment map.",
                filename,
                src_res.x(),
                src_res.y()
            );
        }
        if points.len() != 4 {
            panic!(
                "Expected 4 vertices for infinite light portal but given {}",
                points.len()
            );
        }
        let portal: [Point3f; 4] = [points[0], points[1], points[2], points[3]];
        // Validate that the portal is a planar rectangle: opposite edges
        // parallel, adjacent edges perpendicular (within tolerance).
        let p01 = (portal[1] - portal[0]).normalize();
        let p12 = (portal[2] - portal[1]).normalize();
        let p32 = (portal[2] - portal[3]).normalize();
        let p03 = (portal[3] - portal[0]).normalize();
        if (p01.dot(p32) - 1.0).abs() > 0.001 || (p12.dot(p03) - 1.0).abs() > 0.001 {
            panic!("Infinite light portal isn't a planar quadrilateral (opposite edges)");
        }
        if p01.dot(p12).abs() > 0.001
            || p12.dot(p32).abs() > 0.001
            || p32.dot(p03).abs() > 0.001
            || p03.dot(p01).abs() > 0.001
        {
            panic!("Infinite light portal isn't a planar quadrilateral (perpendicular edges)");
        }
        let portal_frame = Frame::from_xy(p03, p01);
        // Resample the equal-area map into the portal's (alpha, beta)
        // directional parameterization, one row per parallel task.
        let width = src_res.x();
        let height = src_res.y();
        let mut new_pixels = vec![0.0 as Float; (width * height * 3) as usize];
        new_pixels
            .par_chunks_mut((width * 3) as usize)
            .enumerate()
            .for_each(|(y, row_pixels)| {
                let y = y as i32;
                for x in 0..width {
                    let uv = Point2f::new(
                        (x as Float + 0.5) / width as Float,
                        (y as Float + 0.5) / height as Float,
                    );
                    let (w_world, _) = Self::render_from_image(portal_frame, uv);
                    let w_local = render_from_light.apply_inverse_vector(w_world).normalize();
                    let uv_equi = equal_area_sphere_to_square(w_local);
                    let pixel_idx = (x * 3) as usize;
                    for c in 0..3 {
                        let val = equal_area_image.bilerp_channel_with_wrap(
                            uv_equi,
                            c,
                            WrapMode::OctahedralSphere.into(),
                        );
                        row_pixels[pixel_idx + c] = val;
                    }
                }
            });
        // NOTE(review): `new_pixels` (the resampled image computed above) is
        // never written into `image` — Image::new presumably produces a
        // blank image. Confirm whether the pixel data should be copied in
        // here; as written the rectified image appears to be discarded.
        let image = Image::new(
            PixelFormat::F32,
            src_res,
            &["R", "G", "B"],
            equal_area_image.encoding,
        );
        // Sampling distribution weighted by the image-to-direction Jacobian.
        let duv_dw_closure = |p: Point2f| -> Float {
            let (_, jacobian) = Self::render_from_image(portal_frame, p);
            jacobian
        };
        let d = image.get_sampling_distribution(
            duv_dw_closure,
            Bounds2f::from_points(Point2f::new(0., 0.), Point2f::new(1., 1.)),
        );
        let distribution = WindowedPiecewiseConstant2D::new(d);
        Self {
            base,
            image,
            image_color_space,
            scale,
            scene_center: Point3f::default(),
            scene_radius: 0.,
            filename,
            portal,
            portal_frame,
            distribution,
        }
    }
    /// Scaled spectral radiance at rectified image coordinates `uv`.
    pub fn image_lookup(&self, uv: Point2f, lambda: &SampledWavelengths) -> SampledSpectrum {
        let mut rgb = RGB::default();
        for c in 0..3 {
            rgb[c] = self.image.lookup_nearest_channel(uv, c)
        }
        let spec =
            RGBIlluminantSpectrum::new(self.image_color_space.as_ref(), RGB::clamp_zero(rgb));
        self.scale * spec.sample(lambda)
    }
    /// Maps a render-space direction to rectified image (u, v) plus the
    /// Jacobian duv/dw; None for directions behind the portal plane.
    pub fn image_from_render(&self, w_render: Vector3f) -> Option<(Point2f, Float)> {
        let w = self.portal_frame.to_local(w_render);
        if w.z() <= 0.0 {
            return None;
        }
        let alpha = w.x().atan2(w.z());
        let beta = w.y().atan2(w.z());
        let duv_dw = square(PI) * (1. - square(w.x())) * (1. - square(w.y())) / w.z();
        Some((
            Point2f::new(
                clamp_t((alpha + PI / 2.0) / PI, 0.0, 1.0),
                clamp_t((beta + PI / 2.0) / PI, 0.0, 1.0),
            ),
            duv_dw,
        ))
    }
    /// Image-space bounds of the portal as seen from point `p`, derived from
    /// two opposite portal corners.
    pub fn image_bounds(&self, p: Point3f) -> Option<Bounds2f> {
        let (p0, _) = self.image_from_render((self.portal[0] - p).normalize())?;
        let (p1, _) = self.image_from_render((self.portal[2] - p).normalize())?;
        Some(Bounds2f::from_points(p0, p1))
    }
    /// Area of the rectangular portal (product of adjacent edge lengths).
    pub fn area(&self) -> Float {
        (self.portal[1] - self.portal[0]).norm() * (self.portal[3] - self.portal[0]).norm()
    }
    /// Inverse of `image_from_render`: maps rectified (u, v) to a render-space
    /// direction, returning the direction and the Jacobian duv/dw.
    pub fn render_from_image(portal_frame: Frame, uv: Point2f) -> (Vector3f, Float) {
        let alpha = -PI / 2.0 + uv.x() * PI;
        let beta = -PI / 2.0 + uv.y() * PI;
        let x = alpha.tan();
        let y = beta.tan();
        let w = Vector3f::new(x, y, 1.0).normalize();
        let duv_dw = square(PI) * (1.0 - square(w.x())) * (1.0 - square(w.y())) / w.z();
        (portal_frame.from_local(w), duv_dw)
    }
}
impl LightTrait for InfinitePortalLight {
    fn base(&self) -> &LightBase {
        &self.base
    }
    /// Samples a direction through the portal, windowing the image
    /// distribution to the portion visible from `ctx`.
    fn sample_li(
        &self,
        ctx: &LightSampleContext,
        u: Point2f,
        lambda: &SampledWavelengths,
        _allow_incomplete_pdf: bool,
    ) -> Option<LightLiSample> {
        let b = self.image_bounds(ctx.p())?;
        let (uv, map_pdf) = self.distribution.sample(u, b)?;
        let (wi, duv_dw) = Self::render_from_image(self.portal_frame, uv);
        if duv_dw == 0. {
            return None;
        }
        // Change of variables from image area to solid angle.
        let pdf = map_pdf / duv_dw;
        let l = self.image_lookup(uv, lambda);
        let pl = ctx.p() + 2. * self.scene_radius * wi;
        let sintr = SimpleInteraction::new_interface(pl, Some(self.base.medium_interface.clone()));
        let intr = Interaction::Simple(sintr);
        Some(LightLiSample::new(l, wi, pdf, intr))
    }
    fn phi(&self, _lambda: SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
    /// Solid-angle pdf of sampling `wi` from `ctx`; zero when the direction
    /// misses the portal or the portal is not visible.
    fn pdf_li(&self, ctx: &LightSampleContext, wi: Vector3f, _allow_incomplete_pdf: bool) -> Float {
        let Some((uv, duv_dw)) = self.image_from_render(wi) else {
            return 0.;
        };
        let Some(b) = self.image_bounds(ctx.p()) else {
            return 0.;
        };
        let pdf = self.distribution.pdf(uv, b);
        pdf / duv_dw
    }
    fn l(
        &self,
        _p: Point3f,
        _n: Normal3f,
        _uv: Point2f,
        _w: Vector3f,
        _lambda: &SampledWavelengths,
    ) -> SampledSpectrum {
        todo!()
    }
    /// Escaped-ray radiance: non-zero only when the ray's direction passes
    /// through the portal as seen from the ray origin.
    fn le(&self, ray: &Ray, lambda: &SampledWavelengths) -> SampledSpectrum {
        let uv = self.image_from_render(ray.d.normalize());
        let b = self.image_bounds(ray.o);
        match (uv, b) {
            (Some((p, duv_dw)), Some(bounds)) if bounds.contains(p) => self.image_lookup(p, lambda),
            _ => SampledSpectrum::new(0.0),
        }
    }
    fn preprocess(&mut self, _scene_bounds: &Bounds3f) {
        todo!()
    }
    fn bounds(&self) -> Option<LightBounds> {
        None
    }
}

View file

@ -1,27 +1,918 @@
pub mod diffuse;
pub mod infinite;
pub mod sampler;
#[derive(Debug, Clone)]
pub struct DiffuseAreaLight;
#[derive(Debug, Clone)]
pub struct DistantLight;
#[derive(Debug, Clone)]
pub struct GonioPhotometricLight;
#[derive(Debug, Clone)]
pub struct InfiniteAreaLight;
#[derive(Debug, Clone)]
pub struct PointLight;
#[derive(Debug, Clone)]
pub struct ProjectionLight;
#[derive(Debug, Clone)]
pub struct SpotLight;
use crate::core::interaction::{
Interaction, InteractionTrait, MediumInteraction, SimpleInteraction, SurfaceInteraction,
};
use crate::core::medium::MediumInterface;
use crate::core::pbrt::{Float, InternCache, PI};
use crate::geometry::{
Bounds2f, Bounds3f, DirectionCone, Normal3f, Point2f, Point2i, Point3f, Point3fi, Ray,
Vector3f, VectorLike, cos_theta,
};
use crate::image::Image;
use crate::spectra::{
DenselySampledSpectrum, LAMBDA_MAX, LAMBDA_MIN, RGBIlluminantSpectrum, SampledSpectrum,
SampledWavelengths, Spectrum, SpectrumTrait,
};
use crate::utils::color::RGB;
use crate::utils::colorspace::RGBColorSpace;
use crate::utils::math::{equal_area_sphere_to_square, radians, safe_sqrt, smooth_step, square};
use crate::utils::sampling::PiecewiseConstant2D;
use crate::utils::transform::Transform;
use bitflags::bitflags;
#[derive(Debug, Clone)]
pub enum Light {
DiffuseArea(Box<DiffuseAreaLight>),
Distant(Box<DistantLight>),
GonioPhotometric(Box<GonioPhotometricLight>),
InfiniteArea(Box<InfiniteAreaLight>),
Point(Box<PointLight>),
Projection(Box<ProjectionLight>),
Spot(Box<SpotLight>),
use enum_dispatch::enum_dispatch;
use std::sync::{Arc, OnceLock};
use diffuse::DiffuseAreaLight;
use infinite::{InfiniteImageLight, InfinitePortalLight, InfiniteUniformLight};
static SPECTRUM_CACHE: OnceLock<InternCache<DenselySampledSpectrum>> = OnceLock::new();
/// Returns the process-wide spectrum interning cache, initializing it on
/// first access.
fn get_spectrum_cache() -> &'static InternCache<DenselySampledSpectrum> {
    SPECTRUM_CACHE.get_or_init(InternCache::new)
}
bitflags! {
    /// Classification of a light source; stored as bit flags so categories
    /// can be combined and tested cheaply.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
    pub struct LightType: u32 {
        /// Emission from a single point (Dirac delta in position).
        const DeltaPosition = 1;
        /// Emission along a single direction (Dirac delta in direction).
        const DeltaDirection = 2;
        /// Emission from the surface of a shape.
        const Area = 4;
        /// Light at infinity, illuminating from outside the scene.
        const Infinite = 8;
    }
}
impl LightType {
    /// True for lights located at infinity (environment-style lights).
    pub fn is_infinite(&self) -> bool {
        self.intersects(LightType::Infinite)
    }
    /// True for lights described by a Dirac delta in position or direction;
    /// such lights cannot be hit by rays and must be sampled explicitly.
    pub fn is_delta_light(&self) -> bool {
        self.intersects(LightType::DeltaPosition | LightType::DeltaDirection)
    }
}
/// Result of sampling a ray leaving a light source.
#[derive(Debug, Clone)]
pub struct LightLeSample {
    // Radiance carried along the sampled ray.
    l: SampledSpectrum,
    ray: Ray,
    // Interaction on the light's surface, when the light has geometry.
    intr: Option<Interaction>,
    // Densities for the positional and directional parts of the sample.
    pdf_pos: Float,
    pdf_dir: Float,
}
/// Reference point from which a light is being sampled: interval-bounded
/// position `pi`, geometric normal `n`, and shading normal `ns` (both zero
/// for points without a surface).
#[derive(Debug, Default, Clone)]
pub struct LightSampleContext {
    pub pi: Point3fi,
    pub n: Normal3f,
    pub ns: Normal3f,
}
impl LightSampleContext {
pub fn new(pi: Point3fi, n: Normal3f, ns: Normal3f) -> Self {
Self { pi, n, ns }
}
pub fn p(&self) -> Point3f {
self.pi.into()
}
}
impl From<&SurfaceInteraction> for LightSampleContext {
fn from(si: &SurfaceInteraction) -> Self {
Self {
pi: si.common.pi,
n: si.common.n,
ns: si.shading.n,
}
}
}
impl From<&MediumInteraction> for LightSampleContext {
    /// A point inside a participating medium has no surface, so both
    /// normals are left at zero.
    /// NOTE(review): the `Interaction::Medium` arm of `From<&Interaction>`
    /// below uses `mi.common.n` instead — confirm which is intended.
    fn from(mi: &MediumInteraction) -> Self {
        Self {
            pi: mi.common.pi,
            n: Normal3f::default(),
            ns: Normal3f::default(),
        }
    }
}
impl From<&Interaction> for LightSampleContext {
    /// Dispatches on the interaction kind to extract position and normals.
    fn from(intr: &Interaction) -> Self {
        match intr {
            Interaction::Surface(si) => Self {
                pi: si.common.pi,
                n: si.common.n,
                ns: si.shading.n,
            },
            // NOTE(review): this arm forwards `mi.common.n`, while
            // `From<&MediumInteraction>` above zeroes both normals for
            // medium points — confirm which behavior is intended.
            Interaction::Medium(mi) => Self {
                pi: mi.common.pi,
                n: mi.common.n,
                ns: mi.common.n,
            },
            Interaction::Simple(sim) => Self {
                pi: sim.common.pi,
                n: sim.common.n,
                ns: sim.common.n,
            },
        }
    }
}
/// Result of sampling incident illumination at a reference point: radiance
/// `l` arriving along `wi` with sampling density `pdf`, emitted from the
/// point on the light described by `p_light`.
#[derive(Debug, Clone)]
pub struct LightLiSample {
    pub l: SampledSpectrum,
    pub wi: Vector3f,
    pub pdf: Float,
    pub p_light: Arc<Interaction>,
}
impl LightLiSample {
    /// Wraps a light sample; the interaction is reference-counted so the
    /// sample can be cloned cheaply.
    pub fn new(l: SampledSpectrum, wi: Vector3f, pdf: Float, p_light: Interaction) -> Self {
        let p_light = Arc::new(p_light);
        Self { l, wi, pdf, p_light }
    }
}
/// Data shared by every light implementation: its classification, the
/// light-to-render-space transform, and the media on either side.
#[derive(Debug, Clone)]
pub struct LightBase {
    pub light_type: LightType,
    pub render_from_light: Transform<Float>,
    pub medium_interface: MediumInterface,
}
impl LightBase {
pub fn new(
light_type: LightType,
render_from_light: &Transform<Float>,
medium_interface: &MediumInterface,
) -> Self {
Self {
light_type,
render_from_light: *render_from_light,
medium_interface: medium_interface.clone(),
}
}
fn l(
&self,
_p: Point3f,
_n: Normal3f,
_uv: Point2f,
_w: Vector3f,
_lambda: &SampledWavelengths,
) -> SampledSpectrum {
SampledSpectrum::default()
}
pub fn light_type(&self) -> LightType {
self.light_type
}
pub fn le(&self, _ray: &Ray, _lambda: &SampledWavelengths) -> SampledSpectrum {
SampledSpectrum::default()
}
pub fn lookup_spectrum(s: &Spectrum) -> Arc<DenselySampledSpectrum> {
let cache = SPECTRUM_CACHE.get_or_init(InternCache::new);
let dense_spectrum = DenselySampledSpectrum::from_spectrum(s, LAMBDA_MIN, LAMBDA_MAX);
cache.lookup(dense_spectrum)
}
}
/// Conservative bounds on a light's emission, used to estimate its
/// importance to a shading point during light sampling.
#[derive(Debug, Clone)]
pub struct LightBounds {
    // Spatial extent of the emitter.
    bounds: Bounds3f,
    // Emitted power estimate.
    phi: Float,
    // Principal emission direction.
    w: Vector3f,
    // Cosine of the cone angle around `w` containing all emission normals,
    // and of the wider angle beyond which there is no emission at all.
    cos_theta_o: Float,
    cos_theta_e: Float,
    two_sided: bool,
}
impl LightBounds {
    /// Bundles a light's spatial bounds, power `phi`, principal direction
    /// `w`, and emission cone angles (stored as cosines).
    pub fn new(
        bounds: &Bounds3f,
        w: Vector3f,
        phi: Float,
        cos_theta_o: Float,
        cos_theta_e: Float,
        two_sided: bool,
    ) -> Self {
        Self {
            bounds: *bounds,
            phi,
            w,
            cos_theta_o,
            cos_theta_e,
            two_sided,
        }
    }
    /// Midpoint of the spatial bounds.
    pub fn centroid(&self) -> Point3f {
        // Fixed precedence bug: this previously computed p_min + p_max/2
        // rather than the midpoint (p_min + p_max)/2.
        self.bounds.p_min + (self.bounds.p_max - self.bounds.p_min) / 2.
    }
    /// Importance of this light for a reference point `p` with surface
    /// normal `n` (pass a zero normal for points in media).
    pub fn importance(&self, p: Point3f, n: Normal3f) -> Float {
        // Compute clamped squared distance to reference point; clamping by
        // the squared half-diagonal keeps the estimate bounded near or
        // inside the bounds. (Was max(d2, diagonal_length)/2, which neither
        // squared the diagonal nor clamped correctly.)
        let pc = self.centroid();
        let d2_raw = p.distance_squared(pc);
        let d2 = d2_raw.max(square(self.bounds.diagonal().norm() / 2.));
        // cos(theta_a - theta_b), clamped to 1 when theta_a < theta_b.
        let cos_sub_clamped = |sin_theta_a: Float,
                               cos_theta_a: Float,
                               sin_theta_b: Float,
                               cos_theta_b: Float|
         -> Float {
            if cos_theta_a > cos_theta_b {
                return 1.;
            }
            cos_theta_a * cos_theta_b + sin_theta_a * sin_theta_b
        };
        // sin(theta_a - theta_b), clamped to 0 when theta_a < theta_b so it
        // stays consistent with the cosine clamp above. (Previously returned
        // 1 here, corrupting the subtended-angle recurrence.)
        let sin_sub_clamped = |sin_theta_a: Float,
                               cos_theta_a: Float,
                               sin_theta_b: Float,
                               cos_theta_b: Float|
         -> Float {
            if cos_theta_a > cos_theta_b {
                return 0.;
            }
            sin_theta_a * cos_theta_b - cos_theta_a * sin_theta_b
        };
        let wi = (p - pc).normalize();
        let mut cos_theta_w = self.w.dot(wi);
        if self.two_sided {
            cos_theta_w = cos_theta_w.abs();
        }
        let sin_theta_w = safe_sqrt(1. - square(cos_theta_w));
        // Angle subtended by the bounds from p, and the orientation cone.
        let cos_theta_b = DirectionCone::bound_subtended_directions(&self.bounds, p).cos_theta;
        let sin_theta_b = safe_sqrt(1. - square(cos_theta_b));
        let sin_theta_o = safe_sqrt(1. - square(self.cos_theta_o));
        let cos_theta_x = cos_sub_clamped(sin_theta_w, cos_theta_w, sin_theta_o, self.cos_theta_o);
        let sin_theta_x = sin_sub_clamped(sin_theta_w, cos_theta_w, sin_theta_o, self.cos_theta_o);
        let cos_theta_p = cos_sub_clamped(sin_theta_x, cos_theta_x, sin_theta_b, cos_theta_b);
        // No emission reaches p when the minimum angle exceeds theta_e.
        if cos_theta_p <= self.cos_theta_e {
            return 0.;
        }
        let mut importance = self.phi * cos_theta_p / d2;
        // Weight by the receiver's cosine when a surface normal is given.
        if n != Normal3f::new(0., 0., 0.) {
            let cos_theta_i = wi.abs_dot(n.into());
            let sin_theta_i = safe_sqrt(1. - square(cos_theta_i));
            let cos_thetap_i = cos_sub_clamped(sin_theta_i, cos_theta_i, sin_theta_b, cos_theta_b);
            importance *= cos_thetap_i;
        }
        importance
    }
    /// Conservative union of two light bounds.
    pub fn union(a: &Self, b: &Self) -> Self {
        // A zero-power operand contributes nothing, so return the *other*
        // bound. (These early returns were previously swapped, returning the
        // empty operand instead.)
        if a.phi == 0. {
            return b.clone();
        }
        if b.phi == 0. {
            return a.clone();
        }
        let a_cone = DirectionCone::new(a.w, a.cos_theta_o);
        let b_cone = DirectionCone::new(b.w, b.cos_theta_o);
        let cone = DirectionCone::union(&a_cone, &b_cone);
        let cos_theta_o = cone.cos_theta;
        let cos_theta_e = a.cos_theta_e.min(b.cos_theta_e);
        LightBounds::new(
            &a.bounds.union(b.bounds),
            cone.w,
            a.phi + b.phi,
            cos_theta_o,
            cos_theta_e,
            a.two_sided || b.two_sided,
        )
    }
}
#[enum_dispatch]
/// Interface shared by all light sources; dispatched statically over the
/// `Light` enum via `enum_dispatch`.
pub trait LightTrait: Send + Sync + std::fmt::Debug {
    /// Shared per-light state (type, transform, media).
    fn base(&self) -> &LightBase;
    /// Total emitted power.
    fn phi(&self, lambda: SampledWavelengths) -> SampledSpectrum;
    /// Samples incident illumination at `ctx`; `u` drives the sampling and
    /// `allow_incomplete_pdf` permits skipping low-contribution regions.
    fn sample_li(
        &self,
        ctx: &LightSampleContext,
        u: Point2f,
        lambda: &SampledWavelengths,
        allow_incomplete_pdf: bool,
    ) -> Option<LightLiSample>;
    /// Solid-angle pdf that `sample_li` would return direction `wi`.
    fn pdf_li(&self, ctx: &LightSampleContext, wi: Vector3f, allow_incomplete_pdf: bool) -> Float;
    /// Radiance emitted from a point on the light's surface toward `w`
    /// (area lights).
    fn l(
        &self,
        p: Point3f,
        n: Normal3f,
        uv: Point2f,
        w: Vector3f,
        lambda: &SampledWavelengths,
    ) -> SampledSpectrum;
    /// Radiance contributed to rays that escape the scene (infinite lights).
    fn le(&self, ray: &Ray, lambda: &SampledWavelengths) -> SampledSpectrum;
    /// One-time setup once the scene bounds are known.
    fn preprocess(&mut self, scene_bounds: &Bounds3f);
    /// Conservative emission bounds, or None for unbounded lights.
    fn bounds(&self) -> Option<LightBounds>;
    fn light_type(&self) -> LightType {
        self.base().light_type()
    }
}
/// Tagged union of all light implementations; `enum_dispatch` forwards
/// `LightTrait` calls to the active variant without dynamic dispatch.
#[derive(Debug, Clone)]
#[enum_dispatch(LightTrait)]
#[allow(clippy::large_enum_variant)]
pub enum Light {
    DiffuseArea(DiffuseAreaLight),
    Distant(DistantLight),
    Goniometric(GoniometricLight),
    InfiniteUniform(InfiniteUniformLight),
    InfiniteImage(InfiniteImageLight),
    InfinitePortal(InfinitePortalLight),
    Point(PointLight),
    Projection(ProjectionLight),
    Spot(SpotLight),
}
/// A directional light infinitely far away, emitting along a single
/// direction (the light-space +z axis mapped into render space).
#[derive(Debug, Clone)]
pub struct DistantLight {
    pub base: LightBase,
    // Interned emitted radiance; scaled by `scale` at sampling time.
    lemit: Arc<DenselySampledSpectrum>,
    scale: Float,
    // Scene bounding sphere, set by `preprocess`.
    scene_center: Point3f,
    scene_radius: Float,
}
impl DistantLight {
    /// Builds a directional light emitting `lemit * scale`. The scene
    /// center/radius hold defaults until `preprocess` fills them in.
    pub fn new(render_from_light: &Transform<Float>, lemit: Spectrum, scale: Float) -> Self {
        Self {
            base: LightBase::new(
                LightType::DeltaDirection,
                render_from_light,
                &MediumInterface::default(),
            ),
            lemit: LightBase::lookup_spectrum(&lemit),
            scale,
            scene_center: Point3f::default(),
            scene_radius: 0.,
        }
    }
}
impl LightTrait for DistantLight {
    fn base(&self) -> &LightBase {
        &self.base
    }
    /// Total power: Phi = scale * Lemit * pi * r^2, where r is the radius of
    /// the scene's bounding sphere (the light effectively illuminates a disk
    /// of that radius perpendicular to its direction).
    fn phi(&self, lambda: SampledWavelengths) -> SampledSpectrum {
        // Bug fix: the radius must be squared (disk area ~ pi * r^2); the
        // previous code took its square root instead.
        self.scale * self.lemit.sample(&lambda) * PI * self.scene_radius * self.scene_radius
    }
    /// Returns the single incident direction; the sample point is placed
    /// outside the scene bounds along that direction so visibility rays
    /// always exit the scene.
    fn sample_li(
        &self,
        ctx: &LightSampleContext,
        _u: Point2f,
        lambda: &SampledWavelengths,
        _allow_incomplete_pdf: bool,
    ) -> Option<LightLiSample> {
        let wi = self
            .base
            .render_from_light
            .apply_to_vector(Vector3f::new(0., 0., 1.))
            .normalize();
        let p_outside = ctx.p() + wi * 2. * self.scene_radius;
        let li = self.scale * self.lemit.sample(lambda);
        let intr = SimpleInteraction::new(
            Point3fi::new_from_point(p_outside),
            0.0,
            Some(self.base.medium_interface.clone()),
        );
        Some(LightLiSample::new(li, wi, 1., Interaction::Simple(intr)))
    }
    /// Delta distribution: the direction can never be sampled by chance.
    fn pdf_li(
        &self,
        _ctx: &LightSampleContext,
        _wi: Vector3f,
        _allow_incomplete_pdf: bool,
    ) -> Float {
        0.
    }
    fn l(
        &self,
        _p: Point3f,
        _n: Normal3f,
        _uv: Point2f,
        _w: Vector3f,
        _lambda: &SampledWavelengths,
    ) -> SampledSpectrum {
        todo!()
    }
    fn le(&self, _ray: &Ray, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
    /// Records the scene's bounding sphere for power and sampling.
    fn preprocess(&mut self, scene_bounds: &Bounds3f) {
        let (center, radius) = scene_bounds.bounding_sphere();
        self.scene_center = center;
        self.scene_radius = radius;
    }
    /// A directional light has no finite spatial bounds.
    fn bounds(&self) -> Option<LightBounds> {
        None
    }
}
/// Point light whose angular intensity distribution is given by a measured
/// goniometric image (looked up via an equal-area sphere-to-square mapping).
#[derive(Debug, Clone)]
pub struct GoniometricLight {
    pub base: LightBase,
    // Base emitted intensity spectrum, interned.
    iemit: Arc<DenselySampledSpectrum>,
    // Linear scale applied to `iemit`.
    scale: Float,
    // Angular distribution image.
    image: Image,
    // 2D distribution for importance-sampling the image.
    distrib: PiecewiseConstant2D,
}
impl GoniometricLight {
pub fn new(
render_from_light: &Transform<Float>,
medium_interface: &MediumInterface,
iemit: Spectrum,
scale: Float,
image: Image,
) -> Self {
let base = LightBase::new(
LightType::DeltaPosition,
render_from_light,
medium_interface,
);
let i_interned = LightBase::lookup_spectrum(&iemit);
let d = image.get_sampling_distribution_uniform();
let distrib = PiecewiseConstant2D::new_with_data(&d);
Self {
base,
iemit: i_interned,
scale,
image,
distrib,
}
}
pub fn i(&self, w: Vector3f, lambda: &SampledWavelengths) -> SampledSpectrum {
let uv = equal_area_sphere_to_square(w);
self.scale * self.iemit.sample(lambda) * self.image.lookup_nearest_channel(uv, 0)
}
}
impl LightTrait for GoniometricLight {
    fn base(&self) -> &LightBase {
        &self.base
    }
    /// Total power: average image value times 4*pi (the image parameterizes
    /// the full sphere of directions) times the scaled intensity spectrum.
    fn phi(&self, lambda: SampledWavelengths) -> SampledSpectrum {
        let mut total = 0.;
        for row in 0..self.image.resolution.y() {
            for col in 0..self.image.resolution.x() {
                total += self.image.get_channel(Point2i::new(col, row), 0);
            }
        }
        self.scale * self.iemit.sample(&lambda) * 4. * PI * total
            / (self.image.resolution.x() * self.image.resolution.y()) as Float
    }
    fn sample_li(
        &self,
        _ctx: &LightSampleContext,
        _u: Point2f,
        _lambda: &SampledWavelengths,
        _allow_incomplete_pdf: bool,
    ) -> Option<LightLiSample> {
        todo!()
    }
    /// Delta light: never sampled by chance.
    fn pdf_li(
        &self,
        _ctx: &LightSampleContext,
        _wi: Vector3f,
        _allow_incomplete_pdf: bool,
    ) -> Float {
        0.
    }
    fn l(
        &self,
        _p: Point3f,
        _n: Normal3f,
        _uv: Point2f,
        _w: Vector3f,
        _lambda: &SampledWavelengths,
    ) -> SampledSpectrum {
        todo!()
    }
    fn le(&self, _ray: &Ray, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
    fn preprocess(&mut self, _scene_bounds: &Bounds3f) {
        todo!()
    }
    fn bounds(&self) -> Option<LightBounds> {
        todo!()
    }
}
/// Isotropic point light emitting intensity `i` uniformly in all directions.
#[derive(Debug, Clone)]
pub struct PointLight {
    base: LightBase,
    // Emitted intensity spectrum, interned.
    i: Arc<DenselySampledSpectrum>,
    // Linear scale applied to `i`.
    scale: Float,
}
impl PointLight {
pub fn new(
render_from_light: Transform<Float>,
medium_interface: MediumInterface,
i: &Spectrum,
scale: Float,
) -> Self {
let base = LightBase::new(
LightType::DeltaPosition,
&render_from_light,
&medium_interface,
);
let i_interned = LightBase::lookup_spectrum(i);
Self {
base,
i: i_interned,
scale,
}
}
}
impl LightTrait for PointLight {
    fn base(&self) -> &LightBase {
        &self.base
    }
    /// Samples the single direction toward the light. Delta light: the
    /// returned pdf is 1 and `pdf_li` reports 0.
    fn sample_li(
        &self,
        ctx: &LightSampleContext,
        _u: Point2f,
        lambda: &SampledWavelengths,
        _allow_incomplete_pdf: bool,
    ) -> Option<LightLiSample> {
        let p_light_i = self
            .base
            .render_from_light
            .apply_to_interval(&Point3fi::default());
        let p_light: Point3f = p_light_i.into();
        let dir = (p_light - ctx.p()).normalize();
        // Radiance falls off with squared distance to the light.
        let radiance = self.scale * self.i.sample(lambda) / p_light.distance_squared(ctx.p());
        let light_intr =
            SimpleInteraction::new(p_light_i, 0.0, Some(self.base.medium_interface.clone()));
        Some(LightLiSample::new(
            radiance,
            dir,
            1.,
            Interaction::Simple(light_intr),
        ))
    }
    /// Total power of an isotropic source: 4*pi*scale*I.
    fn phi(&self, lambda: SampledWavelengths) -> SampledSpectrum {
        4. * PI * self.scale * self.i.sample(&lambda)
    }
    fn pdf_li(
        &self,
        _ctx: &LightSampleContext,
        _wi: Vector3f,
        _allow_incomplete_pdf: bool,
    ) -> Float {
        0.
    }
    fn l(
        &self,
        _p: Point3f,
        _n: Normal3f,
        _uv: Point2f,
        _w: Vector3f,
        _lambda: &SampledWavelengths,
    ) -> SampledSpectrum {
        todo!()
    }
    fn le(&self, _ray: &Ray, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
    fn preprocess(&mut self, _scene_bounds: &Bounds3f) {
        todo!()
    }
    /// Degenerate bounds at the light's position, emitting over the whole
    /// sphere of directions.
    fn bounds(&self) -> Option<LightBounds> {
        let position = self
            .base
            .render_from_light
            .apply_to_point(Point3f::new(0., 0., 0.));
        let phi = 4. * PI * self.scale * self.i.max_value();
        Some(LightBounds::new(
            &Bounds3f::from_points(position, position),
            Vector3f::new(0., 0., 1.),
            phi,
            PI.cos(),
            (PI / 2.).cos(),
            false,
        ))
    }
}
/// Light that projects an image into the scene, like a slide projector.
#[derive(Debug, Clone)]
pub struct ProjectionLight {
    base: LightBase,
    // The projected image.
    image: Image,
    image_color_space: Arc<RGBColorSpace>,
    // Linear scale applied to the projected radiance.
    scale: Float,
    // Screen-space window the image is projected through.
    screen_bounds: Bounds2f,
    // Near-plane distance; directions with z below this emit nothing.
    hither: Float,
    screen_from_light: Transform<Float>,
    light_from_screen: Transform<Float>,
    // Area of the screen window at z = 1 (4 * tan^2(fov/2) * aspect).
    a: Float,
    // 2D distribution for importance-sampling the image.
    distrib: PiecewiseConstant2D,
}
impl ProjectionLight {
    /// Builds a projection light with the given image, color space, scale
    /// and field of view (degrees).
    pub fn new(
        render_from_light: Transform<Float>,
        medium_interface: MediumInterface,
        image: Image,
        image_color_space: Arc<RGBColorSpace>,
        scale: Float,
        fov: Float,
    ) -> Self {
        let base = LightBase::new(
            LightType::DeltaPosition,
            &render_from_light,
            &medium_interface,
        );
        // Screen window sized so the image keeps its aspect ratio; the short
        // axis spans [-1, 1].
        let aspect = image.resolution().x() as Float / image.resolution().y() as Float;
        let screen_bounds = if aspect > 1. {
            Bounds2f::from_points(Point2f::new(-aspect, -1.), Point2f::new(aspect, 1.))
        } else {
            // Bug fix: the lower-left y coordinate must be negative;
            // previously both corners had y = 1/aspect, giving a degenerate
            // (zero-height) window.
            Bounds2f::from_points(
                Point2f::new(-1., -1. / aspect),
                Point2f::new(1., 1. / aspect),
            )
        };
        let hither = 1e-3;
        let screen_from_light = Transform::perspective(fov, hither, 1e30).unwrap();
        let light_from_screen = screen_from_light.inverse();
        // Area of the screen window at z = 1: 4 * tan^2(fov/2) * aspect.
        let opposite = (radians(fov) / 2.).tan();
        let aspect_ratio = if aspect > 1. { aspect } else { 1. / aspect };
        let a = 4. * square(opposite) * aspect_ratio;
        // Jacobian dw/dA = cos^3(theta) converts image-plane area to solid
        // angle for the sampling distribution.
        let dwda = |p: Point2f| {
            let w =
                Vector3f::from(light_from_screen.apply_to_point(Point3f::new(p.x(), p.y(), 0.)));
            cos_theta(w.normalize()).powi(3)
        };
        let d = image.get_sampling_distribution(dwda, screen_bounds);
        let distrib = PiecewiseConstant2D::new_with_bounds(&d, screen_bounds);
        Self {
            base,
            image,
            image_color_space,
            screen_bounds,
            screen_from_light,
            light_from_screen,
            scale,
            hither,
            a,
            distrib,
        }
    }
    /// Scaled intensity emitted toward light-space direction `w`; zero
    /// outside the projection frustum or behind the near plane.
    pub fn i(&self, w: Vector3f, lambda: SampledWavelengths) -> SampledSpectrum {
        if w.z() < self.hither {
            return SampledSpectrum::new(0.);
        }
        let ps = self.screen_from_light.apply_to_point(w.into());
        if !self.screen_bounds.contains(Point2f::new(ps.x(), ps.y())) {
            return SampledSpectrum::new(0.);
        }
        let uv = Point2f::from(self.screen_bounds.offset(&Point2f::new(ps.x(), ps.y())));
        let mut rgb = RGB::default();
        for c in 0..3 {
            rgb[c] = self.image.lookup_nearest_channel(uv, c);
        }
        let s = RGBIlluminantSpectrum::new(self.image_color_space.as_ref(), RGB::clamp_zero(rgb));
        self.scale * s.sample(&lambda)
    }
}
impl LightTrait for ProjectionLight {
    fn base(&self) -> &LightBase {
        &self.base
    }
    /// Total power: integrates the projected image over the screen window,
    /// weighting each pixel by the area-to-solid-angle Jacobian cos^3(theta).
    fn phi(&self, lambda: SampledWavelengths) -> SampledSpectrum {
        let mut accum = SampledSpectrum::new(0.);
        for py in 0..self.image.resolution.y() {
            for px in 0..self.image.resolution.x() {
                // Pixel center mapped onto the screen window.
                let screen_p = self.screen_bounds.lerp(Point2f::new(
                    (px as Float + 0.5) / self.image.resolution.x() as Float,
                    (py as Float + 0.5) / self.image.resolution.y() as Float,
                ));
                let dir = Vector3f::from(self.light_from_screen.apply_to_point(Point3f::new(
                    screen_p.x(),
                    screen_p.y(),
                    0.,
                )))
                .normalize();
                let dwda = cos_theta(dir).powi(3);
                let mut rgb = RGB::default();
                for ch in 0..3 {
                    rgb[ch] = self.image.get_channel(Point2i::new(px, py), ch);
                }
                let spec = RGBIlluminantSpectrum::new(
                    self.image_color_space.as_ref(),
                    RGB::clamp_zero(rgb),
                );
                accum += spec.sample(&lambda) * dwda;
            }
        }
        self.scale * self.a * accum
            / (self.image.resolution.x() * self.image.resolution.y()) as Float
    }
    fn sample_li(
        &self,
        _ctx: &LightSampleContext,
        _u: Point2f,
        _lambda: &SampledWavelengths,
        _allow_incomplete_pdf: bool,
    ) -> Option<LightLiSample> {
        todo!()
    }
    fn pdf_li(
        &self,
        _ctx: &LightSampleContext,
        _wi: Vector3f,
        _allow_incomplete_pdf: bool,
    ) -> Float {
        todo!()
    }
    fn l(
        &self,
        _p: Point3f,
        _n: Normal3f,
        _uv: Point2f,
        _w: Vector3f,
        _lambda: &SampledWavelengths,
    ) -> SampledSpectrum {
        todo!()
    }
    fn le(&self, _ray: &Ray, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
    fn preprocess(&mut self, _scene_bounds: &Bounds3f) {
        todo!()
    }
    fn bounds(&self) -> Option<LightBounds> {
        todo!()
    }
}
/// Point light emitting in a cone around the light-space +z axis, with a
/// smooth falloff between the start and end angles.
#[derive(Debug, Clone)]
pub struct SpotLight {
    base: LightBase,
    // Emitted intensity spectrum, interned.
    iemit: Arc<DenselySampledSpectrum>,
    // Linear scale applied to `iemit`.
    scale: Float,
    // Cosine of the angle where the falloff begins (full intensity inside).
    cos_fallof_start: Float,
    // Cosine of the cone's total half-angle (zero intensity outside).
    cos_fallof_end: Float,
}
impl SpotLight {
pub fn new(
render_from_light: &Transform<Float>,
medium_interface: &MediumInterface,
iemit: Spectrum,
scale: Float,
total_width: Float,
fallof_start: Float,
) -> Self {
let base = LightBase::new(
LightType::DeltaPosition,
render_from_light,
medium_interface,
);
let i_interned = LightBase::lookup_spectrum(&iemit);
let cos_fallof_end = radians(total_width).cos();
let cos_fallof_start = radians(fallof_start).cos();
assert!(fallof_start < total_width);
Self {
base,
iemit: i_interned,
scale,
cos_fallof_start,
cos_fallof_end,
}
}
pub fn i(&self, w: Vector3f, lambda: SampledWavelengths) -> SampledSpectrum {
smooth_step(cos_theta(w), self.cos_fallof_end, self.cos_fallof_start)
* self.scale
* self.iemit.sample(&lambda)
}
}
impl LightTrait for SpotLight {
    fn base(&self) -> &LightBase {
        &self.base
    }
    /// Samples the single direction toward the spotlight's position; the
    /// angular falloff is evaluated in light space via `i`.
    fn sample_li(
        &self,
        ctx: &LightSampleContext,
        _u: Point2f,
        lambda: &SampledWavelengths,
        _allow_incomplete_pdf: bool,
    ) -> Option<LightLiSample> {
        let p_light_i = self
            .base
            .render_from_light
            .apply_to_interval(&Point3fi::default());
        let p_light: Point3f = p_light_i.into();
        let dir = (p_light - ctx.p()).normalize();
        // Direction from the light toward the shading point, in light space.
        let w_light = self.base.render_from_light.apply_inverse_vector(-dir);
        let radiance = self.i(w_light, *lambda) / p_light.distance_squared(ctx.p());
        let light_intr =
            SimpleInteraction::new(p_light_i, 0.0, Some(self.base.medium_interface.clone()));
        Some(LightLiSample::new(
            radiance,
            dir,
            1.,
            Interaction::Simple(light_intr),
        ))
    }
    /// Total power: closed-form integral of the smooth-stepped cone profile.
    fn phi(&self, lambda: SampledWavelengths) -> SampledSpectrum {
        self.scale
            * self.iemit.sample(&lambda)
            * 2.
            * PI
            * ((1. - self.cos_fallof_start) + (self.cos_fallof_start - self.cos_fallof_end) / 2.)
    }
    /// Delta light: never sampled by chance.
    fn pdf_li(
        &self,
        _ctx: &LightSampleContext,
        _wi: Vector3f,
        _allow_incomplete_pdf: bool,
    ) -> Float {
        0.
    }
    fn l(
        &self,
        _p: Point3f,
        _n: Normal3f,
        _uv: Point2f,
        _w: Vector3f,
        _lambda: &SampledWavelengths,
    ) -> SampledSpectrum {
        todo!()
    }
    fn le(&self, _ray: &Ray, _lambda: &SampledWavelengths) -> SampledSpectrum {
        todo!()
    }
    fn preprocess(&mut self, _scene_bounds: &Bounds3f) {
        todo!()
    }
    /// Degenerate bounds at the light's position, oriented along the spot
    /// axis with the cone's emission angles.
    fn bounds(&self) -> Option<LightBounds> {
        let position = self
            .base
            .render_from_light
            .apply_to_point(Point3f::default());
        let axis = self
            .base
            .render_from_light
            .apply_to_vector(Vector3f::new(0., 0., 1.))
            .normalize();
        let phi = self.scale * self.iemit.max_value() * 4. * PI;
        let cos_theta_e = (self.cos_fallof_end.acos() - self.cos_fallof_start.acos()).cos();
        Some(LightBounds::new(
            &Bounds3f::from_points(position, position),
            axis,
            phi,
            self.cos_fallof_start,
            cos_theta_e,
            false,
        ))
    }
}

521
src/lights/sampler.rs Normal file
View file

@ -0,0 +1,521 @@
use super::{
Bounds3f, Float, Light, LightBounds, LightSampleContext, LightTrait, Normal3f, PI, Point3f,
SampledSpectrum, SampledWavelengths, Vector3f, VectorLike, safe_sqrt, square,
};
use crate::geometry::primitives::OctahedralVector;
use crate::geometry::{DirectionCone, Normal};
use crate::utils::math::sample_discrete;
use std::collections::HashMap;
use std::sync::Arc;
use crate::core::pbrt::{ONE_MINUS_EPSILON, clamp_t, lerp};
use crate::utils::sampling::AliasTable;
use enum_dispatch::enum_dispatch;
/// Quantized, cache-friendly version of `LightBounds` (24 bytes): spatial
/// bounds are stored as 16-bit fractions of the scene bounds, the principal
/// direction as an octahedral vector, and the cone cosines as 15-bit fields.
#[derive(Clone, Copy, Debug, Default)]
#[repr(C)]
pub struct CompactLightBounds {
    // Quantized principal emission direction.
    pub w: OctahedralVector,
    // Emitted power of the cluster.
    pub phi: Float,
    // [0..15] = qCosTheta_o
    // [15..30] = qCosTheta_e
    // [30..31] = twoSided
    // [31..32] = Unused/Padding
    packed_info: u32,
    // Quantized min/max corners relative to the scene bounds.
    pub qb: [[u16; 3]; 2],
}
// Layout guarantee: the struct must stay 24 bytes.
const _: () = assert!(std::mem::size_of::<CompactLightBounds>() == 24);
impl CompactLightBounds {
    /// Quantizes `lb` relative to the scene-wide bounds `all_b`.
    ///
    /// Cosines become 15-bit fixed point; box corners become 16-bit fractions
    /// of `all_b`, floored/ceiled so the quantized box is conservative.
    pub fn new(lb: &LightBounds, all_b: &Bounds3f) -> Self {
        let q_cos_o = Self::quantize_cos(lb.cos_theta_o);
        let q_cos_e = Self::quantize_cos(lb.cos_theta_e);
        let two_sided = if lb.two_sided { 1 } else { 0 };
        // | - twoSided (1) - | - qCosTheta_e (15) - | - qCosTheta_o (15) - |
        let packed_info = (q_cos_o & 0x7FFF) | ((q_cos_e & 0x7FFF) << 15) | (two_sided << 30);
        let mut qb = [[0u16; 3]; 2];
        for i in 0..3 {
            qb[0][i] = Self::quantize_bounds(lb.bounds.p_min[i], all_b.p_min[i], all_b.p_max[i])
                .floor() as u16;
            qb[1][i] = Self::quantize_bounds(lb.bounds.p_max[i], all_b.p_min[i], all_b.p_max[i])
                .ceil() as u16;
        }
        Self {
            w: OctahedralVector::new(lb.w.normalize()),
            phi: lb.phi,
            packed_info,
            qb,
        }
    }
    #[inline(always)]
    pub fn two_sided(&self) -> bool {
        (self.packed_info >> 30) & 1 == 1
    }
    #[inline(always)]
    fn q_cos_theta_o(&self) -> u32 {
        self.packed_info & 0x7FFF
    }
    #[inline(always)]
    fn q_cos_theta_e(&self) -> u32 {
        (self.packed_info >> 15) & 0x7FFF
    }
    /// Dequantized cosine of the emission cone's central angle.
    #[inline]
    pub fn cos_theta_o(&self) -> Float {
        2.0 * (self.q_cos_theta_o() as Float / 32767.0) - 1.0
    }
    /// Dequantized cosine of the emission cone's falloff angle.
    #[inline]
    pub fn cos_theta_e(&self) -> Float {
        2.0 * (self.q_cos_theta_e() as Float / 32767.0) - 1.0
    }
    /// Importance heuristic for sampling this cluster from point `p` with
    /// surface normal `n` (pass a zero normal to skip the cosine factor).
    pub fn importance(&self, p: Point3f, n: Normal3f, all_b: &Bounds3f) -> Float {
        let bounds = self.bounds(all_b);
        let cos_o = self.cos_theta_o();
        let cos_e = self.cos_theta_e();
        let pc = bounds.centroid();
        // Clamp the squared distance by the *squared* half-diagonal so points
        // very near or inside the bounds don't produce unbounded importance.
        // Bug fix: previously the unsquared half-diagonal length was compared
        // against a squared distance.
        let d2 = p
            .distance_squared(pc)
            .max(square(bounds.diagonal().norm() * 0.5));
        // Clamped angle-difference identities keep the cosines in [−1, 1]
        // without evaluating acos.
        let cos_sub_clamped = |sin_a: Float, cos_a: Float, sin_b: Float, cos_b: Float| {
            if cos_a > cos_b {
                1.0
            } else {
                cos_a * cos_b + sin_a * sin_b
            }
        };
        let sin_sub_clamped = |sin_a: Float, cos_a: Float, sin_b: Float, cos_b: Float| {
            if cos_a > cos_b {
                0.0
            } else {
                sin_a * cos_b - cos_a * sin_b
            }
        };
        let wi = (p - pc).normalize();
        let w_vec = self.w.to_vector();
        let mut cos_w = w_vec.dot(wi);
        if self.two_sided() {
            cos_w = cos_w.abs();
        }
        let sin_w = safe_sqrt(1.0 - square(cos_w));
        let cos_b = DirectionCone::bound_subtended_directions(&bounds, p).cos_theta;
        let sin_b = safe_sqrt(1. - square(cos_b));
        let sin_o = safe_sqrt(1. - square(cos_o));
        let cos_x = cos_sub_clamped(sin_w, cos_w, sin_o, cos_o);
        let sin_x = sin_sub_clamped(sin_w, cos_w, sin_o, cos_o);
        let cos_p = cos_sub_clamped(sin_x, cos_x, sin_b, cos_b);
        // Outside the falloff cone: no contribution.
        if cos_p <= cos_e {
            return 0.;
        }
        let mut importance = self.phi * cos_p / d2;
        if n != Normal3f::zero() {
            // Attenuate by the (conservative) incident cosine at the shading
            // point.
            let cos_i = wi.abs_dot(n.into());
            let sin_i = safe_sqrt(1. - square(cos_i));
            let cos_pi = cos_sub_clamped(sin_i, cos_i, sin_b, cos_b);
            importance *= cos_pi;
        }
        importance
    }
    /// Dequantizes the stored box corners back into scene space.
    pub fn bounds(&self, all_b: &Bounds3f) -> Bounds3f {
        let mut p_min = Point3f::default();
        let mut p_max = Point3f::default();
        for i in 0..3 {
            let t_min = self.qb[0][i] as Float / 65535.0;
            let t_max = self.qb[1][i] as Float / 65535.0;
            p_min[i] = lerp(t_min, all_b.p_min[i], all_b.p_max[i]);
            p_max[i] = lerp(t_max, all_b.p_min[i], all_b.p_max[i]);
        }
        Bounds3f::from_points(p_min, p_max)
    }
    /// Maps a cosine in [-1, 1] to a 15-bit fixed-point value.
    fn quantize_cos(c: Float) -> u32 {
        (32767.0 * ((c + 1.0) * 0.5)).floor() as u32
    }
    /// Maps `c` in [min, max] to a 16-bit fraction (0 for degenerate axes).
    fn quantize_bounds(c: Float, min: Float, max: Float) -> Float {
        if min == max {
            return 0.0;
        }
        65535.0 * clamp_t((c - min) / (max - min), 0.0, 1.0)
    }
}
/// A chosen light together with the probability of having chosen it.
#[derive(Debug, Clone)]
pub struct SampledLight {
    pub light: Arc<Light>,
    pub p: Float,
}
impl SampledLight {
    /// Pairs `light` with its selection probability `p`.
    pub fn new(light: Arc<Light>, p: Float) -> Self {
        SampledLight { light, p }
    }
}
/// Strategy interface for choosing which light to sample at a shading point.
#[enum_dispatch]
pub trait LightSamplerTrait: Send + Sync + std::fmt::Debug {
    /// Samples a light using the shading context; `u` is a uniform sample.
    fn sample_with_context(&self, ctx: &LightSampleContext, u: Float) -> Option<SampledLight>;
    /// Probability that `sample_with_context` would return `light`.
    fn pmf_with_context(&self, ctx: &LightSampleContext, light: &Arc<Light>) -> Float;
    /// Context-free sampling fallback.
    fn sample(&self, u: Float) -> Option<SampledLight>;
    /// Probability that `sample` would return `light`.
    fn pmf(&self, light: &Arc<Light>) -> Float;
}
/// Tagged union over the available light-sampling strategies; `enum_dispatch`
/// forwards `LightSamplerTrait` calls to the active variant.
#[derive(Clone, Debug)]
#[enum_dispatch(LightSamplerTrait)]
pub enum LightSampler {
    Uniform(UniformLightSampler),
    Power(PowerLightSampler),
    BVH(BVHLightSampler),
}
/// Chooses among lights with uniform probability, ignoring power and
/// geometry.
#[derive(Clone, Debug)]
pub struct UniformLightSampler {
    lights: Vec<Arc<Light>>,
}
impl UniformLightSampler {
    /// Stores a copy of the scene's light list.
    pub fn new(lights: &[Arc<Light>]) -> Self {
        UniformLightSampler {
            lights: Vec::from(lights),
        }
    }
}
impl LightSamplerTrait for UniformLightSampler {
    /// Uniform sampling ignores the shading context.
    fn sample_with_context(&self, _ctx: &LightSampleContext, u: Float) -> Option<SampledLight> {
        self.sample(u)
    }
    fn pmf_with_context(&self, _ctx: &LightSampleContext, light: &Arc<Light>) -> Float {
        self.pmf(light)
    }
    /// Picks light `floor(u * n)`, clamped to the last index.
    fn sample(&self, u: Float) -> Option<SampledLight> {
        if self.lights.is_empty() {
            return None;
        }
        // Bug fix: scale u *before* truncating. `u as usize` is 0 for every
        // u in [0, 1), so the first light was always chosen.
        let light_index =
            ((u * self.lights.len() as Float) as usize).min(self.lights.len() - 1);
        Some(SampledLight {
            light: self.lights[light_index].clone(),
            p: 1. / self.lights.len() as Float,
        })
    }
    fn pmf(&self, _light: &Arc<Light>) -> Float {
        if self.lights.is_empty() {
            return 0.;
        }
        1. / self.lights.len() as Float
    }
}
/// Chooses lights proportionally to their emitted power via an alias table.
#[derive(Clone, Debug)]
pub struct PowerLightSampler {
    lights: Vec<Arc<Light>>,
    // Maps Arc pointer identity to the light's slot in `lights`.
    light_to_index: HashMap<usize, usize>,
    alias_table: AliasTable,
}
impl PowerLightSampler {
    /// Builds the sampler, estimating each light's power at a fixed set of
    /// visible wavelengths.
    pub fn new(lights: &[Arc<Light>]) -> Self {
        if lights.is_empty() {
            return PowerLightSampler {
                lights: Vec::new(),
                light_to_index: HashMap::new(),
                alias_table: AliasTable::new(&[]),
            };
        }
        let mut stored = Vec::with_capacity(lights.len());
        let mut index_of = HashMap::with_capacity(lights.len());
        let mut powers = Vec::with_capacity(lights.len());
        let lambda = SampledWavelengths::sample_visible(0.5);
        for (slot, light) in lights.iter().enumerate() {
            stored.push(light.clone());
            index_of.insert(Arc::as_ptr(light) as usize, slot);
            // Divide out the wavelength pdf to get an unbiased power estimate.
            let phi = SampledSpectrum::safe_div(&light.phi(lambda), &lambda.pdf());
            powers.push(phi.average());
        }
        PowerLightSampler {
            alias_table: AliasTable::new(&powers),
            lights: stored,
            light_to_index: index_of,
        }
    }
}
impl LightSamplerTrait for PowerLightSampler {
    /// Power-proportional sampling ignores the shading context.
    fn sample_with_context(&self, _ctx: &LightSampleContext, u: Float) -> Option<SampledLight> {
        self.sample(u)
    }
    fn pmf_with_context(&self, _ctx: &LightSampleContext, light: &Arc<Light>) -> Float {
        self.pmf(light)
    }
    /// Draws a light from the alias table in O(1).
    fn sample(&self, u: Float) -> Option<SampledLight> {
        if self.alias_table.size() == 0 {
            return None;
        }
        let (idx, pmf, _) = self.alias_table.sample(u);
        Some(SampledLight {
            light: self.lights[idx].clone(),
            p: pmf,
        })
    }
    /// Looks up the light by pointer identity; unknown lights have pmf 0.
    fn pmf(&self, light: &Arc<Light>) -> Float {
        if self.alias_table.size() == 0 {
            return 0.;
        }
        let key = Arc::as_ptr(light) as usize;
        self.light_to_index
            .get(&key)
            .map_or(0.0, |&idx| self.alias_table.pmf(idx))
    }
}
/// One node of the light BVH: quantized cluster bounds plus a packed
/// leaf-flag/index word. 32-byte aligned so two nodes share a cache line.
#[derive(Clone, Copy, Debug, Default)]
#[repr(C, align(32))]
pub struct LightBVHNode {
    pub light_bounds: CompactLightBounds,
    // Bit 31 (MSB) : isLeaf (1 bit)
    // Bits 0..31 : childOrLightIndex (31 bits)
    packed_data: u32,
}
// Layout guarantee: the aligned node must stay 32 bytes.
const _: () = assert!(std::mem::size_of::<LightBVHNode>() == 32);
impl LightBVHNode {
/// Mask to isolate the Leaf Flag (Bit 31)
const LEAF_MASK: u32 = 0x8000_0000;
/// Mask to isolate the Index (Bits 0-30)
const INDEX_MASK: u32 = 0x7FFF_FFFF;
pub fn make_leaf(light_index: u32, cb: CompactLightBounds) -> Self {
debug_assert!(
(light_index & Self::LEAF_MASK) == 0,
"Light index too large"
);
Self {
light_bounds: cb,
// Set index and flip the MSB to 1
packed_data: light_index | Self::LEAF_MASK,
}
}
pub fn make_interior(child_index: u32, cb: CompactLightBounds) -> Self {
debug_assert!(
(child_index & Self::LEAF_MASK) == 0,
"Child index too large"
);
Self {
light_bounds: cb,
// Set index, MSB remains 0
packed_data: child_index,
}
}
#[inline(always)]
pub fn is_leaf(&self) -> bool {
(self.packed_data & Self::LEAF_MASK) != 0
}
#[inline(always)]
pub fn light_index(&self) -> u32 {
debug_assert!(self.is_leaf());
self.packed_data & Self::INDEX_MASK
}
#[inline(always)]
pub fn child_index(&self) -> u32 {
debug_assert!(!self.is_leaf());
self.packed_data & Self::INDEX_MASK
}
#[inline(always)]
pub fn child_or_light_index(&self) -> u32 {
self.packed_data & Self::INDEX_MASK
}
pub fn sample(&self, _ctx: &LightSampleContext, _u: Float) -> Option<SampledLight> {
todo!("Implement LightBVHNode::Sample logic")
}
}
/// Adaptive light sampler that organizes finite lights in a BVH and samples
/// them by per-cluster importance; infinite lights are sampled uniformly on
/// the side.
#[derive(Clone, Debug)]
pub struct BVHLightSampler {
    // Finite lights stored in the BVH.
    lights: Vec<Arc<Light>>,
    // Lights without finite bounds, sampled uniformly.
    infinite_lights: Vec<Arc<Light>>,
    // Scene-wide bounds the compact node bounds are quantized against.
    all_light_bounds: Bounds3f,
    // Flattened BVH; a node's first child is the next array slot.
    nodes: Vec<LightBVHNode>,
    // Per-light traversal path (one bit per level), keyed by Arc pointer.
    light_to_bit_trail: HashMap<usize, usize>,
}
impl BVHLightSampler {
    /// Surface-area-orientation cost of a candidate cluster `b` split along
    /// `dim` (pbrt-v4, Sec. 12.6.3): phi * M_Omega * K_r * surface area.
    fn evaluate_cost(&self, b: &LightBounds, bounds: &Bounds3f, dim: usize) -> Float {
        let theta_o = b.cos_theta_o.acos();
        let theta_e = b.cos_theta_e.acos();
        let theta_w = (theta_o + theta_e).min(PI);
        let sin_o = safe_sqrt(1. - square(b.cos_theta_o));
        // Solid-angle measure of the cluster's emission cone.
        // Bug fix: the `2 * theta_w` term must be weighted by sin(theta_o)
        // per the reference formulation; it was previously unweighted.
        let m_omega = 2. * PI * (1. - b.cos_theta_o)
            + PI / 2.
                * (2. * theta_w * sin_o - (theta_o - 2. * theta_w).cos()
                    - 2. * theta_o * sin_o
                    + b.cos_theta_o);
        // K_r penalizes splitting along short axes of the parent bounds.
        let kr = bounds.diagonal().max_component_value() / bounds.diagonal()[dim];
        b.phi * m_omega * kr * b.bounds.surface_area()
    }
}
impl LightSamplerTrait for BVHLightSampler {
    /// Samples a light by first choosing (stochastically) between the
    /// uniformly sampled infinite lights and a BVH traversal guided by
    /// per-cluster importance at the shading point.
    fn sample_with_context(&self, ctx: &LightSampleContext, mut u: Float) -> Option<SampledLight> {
        let bvh_weight = if self.nodes.is_empty() { 0. } else { 1. };
        let inf_size = self.infinite_lights.len() as Float;
        let p_inf = inf_size / (inf_size + bvh_weight);
        if u < p_inf {
            // Remap u back to [0, 1) and pick an infinite light uniformly.
            u /= p_inf;
            // Bug fix: index by the number of *infinite* lights; the previous
            // code scaled by the number of BVH lights, which both skewed the
            // choice and could index out of bounds.
            let ind = (u * inf_size).min(inf_size - 1.) as usize;
            let pmf = p_inf / inf_size;
            Some(SampledLight::new(self.infinite_lights[ind].clone(), pmf))
        } else {
            if self.nodes.is_empty() {
                return None;
            }
            let p = ctx.p();
            let n = ctx.ns;
            u = ((u - p_inf) / (1. - p_inf)).min(ONE_MINUS_EPSILON);
            let mut node_ind = 0;
            let mut pmf = 1. - p_inf;
            loop {
                let node = self.nodes[node_ind];
                if !node.is_leaf() {
                    // First child is the next node; second child's index is
                    // stored in the node.
                    let children: [LightBVHNode; 2] = [
                        self.nodes[node_ind + 1],
                        self.nodes[node.child_or_light_index() as usize],
                    ];
                    let ci: [Float; 2] = [
                        children[0]
                            .light_bounds
                            .importance(p, n, &self.all_light_bounds),
                        children[1]
                            .light_bounds
                            .importance(p, n, &self.all_light_bounds),
                    ];
                    if ci[0] == 0. && ci[1] == 0. {
                        return None;
                    }
                    let mut node_pmf: Float = 0.;
                    // sample_discrete also rescales u for reuse at the next
                    // level of the tree.
                    let child = sample_discrete(&ci, u, Some(&mut node_pmf), Some(&mut u));
                    pmf *= node_pmf;
                    node_ind = if child == 0 {
                        node_ind + 1
                    } else {
                        node.child_or_light_index() as usize
                    };
                } else {
                    // Accept the leaf unless it is a degenerate single-node
                    // tree with zero importance.
                    if node_ind > 0
                        || node.light_bounds.importance(p, n, &self.all_light_bounds) > 0.
                    {
                        return Some(SampledLight::new(
                            self.lights[node.child_or_light_index() as usize].clone(),
                            pmf,
                        ));
                    }
                    return None;
                }
            }
        }
    }
    /// Probability that `sample_with_context` returns `light`, recomputed by
    /// replaying the BVH traversal recorded in the light's bit trail.
    fn pmf_with_context(&self, ctx: &LightSampleContext, light: &Arc<Light>) -> Float {
        let ptr = Arc::as_ptr(light) as usize;
        let bvh_weight = if self.nodes.is_empty() { 0. } else { 1. };
        // Bug fix: a light *without* a bit trail is an infinite light. The
        // condition was inverted, returning the infinite-light pmf for BVH
        // lights and then panicking on the map lookup below for infinite
        // ones.
        if !self.light_to_bit_trail.contains_key(&ptr) {
            return 1. / (self.infinite_lights.len() as Float + bvh_weight);
        }
        let mut bit_trail = self.light_to_bit_trail[&ptr];
        let p = ctx.p();
        let n = ctx.ns;
        let p_inf = self.infinite_lights.len() as Float
            / (self.infinite_lights.len() as Float + bvh_weight);
        let mut pmf = 1. - p_inf;
        let mut node_ind = 0;
        loop {
            let node = self.nodes[node_ind];
            if node.is_leaf() {
                return pmf;
            }
            let child0 = self.nodes[node_ind + 1];
            let child1 = self.nodes[node.child_or_light_index() as usize];
            let ci = [
                child0.light_bounds.importance(p, n, &self.all_light_bounds),
                child1.light_bounds.importance(p, n, &self.all_light_bounds),
            ];
            // Each bit of the trail records which child was taken.
            pmf *= ci[bit_trail & 1] / (ci[0] + ci[1]);
            node_ind = if (bit_trail & 1) != 0 {
                node.child_or_light_index() as usize
            } else {
                node_ind + 1
            };
            bit_trail >>= 1;
        }
    }
    /// Context-free fallback: uniform over the finite lights.
    fn sample(&self, u: Float) -> Option<SampledLight> {
        if self.lights.is_empty() {
            return None;
        }
        let light_ind =
            (u * self.lights.len() as Float).min(self.lights.len() as Float - 1.) as usize;
        Some(SampledLight::new(
            self.lights[light_ind].clone(),
            1. / self.lights.len() as Float,
        ))
    }
    fn pmf(&self, _light: &Arc<Light>) -> Float {
        if self.lights.is_empty() {
            return 0.;
        }
        1. / self.lights.len() as Float
    }
}

View file

@ -3,6 +3,7 @@ use super::{
Point2f, Point3f, Point3fi, Ray, ShapeIntersection, ShapeSample, ShapeSampleContext,
ShapeTrait, SurfaceInteraction, Vector3f,
};
use crate::core::interaction::InteractionTrait;
use crate::core::pbrt::{Float, clamp_t, gamma, lerp};
use crate::geometry::{Tuple, VectorLike, spherical_quad_area};
use crate::utils::math::{SquareMatrix, difference_of_products, quadratic};
@ -61,6 +62,7 @@ impl BilinearPatchShape {
} else {
const NA: usize = 3;
let mut p = [[Point3f::default(); NA + 1]; NA + 1];
#[allow(clippy::needless_range_loop)]
for i in 0..=NA {
let u = i as Float / NA as Float;
for j in 0..=NA {
@ -90,14 +92,14 @@ impl BilinearPatchShape {
&meshes[self.mesh_index]
}
fn get_data(&self) -> PatchData {
fn get_data(&self) -> PatchData<'_> {
let mesh = self.mesh();
let start_index = 4 * self.blp_index;
let v = &mesh.vertex_indices[start_index..start_index + 4];
let p00: Point3f = mesh.p[v[0] as usize];
let p10: Point3f = mesh.p[v[1] as usize];
let p01: Point3f = mesh.p[v[2] as usize];
let p11: Point3f = mesh.p[v[3] as usize];
let p00: Point3f = mesh.p[v[0]];
let p10: Point3f = mesh.p[v[1]];
let p01: Point3f = mesh.p[v[2]];
let p11: Point3f = mesh.p[v[3]];
let n = mesh
.n
.as_ref()
@ -141,7 +143,7 @@ impl BilinearPatchShape {
return false;
}
}
return true;
true
}
fn intersect_bilinear_patch(
@ -630,13 +632,14 @@ impl ShapeTrait for BilinearPatchShape {
}
}
fn pdf(&self, intr: Arc<&dyn Interaction>) -> Float {
let Some(si) = intr.as_any().downcast_ref::<SurfaceInteraction>() else {
return 0.;
fn pdf(&self, intr: &Interaction) -> Float {
let Interaction::Surface(si) = intr else {
return 0.0;
};
let data = self.get_data();
let uv = if let Some(uvs) = &data.mesh.uv {
Point2f::invert_bilinear(si.uv, &uvs)
Point2f::invert_bilinear(si.uv, uvs)
} else {
si.uv
};
@ -676,19 +679,17 @@ impl ShapeTrait for BilinearPatchShape {
let use_area_sampling = !self.rectangle
|| data.mesh.image_distribution.is_some()
|| spherical_quad_area(v00, v10, v01, v11) <= Self::MIN_SPHERICAL_SAMPLE_AREA;
if use_area_sampling {
let isect_pdf = self.pdf(Arc::new(&isect.intr));
let intr_wrapper = Interaction::Surface(isect.intr.clone());
let isect_pdf = self.pdf(&intr_wrapper);
let distsq = ctx.p().distance_squared(isect.intr.p());
let absdot = Vector3f::from(isect.intr.n()).abs_dot(-wi);
if absdot == 0. {
return 0.;
}
let pdf = isect_pdf * distsq / absdot;
if pdf.is_infinite() {
return 0.;
} else {
return pdf;
}
if pdf.is_infinite() { 0. } else { pdf }
} else {
let mut pdf = 1. / spherical_quad_area(v00, v10, v01, v11);
if ctx.ns != Normal3f::zero() {

View file

@ -1,3 +1,4 @@
use crate::core::interaction::InteractionTrait;
use crate::core::pbrt::{clamp_t, lerp};
use crate::utils::math::square;
use crate::utils::splines::{
@ -12,14 +13,14 @@ use super::{
};
use std::sync::Arc;
struct IntersectionContext<'a> {
ray: &'a Ray,
object_from_ray: &'a Transform<Float>,
common: &'a CurveCommon<'a>,
struct IntersectionContext {
ray: Ray,
object_from_ray: Arc<Transform<Float>>,
common: CurveCommon,
}
impl<'a> CurveShape<'a> {
pub fn new(common: CurveCommon<'a>, u_min: Float, u_max: Float) -> Self {
impl CurveShape {
pub fn new(common: CurveCommon, u_min: Float, u_max: Float) -> Self {
Self {
common,
u_min,
@ -76,9 +77,9 @@ impl<'a> CurveShape<'a> {
};
let context = IntersectionContext {
ray: &ray,
object_from_ray: &ray_from_object.inverse(),
common: &self.common,
ray,
object_from_ray: Arc::new(ray_from_object.inverse()),
common: self.common.clone(),
};
self.recursive_intersect(&context, t_max, &cp, self.u_min, self.u_max, max_depth)
@ -208,6 +209,7 @@ impl<'a> CurveShape<'a> {
true
}
#[allow(clippy::too_many_arguments)]
fn intersection_result(
&self,
context: &IntersectionContext,
@ -244,7 +246,7 @@ impl<'a> CurveShape<'a> {
let p_error = Vector3f::fill(hit_width);
let flip_normal = self.common.reverse_orientation ^ self.common.transform_swap_handedness;
let pi = Point3fi::new_with_error(context.ray.evaluate(t_hit), p_error);
let pi = Point3fi::new_with_error(context.ray.at(t_hit), p_error);
let intr = SurfaceInteraction::new(
pi,
Point2f::new(u, v),
@ -261,7 +263,7 @@ impl<'a> CurveShape<'a> {
}
}
impl ShapeTrait for CurveShape<'_> {
impl ShapeTrait for CurveShape {
fn bounds(&self) -> Bounds3f {
let cs_span = self.common.cp_obj;
let obj_bounds = bound_cubic_bezier(&cs_span, self.u_min, self.u_max);
@ -298,7 +300,7 @@ impl ShapeTrait for CurveShape<'_> {
self.intersect_ray(ray, t_max.unwrap_or(Float::INFINITY))
}
fn pdf(&self, _interaction: Arc<&dyn Interaction>) -> Float {
fn pdf(&self, _interaction: &Interaction) -> Float {
todo!()
}

View file

@ -3,6 +3,7 @@ use super::{
Point3fi, QuadricIntersection, Ray, ShapeIntersection, ShapeSample, ShapeSampleContext,
ShapeTrait, SurfaceInteraction, Transform, Vector3f, Vector3fi,
};
use crate::core::interaction::InteractionTrait;
use crate::core::pbrt::{gamma, lerp};
use crate::geometry::{Sqrt, Tuple, VectorLike};
use crate::utils::interval::Interval;
@ -10,10 +11,10 @@ use crate::utils::math::{difference_of_products, square};
use std::mem;
use std::sync::Arc;
impl<'a> CylinderShape<'a> {
impl CylinderShape {
pub fn new(
render_from_object: &'a Transform<Float>,
object_from_render: &'a Transform<Float>,
render_from_object: Arc<Transform<Float>>,
object_from_render: Arc<Transform<Float>>,
reverse_orientation: bool,
radius: Float,
z_min: Float,
@ -25,7 +26,7 @@ impl<'a> CylinderShape<'a> {
z_min,
z_max,
phi_max,
render_from_object,
render_from_object: render_from_object.clone(),
object_from_render,
reverse_orientation,
transform_swap_handedness: render_from_object.swaps_handedness(),
@ -55,12 +56,11 @@ impl<'a> CylinderShape<'a> {
return None;
}
let root_discrim = discrim.sqrt();
let q: Interval;
if Float::from(b) < 0. {
q = -0.5 * (b - root_discrim);
let q = if Float::from(b) < 0. {
-0.5 * (b - root_discrim)
} else {
q = -0.5 * (b + root_discrim);
}
-0.5 * (b + root_discrim)
};
let mut t0 = q / a;
let mut t1 = c / q;
if t0.low > t1.low {
@ -155,7 +155,7 @@ impl<'a> CylinderShape<'a> {
let flip_normal = self.reverse_orientation ^ self.transform_swap_handedness;
let wo_object = self.object_from_render.apply_to_vector(wo);
// (*renderFromObject)
let surf_point = SurfaceInteraction::new(
SurfaceInteraction::new(
Point3fi::new_with_error(p_hit, p_error),
Point2f::new(u, v),
wo_object,
@ -165,12 +165,11 @@ impl<'a> CylinderShape<'a> {
dndv,
time,
flip_normal,
);
surf_point
)
}
}
impl ShapeTrait for CylinderShape<'_> {
impl ShapeTrait for CylinderShape {
fn area(&self) -> Float {
(self.z_max - self.z_min) * self.radius * self.phi_max
}
@ -191,7 +190,7 @@ impl ShapeTrait for CylinderShape<'_> {
let t = t_max.unwrap_or(Float::INFINITY);
if let Some(isect) = self.basic_intersect(ray, t) {
let intr = self.interaction_from_intersection(isect.clone(), -ray.d, ray.time);
return Some(ShapeIntersection::new(intr, isect.t_hit));
Some(ShapeIntersection::new(intr, isect.t_hit))
} else {
None
}
@ -205,7 +204,7 @@ impl ShapeTrait for CylinderShape<'_> {
}
}
fn pdf(&self, _interaction: Arc<&dyn Interaction>) -> Float {
fn pdf(&self, _interaction: &Interaction) -> Float {
1. / self.area()
}
@ -218,9 +217,9 @@ impl ShapeTrait for CylinderShape<'_> {
if pdf.is_infinite() {
return 0.;
}
return pdf;
pdf
} else {
return 0.;
0.
}
}
@ -266,6 +265,6 @@ impl ShapeTrait for CylinderShape<'_> {
if ss.pdf.is_infinite() {
return None;
}
return Some(ss);
Some(ss)
}
}

View file

@ -3,19 +3,20 @@ use super::{
Point3fi, QuadricIntersection, Ray, ShapeIntersection, ShapeSample, ShapeSampleContext,
ShapeTrait, SurfaceInteraction, Transform, Vector3f,
};
use crate::core::interaction::InteractionTrait;
use crate::geometry::VectorLike;
use crate::utils::math::square;
use crate::utils::sampling::sample_uniform_disk_concentric;
use std::sync::Arc;
impl<'a> DiskShape<'a> {
impl DiskShape {
pub fn new(
radius: Float,
inner_radius: Float,
height: Float,
phi_max: Float,
render_from_object: &'a Transform<Float>,
object_from_render: &'a Transform<Float>,
render_from_object: Arc<Transform<Float>>,
object_from_render: Arc<Transform<Float>>,
reverse_orientation: bool,
) -> Self {
Self {
@ -23,7 +24,7 @@ impl<'a> DiskShape<'a> {
inner_radius,
height,
phi_max,
render_from_object,
render_from_object: render_from_object.clone(),
object_from_render,
reverse_orientation,
transform_swap_handedness: render_from_object.swaps_handedness(),
@ -88,7 +89,7 @@ impl<'a> DiskShape<'a> {
let p_error = Vector3f::zero();
let flip_normal = self.reverse_orientation ^ self.transform_swap_handedness;
let wo_object = self.object_from_render.apply_to_vector(wo);
let surf_point = SurfaceInteraction::new(
SurfaceInteraction::new(
Point3fi::new_with_error(p_hit, p_error),
Point2f::new(u, v),
wo_object,
@ -98,12 +99,11 @@ impl<'a> DiskShape<'a> {
dndv,
time,
flip_normal,
);
surf_point
)
}
}
impl ShapeTrait for DiskShape<'_> {
impl ShapeTrait for DiskShape {
fn area(&self) -> Float {
self.phi_max * 0.5 * (square(self.radius) - square(self.inner_radius))
}
@ -129,7 +129,7 @@ impl ShapeTrait for DiskShape<'_> {
let t = t_max.unwrap_or(Float::INFINITY);
if let Some(isect) = self.basic_intersect(ray, t) {
let intr = self.interaction_from_intersection(isect.clone(), -ray.d, ray.time);
return Some(ShapeIntersection::new(intr, isect.t_hit));
Some(ShapeIntersection::new(intr, isect.t_hit))
} else {
None
}
@ -184,10 +184,10 @@ impl ShapeTrait for DiskShape<'_> {
if ss.pdf.is_infinite() {
return None;
}
return Some(ss);
Some(ss)
}
fn pdf(&self, _interaction: Arc<&dyn Interaction>) -> Float {
fn pdf(&self, _interaction: &Interaction) -> Float {
1. / self.area()
}
@ -200,9 +200,9 @@ impl ShapeTrait for DiskShape<'_> {
if pdf.is_infinite() {
return 0.;
}
return pdf;
pdf
} else {
return 0.;
0.
}
}
}

View file

@ -5,50 +5,70 @@ pub mod disk;
pub mod sphere;
pub mod triangle;
use crate::core::interaction::{Interaction, MediumInteraction, SurfaceInteraction};
use crate::core::interaction::{
Interaction, InteractionTrait, MediumInteraction, SurfaceInteraction,
};
use crate::core::material::Material;
use crate::core::medium::{Medium, MediumInterface};
use crate::core::pbrt::{Float, PI};
use crate::geometry::{
Bounds3f, DirectionCone, Normal3f, Point2f, Point3f, Point3fi, Ray, Vector2f, Vector3f,
Vector3fi, VectorLike,
};
use crate::lights::Light;
use crate::utils::math::{next_float_down, next_float_up};
use crate::utils::transform::Transform;
use std::sync::Arc;
use enum_dispatch::enum_dispatch;
use std::sync::{Arc, Mutex};
#[derive(Debug, Clone)]
pub struct SphereShape<'a> {
pub struct SphereShape {
radius: Float,
z_min: Float,
z_max: Float,
theta_z_min: Float,
theta_z_max: Float,
phi_max: Float,
render_from_object: &'a Transform<Float>,
object_from_render: &'a Transform<Float>,
render_from_object: Arc<Transform<Float>>,
object_from_render: Arc<Transform<Float>>,
reverse_orientation: bool,
transform_swap_handedness: bool,
}
impl Default for SphereShape {
fn default() -> Self {
Self::new(
Transform::default().into(),
Transform::default().into(),
false,
1.0,
-1.0,
1.0,
360.0,
)
}
}
#[derive(Debug, Clone)]
pub struct CylinderShape<'a> {
pub struct CylinderShape {
radius: Float,
z_min: Float,
z_max: Float,
phi_max: Float,
render_from_object: &'a Transform<Float>,
object_from_render: &'a Transform<Float>,
render_from_object: Arc<Transform<Float>>,
object_from_render: Arc<Transform<Float>>,
reverse_orientation: bool,
transform_swap_handedness: bool,
}
#[derive(Debug, Clone)]
pub struct DiskShape<'a> {
pub struct DiskShape {
radius: Float,
inner_radius: Float,
height: Float,
phi_max: Float,
render_from_object: &'a Transform<Float>,
object_from_render: &'a Transform<Float>,
render_from_object: Arc<Transform<Float>>,
object_from_render: Arc<Transform<Float>>,
reverse_orientation: bool,
transform_swap_handedness: bool,
}
@ -75,37 +95,35 @@ pub enum CurveType {
}
#[derive(Debug, Clone)]
pub struct CurveCommon<'a> {
pub struct CurveCommon {
curve_type: CurveType,
cp_obj: [Point3f; 4],
width: [Float; 2],
n: [Normal3f; 2],
normal_angle: Float,
inv_sin_normal_angle: Float,
render_from_object: &'a Transform<Float>,
object_from_render: &'a Transform<Float>,
render_from_object: Arc<Transform<Float>>,
object_from_render: Arc<Transform<Float>>,
reverse_orientation: bool,
transform_swap_handedness: bool,
}
impl<'a> CurveCommon<'a> {
impl CurveCommon {
#[allow(clippy::too_many_arguments)]
pub fn new(
c: &[Point3f],
w0: Float,
w1: Float,
curve_type: CurveType,
norm: &[Vector3f],
render_from_object: &'a Transform<Float>,
object_from_render: &'a Transform<Float>,
render_from_object: Arc<Transform<Float>>,
object_from_render: Arc<Transform<Float>>,
reverse_orientation: bool,
) -> Self {
let transform_swap_handedness = render_from_object.swaps_handedness();
let width = [w0, w1];
assert_eq!(c.len(), 4);
let mut cp_obj = [Point3f::default(); 4];
for i in 0..4 {
cp_obj[i] = c[i];
}
let cp_obj: [Point3f; 4] = c[..4].try_into().unwrap();
let mut n = [Normal3f::default(); 2];
let mut normal_angle: Float = 0.;
@ -133,8 +151,8 @@ impl<'a> CurveCommon<'a> {
}
#[derive(Debug, Clone)]
pub struct CurveShape<'a> {
common: CurveCommon<'a>,
pub struct CurveShape {
common: CurveCommon,
u_min: Float,
u_max: Float,
}
@ -142,8 +160,8 @@ pub struct CurveShape<'a> {
// Define Intersection objects. This only varies for
#[derive(Debug, Clone)]
pub struct ShapeIntersection {
intr: SurfaceInteraction,
t_hit: Float,
pub intr: SurfaceInteraction,
pub t_hit: Float,
}
impl ShapeIntersection {
@ -151,14 +169,6 @@ impl ShapeIntersection {
Self { intr, t_hit }
}
pub fn intr(&self) -> &SurfaceInteraction {
&self.intr
}
pub fn intr_mut(&mut self) -> &mut SurfaceInteraction {
&mut self.intr
}
pub fn t_hit(&self) -> Float {
self.t_hit
}
@ -166,6 +176,18 @@ impl ShapeIntersection {
pub fn set_t_hit(&mut self, new_t: Float) {
self.t_hit = new_t;
}
pub fn set_intersection_properties(
&mut self,
mtl: Arc<Material>,
area: Arc<Light>,
prim_medium_interface: Option<MediumInterface>,
ray_medium: Option<Arc<Medium>>,
) {
let ray_medium = ray_medium.expect("Ray medium must be defined for intersection");
self.intr
.set_intersection_properties(mtl, area, prim_medium_interface, ray_medium);
}
}
#[derive(Debug, Clone)]
@ -209,16 +231,16 @@ impl BilinearIntersection {
#[derive(Clone)]
pub struct ShapeSample {
intr: Arc<SurfaceInteraction>,
pdf: Float,
pub intr: Arc<SurfaceInteraction>,
pub pdf: Float,
}
#[derive(Clone, Debug)]
pub struct ShapeSampleContext {
pi: Point3fi,
n: Normal3f,
ns: Normal3f,
time: Float,
pub pi: Point3fi,
pub n: Normal3f,
pub ns: Normal3f,
pub time: Float,
}
impl ShapeSampleContext {
@ -266,24 +288,32 @@ impl ShapeSampleContext {
}
}
#[derive(Debug, Clone)]
pub enum Shape<'a> {
Sphere(SphereShape<'a>),
Cylinder(CylinderShape<'a>),
Disk(DiskShape<'a>),
Triangle(TriangleShape),
BilinearPatch(BilinearPatchShape),
Curve(CurveShape<'a>),
}
pub trait ShapeTrait: Send + Sync + std::fmt::Debug {
#[enum_dispatch]
pub trait ShapeTrait {
fn bounds(&self) -> Bounds3f;
fn normal_bounds(&self) -> DirectionCone;
fn intersect(&self, ray: &Ray, t_max: Option<Float>) -> Option<ShapeIntersection>;
fn intersect_p(&self, ray: &Ray, t_max: Option<Float>) -> bool;
fn area(&self) -> Float;
fn pdf(&self, interaction: &Interaction) -> Float;
fn pdf_from_context(&self, ctx: &ShapeSampleContext, wi: Vector3f) -> Float;
fn sample(&self, u: Point2f) -> Option<ShapeSample>;
fn sample_from_context(&self, ctx: &ShapeSampleContext, u: Point2f) -> Option<ShapeSample>;
fn pdf(&self, interaction: Arc<&dyn Interaction>) -> Float;
fn pdf_from_context(&self, ctx: &ShapeSampleContext, wi: Vector3f) -> Float;
}
#[derive(Debug, Clone)]
#[enum_dispatch(ShapeTrait)]
pub enum Shape {
Sphere(SphereShape),
Cylinder(CylinderShape),
Disk(DiskShape),
Triangle(TriangleShape),
BilinearPatch(BilinearPatchShape),
Curve(CurveShape),
}
impl Default for Shape {
fn default() -> Self {
Shape::Sphere(SphereShape::default())
}
}

View file

@ -3,6 +3,7 @@ use super::{
QuadricIntersection, Ray, ShapeIntersection, ShapeSample, ShapeSampleContext, ShapeTrait,
SphereShape, SurfaceInteraction, Transform, Vector3f, Vector3fi,
};
use crate::core::interaction::InteractionTrait;
use crate::core::pbrt::{clamp_t, gamma};
use crate::geometry::{Frame, Sqrt, VectorLike, spherical_direction};
use crate::utils::interval::Interval;
@ -12,10 +13,10 @@ use crate::utils::sampling::sample_uniform_sphere;
use std::mem;
use std::sync::Arc;
impl<'a> SphereShape<'a> {
impl SphereShape {
pub fn new(
render_from_object: &'a Transform<Float>,
object_from_render: &'a Transform<Float>,
render_from_object: Arc<Transform<Float>>,
object_from_render: Arc<Transform<Float>>,
reverse_orientation: bool,
radius: Float,
z_min: Float,
@ -26,8 +27,8 @@ impl<'a> SphereShape<'a> {
let theta_z_max = clamp_t(z_max.min(z_max) / radius, -1., 1.).acos();
let phi_max = radians(clamp_t(phi_max, 0., 360.0));
Self {
render_from_object,
object_from_render,
render_from_object: render_from_object.clone(),
object_from_render: object_from_render.clone(),
radius,
z_min: clamp_t(z_min.min(z_max), -radius, radius),
z_max: clamp_t(z_min.max(z_max), -radius, radius),
@ -61,13 +62,12 @@ impl<'a> SphereShape<'a> {
}
let root_discrim = discrim.sqrt();
let q: Interval;
if Float::from(b) < 0. {
q = -0.5 * (b - root_discrim);
let q = if Float::from(b) < 0. {
-0.5 * (b - root_discrim)
} else {
q = -0.5 * (b + root_discrim);
}
-0.5 * (b + root_discrim)
};
let mut t0 = q / a;
let mut t1 = c / q;
@ -179,7 +179,7 @@ impl<'a> SphereShape<'a> {
let p_error = gamma(5) * Vector3f::from(p_hit).abs();
let flip_normal = self.reverse_orientation ^ self.transform_swap_handedness;
let wo_object = self.object_from_render.apply_to_vector(wo);
let surf_point = SurfaceInteraction::new(
SurfaceInteraction::new(
Point3fi::new_with_error(p_hit, p_error),
Point2f::new(u, v),
wo_object,
@ -189,13 +189,11 @@ impl<'a> SphereShape<'a> {
dndv,
time,
flip_normal,
);
// self.render_from_object.apply_to_point(surf_point)
surf_point
)
}
}
impl ShapeTrait for SphereShape<'_> {
impl ShapeTrait for SphereShape {
fn bounds(&self) -> Bounds3f {
self.render_from_object
.apply_to_bounds(Bounds3f::from_points(
@ -212,7 +210,7 @@ impl ShapeTrait for SphereShape<'_> {
self.phi_max * self.radius * (self.z_max - self.z_min)
}
fn pdf(&self, _interaction: Arc<&dyn Interaction>) -> Float {
fn pdf(&self, _interaction: &Interaction) -> Float {
1. / self.area()
}
@ -220,7 +218,7 @@ impl ShapeTrait for SphereShape<'_> {
let t = t_max.unwrap_or(Float::INFINITY);
if let Some(isect) = self.basic_intersect(ray, t) {
let intr = self.interaction_from_intersection(isect.clone(), -ray.d, ray.time);
return Some(ShapeIntersection::new(intr, isect.t_hit));
Some(ShapeIntersection::new(intr, isect.t_hit))
} else {
None
}

View file

@ -3,6 +3,7 @@ use super::{
ShapeIntersection, ShapeSample, ShapeSampleContext, ShapeTrait, SurfaceInteraction,
TriangleIntersection, TriangleShape, Vector2f, Vector3f,
};
use crate::core::interaction::InteractionTrait;
use crate::core::pbrt::gamma;
use crate::geometry::{Sqrt, Tuple, VectorLike, spherical_triangle_area};
use crate::utils::math::{difference_of_products, square};
@ -74,7 +75,7 @@ impl TriangleShape {
fn solid_angle(&self, p: Point3f) -> Float {
let data = self.get_data();
let [p0, p1, p2] = data.vertices;
spherical_triangle_area::<Float>(
spherical_triangle_area(
(p0 - p).normalize(),
(p1 - p).normalize(),
(p2 - p).normalize(),
@ -296,7 +297,7 @@ impl TriangleShape {
let mut ts = Vector3f::from(ns).cross(ss);
if ts.norm_squared() > 0. {
ss = ts.cross(Vector3f::from(ns)).into();
ss = ts.cross(Vector3f::from(ns));
} else {
(ss, ts) = Vector3f::from(ns).coordinate_system();
}
@ -319,7 +320,7 @@ impl TriangleShape {
isect.shading.n = ns;
isect.shading.dpdu = ss;
isect.shading.dpdv = ts.into();
isect.shading.dpdv = ts;
isect.dndu = dndu;
isect.dndv = dndv;
}
@ -346,14 +347,13 @@ impl ShapeTrait for TriangleShape {
self.get_data().area
}
fn pdf(&self, _interaction: Arc<&dyn Interaction>) -> Float {
fn pdf(&self, _interaction: &Interaction) -> Float {
1. / self.area()
}
fn pdf_from_context(&self, ctx: &ShapeSampleContext, wi: Vector3f) -> Float {
let solid_angle = self.solid_angle(ctx.p());
if solid_angle < Self::MIN_SPHERICAL_SAMPLE_AREA
|| solid_angle > Self::MAX_SPHERICAL_SAMPLE_AREA
if (Self::MIN_SPHERICAL_SAMPLE_AREA..Self::MAX_SPHERICAL_SAMPLE_AREA).contains(&solid_angle)
{
let ray = ctx.spawn_ray(wi);
return self.intersect(&ray, None).map_or(0., |isect| {
@ -391,27 +391,23 @@ impl ShapeTrait for TriangleShape {
let data = self.get_data();
let [p0, p1, p2] = data.vertices;
let solid_angle = self.solid_angle(ctx.p());
if solid_angle < Self::MIN_SPHERICAL_SAMPLE_AREA
|| solid_angle > Self::MAX_SPHERICAL_SAMPLE_AREA
if (Self::MIN_SPHERICAL_SAMPLE_AREA..Self::MAX_SPHERICAL_SAMPLE_AREA).contains(&solid_angle)
{
// Sample shape by area and compute incident direction wi
return self
.sample(u)
.map(|mut ss| {
let mut intr_clone = (*ss.intr).clone();
intr_clone.common.time = ctx.time;
ss.intr = Arc::new(intr_clone);
return self.sample(u).and_then(|mut ss| {
let mut intr_clone = (*ss.intr).clone();
intr_clone.common.time = ctx.time;
ss.intr = Arc::new(intr_clone);
let wi = (ss.intr.p() - ctx.p()).normalize();
if wi.norm_squared() == 0. {
return None;
}
let absdot = Vector3f::from(ss.intr.n()).abs_dot(-wi);
let d2 = ctx.p().distance_squared(ss.intr.p());
ss.pdf /= absdot / d2;
if ss.pdf.is_infinite() { None } else { Some(ss) }
})
.flatten();
let wi = (ss.intr.p() - ctx.p()).normalize();
if wi.norm_squared() == 0. {
return None;
}
let absdot = Vector3f::from(ss.intr.n()).abs_dot(-wi);
let d2 = ctx.p().distance_squared(ss.intr.p());
ss.pdf /= absdot / d2;
if ss.pdf.is_infinite() { None } else { Some(ss) }
});
}
// Sample spherical triangle from reference point
@ -434,9 +430,7 @@ impl ShapeTrait for TriangleShape {
pdf = bilinear_pdf(u, &w);
}
let Some((b, tri_pdf)) = sample_spherical_triangle(&[p0, p1, p2], ctx.p(), u) else {
return None;
};
let (b, tri_pdf) = sample_spherical_triangle(&[p0, p1, p2], ctx.p(), u)?;
if tri_pdf == 0. {
return None;
}

1001
src/spectra/color.rs Normal file

File diff suppressed because it is too large Load diff

107
src/spectra/colorspace.rs Normal file
View file

@ -0,0 +1,107 @@
use super::color::{RGB, RGBSigmoidPolynomial, RGBToSpectrumTable, XYZ};
use crate::core::geometry::Point2f;
use crate::core::pbrt::Float;
use crate::spectra::{DenselySampledSpectrum, SampledSpectrum, Spectrum};
use crate::utils::math::SquareMatrix;
use once_cell::sync::Lazy;
use std::cmp::{Eq, PartialEq};
use std::error::Error;
use std::sync::Arc;
/// An RGB color space defined by its red/green/blue primary chromaticities
/// and a whitepoint illuminant, with cached matrices converting between
/// RGB and CIE XYZ.
#[derive(Debug, Clone)]
pub struct RGBColorSpace {
    pub r: Point2f, // red primary chromaticity (xy)
    pub g: Point2f, // green primary chromaticity (xy)
    pub b: Point2f, // blue primary chromaticity (xy)
    pub w: Point2f, // whitepoint chromaticity, derived from `illuminant` in `new`
    pub illuminant: Spectrum,
    pub rgb_to_spectrum_table: Arc<RGBToSpectrumTable>,
    pub xyz_from_rgb: SquareMatrix<Float, 3>,
    pub rgb_from_xyz: SquareMatrix<Float, 3>,
}
impl RGBColorSpace {
    /// Constructs a color space from primary chromaticities and an illuminant.
    ///
    /// Derives the whitepoint from the illuminant's XYZ and solves for the
    /// XYZ<->RGB conversion matrices. Returns an error if the primaries
    /// form a singular matrix.
    pub fn new(
        r: Point2f,
        g: Point2f,
        b: Point2f,
        illuminant: Spectrum,
        rgb_to_spectrum_table: RGBToSpectrumTable,
    ) -> Result<Self, Box<dyn Error>> {
        // Whitepoint chromaticity comes from the illuminant's XYZ.
        let w_xyz: XYZ = illuminant.to_xyz();
        let w = w_xyz.xy();
        // XYZ coordinates of the primaries at unit luminance.
        let r_xyz = XYZ::from_xyy(r, Some(1.0));
        let g_xyz = XYZ::from_xyy(g, Some(1.0));
        let b_xyz = XYZ::from_xyy(b, Some(1.0));
        // Matrix whose columns are the primaries' XYZ coordinates.
        // FIX: the bottom-right entry previously duplicated g_xyz.z()
        // instead of using b_xyz.z(), corrupting the conversion matrices.
        let rgb_values = [
            [r_xyz.x(), g_xyz.x(), b_xyz.x()],
            [r_xyz.y(), g_xyz.y(), b_xyz.y()],
            [r_xyz.z(), g_xyz.z(), b_xyz.z()],
        ];
        let rgb = SquareMatrix::new(rgb_values);
        // Scale each primary so their weighted sum reproduces the whitepoint.
        let c: RGB = rgb.inverse()? * w_xyz;
        let xyz_from_rgb = rgb * SquareMatrix::diag(&[c.r, c.g, c.b]);
        let rgb_from_xyz = xyz_from_rgb
            .inverse()
            .expect("XYZ from RGB matrix is singular");
        Ok(Self {
            r,
            g,
            b,
            w,
            illuminant,
            rgb_to_spectrum_table: Arc::new(rgb_to_spectrum_table),
            xyz_from_rgb,
            rgb_from_xyz,
        })
    }

    /// Converts an RGB triple in this color space to XYZ.
    pub fn to_xyz(&self, rgb: RGB) -> XYZ {
        self.xyz_from_rgb * rgb
    }

    /// Converts an XYZ triple to RGB in this color space.
    pub fn to_rgb(&self, xyz: XYZ) -> RGB {
        self.rgb_from_xyz * xyz
    }

    /// Looks up sigmoid-polynomial spectrum coefficients for `rgb`.
    pub fn to_rgb_coeffs(&self, rgb: RGB) -> RGBSigmoidPolynomial {
        self.rgb_to_spectrum_table.to_polynomial(rgb)
    }

    /// Returns the matrix converting RGB coordinates in `other` to RGB in
    /// `self` (identity when the spaces compare equal).
    pub fn convert_colorspace(&self, other: &RGBColorSpace) -> SquareMatrix<Float, 3> {
        if self == other {
            return SquareMatrix::default();
        }
        self.rgb_from_xyz * other.xyz_from_rgb
    }

    /// The standard sRGB color space (lazily initialized singleton).
    pub fn srgb() -> &'static Self {
        static SRGB_SPACE: Lazy<RGBColorSpace> = Lazy::new(|| {
            let r = Point2f::new(0.64, 0.33);
            let g = Point2f::new(0.30, 0.60);
            let b = Point2f::new(0.15, 0.06);
            let illuminant = Spectrum::std_illuminant_d65();
            let table = RGBToSpectrumTable::srgb();
            RGBColorSpace::new(r, g, b, illuminant, table)
                .expect("Failed to initialize standard sRGB color space")
        });
        &SRGB_SPACE
    }
}
impl PartialEq for RGBColorSpace {
    /// Two color spaces are equal when their primaries and whitepoint match
    /// and they share the same spectrum table (pointer identity). The
    /// illuminant itself is not compared directly; `w` is derived from it.
    fn eq(&self, other: &Self) -> bool {
        self.r == other.r
            && self.g == other.g
            && self.b == other.b
            && self.w == other.w
            && Arc::ptr_eq(&self.rgb_to_spectrum_table, &other.rgb_to_spectrum_table)
    }
}

33
src/spectra/data.rs Normal file
View file

@ -0,0 +1,33 @@
use super::Spectrum;
use super::sampled::{LAMBDA_MAX, LAMBDA_MIN};
use super::simple::{DenselySampledSpectrum, PiecewiseLinearSpectrum};
use crate::core::cie;
use crate::core::pbrt::Float;
use once_cell::sync::Lazy;
/// Builds a CIE matching-curve spectrum: parses the interleaved
/// (wavelength, value) table into a piecewise-linear spectrum, then
/// densely tabulates it over the visible range.
fn create_cie_spectrum(data: &[Float]) -> Spectrum {
    let piecewise = Spectrum::PiecewiseLinear(PiecewiseLinearSpectrum::from_interleaved(data));
    Spectrum::DenselySampled(DenselySampledSpectrum::from_spectrum(
        &piecewise, LAMBDA_MIN, LAMBDA_MAX,
    ))
}
/// CIE X matching curve, densely tabulated over the visible range
/// (computed once on first access).
pub(crate) fn cie_x() -> &'static Spectrum {
    static X: Lazy<Spectrum> = Lazy::new(|| create_cie_spectrum(&cie::CIE_X));
    &X
}

/// CIE Y matching curve, densely tabulated over the visible range.
pub(crate) fn cie_y() -> &'static Spectrum {
    static Y: Lazy<Spectrum> = Lazy::new(|| create_cie_spectrum(&cie::CIE_Y));
    &Y
}

/// CIE Z matching curve, densely tabulated over the visible range.
pub(crate) fn cie_z() -> &'static Spectrum {
    static Z: Lazy<Spectrum> = Lazy::new(|| create_cie_spectrum(&cie::CIE_Z));
    &Z
}

68
src/spectra/mod.rs Normal file
View file

@ -0,0 +1,68 @@
pub mod data;
pub mod rgb;
pub mod sampled;
pub mod simple;
use crate::core::pbrt::Float;
use crate::utils::color::{RGB, XYZ};
use crate::utils::colorspace::RGBColorSpace;
use enum_dispatch::enum_dispatch;
pub use data::*;
pub use rgb::*;
pub use sampled::{CIE_Y_INTEGRAL, LAMBDA_MAX, LAMBDA_MIN};
pub use sampled::{N_SPECTRUM_SAMPLES, SampledSpectrum, SampledWavelengths};
pub use simple::*; // CIE_X, etc
//
/// Common interface for all spectral distributions.
#[enum_dispatch]
pub trait SpectrumTrait {
    /// Spectral value at wavelength `lambda` (nm).
    fn evaluate(&self, lambda: Float) -> Float;
    /// Upper bound on `evaluate` over all wavelengths.
    fn max_value(&self) -> Float;
}

/// Closed set of spectrum implementations; `enum_dispatch` forwards
/// `SpectrumTrait` calls to the active variant without dynamic dispatch.
#[enum_dispatch(SpectrumTrait)]
#[derive(Debug, Clone)]
pub enum Spectrum {
    Constant(ConstantSpectrum),
    DenselySampled(DenselySampledSpectrum),
    PiecewiseLinear(PiecewiseLinearSpectrum),
    Blackbody(BlackbodySpectrum),
    RGBAlbedo(RGBAlbedoSpectrum),
}
impl Spectrum {
    /// Standard D65 daylight illuminant (not yet implemented).
    pub fn std_illuminant_d65() -> Self {
        todo!()
    }

    /// Projects this spectrum onto the CIE X/Y/Z matching curves,
    /// normalized by the Y curve's integral.
    pub fn to_xyz(&self) -> XYZ {
        let xyz = XYZ::new(
            self.inner_product(data::cie_x()),
            self.inner_product(data::cie_y()),
            self.inner_product(data::cie_z()),
        );
        xyz / CIE_Y_INTEGRAL
    }

    /// XYZ projection converted into `cs` RGB coordinates.
    fn to_rgb(&self, cs: &RGBColorSpace) -> RGB {
        cs.to_rgb(self.to_xyz())
    }

    /// Evaluates the spectrum at each wavelength in `wavelengths`.
    pub fn sample(&self, wavelengths: &SampledWavelengths) -> SampledSpectrum {
        SampledSpectrum::from_fn(|i| self.evaluate(wavelengths[i]))
    }

    /// Riemann sum of `self * other` over the integer wavelengths in
    /// [LAMBDA_MIN, LAMBDA_MAX].
    pub fn inner_product(&self, other: &Spectrum) -> Float {
        (LAMBDA_MIN..=LAMBDA_MAX)
            .map(|lambda| {
                let l = lambda as Float;
                self.evaluate(l) * other.evaluate(l)
            })
            .sum()
    }

    /// True if this is the constant-valued variant.
    pub fn is_constant(&self) -> bool {
        matches!(self, Spectrum::Constant(_))
    }
}

175
src/spectra/rgb.rs Normal file
View file

@ -0,0 +1,175 @@
use super::sampled::{
LAMBDA_MAX, LAMBDA_MIN, N_SPECTRUM_SAMPLES, SampledSpectrum, SampledWavelengths,
};
use crate::core::pbrt::Float;
use crate::spectra::{DenselySampledSpectrum, SpectrumTrait};
use crate::utils::color::{RGB, RGBSigmoidPolynomial, XYZ};
use crate::utils::colorspace::RGBColorSpace;
use std::sync::Arc;
/// A reflectance spectrum (values bounded to [0, 1]) fit from an RGB triple
/// via a sigmoid polynomial.
#[derive(Debug, Clone, Copy)]
pub struct RGBAlbedoSpectrum {
    rsp: RGBSigmoidPolynomial,
}

impl RGBAlbedoSpectrum {
    /// Fits a sigmoid-polynomial spectrum to `rgb` in color space `cs`.
    pub fn new(cs: &RGBColorSpace, rgb: RGB) -> Self {
        Self {
            rsp: cs.to_rgb_coeffs(rgb),
        }
    }

    /// Evaluates the polynomial at each sampled wavelength.
    pub fn sample(&self, lambda: &SampledWavelengths) -> SampledSpectrum {
        let mut s = SampledSpectrum::default();
        for i in 0..N_SPECTRUM_SAMPLES {
            s[i] = self.rsp.evaluate(lambda[i]);
        }
        s
    }
}

impl SpectrumTrait for RGBAlbedoSpectrum {
    fn evaluate(&self, lambda: Float) -> Float {
        self.rsp.evaluate(lambda)
    }
    fn max_value(&self) -> Float {
        self.rsp.max_value()
    }
}
/// An RGB-fit spectrum whose values may exceed 1: the polynomial is fit to
/// rgb / (2 * max component) and rescaled on evaluation.
/// NOTE(review): near-duplicate of `RGBUnboundedSpectrum` below — confirm
/// whether both types are needed.
#[derive(Debug, Clone, Copy)]
pub struct UnboundedRGBSpectrum {
    scale: Float,
    rsp: RGBSigmoidPolynomial,
}

impl UnboundedRGBSpectrum {
    /// Builds the spectrum for `rgb` in `cs`.
    /// NOTE(review): takes `cs` by value, unlike the sibling constructors
    /// which take `&RGBColorSpace` — consider unifying (interface change,
    /// so not altered here).
    pub fn new(cs: RGBColorSpace, rgb: RGB) -> Self {
        let m = rgb.r.max(rgb.g).max(rgb.b);
        let scale = 2.0 * m;
        // Normalize into the fit's domain; black stays black.
        let scaled_rgb = if scale != 0.0 {
            rgb / scale
        } else {
            RGB::new(0.0, 0.0, 0.0)
        };
        Self {
            scale,
            rsp: cs.to_rgb_coeffs(scaled_rgb),
        }
    }
}

impl SpectrumTrait for UnboundedRGBSpectrum {
    fn evaluate(&self, lambda: Float) -> Float {
        self.scale * self.rsp.evaluate(lambda)
    }
    fn max_value(&self) -> Float {
        self.scale * self.rsp.max_value()
    }
}
/// A spectrum for an RGB-defined light source: the sigmoid polynomial,
/// scaled and modulated by the color space's illuminant.
#[derive(Debug, Clone, Default)]
pub struct RGBIlluminantSpectrum {
    scale: Float,
    rsp: RGBSigmoidPolynomial,
    illuminant: Option<Arc<DenselySampledSpectrum>>,
}

impl RGBIlluminantSpectrum {
    /// Builds an illuminant spectrum for `rgb` in color space `cs`.
    pub fn new(cs: &RGBColorSpace, rgb: RGB) -> Self {
        let illuminant = &cs.illuminant;
        let densely_sampled =
            DenselySampledSpectrum::from_spectrum(illuminant, LAMBDA_MIN, LAMBDA_MAX);
        // Normalize rgb to the fit's domain by twice its max component;
        // `scale` restores the magnitude on evaluation.
        let m = rgb.max();
        let scale = 2. * m;
        // FIX: normalize whenever scale is nonzero (was `scale == 1.`,
        // which skipped normalization for nearly all colors and mapped
        // valid non-black inputs to a zero polynomial). Matches the
        // sibling UnboundedRGBSpectrum/RGBUnboundedSpectrum constructors.
        let rsp = cs.to_rgb_coeffs(if scale != 0. {
            rgb / scale
        } else {
            RGB::new(0., 0., 0.)
        });
        Self {
            scale,
            rsp,
            illuminant: Some(Arc::new(densely_sampled)),
        }
    }

    /// Samples the scaled polynomial at each wavelength.
    /// NOTE(review): unlike `evaluate`, this does not multiply by the
    /// illuminant — confirm whether that is intentional.
    pub fn sample(&self, lambda: &SampledWavelengths) -> SampledSpectrum {
        if self.illuminant.is_none() {
            return SampledSpectrum::new(0.);
        }
        SampledSpectrum::from_fn(|i| self.scale * self.rsp.evaluate(lambda[i]))
    }
}

impl SpectrumTrait for RGBIlluminantSpectrum {
    /// Polynomial value times illuminant power at `lambda`; 0 when no
    /// illuminant is attached (the `Default` instance).
    fn evaluate(&self, lambda: Float) -> Float {
        match &self.illuminant {
            Some(illuminant) => {
                self.scale * self.rsp.evaluate(lambda) * illuminant.evaluate(lambda)
            }
            None => 0.0,
        }
    }
    /// Conservative bound: product of the component maxima.
    fn max_value(&self) -> Float {
        match &self.illuminant {
            Some(illuminant) => self.scale * self.rsp.max_value() * illuminant.max_value(),
            None => 0.0,
        }
    }
}
/// A plain RGB triple stored as three floats (no spectral fit).
#[derive(Debug, Clone, Copy)]
pub struct RGBSpectrum {
    pub c: [Float; 3],
}

/// An RGB-fit spectrum allowing values above 1.
/// NOTE(review): near-duplicate of `UnboundedRGBSpectrum` above — confirm
/// whether both types are needed.
#[derive(Debug, Clone, Copy)]
pub struct RGBUnboundedSpectrum {
    scale: Float,
    rsp: RGBSigmoidPolynomial,
}

impl Default for RGBUnboundedSpectrum {
    /// The zero spectrum.
    fn default() -> Self {
        Self {
            scale: 0.0,
            rsp: RGBSigmoidPolynomial::default(),
        }
    }
}

impl RGBUnboundedSpectrum {
    /// Fits a sigmoid polynomial to rgb / (2 * max component); `scale`
    /// restores the original magnitude on evaluation.
    pub fn new(cs: &RGBColorSpace, rgb: RGB) -> Self {
        let m = rgb.max();
        let scale = 2.0 * m;
        let rgb_norm = if scale > 0.0 {
            rgb / scale
        } else {
            RGB::new(0.0, 0.0, 0.0)
        };
        let rsp = cs.to_rgb_coeffs(rgb_norm);
        Self { scale, rsp }
    }

    /// Evaluates the scaled polynomial at each sampled wavelength.
    pub fn sample(&self, lambda: &SampledWavelengths) -> SampledSpectrum {
        SampledSpectrum::from_fn(|i| self.scale * self.rsp.evaluate(lambda[i]))
    }
}

impl SpectrumTrait for RGBUnboundedSpectrum {
    fn evaluate(&self, lambda: Float) -> Float {
        self.scale * self.rsp.evaluate(lambda)
    }
    fn max_value(&self) -> Float {
        self.scale * self.rsp.max_value()
    }
}

373
src/spectra/sampled.rs Normal file
View file

@ -0,0 +1,373 @@
use crate::core::pbrt::Float;
use std::ops::{
Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign,
};
use super::{cie_x, cie_y, cie_z};
// Integral of the CIE Y matching curve; normalizes XYZ/luminance values.
pub const CIE_Y_INTEGRAL: Float = 106.856895;
// Number of wavelengths carried per spectral sample.
// NOTE(review): 1200 is unusually large for hero-wavelength sampling
// (pbrt uses 4) and makes every SampledSpectrum 1200 floats on the
// stack — confirm this is intentional.
pub const N_SPECTRUM_SAMPLES: usize = 1200;
// Visible-range bounds in nanometers used for dense tabulation.
pub const LAMBDA_MIN: i32 = 360;
pub const LAMBDA_MAX: i32 = 830;

/// A set of spectral values, one per sampled wavelength.
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct SampledSpectrum {
    pub values: [Float; N_SPECTRUM_SAMPLES],
}

impl Default for SampledSpectrum {
    /// All-zero spectrum.
    fn default() -> Self {
        Self {
            values: [0.0; N_SPECTRUM_SAMPLES],
        }
    }
}
impl SampledSpectrum {
    /// Constant spectrum with every sample equal to `c`.
    pub fn new(c: Float) -> Self {
        Self {
            values: [c; N_SPECTRUM_SAMPLES],
        }
    }

    /// Builds a spectrum by evaluating `cb` at each sample index.
    #[inline(always)]
    pub fn from_fn<F>(cb: F) -> Self
    where
        F: FnMut(usize) -> Float,
    {
        Self {
            values: std::array::from_fn(cb),
        }
    }

    /// Copies up to N_SPECTRUM_SAMPLES leading values from `v`; any
    /// remaining samples stay zero.
    pub fn from_vector(v: Vec<Float>) -> Self {
        let mut values = [0.0; N_SPECTRUM_SAMPLES];
        let count = v.len().min(N_SPECTRUM_SAMPLES);
        values[..count].copy_from_slice(&v[..count]);
        Self { values }
    }

    /// True if every sample is exactly zero.
    pub fn is_black(&self) -> bool {
        self.values.iter().all(|&sample| sample == 0.0)
    }

    /// True if any sample is NaN.
    pub fn has_nans(&self) -> bool {
        self.values.iter().any(|&v| v.is_nan())
    }

    pub fn min_component_value(&self) -> Float {
        self.values.iter().fold(Float::INFINITY, |a, &b| a.min(b))
    }

    pub fn max_component_value(&self) -> Float {
        self.values
            .iter()
            .fold(Float::NEG_INFINITY, |a, &b| a.max(b))
    }

    /// Arithmetic mean over all samples.
    pub fn average(&self) -> Float {
        self.values.iter().sum::<Float>() / (N_SPECTRUM_SAMPLES as Float)
    }

    /// Element-wise e^x.
    pub fn exp(&self) -> SampledSpectrum {
        let values = self.values.map(|v| v.exp());
        let ret = Self { values };
        debug_assert!(!ret.has_nans());
        ret
    }

    /// Integer power by square-and-multiply (O(log power) multiplies).
    pub fn pow_int(&self, mut power: usize) -> Self {
        let mut result = Self::new(1.0);
        let mut base = *self;
        while power > 0 {
            if power % 2 == 1 {
                result *= base;
            }
            base *= base;
            power /= 2;
        }
        result
    }

    /// Clamps every sample to be non-negative.
    pub fn clamp_zero(s: &SampledSpectrum) -> Self {
        let ret = SampledSpectrum::from_fn(|i| s[i].max(0.));
        assert!(!ret.has_nans());
        ret
    }

    /// Element-wise a/b, mapping division by zero to 0.
    pub fn safe_div(a: &SampledSpectrum, b: &SampledSpectrum) -> SampledSpectrum {
        SampledSpectrum::from_fn(|i| if b[i] != 0. { a[i] / b[i] } else { 0. })
    }

    /// Luminance: PDF-weighted average against the CIE Y matching curve,
    /// normalized by the curve's integral.
    pub fn y(&self, lambda: &SampledWavelengths) -> Float {
        let ys = cie_y().sample(lambda);
        let pdf = lambda.pdf();
        SampledSpectrum::safe_div(&(ys * *self), &pdf).average() / CIE_Y_INTEGRAL
    }
}
impl<'a> IntoIterator for &'a SampledSpectrum {
    type Item = &'a Float;
    type IntoIter = std::slice::Iter<'a, Float>;
    /// Iterates the sample values by shared reference.
    fn into_iter(self) -> Self::IntoIter {
        self.values.iter()
    }
}

impl<'a> IntoIterator for &'a mut SampledSpectrum {
    type Item = &'a mut Float;
    type IntoIter = std::slice::IterMut<'a, Float>;
    /// Iterates the sample values by mutable reference.
    fn into_iter(self) -> Self::IntoIter {
        self.values.iter_mut()
    }
}

impl Index<usize> for SampledSpectrum {
    type Output = Float;
    /// Direct access to sample `i`; panics if out of range.
    fn index(&self, i: usize) -> &Self::Output {
        &self.values[i]
    }
}

impl IndexMut<usize> for SampledSpectrum {
    fn index_mut(&mut self, i: usize) -> &mut Self::Output {
        &mut self.values[i]
    }
}
// Element-wise arithmetic operators for SampledSpectrum. All binary
// spectrum-spectrum operators pair samples by index; Float operands
// apply uniformly to every sample.
impl Add for SampledSpectrum {
    type Output = Self;
    fn add(self, rhs: Self) -> Self {
        let mut ret = self;
        for i in 0..N_SPECTRUM_SAMPLES {
            ret.values[i] += rhs.values[i];
        }
        ret
    }
}

impl AddAssign for SampledSpectrum {
    fn add_assign(&mut self, rhs: Self) {
        for i in 0..N_SPECTRUM_SAMPLES {
            self.values[i] += rhs.values[i];
        }
    }
}

impl Sub for SampledSpectrum {
    type Output = Self;
    fn sub(self, rhs: Self) -> Self {
        let mut ret = self;
        for i in 0..N_SPECTRUM_SAMPLES {
            ret.values[i] -= rhs.values[i];
        }
        ret
    }
}

impl SubAssign for SampledSpectrum {
    fn sub_assign(&mut self, rhs: Self) {
        for i in 0..N_SPECTRUM_SAMPLES {
            self.values[i] -= rhs.values[i];
        }
    }
}

// scalar - spectrum (element-wise).
impl Sub<SampledSpectrum> for Float {
    type Output = SampledSpectrum;
    fn sub(self, rhs: SampledSpectrum) -> SampledSpectrum {
        SampledSpectrum::from_fn(|i| self - rhs[i])
    }
}

impl Mul for SampledSpectrum {
    type Output = Self;
    fn mul(self, rhs: Self) -> Self {
        let mut ret = self;
        for i in 0..N_SPECTRUM_SAMPLES {
            ret.values[i] *= rhs.values[i];
        }
        ret
    }
}

impl MulAssign for SampledSpectrum {
    fn mul_assign(&mut self, rhs: Self) {
        for i in 0..N_SPECTRUM_SAMPLES {
            self.values[i] *= rhs.values[i];
        }
    }
}

impl Mul<Float> for SampledSpectrum {
    type Output = Self;
    fn mul(self, rhs: Float) -> Self {
        let mut ret = self;
        for i in 0..N_SPECTRUM_SAMPLES {
            ret.values[i] *= rhs;
        }
        ret
    }
}

// Commutative scalar multiply, delegating to spectrum * scalar.
impl Mul<SampledSpectrum> for Float {
    type Output = SampledSpectrum;
    fn mul(self, rhs: SampledSpectrum) -> SampledSpectrum {
        rhs * self
    }
}

impl MulAssign<Float> for SampledSpectrum {
    fn mul_assign(&mut self, rhs: Float) {
        for i in 0..N_SPECTRUM_SAMPLES {
            self.values[i] *= rhs;
        }
    }
}

// Division asserts nonzero divisors in debug builds only; use
// SampledSpectrum::safe_div for inputs that may contain zeros.
impl DivAssign for SampledSpectrum {
    fn div_assign(&mut self, rhs: Self) {
        for i in 0..N_SPECTRUM_SAMPLES {
            debug_assert_ne!(0.0, rhs.values[i]);
            self.values[i] /= rhs.values[i];
        }
    }
}

impl Div for SampledSpectrum {
    type Output = Self;
    fn div(self, rhs: Self) -> Self::Output {
        let mut ret = self;
        ret /= rhs;
        ret
    }
}

impl Div<Float> for SampledSpectrum {
    type Output = Self;
    fn div(self, rhs: Float) -> Self::Output {
        debug_assert_ne!(rhs, 0.0);
        let mut ret = self;
        for i in 0..N_SPECTRUM_SAMPLES {
            ret.values[i] /= rhs;
        }
        ret
    }
}

impl DivAssign<Float> for SampledSpectrum {
    fn div_assign(&mut self, rhs: Float) {
        debug_assert_ne!(rhs, 0.0);
        for i in 0..N_SPECTRUM_SAMPLES {
            self.values[i] /= rhs;
        }
    }
}

impl Neg for SampledSpectrum {
    type Output = Self;
    fn neg(self) -> Self::Output {
        let mut ret = SampledSpectrum::new(0.0);
        for i in 0..N_SPECTRUM_SAMPLES {
            ret.values[i] = -self.values[i];
        }
        ret
    }
}
/// The wavelengths sampled for one spectral evaluation, together with the
/// PDF each wavelength was drawn with (pdf[i] == 0 marks a terminated
/// secondary wavelength).
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct SampledWavelengths {
    pub lambda: [Float; N_SPECTRUM_SAMPLES],
    pub pdf: [Float; N_SPECTRUM_SAMPLES],
}
impl SampledWavelengths {
    /// The per-sample PDF values packed into a SampledSpectrum.
    pub fn pdf(&self) -> SampledSpectrum {
        SampledSpectrum::from_vector(self.pdf.to_vec())
    }

    /// True if all secondary wavelengths have been terminated (only the
    /// first sample still carries probability).
    pub fn secondary_terminated(&self) -> bool {
        for i in 1..N_SPECTRUM_SAMPLES {
            if self.pdf[i] != 0.0 {
                return false;
            }
        }
        true
    }

    /// Terminates all secondary wavelengths, rescaling the primary PDF so
    /// Monte Carlo estimates stay unbiased.
    pub fn terminate_secondary(self) -> Self {
        if self.secondary_terminated() {
            return self;
        }
        let mut new_pdf = [0.0; N_SPECTRUM_SAMPLES];
        new_pdf[0] = self.pdf[0] / (N_SPECTRUM_SAMPLES as Float);
        Self {
            pdf: new_pdf,
            ..self
        }
    }

    /// In-place variant of `terminate_secondary`.
    /// FIX: this was an empty stub; it now mirrors `terminate_secondary`.
    pub fn terminate_secondary_inplace(&mut self) {
        if self.secondary_terminated() {
            return;
        }
        self.pdf[0] /= N_SPECTRUM_SAMPLES as Float;
        for p in self.pdf.iter_mut().skip(1) {
            *p = 0.0;
        }
    }

    /// Stratified uniform wavelength sampling over [lambda_min, lambda_max].
    pub fn sample_uniform(u: Float, lambda_min: Float, lambda_max: Float) -> Self {
        let mut lambda = [0.0; N_SPECTRUM_SAMPLES];
        // FIX: lerp to lambda_max (was lambda_min twice, which pinned the
        // first sample to the lower bound regardless of u).
        lambda[0] = crate::core::pbrt::lerp(u, lambda_min, lambda_max);
        let delta = (lambda_max - lambda_min) / N_SPECTRUM_SAMPLES as Float;
        for i in 1..N_SPECTRUM_SAMPLES {
            lambda[i] = lambda[i - 1] + delta;
            // Wrap around so every sample stays inside the range.
            if lambda[i] > lambda_max {
                lambda[i] = lambda_min + (lambda[i] - lambda_max);
            }
        }
        let pdf = [1. / (lambda_max - lambda_min); N_SPECTRUM_SAMPLES];
        Self { lambda, pdf }
    }

    /// Maps a uniform sample u to a visible wavelength, weighted toward
    /// the eye's sensitivity (pbrt's SampleVisibleWavelengths).
    pub fn sample_visible_wavelengths(u: Float) -> Float {
        538.0 - 138.888889 * Float::atanh(0.85691062 - 1.82750197 * u)
    }

    /// PDF matching `sample_visible_wavelengths`.
    pub fn visible_wavelengths_pdf(lambda: Float) -> Float {
        // FIX: the visible range is inclusive of 830 nm, and the cosh term
        // is squared (was sqrt), matching pbrt's VisibleWavelengthsPDF:
        // 0.0039398042 / cosh^2(0.0072 (lambda - 538)).
        if !(360.0..=830.0).contains(&lambda) {
            return 0.0;
        }
        let c = Float::cosh(0.0072 * (lambda - 538.0));
        0.0039398042 / (c * c)
    }

    /// Draws stratified visible-wavelength samples with matching PDFs.
    pub fn sample_visible(u: Float) -> Self {
        let mut lambda = [0.0; N_SPECTRUM_SAMPLES];
        let mut pdf = [0.0; N_SPECTRUM_SAMPLES];
        for i in 0..N_SPECTRUM_SAMPLES {
            let mut up = u + i as Float / N_SPECTRUM_SAMPLES as Float;
            if up > 1.0 {
                up -= 1.0;
            }
            lambda[i] = Self::sample_visible_wavelengths(up);
            pdf[i] = Self::visible_wavelengths_pdf(lambda[i]);
        }
        Self { lambda, pdf }
    }
}
impl Index<usize> for SampledWavelengths {
    type Output = Float;
    /// Indexing yields the i-th wavelength (not its PDF).
    fn index(&self, i: usize) -> &Self::Output {
        &self.lambda[i]
    }
}

impl IndexMut<usize> for SampledWavelengths {
    fn index_mut(&mut self, i: usize) -> &mut Self::Output {
        &mut self.lambda[i]
    }
}

272
src/spectra/simple.rs Normal file
View file

@ -0,0 +1,272 @@
use super::sampled::{LAMBDA_MAX, LAMBDA_MIN};
use crate::core::pbrt::Float;
use crate::spectra::{
N_SPECTRUM_SAMPLES, SampledSpectrum, SampledWavelengths, Spectrum, SpectrumTrait,
};
use std::hash::{Hash, Hasher};
/// A spectrum with the same value at every wavelength.
#[derive(Debug, Clone, Copy)]
pub struct ConstantSpectrum {
    c: Float,
}

impl ConstantSpectrum {
    pub fn new(c: Float) -> Self {
        Self { c }
    }
}

impl SpectrumTrait for ConstantSpectrum {
    /// Wavelength-independent value.
    fn evaluate(&self, _lambda: Float) -> Float {
        self.c
    }
    fn max_value(&self) -> Float {
        self.c
    }
}
/// A spectrum tabulated at every integer wavelength (1 nm steps) over
/// the inclusive range [lambda_min, lambda_max].
#[derive(Debug, Clone)]
pub struct DenselySampledSpectrum {
    lambda_min: i32, // inclusive lower bound (nm)
    lambda_max: i32, // inclusive upper bound (nm)
    values: Vec<Float>,
}
impl DenselySampledSpectrum {
    /// Creates a zero-valued spectrum covering [lambda_min, lambda_max]
    /// at 1 nm resolution (empty table if the range is inverted).
    pub fn new(lambda_min: i32, lambda_max: i32) -> Self {
        let n_values = (lambda_max - lambda_min + 1).max(0) as usize;
        Self {
            lambda_min,
            lambda_max,
            values: vec![0.0; n_values],
        }
    }

    /// Tabulates `spec` at every integer wavelength in the range.
    pub fn from_spectrum(spec: &Spectrum, lambda_min: i32, lambda_max: i32) -> Self {
        // Shares the tabulation loop with `from_function`.
        Self::from_function(|l| spec.evaluate(l), lambda_min, lambda_max)
    }

    /// Tabulates `f` at every integer wavelength in the range.
    /// (An inverted range yields an empty table; the loop below runs
    /// zero times in that case.)
    pub fn from_function<F>(f: F, lambda_min: i32, lambda_max: i32) -> Self
    where
        F: Fn(Float) -> Float,
    {
        let mut s = Self::new(lambda_min, lambda_max);
        for lambda in lambda_min..=lambda_max {
            let index = (lambda - lambda_min) as usize;
            s.values[index] = f(lambda as Float);
        }
        s
    }

    /// Nearest-integer lookup at each wavelength in `lambda`; wavelengths
    /// outside this spectrum's range yield 0.
    pub fn sample(&self, lambda: &SampledWavelengths) -> SampledSpectrum {
        let mut s = SampledSpectrum::default();
        for i in 0..N_SPECTRUM_SAMPLES {
            // FIX: index relative to self.lambda_min (consistent with
            // `evaluate`; the old code used the global LAMBDA_MIN) using
            // signed arithmetic, so wavelengths below the range can no
            // longer underflow the usize subtraction and panic.
            let offset = lambda[i].round() as i32 - self.lambda_min;
            if offset < 0 || offset as usize >= self.values.len() {
                s[i] = 0.;
            } else {
                s[i] = self.values[offset as usize];
            }
        }
        s
    }

    pub fn min_component_value(&self) -> Float {
        self.values.iter().fold(Float::INFINITY, |a, &b| a.min(b))
    }

    pub fn max_component_value(&self) -> Float {
        self.values
            .iter()
            .fold(Float::NEG_INFINITY, |a, &b| a.max(b))
    }

    /// Mean of the tabulated values (0 for an empty table).
    /// FIX: divides by the number of stored samples; the old code divided
    /// by the unrelated global N_SPECTRUM_SAMPLES constant.
    pub fn average(&self) -> Float {
        if self.values.is_empty() {
            return 0.0;
        }
        self.values.iter().sum::<Float>() / (self.values.len() as Float)
    }

    /// Element-wise division of the table by `rhs`, mapping division by
    /// zero to 0; only the overlapping prefix of the two arrays is paired.
    /// FIX: the old code allocated a single-slot result (`Self::new(1, 1)`)
    /// and then wrote N_SPECTRUM_SAMPLES entries, panicking out of bounds;
    /// the result now covers this spectrum's own wavelength range.
    pub fn safe_div(&self, rhs: SampledSpectrum) -> Self {
        let mut r = Self::new(self.lambda_min, self.lambda_max);
        let n = self.values.len().min(N_SPECTRUM_SAMPLES);
        for i in 0..n {
            r.values[i] = if rhs[i] != 0.0 {
                self.values[i] / rhs.values[i]
            } else {
                0.0
            }
        }
        r
    }

    /// Multiplies every tabulated value by `factor` in place.
    pub fn scale(&mut self, factor: Float) {
        for v in &mut self.values {
            *v *= factor;
        }
    }
}
// Equality compares the wavelength range and the exact bit patterns of the
// stored values (so NaN == NaN and 0.0 != -0.0 here), keeping it consistent
// with the bitwise Hash impl below.
impl PartialEq for DenselySampledSpectrum {
    fn eq(&self, other: &Self) -> bool {
        if self.lambda_min != other.lambda_min
            || self.lambda_max != other.lambda_max
            || self.values.len() != other.values.len()
        {
            return false;
        }
        self.values
            .iter()
            .zip(&other.values)
            .all(|(a, b)| a.to_bits() == b.to_bits())
    }
}

// Bitwise comparison is reflexive, so the Eq marker is sound.
impl Eq for DenselySampledSpectrum {}

// Hashes the range plus each value's bit pattern; must agree with the
// bitwise equality above (equal spectra hash equally).
impl Hash for DenselySampledSpectrum {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.lambda_min.hash(state);
        self.lambda_max.hash(state);
        for v in &self.values {
            v.to_bits().hash(state);
        }
    }
}
impl SpectrumTrait for DenselySampledSpectrum {
    /// Looks up the tabulated value at the nearest integer wavelength;
    /// wavelengths outside the stored range evaluate to zero.
    fn evaluate(&self, lambda: Float) -> Float {
        let offset = (lambda.round() as i32) - self.lambda_min;
        usize::try_from(offset)
            .ok()
            .and_then(|idx| self.values.get(idx))
            .copied()
            .unwrap_or(0.0)
    }

    /// Largest tabulated value (Float::MIN for an empty table, as before).
    fn max_value(&self) -> Float {
        self.values.iter().copied().fold(Float::MIN, Float::max)
    }
}
/// A spectrum defined by (wavelength, value) control points with linear
/// interpolation between adjacent points.
#[derive(Debug, Clone)]
pub struct PiecewiseLinearSpectrum {
    // (lambda, value) pairs, kept sorted ascending by lambda.
    samples: Vec<(Float, Float)>,
}
impl PiecewiseLinearSpectrum {
    /// Builds a spectrum from interleaved `[l0, v0, l1, v1, ...]` data,
    /// sorting the resulting samples by wavelength.
    ///
    /// Panics if `data` has an odd number of elements.
    pub fn from_interleaved(data: &[Float]) -> Self {
        assert!(
            data.len().is_multiple_of(2),
            "Interleaved data must have an even number of elements"
        );
        let mut samples: Vec<(Float, Float)> =
            data.chunks_exact(2).map(|pair| (pair[0], pair[1])).collect();
        samples.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
        Self { samples }
    }
}
impl SpectrumTrait for PiecewiseLinearSpectrum {
    /// Linearly interpolates between the two samples bracketing `lambda`,
    /// clamping to the end values outside the sampled range; an empty sample
    /// list evaluates to zero.
    fn evaluate(&self, lambda: Float) -> Float {
        let (first, last) = match (self.samples.first(), self.samples.last()) {
            (Some(f), Some(l)) => (*f, *l),
            _ => return 0.0,
        };
        if lambda <= first.0 {
            return first.1;
        }
        if lambda >= last.0 {
            return last.1;
        }
        // Index of the first sample whose wavelength is >= lambda.
        let hi = self.samples.partition_point(|s| s.0 < lambda);
        let (l0, v0) = self.samples[hi - 1];
        let (l1, v1) = self.samples[hi];
        let t = (lambda - l0) / (l1 - l0);
        (1.0 - t) * v0 + t * v1
    }

    /// Largest sampled value, or 0 if there are no samples (or all are
    /// negative, as before).
    fn max_value(&self) -> Float {
        self.samples
            .iter()
            .fold(0.0, |best, &(_, value)| best.max(value))
    }
}
/// The emission spectrum of an ideal blackbody at a fixed temperature,
/// scaled so that its spectral peak evaluates to 1.
#[derive(Debug, Clone, Copy)]
pub struct BlackbodySpectrum {
    // Temperature in Kelvin.
    temperature: Float,
    // 1 / (Planck's law at the Wien peak wavelength); 0 if the peak is 0.
    normalization_factor: Float,
}
// Planck's Law
impl BlackbodySpectrum {
    // Speed of light, m/s.
    const C: Float = 299792458.0;
    // Planck constant, J*s.
    const H: Float = 6.62606957e-34;
    // Boltzmann constant, J/K.
    const KB: Float = 1.3806488e-23;

    /// Creates a blackbody spectrum for `temperature` (Kelvin), precomputing
    /// the normalization so the spectral peak evaluates to 1.
    pub fn new(temperature: Float) -> Self {
        // Physical constants
        // Wien's displacement law: wavelength (nm) of peak emission.
        let lambda_max = 2.8977721e-3 / temperature * 1e9;
        let max_val = Self::planck_law(lambda_max, temperature);
        Self {
            temperature,
            normalization_factor: if max_val > 0.0 { 1.0 / max_val } else { 0.0 },
        }
    }

    // Spectral radiance of a blackbody at wavelength `lambda_nm` (nanometers)
    // and temperature `temp` (Kelvin); returns 0 for non-positive
    // temperatures or when the exponential overflows to infinity.
    fn planck_law(lambda_nm: Float, temp: Float) -> Float {
        if temp <= 0.0 {
            return 0.0;
        }
        let lambda_m = lambda_nm * 1e-9;
        let c1 = 2.0 * Self::H * Self::C * Self::C;
        let c2 = (Self::H * Self::C) / Self::KB;
        let numerator = c1 / lambda_m.powi(5);
        let denominator = (c2 / (lambda_m * temp)).exp() - 1.0;
        if denominator.is_infinite() {
            0.0
        } else {
            numerator / denominator
        }
    }

    /// Normalized radiance at each of the sampled wavelengths.
    pub fn sample(&self, lambda: &SampledWavelengths) -> SampledSpectrum {
        SampledSpectrum::from_fn(|i| {
            Self::planck_law(lambda[i], self.temperature) * self.normalization_factor
        })
    }
}
impl SpectrumTrait for BlackbodySpectrum {
    /// Normalized emitted radiance at `lambda` (nm); peaks at 1.0.
    fn evaluate(&self, lambda: Float) -> Float {
        Self::planck_law(lambda, self.temperature) * self.normalization_factor
    }

    /// Maximum of `evaluate` over all wavelengths.
    ///
    /// Planck's law peaks at the Wien wavelength, and the constructor chose
    /// `normalization_factor` as the reciprocal of that peak, so the result
    /// is 1.0 (or 0.0 for a degenerate temperature). The original omitted the
    /// normalization factor and returned the huge unnormalized peak,
    /// inconsistent with `evaluate`.
    fn max_value(&self) -> Float {
        let lambda_max = 2.8977721e-3 / self.temperature * 1e9;
        Self::planck_law(lambda_max, self.temperature) * self.normalization_factor
    }
}

View file

@ -4,12 +4,14 @@ use std::ops::{
Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign,
};
use super::spectrum::Spectrum;
use crate::core::pbrt::{Float, lerp};
use crate::geometry::Point2f;
use crate::spectra::Spectrum;
use crate::utils::math::SquareMatrix;
use once_cell::sync::Lazy;
use enum_dispatch::enum_dispatch;
pub trait Triplet {
fn from_triplet(c1: Float, c2: Float, c3: Float) -> Self;
}
@ -250,7 +252,7 @@ impl fmt::Display for XYZ {
}
}
#[derive(Debug, Clone)]
#[derive(Debug, Default, Copy, Clone)]
pub struct RGB {
pub r: Float,
pub g: Float,
@ -280,6 +282,14 @@ impl RGB {
pub fn average(&self) -> Float {
(self.r + self.g + self.b) / 3.0
}
/// Largest of the three channel values.
pub fn max(&self) -> Float {
    self.r.max(self.g).max(self.b)
}
/// Clamps each channel independently to be non-negative.
///
/// Fixes a copy-paste bug: the green component previously read `rgb.b`.
pub fn clamp_zero(rgb: Self) -> Self {
    RGB::new(rgb.r.max(0.), rgb.g.max(0.), rgb.b.max(0.))
}
}
impl Index<usize> for RGB {
@ -449,6 +459,52 @@ impl fmt::Display for RGB {
}
}
// Applies a 3x3 matrix to an XYZ triple, producing an RGB triple.
// NOTE(review): the asymmetric output type assumes the matrix is an
// RGB-from-XYZ conversion matrix (as used by RGBColorSpace::to_rgb) —
// confirm at call sites.
impl Mul<XYZ> for SquareMatrix<Float, 3> {
    type Output = RGB;
    fn mul(self, v: XYZ) -> RGB {
        // Standard row-times-column matrix/vector product.
        let r = self[0][0] * v.x + self[0][1] * v.y + self[0][2] * v.z;
        let g = self[1][0] * v.x + self[1][1] * v.y + self[1][2] * v.z;
        let b = self[2][0] * v.x + self[2][1] * v.y + self[2][2] * v.z;
        RGB::new(r, g, b)
    }
}
// Applies a 3x3 matrix to an RGB triple, producing an XYZ triple.
// NOTE(review): assumes the matrix is an XYZ-from-RGB conversion matrix
// (as used by RGBColorSpace::to_xyz) — confirm at call sites.
impl Mul<RGB> for SquareMatrix<Float, 3> {
    type Output = XYZ;
    fn mul(self, v: RGB) -> XYZ {
        // Standard row-times-column matrix/vector product.
        let x = self[0][0] * v.r + self[0][1] * v.g + self[0][2] * v.b;
        let y = self[1][0] * v.r + self[1][1] * v.g + self[1][2] * v.b;
        let z = self[2][0] * v.r + self[2][1] * v.g + self[2][2] * v.b;
        XYZ::new(x, y, z)
    }
}
/// Explicit matrix-times-color helpers. Unlike the `Mul` impls above these
/// keep the input and output color types the same, making the intended
/// conversion direction visible at the call site.
pub trait MatrixMulColor {
    // Multiplies the matrix by an RGB triple.
    fn mul_rgb(&self, v: RGB) -> RGB;
    // Multiplies the matrix by an XYZ triple.
    fn mul_xyz(&self, v: XYZ) -> XYZ;
}

impl MatrixMulColor for SquareMatrix<Float, 3> {
    fn mul_rgb(&self, v: RGB) -> RGB {
        let m = self;
        RGB::new(
            m[0][0] * v.r + m[0][1] * v.g + m[0][2] * v.b,
            m[1][0] * v.r + m[1][1] * v.g + m[1][2] * v.b,
            m[2][0] * v.r + m[2][1] * v.g + m[2][2] * v.b,
        )
    }
    fn mul_xyz(&self, v: XYZ) -> XYZ {
        let m = self;
        XYZ::new(
            m[0][0] * v.x + m[0][1] * v.y + m[0][2] * v.z,
            m[1][0] * v.x + m[1][1] * v.y + m[1][2] * v.z,
            m[2][0] * v.x + m[2][1] * v.y + m[2][2] * v.z,
        )
    }
}
pub const RES: usize = 64;
pub type CoefficientArray = [[[[[Float; 3]; RES]; RES]; RES]; 3];
@ -458,6 +514,14 @@ pub struct RGBToSpectrumTable {
coeffs: &'static CoefficientArray,
}
impl RGBToSpectrumTable {
    /// Returns the sRGB RGB-to-spectrum coefficient table.
    ///
    /// Not yet wired up: the static z-node/coefficient arrays still need to
    /// be generated and linked in, so this currently panics via `todo!`.
    pub fn srgb() -> Self {
        // use crate::core::constants::{RGB_TO_SPECTRUM_Z_NODES, RGB_TO_SPECTRUM_COEFFS};
        // Self::new(&RGB_TO_SPECTRUM_Z_NODES, &RGB_TO_SPECTRUM_COEFFS)
        todo!("Link the static constant arrays for sRGB coefficients here")
    }
}
#[derive(Debug, Default, Copy, Clone)]
pub struct RGBSigmoidPolynomial {
c0: Float,
@ -485,7 +549,7 @@ impl RGBSigmoidPolynomial {
pub fn max_value(&self) -> Float {
let lambda = -self.c1 / (2.0 * self.c0);
let result = self.evaluate(360.0).max(self.evaluate(830.0));
if lambda >= 360.0 && lambda <= 830.0 {
if (360.0..830.0).contains(&lambda) {
return result.max(self.evaluate(lambda));
}
result
@ -519,12 +583,10 @@ impl RGBToSpectrumTable {
} else {
maxc = 2;
}
} else if rgb[1] > rgb[2] {
maxc = 1;
} else {
if rgb[1] > rgb[2] {
maxc = 1;
} else {
maxc = 2;
}
maxc = 2;
}
let z = rgb[maxc];
@ -538,6 +600,7 @@ impl RGBToSpectrumTable {
let dy = (y - yi) as usize;
let dz = (z - self.z_nodes[zi]) / (self.z_nodes[zi + 1] - self.z_nodes[zi]);
let mut c = [0.0; 3];
#[allow(clippy::needless_range_loop)]
for i in 0..3 {
let co = |dx: usize, dy: usize, dz: usize| {
self.coeffs[maxc][zi as usize + dz][yi as usize + dy][xi as usize + dx][i]
@ -592,7 +655,8 @@ pub fn white_balance(src_white: Point2f, target_white: Point2f) -> SquareMatrix<
XYZ_FROM_LMS * lms_correct * LMS_FROM_XYZ
}
pub trait ColorEncoding: 'static + Send + Sync + fmt::Debug + fmt::Display {
#[enum_dispatch]
pub trait ColorEncodingTrait: 'static + Send + Sync + fmt::Debug + fmt::Display {
fn from_linear_slice(&self, vin: &[Float], vout: &mut [u8]);
fn to_linear_slice(&self, vin: &[u8], vout: &mut [Float]);
fn to_float_linear(&self, v: Float) -> Float;
@ -601,9 +665,22 @@ pub trait ColorEncoding: 'static + Send + Sync + fmt::Debug + fmt::Display {
}
}
#[derive(Debug)]
/// All supported transfer-function encodings; `enum_dispatch` forwards
/// `ColorEncodingTrait` calls to the wrapped encoding without dynamic
/// dispatch.
#[enum_dispatch(ColorEncodingTrait)]
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum ColorEncoding {
    Linear(LinearEncoding),
    SRGB(SRGBEncoding),
}
impl fmt::Display for ColorEncoding {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Encoding")
}
}
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub struct LinearEncoding;
impl ColorEncoding for LinearEncoding {
impl ColorEncodingTrait for LinearEncoding {
fn from_linear_slice(&self, vin: &[Float], vout: &mut [u8]) {
for (i, &v) in vin.iter().enumerate() {
vout[i] = (v.clamp(0.0, 1.0) * 255.0 + 0.5) as u8;
@ -625,9 +702,9 @@ impl fmt::Display for LinearEncoding {
}
}
#[derive(Debug)]
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub struct SRGBEncoding;
impl ColorEncoding for SRGBEncoding {
impl ColorEncodingTrait for SRGBEncoding {
fn from_linear_slice(&self, vin: &[Float], vout: &mut [u8]) {
for (i, &v_linear) in vin.iter().enumerate() {
let v = v_linear.clamp(0.0, 1.0);
@ -662,8 +739,8 @@ impl fmt::Display for SRGBEncoding {
}
}
pub static LINEAR: Lazy<&'static dyn ColorEncoding> = Lazy::new(|| &LinearEncoding);
pub static SRGB: Lazy<&'static dyn ColorEncoding> = Lazy::new(|| &SRGBEncoding);
/// Shared linear (identity transfer function) color encoding instance.
pub const LINEAR: ColorEncoding = ColorEncoding::Linear(LinearEncoding);
/// Shared sRGB gamma color encoding instance.
pub const SRGB: ColorEncoding = ColorEncoding::SRGB(SRGBEncoding);
const SRGB_TO_LINEAR_LUT: [Float; 256] = [
0.0000000000,

View file

@ -1,20 +1,17 @@
use super::color::{RGB, RGBSigmoidPolynomial, RGBToSpectrumTable, XYZ};
use super::math::SquareMatrix;
use super::spectrum::{DenselySampledSpectrum, SampledSpectrum, Spectrum};
use crate::core::pbrt::Float;
use crate::geometry::Point2f;
use crate::spectra::{DenselySampledSpectrum, SampledSpectrum, Spectrum};
use once_cell::sync::Lazy;
use std::cmp::{Eq, PartialEq};
use std::error::Error;
use std::sync::Arc;
pub enum ColorEncoding {
Linear,
SRGB,
}
#[derive(Debug, Clone)]
pub struct RGBColorspace {
pub struct RGBColorSpace {
pub r: Point2f,
pub g: Point2f,
pub b: Point2f,
@ -25,7 +22,7 @@ pub struct RGBColorspace {
pub rgb_from_xyz: SquareMatrix<Float, 3>,
}
impl RGBColorspace {
impl RGBColorSpace {
pub fn new(
r: Point2f,
g: Point2f,
@ -33,7 +30,7 @@ impl RGBColorspace {
illuminant: Spectrum,
rgb_to_spectrum_table: RGBToSpectrumTable,
) -> Result<Self, Box<dyn Error>> {
let w_xyz = illuminant.to_xyz();
let w_xyz: XYZ = illuminant.to_xyz();
let w = w_xyz.xy();
let r_xyz = XYZ::from_xyy(r, Some(1.0));
let g_xyz = XYZ::from_xyy(g, Some(1.0));
@ -44,12 +41,11 @@ impl RGBColorspace {
[r_xyz.z(), g_xyz.z(), g_xyz.z()],
];
let rgb = SquareMatrix::new(rgb_values);
let c = rgb.inverse()? * w_xyz;
let xyz_from_rgb_m = [[c[0], 0.0, 0.0], [0.0, c[1], 0.0], [0.0, 0.0, c[2]]];
let xyz_from_rgb = rgb * SquareMatrix::new(xyz_from_rgb_m);
let c: RGB = rgb.inverse()? * w_xyz;
let xyz_from_rgb = rgb * SquareMatrix::diag(&[c.r, c.g, c.b]);
let rgb_from_xyz = xyz_from_rgb
.inverse()
.expect("Failed to invert the XYZfromRGB matrix. Is it singular?");
.expect("XYZ from RGB matrix is singular");
Ok(Self {
r,
@ -64,27 +60,43 @@ impl RGBColorspace {
}
pub fn to_xyz(&self, rgb: RGB) -> XYZ {
self.xyz_from_rgb.transform_to_xyz(rgb)
self.xyz_from_rgb * rgb
}
pub fn to_rgb(&self, xyz: XYZ) -> RGB {
self.rgb_from_xyz.transform_to_rgb(xyz)
self.rgb_from_xyz * xyz
}
pub fn to_rgb_coeffs(&self, rgb: RGB) -> RGBSigmoidPolynomial {
self.rgb_to_spectrum_table.to_polynomial(rgb)
}
pub fn convert_colorspace(&self, other: &RGBColorspace) -> SquareMatrix<Float, 3> {
pub fn convert_colorspace(&self, other: &RGBColorSpace) -> SquareMatrix<Float, 3> {
if self == other {
return SquareMatrix::default();
}
self.rgb_from_xyz * other.xyz_from_rgb
}
/// Returns the lazily-initialized standard sRGB color space (Rec. 709 /
/// sRGB primaries with a D65 illuminant).
///
/// NOTE(review): depends on `RGBToSpectrumTable::srgb()`, which is still a
/// `todo!` stub, so the first call currently panics — confirm before use.
pub fn srgb() -> &'static Self {
    static SRGB_SPACE: Lazy<RGBColorSpace> = Lazy::new(|| {
        // sRGB chromaticity coordinates for the three primaries.
        let r = Point2f::new(0.64, 0.33);
        let g = Point2f::new(0.30, 0.60);
        let b = Point2f::new(0.15, 0.06);
        let illuminant = Spectrum::std_illuminant_d65();
        let table = RGBToSpectrumTable::srgb();
        RGBColorSpace::new(r, g, b, illuminant, table)
            .expect("Failed to initialize standard sRGB color space")
    });
    &SRGB_SPACE
}
}
impl PartialEq for RGBColorspace {
impl PartialEq for RGBColorSpace {
fn eq(&self, other: &Self) -> bool {
self.r == other.r
&& self.g == other.g

View file

@ -1,14 +1,27 @@
use crate::core::pbrt::{Float, lerp};
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash, Hasher};
use std::ops::{Index, IndexMut};
use std::ops::{Add, Index, IndexMut, Mul, Sub};
use std::sync::RwLock;
use crate::geometry::{Bounds2i, Bounds3i, Point2i, Point3f, Point3i, Vector3f, Vector3i};
use crate::geometry::{
Bounds2i, Bounds3f, Bounds3i, Point2i, Point3f, Point3i, Vector2i, Vector3f, Vector3i,
};
#[derive(Debug)]
/// Marker for value types that support the affine arithmetic needed by
/// linear interpolation (copyable, zero-constructible, +, -, and scaling by
/// `Float`).
pub trait Interpolatable:
    Copy + Default + Add<Output = Self> + Sub<Output = Self> + Mul<Float, Output = Self>
{
}

// Blanket impl: anything with the required operators is interpolatable.
impl<T> Interpolatable for T where
    T: Copy + Default + Add<Output = T> + Sub<Output = T> + Mul<Float, Output = T>
{
}
/// A 2D array addressed by `Point2i` coordinates within an arbitrary
/// (possibly non-zero-origin) `Bounds2i` extent, stored row-major.
#[derive(Debug, Clone)]
pub struct Array2D<T> {
    // Row-major storage; index 0 corresponds to extent.p_min.
    pub values: Vec<T>,
    // Coordinate domain; indexing offsets incoming points by p_min.
    pub extent: Bounds2i,
}
impl<T> Array2D<T> {
@ -22,6 +35,27 @@ impl<T> Array2D<T> {
Self { values, extent }
}
/// Creates an array covering [0, nx) x [0, ny) at the origin; the
/// `T: Default` bound is used by `new` to initialize elements.
pub fn new_with_dims(nx: usize, ny: usize) -> Self
where
    T: Default,
{
    Self::new(Bounds2i::from_points(
        Point2i::new(0, 0),
        Point2i::new(nx as i32, ny as i32),
    ))
}
/// Creates an array covering [0, width) x [0, height) with every element
/// set to a clone of `value`.
pub fn new_filled(width: usize, height: usize, value: T) -> Self
where
    T: Clone,
{
    let extent = Bounds2i::from_points(
        Point2i::new(0, 0),
        Point2i::new(width as i32, height as i32),
    );
    Self::new_from_bounds(extent, value)
}
pub fn new_from_bounds(extent: Bounds2i, default_val: T) -> Self
where
T: Clone,
@ -62,15 +96,212 @@ impl<T> Array2D<T> {
impl<T> Index<Point2i> for Array2D<T> {
type Output = T;
fn index(&self, p: Point2i) -> &Self::Output {
let idx = self.get_index(p);
fn index(&self, mut p: Point2i) -> &Self::Output {
p -= Vector2i::from(self.extent.p_min);
let width = self.extent.p_max.x() - self.extent.p_min.x();
let idx = (p.x() + width * p.y()) as usize;
&self.values[idx]
}
}
impl<T> IndexMut<Point2i> for Array2D<T> {
fn index_mut(&mut self, p: Point2i) -> &mut Self::Output {
let idx = self.get_index(p);
fn index_mut(&mut self, mut p: Point2i) -> &mut Self::Output {
p -= Vector2i::from(self.extent.p_min);
let width = self.extent.p_max.x() - self.extent.p_min.x();
let idx = (p.x() + width * p.y()) as usize;
&mut self.values[idx]
}
}
// Convenience indexing by (x, y) tuples; both widths delegate to the
// `Point2i` impl, which offsets by the extent's minimum corner.
impl<T> Index<(usize, usize)> for Array2D<T> {
    type Output = T;
    fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
        &self[(x as i32, y as i32)]
    }
}

impl<T> IndexMut<(usize, usize)> for Array2D<T> {
    fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut Self::Output {
        &mut self[(x as i32, y as i32)]
    }
}

impl<T> Index<(i32, i32)> for Array2D<T> {
    type Output = T;
    fn index(&self, index: (i32, i32)) -> &Self::Output {
        self.index(Point2i::new(index.0, index.1))
    }
}

impl<T> IndexMut<(i32, i32)> for Array2D<T> {
    fn index_mut(&mut self, index: (i32, i32)) -> &mut Self::Output {
        self.index_mut(Point2i::new(index.0, index.1))
    }
}
/// A dense 3D grid of samples addressed by normalized [0, 1]^3 coordinates,
/// stored flat with x varying fastest, then y, then z.
#[derive(Debug, Clone)]
pub struct SampledGrid<T> {
    // Flat sample storage of length nx * ny * nz.
    values: Vec<T>,
    // Sample counts along each axis.
    nx: i32,
    ny: i32,
    nz: i32,
}
impl<T> SampledGrid<T> {
    /// Wraps a flat sample array as an `nx * ny * nz` grid; panics if the
    /// lengths disagree.
    pub fn new(values: Vec<T>, nx: i32, ny: i32, nz: i32) -> Self {
        assert_eq!(
            values.len(),
            (nx * ny * nz) as usize,
            "Grid dimensions do not match data size"
        );
        Self { values, nx, ny, nz }
    }

    /// A grid with no samples and zero extent along every axis.
    pub fn empty() -> Self {
        Self {
            values: Vec::new(),
            nx: 0,
            ny: 0,
            nz: 0,
        }
    }

    /// Heap size of the stored samples, in bytes.
    pub fn bytes_allocated(&self) -> usize {
        self.values.len() * std::mem::size_of::<T>()
    }

    /// Number of samples along x.
    pub fn x_size(&self) -> i32 {
        self.nx
    }

    /// Number of samples along y.
    pub fn y_size(&self) -> i32 {
        self.ny
    }

    /// Number of samples along z.
    pub fn z_size(&self) -> i32 {
        self.nz
    }

    /// Looks up the voxel at integer coordinates `p`, converted through
    /// `convert`; coordinates outside [0, n) on any axis yield
    /// `U::default()`.
    pub fn lookup_int_convert<F, U>(&self, p: Point3i, convert: F) -> U
    where
        F: Fn(&T) -> U,
        U: Default,
    {
        let sample_bounds = Bounds3i::from_points(
            Point3i::new(0, 0, 0),
            Point3i::new(self.nx, self.ny, self.nz),
        );
        if !sample_bounds.contains_exclusive(p) {
            return U::default();
        }
        // Flat index for the x-fastest storage layout.
        let idx = (p.z() * self.ny + p.y()) * self.nx + p.x();
        convert(&self.values[idx as usize])
    }

    /// Looks up the voxel at integer coordinates `p` without conversion.
    pub fn lookup_int(&self, p: Point3i) -> T
    where
        T: Clone + Default,
    {
        self.lookup_int_convert(p, |v| v.clone())
    }

    /// Trilinearly interpolates the grid at normalized coordinates `p`,
    /// converting each contributing voxel through `convert`.
    pub fn lookup_convert<F, U>(&self, p: Point3f, convert: F) -> U
    where
        F: Fn(&T) -> U + Copy,
        U: Interpolatable + Default,
    {
        // Map to continuous sample space; -0.5 puts samples at voxel centers.
        let p_samples = Point3f::new(
            p.x() * self.nx as Float - 0.5,
            p.y() * self.ny as Float - 0.5,
            p.z() * self.nz as Float - 0.5,
        );
        let pi = Point3i::from(p_samples.floor());
        let d = p_samples - Point3f::from(pi);
        // Helper to retrieve corners with conversion; out-of-range corners
        // read as U::default() via lookup_int_convert.
        let lk = |offset: Vector3i| -> U { self.lookup_int_convert(pi + offset, convert) };
        // Trilinear interpolation: lerp the four x-edges, then along y, then z.
        let d00 = lerp(
            d.x(),
            lk(Vector3i::new(0, 0, 0)),
            lk(Vector3i::new(1, 0, 0)),
        );
        let d10 = lerp(
            d.x(),
            lk(Vector3i::new(0, 1, 0)),
            lk(Vector3i::new(1, 1, 0)),
        );
        let d01 = lerp(
            d.x(),
            lk(Vector3i::new(0, 0, 1)),
            lk(Vector3i::new(1, 0, 1)),
        );
        let d11 = lerp(
            d.x(),
            lk(Vector3i::new(0, 1, 1)),
            lk(Vector3i::new(1, 1, 1)),
        );
        lerp(d.z(), lerp(d.y(), d00, d10), lerp(d.y(), d01, d11))
    }

    /// Trilinearly interpolates the grid at normalized coordinates `p`.
    pub fn lookup(&self, p: Point3f) -> T
    where
        T: Interpolatable + Clone,
    {
        self.lookup_convert(p, |v| *v)
    }

    /// Maximum converted voxel value over all voxels overlapping `bounds`
    /// (given in normalized [0, 1]^3 coordinates).
    pub fn max_value_convert<F, U>(&self, bounds: Bounds3f, convert: F) -> U
    where
        F: Fn(&T) -> U,
        U: PartialOrd + Default,
    {
        // The two bounds corners mapped into continuous sample space.
        let ps = [
            Point3f::new(
                bounds.p_min.x() * self.nx as Float - 0.5,
                bounds.p_min.y() * self.ny as Float - 0.5,
                bounds.p_min.z() * self.nz as Float - 0.5,
            ),
            Point3f::new(
                bounds.p_max.x() * self.nx as Float - 0.5,
                bounds.p_max.y() * self.ny as Float - 0.5,
                bounds.p_max.z() * self.nz as Float - 0.5,
            ),
        ];
        // Clamp the scan range to the grid; +1 on the upper corner keeps the
        // scan conservative for partially covered voxels.
        let pi_min = Point3i::from(ps[0].floor()).max(Point3i::new(0, 0, 0));
        let pi_max = (Point3i::from(ps[1].floor()) + Vector3i::new(1, 1, 1)).min(Point3i::new(
            self.nx - 1,
            self.ny - 1,
            self.nz - 1,
        ));
        // Initialize with the first voxel
        let mut max_value = self.lookup_int_convert(pi_min, &convert);
        for z in pi_min.z()..=pi_max.z() {
            for y in pi_min.y()..=pi_max.y() {
                for x in pi_min.x()..=pi_max.x() {
                    let val = self.lookup_int_convert(Point3i::new(x, y, z), &convert);
                    if val > max_value {
                        max_value = val;
                    }
                }
            }
        }
        max_value
    }

    /// Maximum voxel value over all voxels overlapping `bounds`.
    pub fn max_value(&self, bounds: Bounds3f) -> T
    where
        T: PartialOrd + Default + Clone,
    {
        self.max_value_convert(bounds, |v| v.clone())
    }
}

View file

@ -1,8 +1,8 @@
use image::error;
use image_rs::{ImageError as IError, error};
use std::fmt;
use thiserror::Error;
use crate::utils::image::PixelFormat;
use crate::image::PixelFormat;
#[derive(Error, Debug)]
pub enum LlsError {
@ -40,7 +40,7 @@ pub enum ImageError {
Io(#[from] std::io::Error),
#[error("Image file error: {0}")]
Image(#[from] image::ImageError),
Image(#[from] IError),
#[error("EXR file error: {0}")]
Exr(#[from] exr::error::Error),

View file

@ -1,14 +1,99 @@
use std::hash::{Hash, Hasher};
use crate::core::pbrt::Float;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
const U32_TO_F32_SCALE: f32 = 1.0 / 4294967296.0;
pub fn hash_float<T: Hash>(args: T) -> Float {
let mut hasher = DefaultHasher::new();
args.hash(&mut hasher);
let hash_u64 = hasher.finish();
let hash_u32 = hash_u64 as u32;
(hash_u32 as f32) * U32_TO_F32_SCALE
// 64-bit finalizer/mixer: alternating xor-shifts and odd-constant multiplies
// spread every input bit across the whole word, decorrelating nearby inputs.
#[inline(always)]
pub fn mix_bits(mut v: u64) -> u64 {
    v ^= v >> 31;
    v = v.wrapping_mul(0x7fb5d329728ea185);
    v ^= v >> 27;
    v = v.wrapping_mul(0x81dadef4bc2dd44d);
    v ^= v >> 33;
    v
}
/// MurmurHash64A: hashes `key` eight bytes at a time with a multiply /
/// xor-shift mix, folds in any trailing bytes, then finalizes.
///
/// NOTE(review): chunks are read with `from_ne_bytes`, so the result is
/// endian-dependent — fine for in-process hashing, not for serialization.
pub fn murmur_hash_64a(key: &[u8], seed: u64) -> u64 {
    // Multiplier and shift amount from the reference implementation.
    const M: u64 = 0xc6a4a7935bd1e995;
    const R: i32 = 47;
    let len = key.len();
    let mut h = seed ^ ((len as u64).wrapping_mul(M));
    // We chunk the slice into 8-byte segments
    let chunks = key.chunks_exact(8);
    let remainder = chunks.remainder();
    for chunk in chunks {
        // Safe conversion from [u8; 8] to u64
        let mut k = u64::from_ne_bytes(chunk.try_into().unwrap());
        k = k.wrapping_mul(M);
        k ^= k >> R;
        k = k.wrapping_mul(M);
        h ^= k;
        h = h.wrapping_mul(M);
    }
    // Handle the tail (remaining bytes)
    if !remainder.is_empty() {
        // We handle the switch-case fallthrough logic by building a u64
        let mut k_tail = 0u64;
        // Load bytes into the u64 based on length
        for (i, &byte) in remainder.iter().enumerate() {
            k_tail ^= (byte as u64) << (i * 8);
        }
        h ^= k_tail;
        h = h.wrapping_mul(M);
    }
    // Final avalanche.
    h ^= h >> R;
    h = h.wrapping_mul(M);
    h ^= h >> R;
    h
}
/// Hashes the raw in-memory bytes of `data` with MurmurHash64A.
///
/// NOTE(review): this reads the object representation, so types with padding
/// bytes or interior pointers hash nondeterministically — restrict callers
/// to plain-old-data values.
pub fn hash_buffer<T: ?Sized>(data: &T, seed: u64) -> u64 {
    let len = std::mem::size_of_val(data);
    let ptr = data as *const T as *const u8;
    // SAFETY: `ptr` points to a live object of exactly `len` bytes and the
    // slice is only read for the duration of this call.
    let bytes = unsafe { std::slice::from_raw_parts(ptr, len) };
    murmur_hash_64a(bytes, seed)
}
/// Hashes an arbitrary list of values by packing their raw bytes into one
/// buffer and running MurmurHash64A over it (seed 0).
///
/// NOTE(review): each argument is reinterpreted as its in-memory bytes, so
/// this is only meaningful for `Copy`/plain-old-data values; padding bytes or
/// interior pointers would make the hash nondeterministic — confirm at call
/// sites.
///
/// Fixes two defects in the original: the leftover `PackedData` struct
/// repeated the field name `_field` per argument (a compile error for more
/// than one argument) and `from_raw_parts` was called without an `unsafe`
/// block. A trailing comma is now also accepted.
#[macro_export]
macro_rules! hash_values {
    ( $( $x:expr ),* $(,)? ) => {
        {
            let mut buffer = Vec::new();
            $(
                // SAFETY: we read exactly `size_of_val` bytes from a valid,
                // live reference; the bytes are only copied, never mutated.
                let s = unsafe {
                    std::slice::from_raw_parts(
                        &$x as *const _ as *const u8,
                        std::mem::size_of_val(&$x),
                    )
                };
                buffer.extend_from_slice(s);
            )*
            $crate::utils::hash::murmur_hash_64a(&buffer, 0)
        }
    }
}
/// Hashes `data`'s raw bytes and maps the low 32 bits of the result to a
/// `Float` in [0, 1).
pub fn hash_float<T>(data: &T) -> Float
where
    T: Copy + ?Sized, // Ensure it's plain old data
{
    // Hash the pointed-to value, not the reference: the original passed
    // `&data` (a `&&T`), which hashed the stack address of the pointer and
    // made the result nondeterministic across calls.
    let h = hash_buffer(data, 0);
    (h as u32) as Float * 2.3283064365386963e-10 // 0x1p-32f
}

File diff suppressed because it is too large Load diff

View file

@ -44,6 +44,21 @@ impl Interval {
pub fn is_empty(&self) -> bool {
self.low > self.high
}
/// Absolute value of the interval.
///
/// Entirely non-negative intervals are returned unchanged and entirely
/// negative ones are negated; intervals straddling zero become
/// [0, max(-low, high)], with the upper bound rounded up one ULP to stay
/// conservative.
pub fn abs(&self) -> Self {
    if self.low >= 0.0 {
        return *self;
    }
    if self.high < 0.0 {
        return -(*self);
    }
    Self {
        low: 0.0,
        high: next_float_up((-self.low).max(self.high)),
    }
}
}
impl Default for Interval {
@ -65,6 +80,20 @@ impl Add for Interval {
}
}
// Scalar-on-the-left arithmetic: promote the Float to a degenerate interval
// and reuse the interval-interval operators.
impl Add<Interval> for Float {
    type Output = Interval;
    fn add(self, rhs: Interval) -> Self::Output {
        Interval::new(self) + rhs
    }
}

impl Sub<Interval> for Float {
    type Output = Interval;
    fn sub(self, rhs: Interval) -> Self::Output {
        Interval::new(self) - rhs
    }
}
impl Sub for Interval {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,6 @@
use crate::core::pbrt::Float;
use crate::core::sampler::PiecewiseConstant2D;
use crate::geometry::{Normal3f, Point2f, Point3f, Vector3f};
use crate::utils::sampling::PiecewiseConstant2D;
use crate::utils::transform::Transform;
use std::sync::Arc;
@ -19,6 +19,7 @@ pub struct TriangleMesh {
}
impl TriangleMesh {
#[allow(clippy::too_many_arguments)]
pub fn new(
render_from_object: &Transform<Float>,
reverse_orientation: bool,

471
src/utils/mipmap.rs Normal file
View file

@ -0,0 +1,471 @@
use crate::core::pbrt::{Float, lerp};
use crate::geometry::{Lerp, Point2f, Point2i, Vector2f, VectorLike};
use crate::image::{Image, ImageAndMetadata, PixelData, PixelFormat, WrapMode, WrapMode2D};
use crate::utils::color::{ColorEncoding, RGB};
use crate::utils::colorspace::RGBColorSpace;
use crate::utils::math::{safe_sqrt, square};
use std::path::Path;
use std::hash::{Hash, Hasher};
use std::ops::{Add, Mul, Sub};
use std::sync::Arc;
/// Reconstruction filters available for MIP map lookups.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum FilterFunction {
    // Nearest-texel lookup on a single level.
    Point,
    // Bilinear interpolation on a single level.
    Bilinear,
    // Bilinear interpolation blended across two adjacent levels.
    Trilinear,
    // Elliptically weighted average (anisotropic filtering).
    Ewa,
}

impl std::fmt::Display for FilterFunction {
    // Human-readable filter names.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let s = match self {
            FilterFunction::Ewa => "EWA",
            FilterFunction::Trilinear => "trilinear",
            FilterFunction::Bilinear => "bilinear",
            FilterFunction::Point => "point",
        };
        write!(f, "{}", s)
    }
}
/// Filtering configuration for a `MIPMap`.
#[derive(Debug, Clone, Copy)]
pub struct MIPMapFilterOptions {
    // Which reconstruction filter `MIPMap::filter` uses.
    pub filter: FilterFunction,
    // Maximum ellipse eccentricity allowed for EWA before the minor axis is
    // widened (trades blur for bounded texel count).
    pub max_anisotropy: f32,
}

// Defaults to EWA filtering with a maximum anisotropy of 8.
impl Default for MIPMapFilterOptions {
    fn default() -> Self {
        Self {
            filter: FilterFunction::Ewa,
            max_anisotropy: 8.0,
        }
    }
}

// Equality compares the anisotropy's bit pattern so it stays consistent with
// the bitwise Hash impl below (and NaN compares equal to itself here).
impl PartialEq for MIPMapFilterOptions {
    fn eq(&self, other: &Self) -> bool {
        self.filter == other.filter
            && self.max_anisotropy.to_bits() == other.max_anisotropy.to_bits()
    }
}

impl Eq for MIPMapFilterOptions {}

impl Hash for MIPMapFilterOptions {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.filter.hash(state);
        // Hash the bits, not the float value
        self.max_anisotropy.to_bits().hash(state);
    }
}
/// Sample types a `MIPMap` can return: must support the affine arithmetic
/// used by the filters and know how to read themselves out of an `Image`.
pub trait MIPMapSample:
    Copy + Add<Output = Self> + Sub<Output = Self> + Mul<Float, Output = Self> + std::fmt::Debug
{
    // Additive identity used to start filter accumulation.
    fn zero() -> Self;
    // Bilinear lookup at continuous coordinates `st`.
    fn sample_bilerp(image: &Image, st: Point2f, wrap: WrapMode2D) -> Self;
    // Single-texel lookup at integer coordinates `st`.
    fn sample_texel(image: &Image, st: Point2i, wrap: WrapMode2D) -> Self;
}

// Scalar samples read channel 0 only.
impl MIPMapSample for Float {
    fn zero() -> Self {
        0.
    }
    fn sample_bilerp(image: &Image, st: Point2f, wrap: WrapMode2D) -> Self {
        image.bilerp_channel_with_wrap(st, 0, wrap)
    }
    fn sample_texel(image: &Image, st: Point2i, wrap: WrapMode2D) -> Self {
        image.get_channel_with_wrap(st, 0, wrap)
    }
}

// RGB samples read channels 0-2 when the image has at least three channels;
// single-channel images are broadcast to gray.
impl MIPMapSample for RGB {
    fn zero() -> Self {
        RGB::new(0., 0., 0.)
    }
    fn sample_bilerp(image: &Image, st: Point2f, wrap: WrapMode2D) -> Self {
        let nc = image.n_channels();
        if nc >= 3 {
            let r = image.bilerp_channel_with_wrap(st, 0, wrap);
            let g = image.bilerp_channel_with_wrap(st, 1, wrap);
            let b = image.bilerp_channel_with_wrap(st, 2, wrap);
            RGB::new(r, g, b)
        } else {
            let v = image.bilerp_channel_with_wrap(st, 0, wrap);
            RGB::new(v, v, v)
        }
    }
    fn sample_texel(image: &Image, st: Point2i, wrap: WrapMode2D) -> Self {
        let nc = image.n_channels();
        if nc >= 3 {
            let r = image.get_channel_with_wrap(st, 0, wrap);
            let g = image.get_channel_with_wrap(st, 1, wrap);
            let b = image.get_channel_with_wrap(st, 2, wrap);
            RGB::new(r, g, b)
        } else {
            let v = image.get_channel_with_wrap(st, 0, wrap);
            RGB::new(v, v, v)
        }
    }
}
/// An image pyramid supporting point, bilinear, trilinear and EWA filtered
/// lookups.
#[derive(Debug)]
pub struct MIPMap {
    // Level 0 is the full-resolution image; each level halves resolution.
    pyramid: Vec<Image>,
    // Color space of the source image, if known.
    color_space: Option<RGBColorSpace>,
    // Wrap mode applied on both axes for out-of-range coordinates.
    wrap_mode: WrapMode,
    // Filter selection and anisotropy limit.
    options: MIPMapFilterOptions,
}
impl MIPMap {
    /// Builds a MIP pyramid from `image` and stores filtering parameters.
    pub fn new(
        image: Image,
        color_space: Option<RGBColorSpace>,
        wrap_mode: WrapMode,
        options: MIPMapFilterOptions,
    ) -> Self {
        let pyramid = Image::generate_pyramid(image, wrap_mode);
        Self {
            pyramid,
            color_space,
            wrap_mode,
            options,
        }
    }

    /// Resolution of pyramid level `level` (level 0 is the full image).
    pub fn level_resolution(&self, level: usize) -> Point2i {
        self.pyramid[level].resolution()
    }

    /// Number of pyramid levels.
    pub fn levels(&self) -> usize {
        self.pyramid.len()
    }

    /// Color space of the underlying image, if known.
    pub fn get_rgb_colorspace(&self) -> Option<RGBColorSpace> {
        self.color_space.clone()
    }

    /// Borrows the image at pyramid level `level`.
    pub fn get_level(&self, level: usize) -> &Image {
        &self.pyramid[level]
    }

    /// Filters the texture at `st` given the screen-space derivatives
    /// `dst0`/`dst1`, using the configured filter function.
    pub fn filter<T: MIPMapSample>(
        &self,
        st: Point2f,
        mut dst0: Vector2f,
        mut dst1: Vector2f,
    ) -> T {
        if self.options.filter != FilterFunction::Ewa {
            // Compute largest change in texture coordinates
            let width = 2.0
                * [
                    dst0.x().abs(),
                    dst0.y().abs(),
                    dst1.x().abs(),
                    dst1.y().abs(),
                ]
                .into_iter()
                .reduce(Float::max)
                .unwrap_or(0.0);
            // Compute MIP Map level:
            // n_levels - 1 + log2(width) maps width=1.0 to the top level (1x1)
            let n_levels = self.levels() as Float;
            let level = n_levels - 1.0 + width.max(1e-8).log2();
            if level >= n_levels - 1.0 {
                return self.texel(self.levels() - 1, Point2i::new(0, 0));
            }
            let i_level = level.floor() as usize;
            return match self.options.filter {
                FilterFunction::Point => {
                    let resolution = self.level_resolution(i_level);
                    let sti = Point2i::new(
                        (st.x() * resolution.x() as Float - 0.5).round() as i32,
                        (st.y() * resolution.y() as Float - 0.5).round() as i32,
                    );
                    self.texel(i_level, sti)
                }
                FilterFunction::Bilinear => self.bilerp(i_level, st),
                FilterFunction::Trilinear => {
                    // Blend bilinear lookups on the two bracketing levels.
                    let v0 = self.bilerp(i_level, st);
                    let v1 = self.bilerp(i_level + 1, st);
                    let t = level - i_level as Float;
                    lerp(t, v0, v1)
                }
                FilterFunction::Ewa => unreachable!(),
            };
        }
        // EWA: make dst0 the major (longer) axis of the filter ellipse.
        if dst0.norm_squared() < dst1.norm_squared() {
            std::mem::swap(&mut dst0, &mut dst1);
        }
        let longer_len = dst0.norm();
        let mut shorter_len = dst1.norm();
        // If ellipse is too thin, fatten the minor axis to limit the number
        // of texels
        if shorter_len * self.options.max_anisotropy < longer_len && shorter_len > 0.0 {
            let scale = longer_len / (shorter_len * self.options.max_anisotropy);
            dst1 *= scale;
            shorter_len *= scale;
        }
        if shorter_len == 0.0 {
            return self.bilerp(0, st);
        }
        // Choose a level where the minor axis spans about one texel, then
        // blend the EWA results of the two bracketing levels.
        let lod = (self.levels() as Float - 1.0 + shorter_len.log2()).max(0.0);
        let ilod = lod.floor() as usize;
        let v0 = self.ewa(ilod, st, dst0, dst1);
        let v1 = self.ewa(ilod + 1, st, dst0, dst1);
        lerp(lod - ilod as Float, v0, v1)
    }

    /// Fetches a single texel at `st` on `level`, applying the wrap mode.
    ///
    /// Panics if `level` is out of bounds (internal invariant).
    fn texel<T: MIPMapSample>(&self, level: usize, st: Point2i) -> T {
        if level >= self.levels() {
            panic!("MIPMap level out of bounds");
        }
        let image = &self.pyramid[level];
        let wrap_2d = WrapMode2D {
            uv: [self.wrap_mode; 2],
        };
        T::sample_texel(image, st, wrap_2d)
    }

    /// Bilinearly interpolates the image on `level` at continuous `st`.
    fn bilerp<T: MIPMapSample>(&self, level: usize, st: Point2f) -> T {
        let image = &self.pyramid[level];
        let wrap_2d = WrapMode2D {
            uv: [self.wrap_mode; 2],
        };
        T::sample_bilerp(image, st, wrap_2d)
    }

    /// Elliptically weighted average over the ellipse with axes
    /// `dst0`/`dst1` centered at `st`, on pyramid level `level`.
    fn ewa<T: MIPMapSample>(
        &self,
        level: usize,
        mut st: Point2f,
        mut dst0: Vector2f,
        mut dst1: Vector2f,
    ) -> T {
        // Guard with >=: `filter` may call with level == levels(); the
        // original `>` let that value through and panicked indexing the
        // pyramid below.
        if level >= self.levels() {
            return self.texel(self.levels() - 1, Point2i::new(0, 0));
        }
        // Convert the lookup point and ellipse axes to this level's texel
        // coordinate space.
        let level_res = self.level_resolution(level);
        st[0] = st[0] * level_res[0] as Float - 0.5;
        st[1] = st[1] * level_res[1] as Float - 0.5;
        dst0[0] *= level_res[0] as Float;
        dst0[1] *= level_res[1] as Float;
        dst1[0] *= level_res[0] as Float;
        dst1[1] *= level_res[1] as Float;
        // Implicit ellipse coefficients: A s^2 + B s t + C t^2 < F.
        let mut a = square(dst0[1]) + square(dst1[1]) + 1.;
        // The cross term uses the products of the axis components; the
        // original summed the components instead, distorting the ellipse.
        let mut b = -2. * (dst0[0] * dst0[1] + dst1[0] * dst1[1]);
        let mut c = square(dst0[0]) + square(dst1[0]) + 1.;
        let inv_f = 1. / (a * c - square(b) * 0.25);
        a *= inv_f;
        b *= inv_f;
        c *= inv_f;
        // Compute the ellipse's texel-space bounding box.
        let det = -square(b) + 4. * a * c;
        let inv_det = 1. / det;
        let u_sqrt = safe_sqrt(det * c);
        let v_sqrt = safe_sqrt(det * a);
        let s0: i32 = (st[0] - 2. * inv_det * u_sqrt).ceil() as i32;
        let s1: i32 = (st[0] + 2. * inv_det * u_sqrt).floor() as i32;
        let t0: i32 = (st[1] - 2. * inv_det * v_sqrt).ceil() as i32;
        let t1: i32 = (st[1] + 2. * inv_det * v_sqrt).floor() as i32;
        // Accumulate Gaussian-weighted texels that fall inside the ellipse.
        let mut sum = T::zero();
        let mut sum_wts = 0.;
        for it in t0..=t1 {
            let tt = it as Float - st[1];
            for is in s0..=s1 {
                let ss = is as Float - st[0];
                // Compute squared radius and filter texel if inside ellipse
                let r2 = a * square(ss) + b * ss * tt + c * square(tt);
                if r2 < 1.0 {
                    // Map r2 to LUT index
                    let index = (r2 * MIP_FILTER_LUT_SIZE as Float)
                        .min((MIP_FILTER_LUT_SIZE - 1) as Float)
                        as usize;
                    let weight = MIP_FILTER_LUT[index];
                    // Accumulate
                    sum = sum + self.texel::<T>(level, Point2i::new(is, it)) * weight;
                    sum_wts += weight;
                }
            }
        }
        sum * (1. / sum_wts)
    }

    /// Reads `filename` with `encoding` and builds a MIP map from it.
    ///
    /// Image-loading failures are now propagated as `Err(())` instead of
    /// panicking via the original `unwrap`.
    pub fn create_from_file(
        filename: &Path,
        options: MIPMapFilterOptions,
        wrap_mode: WrapMode,
        encoding: ColorEncoding,
    ) -> Result<MIPMap, ()> {
        let image_and_metadata = Image::read(filename, Some(encoding)).map_err(|_| ())?;
        let image = image_and_metadata.image;
        Ok(MIPMap::new(
            image,
            image_and_metadata.metadata.colorspace,
            wrap_mode,
            options,
        ))
    }
}
// Must be `const`, not `static`: array lengths require constant expressions,
// and the LUT below uses this value as its length.
const MIP_FILTER_LUT_SIZE: usize = 128;
static MIP_FILTER_LUT: [Float; MIP_FILTER_LUT_SIZE] = [
// MIPMap EWA Lookup Table Values
0.864664733,
0.849040031,
0.83365953,
0.818519294,
0.80361563,
0.788944781,
0.774503231,
0.760287285,
0.746293485,
0.732518315,
0.718958378,
0.705610275,
0.692470789,
0.679536581,
0.666804492,
0.654271305,
0.641933978,
0.629789352,
0.617834508,
0.606066525,
0.594482362,
0.583079159,
0.571854174,
0.560804546,
0.549927592,
0.539220572,
0.528680861,
0.518305838,
0.50809288,
0.498039544,
0.488143265,
0.478401601,
0.468812168,
0.45937258,
0.450080454,
0.440933526,
0.431929469,
0.423066139,
0.414341331,
0.405752778,
0.397298455,
0.388976216,
0.380784035,
0.372719884,
0.364781618,
0.356967449,
0.34927541,
0.341703475,
0.334249914,
0.32691282,
0.319690347,
0.312580705,
0.305582166,
0.298692942,
0.291911423,
0.285235822,
0.278664529,
0.272195935,
0.265828371,
0.259560347,
0.253390193,
0.247316495,
0.241337672,
0.235452279,
0.229658857,
0.223955944,
0.21834214,
0.212816045,
0.207376286,
0.202021524,
0.196750447,
0.191561714,
0.186454013,
0.181426153,
0.176476851,
0.171604887,
0.166809067,
0.162088141,
0.157441005,
0.152866468,
0.148363426,
0.143930718,
0.139567271,
0.135272011,
0.131043866,
0.126881793,
0.122784719,
0.11875169,
0.114781633,
0.11087364,
0.107026696,
0.103239879,
0.0995122194,
0.0958427936,
0.0922307223,
0.0886750817,
0.0851749927,
0.0817295909,
0.0783380121,
0.0749994367,
0.0717130303,
0.0684779733,
0.0652934611,
0.0621587038,
0.0590728968,
0.0560353249,
0.0530452281,
0.0501018465,
0.0472044498,
0.0443523228,
0.0415447652,
0.0387810767,
0.0360605568,
0.0333825648,
0.0307464004,
0.0281514227,
0.0255970061,
0.0230824798,
0.0206072628,
0.0181707144,
0.0157722086,
0.013411209,
0.0110870898,
0.0087992847,
0.0065472275,
0.00433036685,
0.0021481365,
0.,
];

View file

@ -1,15 +1,79 @@
use std::sync::atomic::{AtomicU64, Ordering};
pub mod color;
pub mod colorspace;
pub mod containers;
pub mod error;
pub mod hash;
pub mod image;
pub mod interval;
pub mod math;
pub mod mesh;
pub mod mipmap;
pub mod quaternion;
pub mod rng;
pub mod sampling;
pub mod scattering;
pub mod spectrum;
pub mod sobol;
pub mod splines;
pub mod transform;
/// Partitions `data` in place so that every element satisfying `predicate`
/// precedes every element that does not, preserving the relative order of
/// the matching elements (the non-matching suffix may be permuted).
///
/// Returns the number of matching elements, i.e. the index of the first
/// non-matching element.
///
/// The bound is `FnMut` (a backward-compatible generalization of the
/// previous `Fn`): stateful predicates — counters, memoizers — now work,
/// and every `Fn` closure still satisfies `FnMut`.
#[inline]
pub fn partition_slice<T, F>(data: &mut [T], mut predicate: F) -> usize
where
    F: FnMut(&T) -> bool,
{
    // `boundary` is one past the matched prefix built so far.
    let mut boundary = 0;
    for j in 0..data.len() {
        if predicate(&data[j]) {
            data.swap(boundary, j);
            boundary += 1;
        }
    }
    boundary
}
/// A 64-bit float that can be shared and mutated across threads by storing
/// its bit pattern in an `AtomicU64`. All operations use relaxed ordering:
/// the value is an accumulator, not a synchronization primitive.
#[derive(Debug)]
pub struct AtomicFloat {
    bits: AtomicU64,
}

impl AtomicFloat {
    /// Creates a new atomic float holding `value`.
    pub fn new(value: f64) -> Self {
        AtomicFloat {
            bits: AtomicU64::new(value.to_bits()),
        }
    }

    /// Reads the current value.
    pub fn load(&self) -> f64 {
        f64::from_bits(self.bits.load(Ordering::Relaxed))
    }

    /// Overwrites the current value.
    pub fn store(&self, value: f64) {
        self.bits.store(value.to_bits(), Ordering::Relaxed);
    }

    /// Atomically adds `value`, retrying on contention.
    pub fn add(&self, value: f64) {
        // fetch_update performs the same compare-exchange-weak loop the
        // previous hand-written version did; returning Some(..) always
        // means it only stops once the update lands.
        let _ = self
            .bits
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |bits| {
                Some((f64::from_bits(bits) + value).to_bits())
            });
    }
}

impl Default for AtomicFloat {
    /// Zero-valued atomic float.
    fn default() -> Self {
        AtomicFloat::new(0.0)
    }
}

View file

@ -134,17 +134,17 @@ impl Quaternion {
#[inline]
pub fn angle_between(&self, rhs: Quaternion) -> Float {
if self.dot(rhs) < 0.0 {
return PI - 2. * safe_asin((self.v + rhs.v).norm() / 2.);
PI - 2. * safe_asin((self.v + rhs.v).norm() / 2.)
} else {
return 2. * safe_asin((rhs.v - self.v).norm() / 2.);
2. * safe_asin((rhs.v - self.v).norm() / 2.)
}
}
pub fn slerp(t: Float, q1: Quaternion, q2: Quaternion) -> Quaternion {
let theta = q1.angle_between(q2);
let sin_theta_over_theta = sinx_over_x(theta);
return q1 * (1. - t) * sinx_over_x((1. - t) * theta) / sin_theta_over_theta
+ q2 * t * sinx_over_x(t * theta) / sin_theta_over_theta;
q1 * (1. - t) * sinx_over_x((1. - t) * theta) / sin_theta_over_theta
+ q2 * t * sinx_over_x(t * theta) / sin_theta_over_theta
}
pub fn length(&self) -> Float {

72
src/utils/rng.rs Normal file
View file

@ -0,0 +1,72 @@
/// PCG32 pseudo-random number generator (O'Neill 2014): a 64-bit LCG with
/// an output permutation, plus a per-instance stream selector.
#[derive(Debug, Clone)]
pub struct Rng {
    // Current LCG state; advanced once per generated value.
    state: u64,
    // Stream selector; kept odd so every stream has full period.
    inc: u64,
}

impl Default for Rng {
    /// The reference PCG32 default state and stream constants.
    fn default() -> Self {
        Self {
            state: 0x853c49e6748fea9b,
            inc: 0xda3e39cb94b95bdb,
        }
    }
}

// PCG32 LCG multiplier, shared by all streams.
const PCG32_MULT: u64 = 6364136223846793005;

impl Rng {
    /// Re-seeds to stream `init_seq`, mirroring the reference
    /// `pcg32_srandom_r` initialization (including the warm-up draws).
    pub fn set_sequence(&mut self, init_seq: u64) {
        self.state = 0;
        self.inc = (init_seq << 1) | 1;
        self.uniform_u32(); // Warm up
        // PCG32_DEFAULT_STATE
        self.state = self.state.wrapping_add(0x853c49e6748fea9b);
        self.uniform_u32();
    }

    /// Re-seeds to stream `sequence_index` with an explicit state offset
    /// instead of the default initial state.
    pub fn set_sequence_with_offset(&mut self, sequence_index: u64, offset: u64) {
        self.state = 0;
        self.inc = (sequence_index << 1) | 1;
        self.uniform_u32();
        self.state = self.state.wrapping_add(offset);
        self.uniform_u32();
    }

    /// Creates a generator seeded on stream `sequence_index`.
    pub fn new(sequence_index: u64) -> Self {
        let mut rng = Self { state: 0, inc: 0 };
        rng.set_sequence(sequence_index);
        rng
    }

    /// Creates a generator seeded on `sequence_index` with a state `offset`.
    pub fn new_with_offset(sequence_index: u64, offset: u64) -> Self {
        let mut rng = Self { state: 0, inc: 0 };
        rng.set_sequence_with_offset(sequence_index, offset);
        rng
    }

    /// Skips ahead `delta` draws in O(log delta) time.
    ///
    /// Uses LCG fast-forward (Brown, "Random Number Generation with
    /// Arbitrary Strides"): the combined multiplier and increment for
    /// `delta` steps are built by binary exponentiation, replacing the
    /// previous O(delta) generate-and-discard loop. Equivalent to calling
    /// the generator `delta` times; `advance(0)` is a no-op.
    pub fn advance(&mut self, delta: u64) {
        let mut acc_mult: u64 = 1;
        let mut acc_plus: u64 = 0;
        let mut cur_mult = PCG32_MULT;
        let mut cur_plus = self.inc;
        let mut remaining = delta;
        while remaining > 0 {
            if remaining & 1 == 1 {
                acc_mult = acc_mult.wrapping_mul(cur_mult);
                acc_plus = acc_plus.wrapping_mul(cur_mult).wrapping_add(cur_plus);
            }
            cur_plus = cur_mult.wrapping_add(1).wrapping_mul(cur_plus);
            cur_mult = cur_mult.wrapping_mul(cur_mult);
            remaining >>= 1;
        }
        self.state = acc_mult.wrapping_mul(self.state).wrapping_add(acc_plus);
    }

    /// Draws a uniform value in [0, 1), converted from 32 random bits.
    pub fn uniform<T>(&mut self) -> T
    where
        T: From<f32>,
    {
        // Convert u32 to float [0,1)
        let v = self.uniform_u32();
        let f = (v as f32) * 2.3283064365386963e-10; // 1 / 2^32
        T::from(f)
    }

    // One PCG32 step: advance the LCG, then apply the XSH-RR output
    // permutation (xorshift-high, random rotate) to the *previous* state.
    fn uniform_u32(&mut self) -> u32 {
        let oldstate = self.state;
        self.state = oldstate.wrapping_mul(PCG32_MULT).wrapping_add(self.inc);
        let xorshifted = (((oldstate >> 18) ^ oldstate) >> 27) as u32;
        let rot = (oldstate >> 59) as u32;
        (xorshifted >> rot) | (xorshifted << ((0u32.wrapping_sub(rot)) & 31))
    }
}

View file

@ -1,11 +1,19 @@
use super::math::safe_sqrt;
use crate::check_rare;
use crate::core::pbrt::{
Float, INV_2_PI, INV_PI, ONE_MINUS_EPSILON, PI, PI_OVER_2, PI_OVER_4, clamp_t, lerp,
Float, INV_2_PI, INV_4_PI, INV_PI, ONE_MINUS_EPSILON, PI, PI_OVER_2, PI_OVER_4, clamp_t,
evaluate_polynomial, find_interval, lerp,
};
use crate::core::pbrt::{RARE_EVENT_CONDITION_MET, RARE_EVENT_TOTAL_CALLS};
use crate::geometry::{Frame, Point2f, Point3f, Vector2f, Vector2i, Vector3f, VectorLike};
use crate::utils::math::{difference_of_products, square, sum_of_products};
use crate::geometry::{
Bounds2f, Frame, Point2f, Point2i, Point3f, Vector2f, Vector2i, Vector3f, VectorLike,
};
use crate::utils::containers::Array2D;
use crate::utils::math::{
catmull_rom_weights, difference_of_products, logistic, newton_bisection, square,
sum_of_products,
};
use crate::utils::rng::Rng;
use std::sync::atomic::{AtomicU64, Ordering as SyncOrdering};
pub fn sample_linear(u: Float, a: Float, b: Float) -> Float {
@ -34,6 +42,16 @@ pub fn invert_bilinear_sample(p: Point2f, w: &[Float]) -> Point2f {
)
}
/// Power heuristic (β = 2) for weighting multiple importance sampling
/// strategies. `nf`/`ng` are the sample counts of the two strategies and
/// `f_pdf`/`g_pdf` their PDFs.
///
/// Guards against `f²` overflowing to infinity (very large PDFs from
/// near-specular distributions), which would otherwise produce
/// `inf / (inf + x) = NaN`; pbrt-v4 returns 1 in that case.
pub fn power_heuristic(nf: i32, f_pdf: Float, ng: i32, g_pdf: Float) -> Float {
    let f = nf as Float * f_pdf;
    let g = ng as Float * g_pdf;
    if square(f).is_infinite() {
        return 1.;
    }
    square(f) / (square(f) + square(g))
}
/// PDF for uniformly sampling a direction over the full unit sphere:
/// constant 1/(4π) per steradian.
pub fn uniform_sphere_pdf() -> Float {
    INV_4_PI
}
pub fn bilinear_pdf(p: Point2f, w: &[Float]) -> Float {
if p.x() < 0. || p.x() > 1. || p.y() < 0. || p.y() > 1. {
return 0.;
@ -285,12 +303,15 @@ pub fn invert_spherical_rectangle_sample(
let hv = [lerp(u1[0], h0, h1), lerp(u1[1], h0, h1)];
let hvsq = [square(hv[0]), square(hv[1])];
let yz = [(hv[0] * dd) / (1. - hvsq[0]), (hv[1] * dd) / (1. - hvsq[1])];
let u = if (yz[0] - yv).abs() < (yz[1] - yv).abs() {
if (yz[0] - yv).abs() < (yz[1] - yv).abs() {
Point2f::new(clamp_t(u0, 0., 1.), u1[0])
} else {
Point2f::new(clamp_t(u0, 0., 1.), u1[1])
};
u
}
}
/// Samples the exponential distribution pdf(x) = a·e^(−a·x), x ≥ 0, by
/// inverting its CDF at `u` ∈ [0, 1).
///
/// Bug fix: since ln(1 − u) ≤ 0, the leading minus sign is required — the
/// previous `ln(1 - u) / a` returned non-positive samples for every `u`
/// (pbrt: `-log(1 - u) / a`).
pub fn sample_exponential(u: Float, a: Float) -> Float {
    -(1. - u).ln() / a
}
pub fn sample_spherical_triangle(
@ -425,6 +446,174 @@ pub fn invert_spherical_triangle_sample(
Some(Point2f::new(clamp_t(u0, 0.0, 1.0), clamp_t(u1, 0.0, 1.0)))
}
/// Importance-samples a 1D function represented by Catmull–Rom spline data.
///
/// `nodes` are the knot positions, `f` the function values at the knots,
/// and `big_f` the precomputed running integral (unnormalized CDF) at the
/// knots. Returns (sampled position, function value there, PDF).
pub fn sample_catmull_rom(
    nodes: &[Float],
    f: &[Float],
    big_f: &[Float],
    mut u: Float,
) -> (Float, Float, Float) {
    assert_eq!(nodes.len(), f.len());
    assert_eq!(f.len(), big_f.len());
    // Rescale u to the CDF's unnormalized range and locate the knot interval.
    u *= big_f.last().copied().unwrap_or(0.);
    let i = find_interval(big_f.len(), |i| big_f[i] <= u);
    let x0 = nodes[i];
    let x1 = nodes[i + 1];
    let f0 = f[i];
    let f1 = f[i + 1];
    let width = x1 - x0;
    // Approximate derivatives using finite differences
    let d0 = if i > 0 {
        width * (f1 - f[i - 1]) / (x1 - nodes[i - 1])
    } else {
        f1 - f0
    };
    let d1 = if i + 2 < nodes.len() {
        width * (f[i + 2] - f0) / (nodes[i + 2] - x0)
    } else {
        f1 - f0
    };
    // Re-scale u to the local interval [0, 1).
    u = (u - big_f[i]) / width;
    // Cubic Hermite coefficients of the local interpolant fhat(t)...
    let fhat_coeffs = [
        f0,
        d0,
        -2.0 * d0 - d1 + 3.0 * (f1 - f0),
        d0 + d1 + 2.0 * (f0 - f1),
    ];
    let fhat_coeffs_ref = &fhat_coeffs;
    // ...and of its antiderivative big_fhat(t) (one degree higher).
    let big_fhat_coeffs = [
        0.0,
        f0,
        0.5 * d0,
        (1.0 / 3.0) * (-2.0 * d0 - d1) + f1 - f0,
        0.25 * (d0 + d1) + 0.5 * (f0 - f1),
    ];
    let big_fhat_coeffs_ref = &big_fhat_coeffs;
    let mut fhat = 0.;
    let mut big_fhat = 0.;
    // Solve big_fhat(t) = u by safeguarded Newton iteration; the closure
    // also records fhat(t) (the derivative) for the return values.
    let eval = |t: Float| -> (Float, Float) {
        big_fhat = evaluate_polynomial(t, big_fhat_coeffs_ref).unwrap_or(0.);
        fhat = evaluate_polynomial(t, fhat_coeffs_ref).unwrap_or(0.);
        (big_fhat - u, fhat)
    };
    let t = newton_bisection(0., 1., eval);
    (
        x0 + width * t,
        fhat,
        // PDF = function value normalized by the total integral.
        fhat / big_f.last().copied().unwrap_or(1.0),
    )
}
/// Importance-samples along the second dimension of a 2D Catmull–Rom
/// spline, with the first dimension fixed at `alpha`.
///
/// `values` and `cdf` are row-major `nodes1.len() × nodes2.len()` tables;
/// rows are first blended with the four Catmull–Rom weights at `alpha`,
/// then the blended 1D function is sampled exactly as in
/// `sample_catmull_rom`. Returns (sampled position, function value, PDF),
/// or zeros if `alpha` is out of range or the blended CDF is empty.
pub fn sample_catmull_rom_2d(
    nodes1: &[Float],
    nodes2: &[Float],
    values: &[Float],
    cdf: &[Float],
    alpha: Float,
    mut u: Float,
) -> (Float, Float, Float) {
    let (offset, weights) = match catmull_rom_weights(nodes1, alpha) {
        Some(res) => res,
        None => return (0., 0., 0.),
    };
    let n2 = nodes2.len();
    // Blends up to four adjacent rows of `array` at column `idx`.
    let interpolate = |array: &[Float], idx: usize| -> Float {
        let mut v = 0.;
        for i in 0..4 {
            if weights[i] != 0. {
                v += array[(offset + i) * n2 + idx] * weights[i];
            }
        }
        v
    };
    // Total integral of the blended function (last CDF column).
    let maximum = interpolate(cdf, n2 - 1);
    if maximum == 0. {
        return (0., 0., 0.);
    }
    u *= maximum;
    let idx = find_interval(n2, |i| interpolate(cdf, i) <= u);
    let f0 = interpolate(values, idx);
    let f1 = interpolate(values, idx + 1);
    let x0 = nodes2[idx];
    let x1 = nodes2[idx + 1];
    let width = x1 - x0;
    // Finite-difference derivative estimates at the interval ends.
    let d0 = if idx > 0 {
        width * (f1 - interpolate(values, idx - 1)) / (x1 - nodes2[idx - 1])
    } else {
        f1 - f0
    };
    let d1 = if idx + 2 < n2 {
        width * (interpolate(values, idx + 2) - f0) / (nodes2[idx + 2] - x0)
    } else {
        f1 - f0
    };
    // Re-scale u to the local interval.
    u = (u - interpolate(cdf, idx)) / width;
    // Hermite coefficients of the interpolant and its antiderivative.
    let fhat_coeffs = [
        f0,
        d0,
        -2.0 * d0 - d1 + 3.0 * (f1 - f0),
        d0 + d1 + 2.0 * (f0 - f1),
    ];
    let fhat_coeffs_ref = &fhat_coeffs;
    let big_fhat_coeffs = [
        0.0,
        f0,
        0.5 * d0,
        (1.0 / 3.0) * (-2.0 * d0 - d1) + f1 - f0,
        0.25 * (d0 + d1) + 0.5 * (f0 - f1),
    ];
    let big_fhat_coeffs_ref = &big_fhat_coeffs;
    let mut big_fhat = 0.0;
    let mut fhat = 0.0;
    // Invert the local CDF by safeguarded Newton iteration.
    let eval = |t: Float| -> (Float, Float) {
        big_fhat = evaluate_polynomial(t, big_fhat_coeffs_ref).unwrap_or(0.);
        fhat = evaluate_polynomial(t, fhat_coeffs_ref).unwrap_or(0.);
        (big_fhat - u, fhat)
    };
    let t = newton_bisection(0.0, 1.0, eval);
    let sample = x0 + width * t;
    let fval = fhat;
    let pdf = fhat / maximum;
    (sample, fval, pdf)
}
/// Samples the logistic distribution with scale `s` by inverting its CDF at
/// `u` ∈ (0, 1).
pub fn sample_logistic(u: Float, s: Float) -> Float {
    -s * (1. / u - 1.).ln()
}
/// CDF of the logistic distribution with scale `s` — the inverse of
/// `sample_logistic` (maps a sample `x` back to its uniform variate).
pub fn invert_logistic_sample(x: Float, s: Float) -> Float {
    1. / (1. + (-x / s).exp())
}
/// PDF of the logistic distribution restricted (renormalized) to [a, b];
/// zero outside the interval.
pub fn trimmed_logistic_pdf(x: Float, s: Float, a: Float, b: Float) -> Float {
    if x < a || x > b {
        return 0.;
    }
    // Renormalize by the CDF mass that falls inside [a, b].
    let p = |val: Float| invert_logistic_sample(val, s);
    logistic(x, s) / (p(b) - p(a))
}
/// Samples the logistic distribution restricted to [a, b] by remapping `u`
/// into the CDF range [P(a), P(b)] before inverting.
pub fn sample_trimmed_logistic(u: Float, s: Float, a: Float, b: Float) -> Float {
    let p = |val: Float| invert_logistic_sample(val, s);
    let u = lerp(u, p(a), p(b));
    let x = sample_logistic(u, s);
    // Clamp guards against floating-point drift at the interval ends.
    clamp_t(x, a, b)
}
/// PDF for uniformly sampling a direction over the hemisphere: 1/(2π).
pub fn uniform_hemisphere_pdf() -> Float {
    INV_2_PI
}
@ -496,6 +685,473 @@ pub struct PLSample {
pub pdf: Float,
}
/// A piecewise-constant 1D distribution over [min, max], sampled by CDF
/// inversion.
#[derive(Debug, Clone)]
pub struct PiecewiseConstant1D {
    pub func: Vec<Float>,
    pub cdf: Vec<Float>,
    pub min: Float,
    pub max: Float,
    pub func_integral: Float,
}

impl PiecewiseConstant1D {
    /// Builds the distribution over the canonical domain [0, 1].
    pub fn new(f: &[Float]) -> Self {
        Self::new_with_bounds(f, 0., 1.)
    }

    /// Builds the distribution over [min, max]. Negative function values
    /// contribute their absolute value; if the integral is zero the CDF
    /// degenerates to the uniform ramp.
    pub fn new_with_bounds(f: &[Float], min: Float, max: Float) -> Self {
        assert!(max > min);
        let n = f.len();
        let func: Vec<Float> = f.iter().map(|val| val.abs()).collect();
        // Running integral: each bucket adds its value times the bucket width.
        let mut cdf = vec![0.; n + 1];
        for i in 1..=n {
            debug_assert!(func[i - 1] >= 0.);
            cdf[i] = cdf[i - 1] + func[i - 1] * (max - min) / n as Float;
        }
        let func_integral = cdf[n];
        if func_integral == 0. {
            // Degenerate (all-zero) function: fall back to a uniform CDF.
            let n_float = n as Float;
            for (i, c) in cdf.iter_mut().enumerate() {
                *c = i as Float / n_float;
            }
        } else {
            // Normalize the CDF to [0, 1].
            let inv_integral = 1.0 / func_integral;
            for c in cdf.iter_mut() {
                *c *= inv_integral;
            }
        }
        Self {
            func,
            cdf,
            min,
            max,
            func_integral,
        }
    }

    /// Integral of the (absolute) function over its domain.
    pub fn integral(&self) -> Float {
        self.func_integral
    }

    /// Number of function buckets.
    pub fn size(&self) -> usize {
        self.func.len()
    }

    /// Inverts the CDF at `u`; returns (sampled value, PDF, bucket index).
    pub fn sample(&self, u: Float) -> (Float, Float, usize) {
        let o = find_interval(self.cdf.len(), |idx| self.cdf[idx] <= u);
        // Fractional position of u inside its CDF segment.
        let segment = self.cdf[o + 1] - self.cdf[o];
        let mut du = u - self.cdf[o];
        if segment > 0. {
            du /= segment;
        }
        debug_assert!(!du.is_nan());
        let value = lerp((o as Float + du) / self.size() as Float, self.min, self.max);
        let pdf_val = if self.func_integral > 0. {
            self.func[o] / self.func_integral
        } else {
            0.
        };
        (value, pdf_val, o)
    }
}
#[derive(Debug, Clone)]
pub struct PiecewiseConstant2D {
pub p_conditional_v: Vec<PiecewiseConstant1D>,
pub p_marginal: PiecewiseConstant1D,
pub domain: Bounds2f,
}
impl PiecewiseConstant2D {
pub fn new(data: &Array2D<Float>, nu: usize, nv: usize, domain: Bounds2f) -> Self {
let mut p_conditional_v = Vec::with_capacity(nv);
for v in 0..nv {
let start = v * nu;
let end = start + nu;
p_conditional_v.push(PiecewiseConstant1D::new_with_bounds(
&data.as_slice()[start..end],
domain.p_min.x(),
domain.p_max.x(),
));
}
let marginal_func: Vec<Float> = p_conditional_v.iter().map(|p| p.integral()).collect();
let p_marginal = PiecewiseConstant1D::new_with_bounds(
&marginal_func,
domain.p_min.y(),
domain.p_max.y(),
);
Self {
p_conditional_v,
p_marginal,
domain,
}
}
pub fn new_with_bounds(data: &Array2D<Float>, domain: Bounds2f) -> Self {
Self::new(data, data.x_size(), data.y_size(), domain)
}
pub fn new_with_data(data: &Array2D<Float>) -> Self {
let nx = data.x_size();
let ny = data.y_size();
Self::new(
data,
nx,
ny,
Bounds2f::from_points(Point2f::new(0., 0.), Point2f::new(1., 1.)),
)
}
pub fn resolution(&self) -> Point2i {
Point2i::new(
self.p_conditional_v[0].size() as i32,
self.p_conditional_v[1].size() as i32,
)
}
pub fn integral(&self) -> f32 {
self.p_marginal.integral()
}
pub fn sample(&self, u: Point2f) -> (Point2f, f32, Point2i) {
let (d1, pdf1, off_y) = self.p_marginal.sample(u.y());
let (d0, pdf0, off_x) = self.p_conditional_v[off_y].sample(u.x());
let pdf = pdf0 * pdf1;
let offset = Point2i::new(off_x as i32, off_y as i32);
(Point2f::new(d0, d1), pdf, offset)
}
pub fn pdf(&self, p: Point2f) -> f32 {
let p_offset = self.domain.offset(&p);
let nu = self.p_conditional_v[0].size();
let nv = self.p_marginal.size();
let iu = (p_offset.x() * nu as f32).clamp(0.0, nu as f32 - 1.0) as usize;
let iv = (p_offset.y() * nv as f32).clamp(0.0, nv as f32 - 1.0) as usize;
let integral = self.p_marginal.integral();
if integral == 0.0 {
0.0
} else {
self.p_conditional_v[iv].func[iu] / integral
}
}
}
/// 2D summed-area table (integral image), accumulated in f64 to limit
/// rounding error. Supports O(1) integrals of axis-aligned rectangles of
/// the [0, 1]^2 domain.
#[derive(Debug, Clone)]
pub struct SummedAreaTable {
    // sum[(x, y)] = sum of all values with coordinates <= (x, y).
    sum: Array2D<f64>,
}
impl SummedAreaTable {
    /// Builds the inclusive prefix-sum table from a grid of values.
    pub fn new(values: &Array2D<Float>) -> Self {
        let width = values.x_size();
        let height = values.y_size();
        let mut sum = Array2D::<f64>::new_with_dims(width, height);
        sum[(0, 0)] = values[(0, 0)] as f64;
        // First row and column are plain running sums.
        for x in 1..width {
            sum[(x, 0)] = values[(x, 0)] as f64 + sum[(x - 1, 0)];
        }
        for y in 1..height {
            sum[(0, y)] = values[(0, y)] as f64 + sum[(0, y - 1)];
        }
        // Interior: inclusion–exclusion with the diagonal neighbor.
        for y in 1..height {
            for x in 1..width {
                let term = values[(x, y)] as f64;
                let left = sum[(x - 1, y)];
                let up = sum[(x, y - 1)];
                let diag = sum[(x - 1, y - 1)];
                sum[(x, y)] = term + left + up - diag;
            }
        }
        Self { sum }
    }
    /// Integral of the stored function over `extent` (coordinates in
    /// [0, 1]^2), via the four-corner inclusion–exclusion rule, normalized
    /// by the total cell count. Clamped at zero to absorb rounding noise.
    pub fn integral(&self, extent: Bounds2f) -> Float {
        let s = self.lookup(extent.p_max.x(), extent.p_max.y())
            - self.lookup(extent.p_min.x(), extent.p_max.y())
            + self.lookup(extent.p_min.x(), extent.p_min.y())
            - self.lookup(extent.p_max.x(), extent.p_min.y());
        let total_area = (self.sum.x_size() * self.sum.y_size()) as f64;
        (s / total_area).max(0.0) as Float
    }
    // Bilinearly interpolated table lookup at continuous [0, 1]^2
    // coordinates scaled to the table resolution.
    fn lookup(&self, mut x: Float, mut y: Float) -> f64 {
        x *= self.sum.x_size() as Float;
        y *= self.sum.y_size() as Float;
        let x0 = x as i32;
        let y0 = y as i32;
        let v00 = self.lookup_int(x0, y0);
        let v10 = self.lookup_int(x0 + 1, y0);
        let v01 = self.lookup_int(x0, y0 + 1);
        let v11 = self.lookup_int(x0 + 1, y0 + 1);
        let dx = (x - x0 as Float) as f64;
        let dy = (y - y0 as Float) as f64;
        (1.0 - dx) * (1.0 - dy) * v00
            + dx * (1.0 - dy) * v10
            + (1.0 - dx) * dy * v01
            + dx * dy * v11
    }
    // Integer lookup with an exclusive border: index 0 denotes an empty
    // prefix (sum 0); other indices shift down by one and clamp to the
    // table extent.
    fn lookup_int(&self, x: i32, y: i32) -> f64 {
        if x == 0 || y == 0 {
            return 0.0;
        }
        let ix = (x - 1).min(self.sum.x_size() as i32 - 1) as usize;
        let iy = (y - 1).min(self.sum.y_size() as i32 - 1) as usize;
        self.sum[(ix, iy)]
    }
}
/// A piecewise-constant 2D distribution that can be sampled restricted to
/// an arbitrary window `b` of the [0, 1]^2 domain, using a summed-area
/// table for O(1) windowed integrals and bisection for CDF inversion.
#[derive(Debug, Clone)]
pub struct WindowedPiecewiseConstant2D {
    sat: SummedAreaTable,
    func: Array2D<Float>,
}
impl WindowedPiecewiseConstant2D {
    /// Builds the summed-area table for `func` and stores both.
    pub fn new(func: Array2D<Float>) -> Self {
        let sat = SummedAreaTable::new(&func);
        Self { sat, func }
    }
    /// Samples a point inside window `b` with probability proportional to
    /// the function; returns None if the function is zero over `b`.
    /// First inverts the marginal CDF in x, then the conditional CDF in y
    /// within the sampled column strip.
    pub fn sample(&self, u: Point2f, b: Bounds2f) -> Option<(Point2f, Float)> {
        let b_int = self.sat.integral(b);
        if b_int == 0.0 {
            return None;
        }
        // Marginal CDF in x: fraction of b's integral left of x.
        let px = |x: Float| -> Float {
            let mut bx = b;
            bx.p_max[0] = x;
            self.sat.integral(bx) / b_int
        };
        let nx = self.func.x_size();
        let px_val = Self::sample_bisection(px, u.x(), b.p_min.x(), b.p_max.x(), nx);
        // Restrict to the single function column containing the sampled x.
        let nx_f = nx as Float;
        let x_start = (px_val * nx_f).floor() / nx_f;
        let x_end = (px_val * nx_f).ceil() / nx_f;
        let mut b_cond = Bounds2f::from_points(
            Point2f::new(x_start, b.p_min.y()),
            Point2f::new(x_end, b.p_max.y()),
        );
        if b_cond.p_min.x() == b_cond.p_max.x() {
            // Degenerate strip (x landed exactly on a cell edge): widen by
            // one cell so the conditional integral is non-empty.
            b_cond.p_max[0] += 1.0 / nx_f;
        }
        let cond_integral = self.sat.integral(b_cond);
        if cond_integral == 0.0 {
            return None;
        }
        // Conditional CDF in y within the strip.
        let py = |y: Float| -> Float {
            let mut by = b_cond;
            by.p_max[1] = y;
            self.sat.integral(by) / cond_integral
        };
        let ny = self.func.y_size();
        let py_val = Self::sample_bisection(py, u.y(), b.p_min.y(), b.p_max.y(), ny);
        let p = Point2f::new(px_val, py_val);
        let pdf = self.eval(p) / b_int;
        Some((p, pdf))
    }
    /// PDF of `p` under the distribution restricted to window `b`.
    pub fn pdf(&self, p: Point2f, b: Bounds2f) -> Float {
        let func_int = self.sat.integral(b);
        if func_int == 0.0 {
            return 0.0;
        }
        self.eval(p) / func_int
    }
    // Nearest-cell function lookup at continuous [0, 1]^2 coordinates.
    fn eval(&self, p: Point2f) -> Float {
        let nx = self.func.x_size();
        let ny = self.func.y_size();
        let ix = ((p.x() * nx as Float) as i32).min(nx as i32 - 1).max(0) as usize;
        let iy = ((p.y() * ny as Float) as i32).min(ny as i32 - 1).max(0) as usize;
        self.func[(ix, iy)]
    }
    // Inverts the monotone CDF `p_func` at `u` on [min, max]: bisects until
    // the bracket spans at most one of the n grid cells (the CDF is linear
    // within a cell), then finishes with a linear interpolation.
    fn sample_bisection<F>(p_func: F, u: Float, mut min: Float, mut max: Float, n: usize) -> Float
    where
        F: Fn(Float) -> Float,
    {
        let n_f = n as Float;
        while (n_f * max).ceil() - (n_f * min).floor() > 1.0 {
            debug_assert!(p_func(min) <= u + 1e-5);
            debug_assert!(p_func(max) >= u - 1e-5);
            let mid = (min + max) / 2.0;
            if p_func(mid) > u {
                max = mid;
            } else {
                min = mid;
            }
        }
        let numerator = u - p_func(min);
        let denominator = p_func(max) - p_func(min);
        let t = if denominator == 0.0 {
            0.5
        } else {
            numerator / denominator
        };
        let res = crate::core::pbrt::lerp(t, min, max);
        res.clamp(min, max)
    }
}
/// One bucket of an `AliasTable`.
#[derive(Debug, Clone, Copy)]
pub struct Bin {
    // Probability of keeping this bucket's own outcome (vs. taking `alias`).
    q: Float,
    // Normalized probability mass of this outcome.
    p: Float,
    // Index of the outcome returned when the alias branch is taken.
    alias: usize,
}
/// Alias table for O(1) sampling of a discrete distribution (Vose/Walker
/// alias method): each of the n bins holds its own outcome plus an alias,
/// so a sample needs one table lookup and one comparison.
#[derive(Debug, Clone)]
pub struct AliasTable {
    bins: Vec<Bin>,
}
impl AliasTable {
    /// Builds the table from non-negative, unnormalized outcome weights in
    /// O(n). Panics if the weight sum is not positive; an empty slice
    /// yields an empty table.
    pub fn new(weights: &[Float]) -> Self {
        let n = weights.len();
        if n == 0 {
            return Self { bins: Vec::new() };
        }
        // Normalize in f64 to limit rounding error.
        let sum: f64 = weights.iter().map(|&w| w as f64).sum();
        assert!(sum > 0.0, "Sum of weights must be positive");
        let mut bins = Vec::with_capacity(n);
        for &w in weights {
            bins.push(Bin {
                p: (w as f64 / sum) as Float,
                q: 0.0,
                alias: 0,
            });
        }
        // Scaled probability (p * n) together with its bin index.
        struct Outcome {
            p_hat: f64,
            index: usize,
        }
        // Split outcomes into those below and above the 1/n average.
        let mut under = Vec::with_capacity(n);
        let mut over = Vec::with_capacity(n);
        for (i, bin) in bins.iter().enumerate() {
            let p_hat = (bin.p as f64) * (n as f64);
            if p_hat < 1.0 {
                under.push(Outcome { p_hat, index: i });
            } else {
                over.push(Outcome { p_hat, index: i });
            }
        }
        // Pair each under-full bin with an over-full one; the over-full
        // bin's leftover mass goes back onto whichever list it now fits.
        while !under.is_empty() && !over.is_empty() {
            let un = under.pop().unwrap();
            let ov = over.pop().unwrap();
            bins[un.index].q = un.p_hat as Float;
            bins[un.index].alias = ov.index;
            let p_excess = un.p_hat + ov.p_hat - 1.0;
            if p_excess < 1.0 {
                under.push(Outcome {
                    p_hat: p_excess,
                    index: ov.index,
                });
            } else {
                over.push(Outcome {
                    p_hat: p_excess,
                    index: ov.index,
                });
            }
        }
        // Numerical leftovers keep their own outcome with certainty.
        while let Some(ov) = over.pop() {
            bins[ov.index].q = 1.0;
            bins[ov.index].alias = ov.index;
        }
        while let Some(un) = under.pop() {
            bins[un.index].q = 1.0;
            bins[un.index].alias = un.index;
        }
        Self { bins }
    }
    /// Samples an outcome from `u` ∈ [0, 1); returns (index, pmf,
    /// remapped u), where the remapped u is reusable as a fresh uniform
    /// variate.
    pub fn sample(&self, u: Float) -> (usize, Float, Float) {
        let n = self.bins.len();
        let val = u * (n as Float);
        let offset = std::cmp::min(val as usize, n - 1);
        let up = (val - (offset as Float)).min(ONE_MINUS_EPSILON);
        let bin = &self.bins[offset];
        if up < bin.q {
            debug_assert!(bin.p > 0.0);
            let pmf = bin.p;
            let u_remapped = (up / bin.q).min(ONE_MINUS_EPSILON);
            (offset, pmf, u_remapped)
        } else {
            // Alias branch: report the aliased outcome and its pmf.
            let alias_idx = bin.alias;
            let alias_p = self.bins[alias_idx].p;
            debug_assert!(alias_p > 0.0);
            let u_remapped = ((up - bin.q) / (1.0 - bin.q)).min(ONE_MINUS_EPSILON);
            (alias_idx, alias_p, u_remapped)
        }
    }
    /// Number of outcomes.
    pub fn size(&self) -> usize {
        self.bins.len()
    }
    /// Probability mass of outcome `index`.
    pub fn pmf(&self, index: usize) -> Float {
        self.bins[index].p
    }
}
#[derive(Debug, Clone)]
pub struct PiecewiseLinear2D<const N: usize> {
size: Vector2i,
@ -640,7 +1296,7 @@ impl<const N: usize> PiecewiseLinear2D<N> {
&param_weights,
)
};
let row = Self::find_interval(marginal_size, |idx| fetch_marginal(idx) <= sample.y());
let row = find_interval(marginal_size, |idx| fetch_marginal(idx) <= sample.y());
let marginal_cdf_row = fetch_marginal(row);
sample[1] -= marginal_cdf_row;
let r0 = self.lookup(
@ -680,7 +1336,7 @@ impl<const N: usize> PiecewiseLinear2D<N> {
);
(1.0 - sample.y()) * v0 + sample.y() * v1
};
let col = Self::find_interval(self.size.x() as u32, |idx| {
let col = find_interval(self.size.x() as u32, |idx| {
fetch_conditional(idx) <= sample.x()
});
sample[0] -= fetch_conditional(col);
@ -825,22 +1481,6 @@ impl<const N: usize> PiecewiseLinear2D<N> {
pdf * self.inv_patch_size.x() * self.inv_patch_size.y()
}
fn find_interval(size: u32, pred: impl Fn(u32) -> bool) -> u32 {
let mut first = 1u32;
let mut size = size - 1;
while size > 0 {
let half = size >> 1;
let middle = first + half;
if pred(middle) {
first = middle + 1;
size -= half + 1;
} else {
size = half;
}
}
first.saturating_sub(1)
}
fn get_slice_info(&self, params: [Float; N]) -> (u32, [(Float, Float); N]) {
let mut param_weight = [(0.0, 0.0); N];
let mut slice_offset = 0u32;
@ -851,7 +1491,7 @@ impl<const N: usize> PiecewiseLinear2D<N> {
continue;
}
let param_index = Self::find_interval(self.param_size[dim], |idx| {
let param_index = find_interval(self.param_size[dim], |idx| {
self.param_values[dim][idx as usize] <= params[dim]
});
@ -882,17 +1522,134 @@ impl<const N: usize> PiecewiseLinear2D<N> {
for mask in 0..num_corners {
let mut offset = 0u32;
let mut weight: Float = 1.0;
for d in 0..N {
let bit = (mask >> d) & 1;
if bit == 1 {
offset += self.param_strides[d] * size;
weight *= param_weight[d].1;
let mut current_mask = mask;
for (weights, &stride) in param_weight.iter().zip(&self.param_strides) {
if (current_mask & 1) == 1 {
offset += stride * size;
weight *= weights.1;
} else {
weight *= param_weight[d].0;
weight *= weights.0;
}
current_mask >>= 1;
}
result += weight * data[(i0 + offset) as usize];
}
result
}
}
/// Weighted reservoir sampler holding a single item: after a stream of
/// `add` calls, each candidate ends up retained with probability
/// weight / total_weight, using one RNG draw per candidate.
#[derive(Clone, Debug)]
pub struct WeightedReservoirSampler<T> {
    rng: Rng,
    // Sum of all candidate weights seen so far.
    weight_sum: Float,
    // Weight of the currently retained sample.
    reservoir_weight: Float,
    reservoir: Option<T>,
}

impl<T> Default for WeightedReservoirSampler<T> {
    /// Empty sampler using the default RNG stream.
    fn default() -> Self {
        Self {
            rng: Rng::default(),
            weight_sum: 0.0,
            reservoir_weight: 0.0,
            reservoir: None,
        }
    }
}

impl<T> WeightedReservoirSampler<T> {
    /// Creates an empty sampler seeded on RNG stream `seed`.
    pub fn new(seed: u64) -> Self {
        let mut rng = Rng::default();
        rng.set_sequence(seed);
        Self {
            rng,
            weight_sum: 0.0,
            reservoir_weight: 0.0,
            reservoir: None,
        }
    }
    /// Re-seeds the internal RNG (does not clear the reservoir).
    pub fn seed(&mut self, seed: u64) {
        self.rng.set_sequence(seed);
    }
    /// Considers `sample` with the given weight; returns true if it was
    /// kept. Delegates to `add_closure` so the acceptance logic lives in
    /// exactly one place (the two bodies previously duplicated it).
    pub fn add(&mut self, sample: T, weight: Float) -> bool {
        self.add_closure(move || sample, weight)
    }
    /// Like `add`, but only constructs the sample (via `func`) if it is
    /// actually accepted.
    pub fn add_closure<F>(&mut self, func: F, weight: Float) -> bool
    where
        F: FnOnce() -> T,
    {
        self.weight_sum += weight;
        // Accept with probability weight / weight_sum. If weight_sum is
        // still 0 the ratio is NaN and the comparison is false, so nothing
        // is stored.
        let p = weight / self.weight_sum;
        if self.rng.uniform::<Float>() < p {
            self.reservoir = Some(func());
            self.reservoir_weight = weight;
            return true;
        }
        false
    }
    /// Copies the reservoir state (but not the RNG) from `other`.
    pub fn copy_from(&mut self, other: &Self)
    where
        T: Clone,
    {
        self.weight_sum = other.weight_sum;
        self.reservoir = other.reservoir.clone();
        self.reservoir_weight = other.reservoir_weight;
    }
    /// True once any candidate with positive weight has been seen.
    pub fn has_sample(&self) -> bool {
        self.weight_sum > 0.0
    }
    /// The currently retained sample, if any.
    pub fn get_sample(&self) -> Option<&T> {
        self.reservoir.as_ref()
    }
    /// Probability with which the retained sample was selected.
    pub fn sample_probability(&self) -> Float {
        if self.weight_sum == 0.0 {
            0.0
        } else {
            self.reservoir_weight / self.weight_sum
        }
    }
    /// Total weight seen so far.
    pub fn weight_sum(&self) -> Float {
        self.weight_sum
    }
    /// Clears the reservoir and the accumulated weight.
    pub fn reset(&mut self) {
        self.reservoir_weight = 0.0;
        self.weight_sum = 0.0;
        self.reservoir = None;
    }
    /// Merges another reservoir into this one: the other's retained sample
    /// is considered with the other's full weight sum; on acceptance its
    /// own reservoir weight is carried over.
    pub fn merge(&mut self, other: &WeightedReservoirSampler<T>)
    where
        T: Clone,
    {
        if let Some(other_sample) = &other.reservoir
            && self.add(other_sample.clone(), other.weight_sum)
        {
            self.reservoir_weight = other.reservoir_weight;
        }
    }
}

View file

@ -1,11 +1,11 @@
use super::math::safe_sqrt;
use super::sampling::sample_uniform_disk_polar;
use super::spectrum::{N_SPECTRUM_SAMPLES, SampledSpectrum};
use crate::core::pbrt::{Float, PI, clamp_t, lerp};
use crate::geometry::{
Normal3f, Point2f, Vector2f, Vector3f, VectorLike, abs_cos_theta, cos_phi, cos2_theta, sin_phi,
tan2_theta,
};
use crate::spectra::{N_SPECTRUM_SAMPLES, SampledSpectrum};
use crate::utils::math::square;
use num::complex::Complex;
@ -151,7 +151,7 @@ pub fn fr_dielectric(cos_theta_i: Float, eta: Float) -> Float {
pub fn fr_complex(cos_theta_i: Float, eta: Complex<Float>) -> Float {
let cos_corr = clamp_t(cos_theta_i, 0., 1.);
let sin2_theta_i = 1. - square(cos_corr);
let sin2_theta_t: Complex<Float> = (sin2_theta_i / square(eta)).into();
let sin2_theta_t: Complex<Float> = sin2_theta_i / square(eta);
let cos2_theta_t: Complex<Float> = (1. - sin2_theta_t).sqrt();
let r_parl = (eta * cos_corr - cos2_theta_t) / (eta * cos_corr + cos2_theta_t);

7295
src/utils/sobol.rs Normal file

File diff suppressed because it is too large Load diff

View file

@ -1,721 +0,0 @@
use once_cell::sync::Lazy;
use std::fmt;
use std::ops::{
Add, AddAssign, Div, DivAssign, Index, IndexMut, Mul, MulAssign, Neg, Sub, SubAssign,
};
use std::sync::Arc;
use super::color::{RGB, RGBSigmoidPolynomial, XYZ};
use super::colorspace::RGBColorspace;
use crate::core::cie;
use crate::core::pbrt::Float;
/// Integral of the CIE Y matching curve; normalizes XYZ conversions.
pub const CIE_Y_INTEGRAL: Float = 106.856895;
/// Number of wavelengths carried per spectral sample.
/// NOTE(review): 1200 is unusually large for this constant (pbrt uses 4);
/// every SampledSpectrum stores this many Floats — confirm it is intended.
pub const N_SPECTRUM_SAMPLES: usize = 1200;
/// Shortest wavelength considered, in nm.
pub const LAMBDA_MIN: i32 = 360;
/// Longest wavelength considered, in nm.
pub const LAMBDA_MAX: i32 = 830;
/// Spectral values at the N_SPECTRUM_SAMPLES wavelengths described by a
/// `SampledWavelengths`.
#[derive(Debug, Copy, Clone)]
pub struct SampledSpectrum {
    values: [Float; N_SPECTRUM_SAMPLES],
}
impl Default for SampledSpectrum {
    /// All-zero (black) spectrum.
    fn default() -> Self {
        Self {
            values: [0.0; N_SPECTRUM_SAMPLES],
        }
    }
}
impl SampledSpectrum {
    /// A spectrum with the constant value `c` at every sample.
    pub fn new(c: Float) -> Self {
        Self {
            values: [c; N_SPECTRUM_SAMPLES],
        }
    }
    /// Builds a spectrum from the first N_SPECTRUM_SAMPLES entries of `v`.
    ///
    /// Panics with an explicit message if `v` is too short; the previous
    /// element-wise loop panicked with a bare index-out-of-bounds instead.
    pub fn from_vector(v: Vec<Float>) -> Self {
        assert!(
            v.len() >= N_SPECTRUM_SAMPLES,
            "SampledSpectrum::from_vector requires at least N_SPECTRUM_SAMPLES values"
        );
        let mut s = Self::new(0.0);
        s.values.copy_from_slice(&v[..N_SPECTRUM_SAMPLES]);
        s
    }
    /// True if every sample is exactly zero.
    pub fn is_black(&self) -> bool {
        self.values.iter().all(|&sample| sample == 0.0)
    }
    /// True if any sample is NaN.
    pub fn has_nans(&self) -> bool {
        self.values.iter().any(|&v| v.is_nan())
    }
    /// Smallest sample value.
    pub fn min_component_value(&self) -> Float {
        self.values.iter().fold(Float::INFINITY, |a, &b| a.min(b))
    }
    /// Largest sample value.
    pub fn max_component_value(&self) -> Float {
        self.values
            .iter()
            .fold(Float::NEG_INFINITY, |a, &b| a.max(b))
    }
    /// Arithmetic mean of the samples.
    pub fn average(&self) -> Float {
        self.values.iter().sum::<Float>() / (N_SPECTRUM_SAMPLES as Float)
    }
    /// Component-wise division that maps x/0 to 0 instead of inf/NaN
    /// (used with the wavelength PDF, whose entries may be zero).
    pub fn safe_div(&self, rhs: SampledSpectrum) -> Self {
        let mut r = SampledSpectrum::new(0.0);
        for i in 0..N_SPECTRUM_SAMPLES {
            r.values[i] = if rhs.values[i] != 0.0 {
                self.values[i] / rhs.values[i]
            } else {
                0.0
            }
        }
        r
    }
    /// Monte Carlo conversion to CIE XYZ: multiply by the matching curves
    /// sampled at `lambda`, divide by the sampling PDF, average, and
    /// normalize by the Y-curve integral.
    pub fn to_xyz(&self, lambda: &SampledWavelengths) -> XYZ {
        let x = spectra::X.sample(lambda);
        let y = spectra::Y.sample(lambda);
        let z = spectra::Z.sample(lambda);
        let pdf = lambda.pdf();
        XYZ::new(
            (*self * x).safe_div(pdf).average(),
            (*self * y).safe_div(pdf).average(),
            (*self * z).safe_div(pdf).average(),
        ) / CIE_Y_INTEGRAL
    }
    /// Converts to RGB in colorspace `c`, via XYZ.
    pub fn to_rgb(&self, lambda: &SampledWavelengths, c: &RGBColorspace) -> RGB {
        let xyz = self.to_xyz(lambda);
        c.to_rgb(xyz)
    }
}
impl Index<usize> for SampledSpectrum {
    type Output = Float;
    /// Read access to the i-th spectral sample.
    fn index(&self, i: usize) -> &Self::Output {
        &self.values[i]
    }
}
impl IndexMut<usize> for SampledSpectrum {
    /// Mutable access to the i-th spectral sample.
    fn index_mut(&mut self, i: usize) -> &mut Self::Output {
        &mut self.values[i]
    }
}
impl Add for SampledSpectrum {
type Output = Self;
fn add(self, rhs: Self) -> Self {
let mut ret = self;
for i in 0..N_SPECTRUM_SAMPLES {
ret.values[i] += rhs.values[i];
}
ret
}
}
impl AddAssign for SampledSpectrum {
fn add_assign(&mut self, rhs: Self) {
for i in 0..N_SPECTRUM_SAMPLES {
self.values[i] += rhs.values[i];
}
}
}
impl Sub for SampledSpectrum {
    type Output = Self;
    /// Component-wise difference; delegates to `-=`.
    fn sub(mut self, rhs: Self) -> Self {
        self -= rhs;
        self
    }
}
impl SubAssign for SampledSpectrum {
    /// Component-wise in-place difference.
    fn sub_assign(&mut self, rhs: Self) {
        for (a, b) in self.values.iter_mut().zip(rhs.values.iter()) {
            *a -= *b;
        }
    }
}
impl Sub<SampledSpectrum> for Float {
    type Output = SampledSpectrum;
    /// Scalar-minus-spectrum, component-wise.
    fn sub(self, rhs: SampledSpectrum) -> SampledSpectrum {
        let mut out = SampledSpectrum::new(0.0);
        for (o, r) in out.values.iter_mut().zip(rhs.values.iter()) {
            *o = self - *r;
        }
        out
    }
}
impl Mul for SampledSpectrum {
type Output = Self;
fn mul(self, rhs: Self) -> Self {
let mut ret = self;
for i in 0..N_SPECTRUM_SAMPLES {
ret.values[i] *= rhs.values[i];
}
ret
}
}
impl MulAssign for SampledSpectrum {
fn mul_assign(&mut self, rhs: Self) {
for i in 0..N_SPECTRUM_SAMPLES {
self.values[i] *= rhs.values[i];
}
}
}
impl Mul<Float> for SampledSpectrum {
type Output = Self;
fn mul(self, rhs: Float) -> Self {
let mut ret = self;
for i in 0..N_SPECTRUM_SAMPLES {
ret.values[i] *= rhs;
}
ret
}
}
impl Mul<SampledSpectrum> for Float {
type Output = SampledSpectrum;
fn mul(self, rhs: SampledSpectrum) -> SampledSpectrum {
rhs * self
}
}
impl MulAssign<Float> for SampledSpectrum {
fn mul_assign(&mut self, rhs: Float) {
for i in 0..N_SPECTRUM_SAMPLES {
self.values[i] *= rhs;
}
}
}
impl DivAssign for SampledSpectrum {
fn div_assign(&mut self, rhs: Self) {
for i in 0..N_SPECTRUM_SAMPLES {
debug_assert_ne!(0.0, rhs.values[i]);
self.values[i] /= rhs.values[i];
}
}
}
impl Div for SampledSpectrum {
type Output = Self;
fn div(self, rhs: Self) -> Self::Output {
let mut ret = self;
ret /= rhs;
ret
}
}
impl Div<Float> for SampledSpectrum {
type Output = Self;
fn div(self, rhs: Float) -> Self::Output {
debug_assert_ne!(rhs, 0.0);
let mut ret = self;
for i in 0..N_SPECTRUM_SAMPLES {
ret.values[i] /= rhs;
}
ret
}
}
impl DivAssign<Float> for SampledSpectrum {
fn div_assign(&mut self, rhs: Float) {
debug_assert_ne!(rhs, 0.0);
for i in 0..N_SPECTRUM_SAMPLES {
self.values[i] /= rhs;
}
}
}
impl Neg for SampledSpectrum {
type Output = Self;
fn neg(self) -> Self::Output {
let mut ret = SampledSpectrum::new(0.0);
for i in 0..N_SPECTRUM_SAMPLES {
ret.values[i] = -self.values[i];
}
ret
}
}
/// The set of wavelengths (and the PDFs they were sampled with) carried
/// along a ray for spectral rendering.
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct SampledWavelengths {
    // Wavelengths in nm, one per spectrum sample.
    pub lambda: [Float; N_SPECTRUM_SAMPLES],
    // Sampling PDF per wavelength; 0 marks a terminated wavelength.
    pub pdf: [Float; N_SPECTRUM_SAMPLES],
}
impl SampledWavelengths {
    /// Returns the per-wavelength PDFs as a `SampledSpectrum` (convenient
    /// for `safe_div` in radiance estimators).
    pub fn pdf(&self) -> SampledSpectrum {
        SampledSpectrum::from_vector(self.pdf.to_vec())
    }
    /// True if every wavelength except the first has been terminated
    /// (its PDF zeroed).
    pub fn secondary_terminated(&self) -> bool {
        for i in 1..N_SPECTRUM_SAMPLES {
            if self.pdf[i] != 0.0 {
                return false;
            }
        }
        true
    }
    /// Collapses to a single wavelength (e.g. after dispersive refraction):
    /// zeroes the secondary PDFs and rescales the primary one so the
    /// estimator stays unbiased.
    pub fn terminate_secondary(&mut self) {
        if !self.secondary_terminated() {
            for i in 1..N_SPECTRUM_SAMPLES {
                self.pdf[i] = 0.0;
            }
            self.pdf[0] /= N_SPECTRUM_SAMPLES as Float;
        }
    }
    /// Uniformly samples wavelengths over [lambda_min, lambda_max]: the
    /// first is jittered by `u` across the full range, the rest follow at
    /// equal spacing with wrap-around, all with PDF 1/(max − min).
    ///
    /// Bug fix: the first wavelength previously used
    /// `lerp(u, lambda_min, lambda_min)`, pinning it (and therefore the
    /// entire comb) to the lower bound regardless of `u`; the upper bound
    /// must be `lambda_max` (pbrt-v4 `SampledWavelengths::SampleUniform`).
    pub fn sample_uniform(u: Float, lambda_min: Float, lambda_max: Float) -> Self {
        let mut lambda = [0.0; N_SPECTRUM_SAMPLES];
        lambda[0] = crate::core::pbrt::lerp(u, lambda_min, lambda_max);
        let delta = (lambda_max - lambda_min) / N_SPECTRUM_SAMPLES as Float;
        for i in 1..N_SPECTRUM_SAMPLES {
            lambda[i] = lambda[i - 1] + delta;
            if lambda[i] > lambda_max {
                // Wrap around so the comb stays inside the range.
                lambda[i] = lambda_min + (lambda[i] - lambda_max);
            }
        }
        let mut pdf = [0.0; N_SPECTRUM_SAMPLES];
        for p in pdf.iter_mut() {
            *p = 1.0 / (lambda_max - lambda_min);
        }
        Self { lambda, pdf }
    }
    /// Maps `u` ∈ [0, 1) to a wavelength following a distribution that
    /// approximates the eye's visual sensitivity (pbrt-v4
    /// `SampleVisibleWavelengths`).
    pub fn sample_visible_wavelengths(u: Float) -> Float {
        538.0 - 138.888889 * Float::atanh(0.85691062 - 1.82750197 * u)
    }
    /// PDF of `sample_visible_wavelengths`; zero outside [360, 830] nm.
    ///
    /// Bug fix: pbrt-v4's `VisibleWavelengthsPDF` divides by cosh², not
    /// sqrt(cosh); the sqrt form does not match the sampling routine above
    /// and would bias the spectral estimator.
    pub fn visible_wavelengths_pdf(lambda: Float) -> Float {
        if lambda < 360.0 || lambda > 830.0 {
            return 0.0;
        }
        let c = Float::cosh(0.0072 * (lambda - 538.0));
        0.0039398042 / (c * c)
    }
    /// Samples all wavelengths from the visible-sensitivity distribution,
    /// with stratified offsets of `u` per sample.
    pub fn sample_visible(u: Float) -> Self {
        let mut lambda = [0.0; N_SPECTRUM_SAMPLES];
        let mut pdf = [0.0; N_SPECTRUM_SAMPLES];
        for i in 0..N_SPECTRUM_SAMPLES {
            let mut up = u + i as Float / N_SPECTRUM_SAMPLES as Float;
            if up > 1.0 {
                up -= 1.0;
            }
            lambda[i] = Self::sample_visible_wavelengths(up);
            pdf[i] = Self::visible_wavelengths_pdf(lambda[i]);
        }
        Self { lambda, pdf }
    }
}
impl Index<usize> for SampledWavelengths {
    type Output = Float;
    /// Read access to the i-th wavelength (not its PDF).
    fn index(&self, i: usize) -> &Self::Output {
        &self.lambda[i]
    }
}
impl IndexMut<usize> for SampledWavelengths {
    /// Mutable access to the i-th wavelength.
    fn index_mut(&mut self, i: usize) -> &mut Self::Output {
        &mut self.lambda[i]
    }
}
/// Tagged union over the concrete spectral-distribution representations.
#[derive(Debug, Clone)]
pub enum Spectrum {
    Constant(ConstantSpectrum),
    DenselySampled(DenselySampledSpectrum),
    PiecewiseLinear(PiecewiseLinearSpectrum),
    Blackbody(BlackbodySpectrum),
    RGBAlbedo(RGBAlbedoSpectrum),
}
impl Spectrum {
    /// Evaluates the spectral distribution at a single wavelength `lambda` (nm).
    pub fn sample_at(&self, lambda: Float) -> Float {
        match self {
            Spectrum::Constant(s) => s.sample_at(lambda),
            Spectrum::DenselySampled(s) => s.sample_at(lambda),
            Spectrum::PiecewiseLinear(s) => s.sample_at(lambda),
            Spectrum::Blackbody(s) => s.sample_at(lambda),
            Spectrum::RGBAlbedo(s) => s.sample_at(lambda),
        }
    }
    /// Maximum spectral value over all wavelengths.
    ///
    /// Fix: every arm previously returned 0.0 even though `Constant`,
    /// `DenselySampled`, and `RGBAlbedo` all implement `max_value`; those
    /// variants now delegate.
    pub fn max_value(&self) -> Float {
        match self {
            Spectrum::Constant(s) => s.max_value(),
            Spectrum::DenselySampled(s) => s.max_value(),
            // TODO: these types do not expose max_value yet; 0.0 is a
            // placeholder that understates the true maximum.
            Spectrum::PiecewiseLinear(_s) => 0.0,
            Spectrum::Blackbody(_s) => 0.0,
            Spectrum::RGBAlbedo(s) => s.max_value(),
        }
    }
    /// Evaluates the spectrum at each of the sampled wavelengths.
    pub fn sample(&self, wavelengths: &SampledWavelengths) -> SampledSpectrum {
        let mut s = SampledSpectrum::default();
        for i in 0..N_SPECTRUM_SAMPLES {
            s[i] = self.sample_at(wavelengths[i]);
        }
        s
    }
    /// Projects the spectrum onto the CIE X/Y/Z matching functions and
    /// normalizes by the integral of the Y matching function.
    pub fn to_xyz(&self) -> XYZ {
        XYZ::new(
            inner_product(&spectra::X, self),
            inner_product(&spectra::Y, self),
            inner_product(&spectra::Z, self),
        ) / CIE_Y_INTEGRAL
    }
}
/// Riemann-sum inner product of two spectra over `[LAMBDA_MIN, LAMBDA_MAX]`,
/// sampled at 1 nm intervals.
///
/// Fix: wavelengths were cast with `as f32`, which fails to type-check (and
/// would silently narrow) when the crate is built with the `use_f64` feature
/// where `Float = f64`; cast to `Float` instead.
pub fn inner_product(f: &Spectrum, g: &Spectrum) -> Float {
    let mut integral = 0.0;
    for lambda in LAMBDA_MIN..=LAMBDA_MAX {
        integral += f.sample_at(lambda as Float) * g.sample_at(lambda as Float);
    }
    integral
}
/// A spectrum that has the same value at every wavelength.
#[derive(Debug, Clone, Copy)]
pub struct ConstantSpectrum {
    // The constant spectral value returned for all wavelengths.
    c: Float,
}
impl ConstantSpectrum {
pub fn new(c: Float) -> Self {
Self { c }
}
pub fn sample_at(&self, _lambda: Float) -> Float {
self.c
}
fn max_value(&self) -> Float {
self.c
}
}
/// A reflectance (albedo) spectrum derived from an RGB value via a
/// sigmoid-polynomial fit in a given color space.
#[derive(Debug, Clone, Copy)]
pub struct RGBAlbedoSpectrum {
    // Polynomial whose evaluation gives the spectral value at each wavelength.
    rsp: RGBSigmoidPolynomial,
}
impl RGBAlbedoSpectrum {
    /// Fits a sigmoid polynomial to `rgb` using color space `cs`.
    pub fn new(cs: &RGBColorspace, rgb: RGB) -> Self {
        let rsp = cs.to_rgb_coeffs(rgb);
        Self { rsp }
    }
    /// Evaluates the fitted polynomial at `lambda` (nm).
    pub fn sample_at(&self, lambda: Float) -> Float {
        self.rsp.evaluate(lambda)
    }
    /// Maximum of the fitted polynomial over all wavelengths.
    pub fn max_value(&self) -> Float {
        self.rsp.max_value()
    }
    /// Evaluates the polynomial at each of the given sampled wavelengths.
    fn sample(&self, lambda: &SampledWavelengths) -> SampledSpectrum {
        let mut out = SampledSpectrum::default();
        for i in 0..N_SPECTRUM_SAMPLES {
            out[i] = self.sample_at(lambda[i]);
        }
        out
    }
}
/// An RGB-derived spectrum for values that may exceed 1: the input RGB is
/// normalized before fitting and the removed magnitude is kept in `scale`.
#[derive(Debug, Clone, Copy)]
pub struct UnboundedRGBSpectrum {
    // Magnitude factor re-applied after evaluating the normalized polynomial.
    scale: Float,
    // Sigmoid polynomial fitted to the normalized RGB.
    rsp: RGBSigmoidPolynomial,
}
impl UnboundedRGBSpectrum {
    /// Builds a spectrum from an RGB value whose components may exceed 1.
    /// The input is divided by twice its largest component before fitting, so
    /// the polynomial only sees values in a well-conditioned range.
    pub fn new(cs: RGBColorspace, rgb: RGB) -> Self {
        let max_component = rgb.r.max(rgb.g).max(rgb.b);
        let scale = 2.0 * max_component;
        // An all-zero input has nothing to normalize; fit black instead.
        let normalized = if scale == 0.0 {
            RGB::new(0.0, 0.0, 0.0)
        } else {
            rgb / scale
        };
        Self {
            scale,
            rsp: cs.to_rgb_coeffs(normalized),
        }
    }
    /// Polynomial value at `lambda`, restored to the original magnitude.
    pub fn sample_at(&self, lambda: Float) -> Float {
        self.scale * self.rsp.evaluate(lambda)
    }
    /// Maximum spectral value, restored to the original magnitude.
    pub fn max_value(&self) -> Float {
        self.scale * self.rsp.max_value()
    }
}
/// An emission spectrum built from an RGB value modulated by a base
/// illuminant; a `None` illuminant makes the spectrum identically zero.
#[derive(Debug, Clone, Default)]
pub struct RGBIlluminantSpectrum {
    // Magnitude factor applied on top of the polynomial and the illuminant.
    scale: Float,
    // Sigmoid polynomial fitted to the RGB value.
    rsp: RGBSigmoidPolynomial,
    // Base illuminant shaping the emission; None yields a zero spectrum.
    illuminant: Option<Arc<DenselySampledSpectrum>>,
}
impl RGBIlluminantSpectrum {
    /// Polynomial × illuminant × scale at `lambda`; zero when no illuminant
    /// is attached.
    pub fn sample_at(&self, lambda: Float) -> Float {
        self.illuminant.as_ref().map_or(0.0, |illum| {
            self.scale * self.rsp.evaluate(lambda) * illum.sample_at(lambda)
        })
    }
    /// Upper bound: product of the factors' individual maxima (conservative,
    /// since the maxima need not occur at the same wavelength).
    pub fn max_value(&self) -> Float {
        self.illuminant.as_ref().map_or(0.0, |illum| {
            self.scale * self.rsp.max_value() * illum.max_value()
        })
    }
}
/// A spectrum tabulated at every integer wavelength (nm) in
/// `[lambda_min, lambda_max]`, inclusive.
#[derive(Debug, Clone)]
pub struct DenselySampledSpectrum {
    // First tabulated wavelength (nm), inclusive.
    lambda_min: i32,
    // Last tabulated wavelength (nm), inclusive.
    lambda_max: i32,
    // One sample per integer wavelength; values[i] corresponds to lambda_min + i.
    values: Vec<Float>,
}
impl DenselySampledSpectrum {
    /// Creates a zero-initialized spectrum sampled at every integer
    /// wavelength in `[lambda_min, lambda_max]` (inclusive).
    pub fn new(lambda_min: i32, lambda_max: i32) -> Self {
        // max(0) guards against an inverted range producing a negative length.
        let n_values = (lambda_max - lambda_min + 1).max(0) as usize;
        Self {
            lambda_min,
            lambda_max,
            values: vec![0.0; n_values],
        }
    }
    /// Densely resamples an arbitrary spectrum at 1 nm spacing.
    /// (Shares the sampling loop with `from_function` instead of duplicating it.)
    pub fn from_spectrum(spec: &Spectrum, lambda_min: i32, lambda_max: i32) -> Self {
        Self::from_function(|l| spec.sample_at(l), lambda_min, lambda_max)
    }
    /// Densely samples the function `f` at every integer wavelength in range.
    pub fn from_function<F>(f: F, lambda_min: i32, lambda_max: i32) -> Self
    where
        F: Fn(Float) -> Float,
    {
        let mut s = Self::new(lambda_min, lambda_max);
        // An inverted range yields an empty `values` and an empty iterator here.
        for (index, lambda) in (lambda_min..=lambda_max).enumerate() {
            s.values[index] = f(lambda as Float);
        }
        s
    }
    /// Nearest-integer lookup; returns 0 outside the sampled range.
    pub fn sample_at(&self, lambda: Float) -> Float {
        let offset = (lambda.round() as i32) - self.lambda_min;
        if offset < 0 || offset as usize >= self.values.len() {
            0.0
        } else {
            self.values[offset as usize]
        }
    }
    /// Looks up each sampled wavelength with nearest-integer indexing.
    ///
    /// Fix: the offset was computed as `lambda.round() as usize - LAMBDA_MIN`,
    /// which (a) assumed this spectrum always starts at the global LAMBDA_MIN
    /// rather than `self.lambda_min`, and (b) underflows `usize` (a panic in
    /// debug builds) for wavelengths below that bound. Delegating to
    /// `sample_at` handles both ends correctly.
    pub fn sample(&self, lambda: &SampledWavelengths) -> SampledSpectrum {
        let mut s = SampledSpectrum::default();
        for i in 0..N_SPECTRUM_SAMPLES {
            s[i] = self.sample_at(lambda[i]);
        }
        s
    }
    /// Largest stored sample (Float::MIN when the spectrum is empty).
    // NOTE(review): near-duplicate of max_component_value below, differing
    // only in the identity element used for empty spectra — consider
    // consolidating once callers' expectations are confirmed.
    pub fn max_value(&self) -> Float {
        self.values.iter().fold(Float::MIN, |a, b| a.max(*b))
    }
    /// Smallest stored sample (+infinity when empty).
    pub fn min_component_value(&self) -> Float {
        self.values.iter().fold(Float::INFINITY, |a, &b| a.min(b))
    }
    /// Largest stored sample (-infinity when empty).
    pub fn max_component_value(&self) -> Float {
        self.values
            .iter()
            .fold(Float::NEG_INFINITY, |a, &b| a.max(b))
    }
    /// Arithmetic mean of the stored samples; 0 for an empty spectrum.
    ///
    /// Fix: the sum was divided by `N_SPECTRUM_SAMPLES` (the hero-wavelength
    /// count) instead of the number of stored samples, which is wrong for any
    /// spectrum whose range is not exactly N_SPECTRUM_SAMPLES nm wide; an
    /// empty spectrum also produced NaN instead of 0.
    pub fn average(&self) -> Float {
        if self.values.is_empty() {
            return 0.0;
        }
        self.values.iter().sum::<Float>() / self.values.len() as Float
    }
    /// Element-wise division by `rhs`, mapping division by zero to 0.
    ///
    /// Fix: the result was allocated via `Self::new(1, 1)` — a single-value
    /// spectrum — and then indexed up to N_SPECTRUM_SAMPLES, an out-of-bounds
    /// panic for N > 1. The result now spans this spectrum's own wavelength
    /// range and indexing is clamped to what both operands actually hold.
    // NOTE(review): pairing a densely sampled spectrum with an
    // N_SPECTRUM_SAMPLES-wide SampledSpectrum only divides the first N
    // values — confirm the intended semantics with callers.
    pub fn safe_div(&self, rhs: SampledSpectrum) -> Self {
        let mut r = Self::new(self.lambda_min, self.lambda_max);
        let n = self.values.len().min(N_SPECTRUM_SAMPLES);
        for i in 0..n {
            r.values[i] = if rhs[i] != 0.0 {
                self.values[i] / rhs[i]
            } else {
                0.0
            };
        }
        r
    }
}
/// A spectrum defined by (wavelength, value) control points with linear
/// interpolation between them.
#[derive(Debug, Clone)]
pub struct PiecewiseLinearSpectrum {
    // (lambda, value) pairs, kept sorted ascending by lambda.
    samples: Vec<(Float, Float)>,
}
impl PiecewiseLinearSpectrum {
    /// Builds the spectrum from interleaved `[lambda0, v0, lambda1, v1, …]`
    /// data; panics if the slice length is odd.
    pub fn from_interleaved(data: &[Float]) -> Self {
        assert!(
            data.len() % 2 == 0,
            "Interleaved data must have an even number of elements"
        );
        let mut samples: Vec<(Float, Float)> = data
            .chunks_exact(2)
            .map(|pair| (pair[0], pair[1]))
            .collect();
        // Interpolation below requires wavelengths in ascending order.
        samples.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
        Self { samples }
    }
    /// Linearly interpolates between the two samples bracketing `lambda`;
    /// clamps to the end values outside the sampled range, 0 if empty.
    fn sample_at(&self, lambda: Float) -> Float {
        let (Some(first), Some(last)) = (self.samples.first(), self.samples.last()) else {
            return 0.0;
        };
        if lambda <= first.0 {
            return first.1;
        }
        if lambda >= last.0 {
            return last.1;
        }
        // partition_point yields the first sample with wavelength >= lambda;
        // the boundary checks above guarantee 1 <= i < len, so (i-1, i)
        // brackets lambda.
        let i = self.samples.partition_point(|s| s.0 < lambda);
        let (l0, v0) = self.samples[i - 1];
        let (l1, v1) = self.samples[i];
        let t = (lambda - l0) / (l1 - l0);
        (1.0 - t) * v0 + t * v1
    }
}
/// Normalized blackbody emission spectrum at a fixed temperature.
#[derive(Debug, Clone, Copy)]
pub struct BlackbodySpectrum {
    // Temperature in Kelvin.
    temperature: Float,
    // 1 / peak emission so sample_at peaks at 1; 0 for non-physical temps.
    normalization_factor: Float,
}
// Planck's Law
impl BlackbodySpectrum {
    /// Speed of light (m/s).
    const C: Float = 299792458.0;
    /// Planck constant (J·s).
    const H: Float = 6.62606957e-34;
    /// Boltzmann constant (J/K).
    const KB: Float = 1.3806488e-23;
    /// Creates a blackbody spectrum for `temperature` (Kelvin), normalized so
    /// that its peak value is 1.
    pub fn new(temperature: Float) -> Self {
        // Wien's displacement law gives the peak wavelength, converted to nm.
        let lambda_max = 2.8977721e-3 / temperature * 1e9;
        let peak = Self::planck_law(lambda_max, temperature);
        // A non-positive peak (e.g. non-physical temperature) disables output.
        let normalization_factor = if peak > 0.0 { 1.0 / peak } else { 0.0 };
        Self {
            temperature,
            normalization_factor,
        }
    }
    /// Spectral radiance of a blackbody at `temp` (K) for wavelength
    /// `lambda_nm` (nm); 0 for non-positive temperatures.
    fn planck_law(lambda_nm: Float, temp: Float) -> Float {
        if temp <= 0.0 {
            return 0.0;
        }
        let lambda_m = lambda_nm * 1e-9; // Convert nm to meters
        let c1 = 2.0 * Self::H * Self::C * Self::C;
        let c2 = (Self::H * Self::C) / Self::KB;
        let numerator = c1 / lambda_m.powi(5);
        let denominator = (c2 / (lambda_m * temp)).exp() - 1.0;
        // Far short-wavelength tail: exp overflows to infinity, radiance -> 0.
        if denominator.is_infinite() {
            return 0.0;
        }
        numerator / denominator
    }
    /// Normalized emission at `lambda` (nm); peaks at 1 by construction.
    fn sample_at(&self, lambda: Float) -> Float {
        Self::planck_law(lambda, self.temperature) * self.normalization_factor
    }
}
/// A plain RGB triple stored as a three-component array.
#[derive(Debug, Clone, Copy)]
pub struct RGBSpectrum {
    // Channel values; presumably in [r, g, b] order — confirm with users.
    pub c: [Float; 3],
}
// NOTE(review): newtype over RGBSpectrum — presumably marks values allowed to
// exceed 1 (cf. UnboundedRGBSpectrum above); confirm intended use.
#[derive(Debug, Clone, Copy)]
pub struct RGBUnboundedSpectrum(pub RGBSpectrum);
pub mod spectra {
use super::*;
pub static X: Lazy<Spectrum> = Lazy::new(|| {
let pls = PiecewiseLinearSpectrum::from_interleaved(&cie::CIE_X);
let dss = DenselySampledSpectrum::from_spectrum(
&Spectrum::PiecewiseLinear(pls),
LAMBDA_MIN,
LAMBDA_MAX,
);
Spectrum::DenselySampled(dss)
});
pub static Y: Lazy<Spectrum> = Lazy::new(|| {
let pls = PiecewiseLinearSpectrum::from_interleaved(&cie::CIE_Y);
let dss = DenselySampledSpectrum::from_spectrum(
&Spectrum::PiecewiseLinear(pls),
LAMBDA_MIN,
LAMBDA_MAX,
);
Spectrum::DenselySampled(dss)
});
pub static Z: Lazy<Spectrum> = Lazy::new(|| {
let pls = PiecewiseLinearSpectrum::from_interleaved(&cie::CIE_Z);
let dss = DenselySampledSpectrum::from_spectrum(
&Spectrum::PiecewiseLinear(pls),
LAMBDA_MIN,
LAMBDA_MAX,
);
Spectrum::DenselySampled(dss)
});
}

View file

@ -65,12 +65,11 @@ pub fn evaluate_cubic_bezier(cp: &[Point3f], u: Float) -> (Point3f, Vector3f) {
lerp(u, cp[2], cp[3]),
];
let cp2 = [lerp(u, cp1[0], cp1[1]), lerp(u, cp1[1], cp1[2])];
let deriv: Vector3f;
if (cp2[1] - cp2[0]).norm_squared() > 0. {
deriv = (cp2[1] - cp2[0]) * 3.;
let deriv = if (cp2[1] - cp2[0]).norm_squared() > 0. {
(cp2[1] - cp2[0]) * 3.
} else {
deriv = cp[3] - cp[0]
}
cp[3] - cp[0]
};
(lerp(u, cp2[0], cp2[1]), deriv)
}

View file

@ -1,11 +1,16 @@
use num_traits::Float as NumFloat;
use std::error::Error;
use std::fmt::{self, Display};
use std::iter::{Product, Sum};
use std::ops::{Add, Div, Index, IndexMut, Mul};
use std::sync::Arc;
use super::color::{RGB, XYZ};
use super::math::{SquareMatrix, safe_acos};
use super::math::{SquareMatrix, radians, safe_acos};
use super::quaternion::Quaternion;
use crate::core::interaction::{
Interaction, InteractionData, InteractionTrait, MediumInteraction, SurfaceInteraction,
};
use crate::core::pbrt::{Float, gamma};
use crate::geometry::{
Bounds3f, Normal, Normal3f, Point, Point3f, Point3fi, Ray, Vector, Vector3f, Vector3fi,
@ -19,20 +24,19 @@ pub struct Transform<T: NumFloat> {
m_inv: SquareMatrix<T, 4>,
}
impl<T: NumFloat> Transform<T> {
impl<T: NumFloat + Sum + Product> Transform<T> {
pub fn new(m: SquareMatrix<T, 4>, m_inv: SquareMatrix<T, 4>) -> Self {
Self { m, m_inv }
}
pub fn from_matrix(m: SquareMatrix<T, 4>) -> Result<Self, Box<dyn Error>> {
pub fn from_matrix(m: SquareMatrix<T, 4>) -> Result<Self, InversionError> {
let inv = m.inverse()?;
Ok(Self { m, m_inv: inv })
}
pub fn identity() -> Self {
let m: SquareMatrix<T, 4> = SquareMatrix::identity();
let m_inv = m.clone();
Self { m, m_inv }
Self { m, m_inv: m }
}
pub fn is_identity(&self) -> bool {
@ -41,21 +45,28 @@ impl<T: NumFloat> Transform<T> {
pub fn inverse(&self) -> Self {
Self {
m: self.m_inv.clone(),
m_inv: self.m.clone(),
m: self.m_inv,
m_inv: self.m,
}
}
pub fn apply_inverse(&self, p: Point<T, 3>) -> Point<T, 3> {
self.clone() * p
*self * p
}
pub fn apply_inverse_vector(&self, v: Vector<T, 3>) -> Vector<T, 3> {
self.clone() * v
let x = v.x();
let y = v.y();
let z = v.z();
Vector::<T, 3>::new(
self.m_inv[0][0] * x + self.m_inv[0][1] * y + self.m_inv[0][2] * z,
self.m_inv[1][0] * x + self.m_inv[1][1] * y + self.m_inv[1][2] * z,
self.m_inv[2][0] * x + self.m_inv[2][1] * y + self.m_inv[2][2] * z,
)
}
pub fn apply_inverse_normal(&self, n: Normal<T, 3>) -> Normal<T, 3> {
self.clone() * n
*self * n
}
pub fn swaps_handedness(&self) -> bool {
@ -64,7 +75,17 @@ impl<T: NumFloat> Transform<T> {
[self.m[1][0], self.m[1][1], self.m[1][2]],
[self.m[2][0], self.m[2][1], self.m[2][2]],
]);
return s.determinant() < T::zero();
s.determinant() < T::zero()
}
pub fn get_matrix(&self) -> SquareMatrix<T, 4> {
self.m
}
}
impl<T: NumFloat + Sum + Product> Default for Transform<T> {
fn default() -> Self {
Self::identity()
}
}
@ -79,9 +100,9 @@ impl Transform<Float> {
let wp = self.m[3][0] * x + self.m[3][1] * y + self.m[3][2] * z + self.m[3][3];
if wp == 1. {
return Point3f::new(xp, yp, zp);
Point3f::new(xp, yp, zp)
} else {
return Point3f::new(xp / wp, yp / wp, zp / wp);
Point3f::new(xp / wp, yp / wp, zp / wp)
}
}
@ -95,9 +116,9 @@ impl Transform<Float> {
let wp = self.m[3][0] * x + self.m[3][1] * y + self.m[3][2] * z;
if wp == 1. {
return Vector3f::new(xp, yp, zp);
Vector3f::new(xp, yp, zp)
} else {
return Vector3f::new(xp / wp, yp / wp, zp / wp);
Vector3f::new(xp / wp, yp / wp, zp / wp)
}
}
@ -111,9 +132,9 @@ impl Transform<Float> {
let wp = self.m[3][0] * x + self.m[3][1] * y + self.m[3][2] * z;
if wp == 1. {
return Normal3f::new(xp, yp, zp);
Normal3f::new(xp, yp, zp)
} else {
return Normal3f::new(xp / wp, yp / wp, zp / wp);
Normal3f::new(xp / wp, yp / wp, zp / wp)
}
}
@ -171,9 +192,9 @@ impl Transform<Float> {
}
if wp == 1. {
return Point3fi::new_with_error(Point([xp, yp, zp]), p_error);
Point3fi::new_with_error(Point([xp, yp, zp]), p_error)
} else {
return Point3fi::new_with_error(Point([xp / wp, yp / wp, zp / wp]), p_error);
Point3fi::new_with_error(Point([xp / wp, yp / wp, zp / wp]), p_error)
}
}
@ -202,13 +223,150 @@ impl Transform<Float> {
}
if wp == 1. {
return Vector3fi::new_with_error(Vector3f::new(xp, yp, zp), v_error);
Vector3fi::new_with_error(Vector3f::new(xp, yp, zp), v_error)
} else {
return Vector3fi::new_with_error(Vector3f::new(xp / wp, yp / wp, zp / wp), v_error);
Vector3fi::new_with_error(Vector3f::new(xp / wp, yp / wp, zp / wp), v_error)
}
}
pub fn to_quaternion(&self) -> Quaternion {
pub fn apply_to_interaction(&self, inter: &Interaction) -> Interaction {
match inter {
Interaction::Surface(si) => {
let mut ret = si.clone();
ret.common.pi = self.apply_to_interval(&si.common.pi);
let n = self.apply_to_normal(si.common.n);
ret.common.wo = self.apply_to_vector(si.common.wo).normalize();
ret.dpdu = self.apply_to_vector(si.dpdu);
ret.dpdv = self.apply_to_vector(si.dpdv);
ret.dndu = self.apply_to_normal(si.dndu);
ret.dndv = self.apply_to_normal(si.dndv);
ret.dpdx = self.apply_to_vector(si.dpdx);
ret.dpdy = self.apply_to_vector(si.dpdy);
let shading_n = self.apply_to_normal(si.shading.n);
ret.shading.n = shading_n.normalize();
ret.shading.dpdu = self.apply_to_vector(si.shading.dpdu);
ret.shading.dpdv = self.apply_to_vector(si.shading.dpdv);
ret.shading.dndu = self.apply_to_normal(si.shading.dndu);
ret.shading.dndv = self.apply_to_normal(si.shading.dndv);
ret.common.n = n.normalize().face_forward(ret.shading.n.into());
Interaction::Surface(ret)
}
Interaction::Medium(mi) => {
let mut ret = mi.clone();
ret.common.pi = self.apply_to_interval(&mi.common.pi);
ret.common.n = self.apply_to_normal(mi.common.n).normalize();
ret.common.wo = self.apply_to_vector(mi.common.wo).normalize();
Interaction::Medium(ret)
}
Interaction::Simple(sim) => {
let mut ret = sim.clone();
ret.common.pi = self.apply_to_interval(&sim.common.pi);
if sim.common.n != Default::default() {
ret.common.n = self.apply_to_normal(sim.common.n).normalize();
}
if sim.common.wo != Default::default() {
ret.common.wo = self.apply_to_vector(sim.common.wo).normalize();
}
Interaction::Simple(ret)
}
}
}
pub fn apply_inverse_interval(&self, p: &Point3fi) -> Point3fi {
let x = Float::from(p.x());
let y = Float::from(p.y());
let z = Float::from(p.z());
let m_inv = &self.m_inv;
// Compute transformed coordinates from point
let xp = (m_inv[0][0] * x + m_inv[0][1] * y) + (m_inv[0][2] * z + m_inv[0][3]);
let yp = (m_inv[1][0] * x + m_inv[1][1] * y) + (m_inv[1][2] * z + m_inv[1][3]);
let zp = (m_inv[2][0] * x + m_inv[2][1] * y) + (m_inv[2][2] * z + m_inv[2][3]);
let wp = (m_inv[3][0] * x + m_inv[3][1] * y) + (m_inv[3][2] * z + m_inv[3][3]);
// Compute absolute error for transformed point
let g3 = gamma(3);
let p_out_error = if p.is_exact() {
Vector3f::new(
g3 * (m_inv[0][0] * x).abs() + (m_inv[0][1] * y).abs() + (m_inv[0][2] * z).abs(),
g3 * (m_inv[1][0] * x).abs() + (m_inv[1][1] * y).abs() + (m_inv[1][2] * z).abs(),
g3 * (m_inv[2][0] * x).abs() + (m_inv[2][1] * y).abs() + (m_inv[2][2] * z).abs(),
)
} else {
let p_in_error = p.error();
let g3_plus_1 = g3 + 1.0;
Vector3f::new(
g3_plus_1
* (m_inv[0][0].abs() * p_in_error.x()
+ m_inv[0][1].abs() * p_in_error.y()
+ m_inv[0][2].abs() * p_in_error.z())
+ g3 * ((m_inv[0][0] * x).abs()
+ (m_inv[0][1] * y).abs()
+ (m_inv[0][2] * z).abs()
+ m_inv[0][3].abs()),
g3_plus_1
* (m_inv[1][0].abs() * p_in_error.x()
+ m_inv[1][1].abs() * p_in_error.y()
+ m_inv[1][2].abs() * p_in_error.z())
+ g3 * ((m_inv[1][0] * x).abs()
+ (m_inv[1][1] * y).abs()
+ (m_inv[1][2] * z).abs()
+ m_inv[1][3].abs()),
g3_plus_1
* (m_inv[2][0].abs() * p_in_error.x()
+ m_inv[2][1].abs() * p_in_error.y()
+ m_inv[2][2].abs() * p_in_error.z())
+ g3 * ((m_inv[2][0] * x).abs()
+ (m_inv[2][1] * y).abs()
+ (m_inv[2][2] * z).abs()
+ m_inv[2][3].abs()),
)
};
if wp == 1.0 {
Point3fi::new_with_error(Point3f::new(xp, yp, zp), p_out_error)
} else {
Point3fi::new_with_error(Point3f::new(xp / wp, yp / wp, zp / wp), p_out_error)
}
}
pub fn apply_inverse_ray(&self, r: &Ray, t_max: Option<Float>) -> (Ray, Float) {
let mut o = self.apply_inverse_interval(&Point3fi::new_from_point(r.o));
let d = self.apply_inverse_vector(r.d);
// Offset ray origin to edge of error bounds and compute _tMax_
let mut t = 0.;
let length_squared = d.norm_squared();
if length_squared > 0. {
let o_error = Vector3f::new(o.x().width() / 2., o.y().width() / 2., o.z().width() / 2.);
let dt = d.abs().dot(o_error) / length_squared;
o = o + Vector3fi::from(d * dt);
if let Some(t_max) = t_max {
t = t_max - dt;
}
}
(
Ray::new(Point3f::from(o), d, Some(r.time), r.medium.clone()),
t,
)
}
pub fn to_quaternion(self) -> Quaternion {
let trace = self.m.trace();
let mut quat = Quaternion::default();
if trace > 0. {
@ -282,6 +440,18 @@ impl Transform<Float> {
Self { m, m_inv }
}
pub fn perspective(fov: Float, n: Float, f: Float) -> Result<Transform<Float>, InversionError> {
let persp: SquareMatrix<Float, 4> = SquareMatrix::new([
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., f / (f - n), -f * n / (f - n)],
[0., 0., 1., 0.],
]);
let inv_tan_ang = 1. / (radians(fov) / 2.).tan();
let persp_transform = Transform::from_matrix(persp)?;
Ok(Transform::scale(inv_tan_ang, inv_tan_ang, 1.) * persp_transform)
}
pub fn orthographic(z_near: Float, z_far: Float) -> Self {
Self::scale(1., 1., 1. / (z_far - z_near)) * Self::translate(Vector3f::new(0., 0., -z_near))
}
@ -309,7 +479,7 @@ impl Transform<Float> {
m[2][1] = a.y() * a.z() * (1. - cos_theta) + a.x() * sin_theta;
m[2][2] = a.z() * a.z() + (1. - a.z() * a.z()) * cos_theta;
m[2][3] = 0.;
Transform::new(m.clone(), m.transpose())
Transform::new(m, m.transpose())
}
pub fn rotate_from_to(from: Vector3f, to: Vector3f) -> Self {
@ -336,7 +506,7 @@ impl Transform<Float> {
+ 4. * uv / (uu * vv) * v[i] * u[j];
}
}
Transform::new(r.clone(), r.transpose())
Transform::new(r, r.transpose())
}
pub fn rotate_x(theta: Float) -> Self {
@ -349,7 +519,7 @@ impl Transform<Float> {
[0., 0., 0., 1.],
]);
Self {
m: m.clone(),
m,
m_inv: m.transpose(),
}
}
@ -364,7 +534,7 @@ impl Transform<Float> {
[0., 0., 0., 1.],
]);
Self {
m: m.clone(),
m,
m_inv: m.transpose(),
}
}
@ -379,14 +549,14 @@ impl Transform<Float> {
[0., 0., 0., 1.],
]);
Self {
m: m.clone(),
m,
m_inv: m.transpose(),
}
}
pub fn decompose(&self) -> (Vector3f, SquareMatrix<Float, 4>, SquareMatrix<Float, 4>) {
let t = Vector3f::new(self.m[0][3], self.m[1][3], self.m[2][3]);
let mut m = self.m.clone();
let mut m = self.m;
for i in 0..3 {
m[i][3] = 0.;
m[3][i] = m[i][3];
@ -395,14 +565,14 @@ impl Transform<Float> {
m[3][3] = 1.;
let mut norm: Float;
let mut r = m.clone();
let mut r = m;
let mut count = 0;
loop {
let rit = r
.transpose()
.inverse()
.expect("Transform is not decomposable");
let rnext = (r.clone() + rit.clone()) / 2.;
let rnext = (r + rit) / 2.;
norm = 0.0;
for i in 0..3 {
let n = (r[i][0] - rnext[i][0]).abs()
@ -423,6 +593,20 @@ impl Transform<Float> {
* m;
(t, r, s)
}
pub fn has_scale(&self, tolerance: Option<Float>) -> bool {
let la2 = self
.apply_to_vector(Vector3f::new(1., 0., 0.))
.norm_squared();
let lb2 = self
.apply_to_vector(Vector3f::new(0., 1., 0.))
.norm_squared();
let lc2 = self
.apply_to_vector(Vector3f::new(0., 0., 1.))
.norm_squared();
let tol = tolerance.unwrap_or(1e-3);
(la2 - 1.).abs() > tol || (lb2 - 1.).abs() > tol || (lc2 - 1.).abs() > tol
}
}
impl<T: num_traits::Float> PartialEq for Transform<T> {
@ -442,15 +626,15 @@ impl<T: NumFloat> Mul for Transform<T> {
}
}
impl<'a, 'b, T> Mul<&'b Transform<T>> for &'a Transform<T>
impl<'b, T> Mul<&'b Transform<T>> for &Transform<T>
where
T: NumFloat,
{
type Output = Transform<T>;
fn mul(self, rhs: &'b Transform<T>) -> Self::Output {
Transform {
m: self.m.clone() * rhs.m.clone(),
m_inv: rhs.m_inv.clone() * self.m_inv.clone(),
m: self.m * rhs.m,
m_inv: rhs.m_inv * self.m_inv,
}
}
}
@ -580,6 +764,7 @@ impl DerivativeTerm {
}
}
#[derive(Debug, Clone)]
pub struct AnimatedTransform {
pub start_transform: Transform<Float>,
pub end_transform: Transform<Float>,
@ -608,8 +793,8 @@ impl AnimatedTransform {
if !actually_animated {
return Self {
start_transform: start_transform.clone(),
end_transform: end_transform.clone(),
start_transform: *start_transform,
end_transform: *end_transform,
start_time,
end_time,
actually_animated: false,
@ -1752,7 +1937,7 @@ impl AnimatedTransform {
std::array::from_fn(|_| DerivativeTerm::default()),
)
};
return AnimatedTransform {
AnimatedTransform {
start_transform: *start_transform,
end_transform: *end_transform,
start_time,
@ -1767,7 +1952,7 @@ impl AnimatedTransform {
c3,
c4,
c5,
};
}
}
pub fn apply_point(&self, p: Point3f, time: Float) -> Point3f {
@ -1800,6 +1985,14 @@ impl AnimatedTransform {
t.apply_to_ray(r, t_max)
}
pub fn apply_interaction(&self, si: &Interaction) -> Interaction {
if !self.actually_animated {
return self.start_transform.apply_to_interaction(si);
}
let t = self.interpolate(si.time());
t.apply_to_interaction(si)
}
pub fn interpolate(&self, time: Float) -> Transform<Float> {
if !self.actually_animated || time <= self.start_time {
return self.start_transform;
@ -1817,28 +2010,37 @@ impl AnimatedTransform {
let scale_transform =
Transform::from_matrix(scale).expect("Scale matrix is not inversible");
return Transform::translate(trans) * Transform::from(rotate) * scale_transform;
Transform::translate(trans) * Transform::from(rotate) * scale_transform
}
pub fn apply_inverse_point(&self, p: Point3f, time: Float) -> Point3f {
if !self.actually_animated {
return self.start_transform.apply_inverse(p);
}
return self.interpolate(time).apply_inverse(p);
self.interpolate(time).apply_inverse(p)
}
pub fn apply_inverse_vector(&self, v: Vector3f, time: Float) -> Vector3f {
if !self.actually_animated {
return self.start_transform.apply_inverse_vector(v);
}
return self.interpolate(time).apply_inverse_vector(v);
self.interpolate(time).apply_inverse_vector(v)
}
pub fn apply_inverse_normal(&self, n: Normal3f, time: Float) -> Normal3f {
if !self.actually_animated {
return self.start_transform.apply_inverse_normal(n);
}
return self.interpolate(time).apply_inverse_normal(n);
self.interpolate(time).apply_inverse_normal(n)
}
pub fn motion_bounds(&self, b: &Bounds3f) -> Bounds3f {
if !self.actually_animated {
return self.start_transform.apply_to_bounds(*b);
}
self.start_transform
.apply_to_bounds(*b)
.union(self.end_transform.apply_to_bounds(*b))
}
}