From 2518f9054aec6ba033a102fb802dd7bf643731f2 Mon Sep 17 00:00:00 2001
From: Thomas Rouch
Date: Wed, 22 Jun 2022 11:43:47 +0200
Subject: [PATCH] :art: format tabulations

---
 include/neural-graphics-primitives/common.h   |  6 +-
 .../common_device.cuh                         | 98 +++++++++----------
 include/neural-graphics-primitives/testbed.h  |  4 +-
 src/python_api.cu                             |  6 +-
 src/testbed_nerf.cu                           | 12 +--
 5 files changed, 63 insertions(+), 63 deletions(-)

diff --git a/include/neural-graphics-primitives/common.h b/include/neural-graphics-primitives/common.h
index f96694d85..1d6d8b6ec 100644
--- a/include/neural-graphics-primitives/common.h
+++ b/include/neural-graphics-primitives/common.h
@@ -82,9 +82,9 @@ enum class ERenderMode : int {
 static constexpr const char* RenderModeStr = "AO\0Shade\0Normals\0Positions\0Depth\0Distortion\0Cost\0Slice\0\0";
 
 enum class ECameraMode : int {
-    Perspective,
-    Orthographic,
-    Environment
+	Perspective,
+	Orthographic,
+	Environment
 };
 
 static constexpr const char* CameraModeStr = "Perspective\0Orthographic\0Environment\0\0";
diff --git a/include/neural-graphics-primitives/common_device.cuh b/include/neural-graphics-primitives/common_device.cuh
index 30ef267b8..0ad58fc8e 100644
--- a/include/neural-graphics-primitives/common_device.cuh
+++ b/include/neural-graphics-primitives/common_device.cuh
@@ -270,7 +270,7 @@ inline __host__ __device__ Ray pixel_to_ray(
 	bool snap_to_pixel_centers = false,
 	float focus_z = 1.0f,
 	float dof = 0.0f,
-    const ECameraMode camera_mode = ECameraMode::Perspective,
+	const ECameraMode camera_mode = ECameraMode::Perspective,
 	const CameraDistortion& camera_distortion = {},
 	const float* __restrict__ distortion_data = nullptr,
 	const Eigen::Vector2i distortion_resolution = Eigen::Vector2i::Zero()
@@ -278,61 +278,61 @@ inline __host__ __device__ Ray pixel_to_ray(
 	Eigen::Vector2f offset = ld_random_pixel_offset(snap_to_pixel_centers ? 0 : spp);
 	Eigen::Vector2f uv = (pixel.cast<float>() + offset).cwiseQuotient(resolution.cast<float>());
 
-    const Eigen::Vector3f shift = {parallax_shift.x(), parallax_shift.y(), 0.f};
+	const Eigen::Vector3f shift = {parallax_shift.x(), parallax_shift.y(), 0.f};
 
 	Eigen::Vector3f dir;
-    Eigen::Vector3f head_pos;
-    if(camera_mode == ECameraMode::Orthographic){
-        dir = {0.f, 0.f, 1.f}; // Camera forward
-        head_pos = {
-            (uv.x() - screen_center.x()) * (float)resolution.x() / focal_length.x(),
-            (uv.y() - screen_center.y()) * (float)resolution.y() / focal_length.y(),
-            0.0f
-        };
+	Eigen::Vector3f head_pos;
+	if(camera_mode == ECameraMode::Orthographic){
+		dir = {0.f, 0.f, 1.f}; // Camera forward
+		head_pos = {
+			(uv.x() - screen_center.x()) * (float)resolution.x() / focal_length.x(),
+			(uv.y() - screen_center.y()) * (float)resolution.y() / focal_length.y(),
+			0.0f
+		};
 		head_pos += shift;
-        dir -= shift / parallax_shift.z(); // we could use focus_z here in the denominator. for now, we pack m_scale in here.
-    }
-    else if(camera_mode == ECameraMode::Environment){
-        // Camera convention: XYZ <-> Right Down Front
-        head_pos = {0.f, 0.f, 0.f};
-        const float phi = (uv.y()-0.5) * M_PI;
-        const float theta = (uv.x()-0.5) * 2.0 * M_PI;
-        const float cos_phi = std::cos(phi);
-        dir = {
-            cos_phi*std::sin(theta),
-            std::sin(phi),
-            cos_phi*std::cos(theta)
-        };
+		dir -= shift / parallax_shift.z(); // we could use focus_z here in the denominator. for now, we pack m_scale in here.
+	}
+	else if(camera_mode == ECameraMode::Environment){
+		// Camera convention: XYZ <-> Right Down Front
+		head_pos = {0.f, 0.f, 0.f};
+		const float phi = (uv.y()-0.5) * M_PI;
+		const float theta = (uv.x()-0.5) * 2.0 * M_PI;
+		const float cos_phi = std::cos(phi);
+		dir = {
+			cos_phi*std::sin(theta),
+			std::sin(phi),
+			cos_phi*std::cos(theta)
+		};
 		// Parallax isn't handled
-    }
-    else { // Perspective
-        head_pos = {0.f, 0.f, 0.f};
-        if (camera_distortion.mode == ECameraDistortionMode::FTheta) {
-            dir = f_theta_undistortion(uv - screen_center, camera_distortion.params, {1000.f, 0.f, 0.f});
-            if (dir.x() == 1000.f) {
-                return {{1000.f, 0.f, 0.f}, {0.f, 0.f, 1.f}}; // return a point outside the aabb so the pixel is not rendered
-            }
-        } else if (camera_distortion.mode == ECameraDistortionMode::LatLong) {
+	}
+	else { // Perspective
+		head_pos = {0.f, 0.f, 0.f};
+		if (camera_distortion.mode == ECameraDistortionMode::FTheta) {
+			dir = f_theta_undistortion(uv - screen_center, camera_distortion.params, {1000.f, 0.f, 0.f});
+			if (dir.x() == 1000.f) {
+				return {{1000.f, 0.f, 0.f}, {0.f, 0.f, 1.f}}; // return a point outside the aabb so the pixel is not rendered
+			}
+		} else if (camera_distortion.mode == ECameraDistortionMode::LatLong) {
 			dir = latlong_to_dir(uv);
 		} else {
-            dir = {
-                (uv.x() - screen_center.x()) * (float)resolution.x() / focal_length.x(),
-                (uv.y() - screen_center.y()) * (float)resolution.y() / focal_length.y(),
-                1.0f
-            };
-            if (camera_distortion.mode == ECameraDistortionMode::Iterative) {
-                iterative_camera_undistortion(camera_distortion.params, &dir.x(), &dir.y());
-            }
-        }
-        if (distortion_data) {
-            dir.head<2>() += read_image<2>(distortion_data, distortion_resolution, uv);
-        }
+			dir = {
+				(uv.x() - screen_center.x()) * (float)resolution.x() / focal_length.x(),
+				(uv.y() - screen_center.y()) * (float)resolution.y() / focal_length.y(),
+				1.0f
+			};
+			if (camera_distortion.mode == ECameraDistortionMode::Iterative) {
+				iterative_camera_undistortion(camera_distortion.params, &dir.x(), &dir.y());
+			}
+		}
+		if (distortion_data) {
+			dir.head<2>() += read_image<2>(distortion_data, distortion_resolution, uv);
+		}
 		head_pos += shift;
-        dir -= shift / parallax_shift.z(); // we could use focus_z here in the denominator. for now, we pack m_scale in here.
-    }
+		dir -= shift / parallax_shift.z(); // we could use focus_z here in the denominator. for now, we pack m_scale in here.
+	}
 
-    dir = camera_matrix.block<3, 3>(0, 0) * dir;
-    Eigen::Vector3f origin = camera_matrix.block<3, 3>(0, 0) * head_pos + camera_matrix.col(3);
+	dir = camera_matrix.block<3, 3>(0, 0) * dir;
+	Eigen::Vector3f origin = camera_matrix.block<3, 3>(0, 0) * head_pos + camera_matrix.col(3);
 
 	if (dof == 0.0f) {
 		return {origin, dir};
@@ -439,7 +439,7 @@ inline __host__ __device__ Eigen::Vector2f motion_vector_3d(
 		snap_to_pixel_centers,
 		1.0f,
 		0.0f,
-        camera_mode,
+		camera_mode,
 		camera_distortion,
 		nullptr,
 		Eigen::Vector2i::Zero()
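
Note (illustration only, not part of the patch): the hunk above only swaps spaces for tabs, but for readers new to this fork the camera-mode logic it touches reduces to the per-pixel direction sketched below. This is a minimal standalone sketch assuming Eigen is available; the helper name camera_space_dir is made up here, and the lens distortion, parallax shift and depth-of-field handling of the real pixel_to_ray are deliberately left out.

#include <cmath>
#include <Eigen/Dense>

// Mirrors ECameraMode from common.h (redeclared so the sketch is self-contained).
enum class ECameraMode : int { Perspective, Orthographic, Environment };

// Camera-space ray direction for one pixel; uv is the pixel position in [0,1]^2.
Eigen::Vector3f camera_space_dir(
	ECameraMode camera_mode,
	const Eigen::Vector2f& uv,
	const Eigen::Vector2f& screen_center, // typically (0.5, 0.5)
	const Eigen::Vector2f& focal_length,  // in pixels
	const Eigen::Vector2i& resolution
) {
	if (camera_mode == ECameraMode::Orthographic) {
		// Rays are parallel to the camera's forward axis; the pixel position
		// only moves the ray origin (head_pos in pixel_to_ray).
		return {0.f, 0.f, 1.f};
	}
	if (camera_mode == ECameraMode::Environment) {
		// Equirectangular mapping: u spans 2*pi of longitude, v spans pi of
		// latitude, using the XYZ <-> Right Down Front convention.
		const float phi = (uv.y() - 0.5f) * float(M_PI);
		const float theta = (uv.x() - 0.5f) * 2.f * float(M_PI);
		const float cos_phi = std::cos(phi);
		return {cos_phi * std::sin(theta), std::sin(phi), cos_phi * std::cos(theta)};
	}
	// Perspective: pinhole ray through the image plane at z = 1.
	return {
		(uv.x() - screen_center.x()) * (float)resolution.x() / focal_length.x(),
		(uv.y() - screen_center.y()) * (float)resolution.y() / focal_length.y(),
		1.0f
	};
}

In the real pixel_to_ray, dir and the per-pixel head position additionally receive the parallax shift and are then rotated into world space by camera_matrix, as the hunk above shows.
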
diff --git a/include/neural-graphics-primitives/testbed.h b/include/neural-graphics-primitives/testbed.h
index d42ba6fb3..9aa7a7e95 100644
--- a/include/neural-graphics-primitives/testbed.h
+++ b/include/neural-graphics-primitives/testbed.h
@@ -156,7 +156,7 @@ class Testbed {
 		int show_accel,
 		float cone_angle_constant,
 		ERenderMode render_mode,
-        ECameraMode camera_mode,
+		ECameraMode camera_mode,
 		cudaStream_t stream
 	);
 
@@ -466,7 +466,7 @@ class Testbed {
 	float m_bounding_radius = 1;
 	float m_exposure = 0.f;
 
-    ECameraMode m_camera_mode = ECameraMode::Perspective;
+	ECameraMode m_camera_mode = ECameraMode::Perspective;
 	ERenderMode m_render_mode = ERenderMode::Shade;
 	EMeshRenderMode m_mesh_render_mode = EMeshRenderMode::VertexNormals;
 
diff --git a/src/python_api.cu b/src/python_api.cu
index c45cb4b58..dc186f9b3 100644
--- a/src/python_api.cu
+++ b/src/python_api.cu
@@ -238,9 +238,9 @@ PYBIND11_MODULE(pyngp, m) {
 		.value("Slice", ERenderMode::Slice)
 		.export_values();
 
-    py::enum_<ECameraMode>(m, "CameraMode")
+	py::enum_<ECameraMode>(m, "CameraMode")
 		.value("Perspective", ECameraMode::Perspective)
-        .value("Orthographic", ECameraMode::Orthographic)
+		.value("Orthographic", ECameraMode::Orthographic)
 		.value("Environment", ECameraMode::Environment)
 		.export_values();
 
@@ -430,7 +430,7 @@ PYBIND11_MODULE(pyngp, m) {
 		.def_readwrite("shall_train_network", &Testbed::m_train_network)
 		.def_readwrite("render_groundtruth", &Testbed::m_render_ground_truth)
 		.def_readwrite("render_mode", &Testbed::m_render_mode)
-        .def_readwrite("camera_mode", &Testbed::m_camera_mode)
+		.def_readwrite("camera_mode", &Testbed::m_camera_mode)
 		.def_readwrite("slice_plane_z", &Testbed::m_slice_plane_z)
 		.def_readwrite("dof", &Testbed::m_dof)
 		.def_readwrite("autofocus", &Testbed::m_autofocus)
diff --git a/src/testbed_nerf.cu b/src/testbed_nerf.cu
index 30676ff54..5b1bc206f 100644
--- a/src/testbed_nerf.cu
+++ b/src/testbed_nerf.cu
@@ -1791,7 +1791,7 @@ __global__ void init_rays_with_payload_kernel_nerf(
 	const float* __restrict__ distortion_data,
 	const Vector2i distortion_resolution,
 	ERenderMode render_mode,
-    ECameraMode camera_mode
+	ECameraMode camera_mode
 ) {
 	uint32_t x = threadIdx.x + blockDim.x * blockIdx.x;
 	uint32_t y = threadIdx.y + blockDim.y * blockIdx.y;
@@ -1822,7 +1822,7 @@ __global__ void init_rays_with_payload_kernel_nerf(
 		snap_to_pixel_centers,
 		plane_z,
 		dof,
-        camera_mode,
+		camera_mode,
 		camera_distortion,
 		distortion_data,
 		distortion_resolution
@@ -1972,7 +1972,7 @@ void Testbed::NerfTracer::init_rays_from_camera(
 	int show_accel,
 	float cone_angle_constant,
 	ERenderMode render_mode,
-    ECameraMode camera_mode,
+	ECameraMode camera_mode,
 	cudaStream_t stream
 ) {
 	// Make sure we have enough memory reserved to render at the requested resolution
@@ -2004,7 +2004,7 @@ void Testbed::NerfTracer::init_rays_from_camera(
 		distortion_data,
 		distortion_resolution,
 		render_mode,
-        camera_mode
+		camera_mode
 	);
 
 	m_n_rays_initialized = resolution.x() * resolution.y();
@@ -2267,7 +2267,7 @@ void Testbed::render_nerf(CudaRenderBuffer& render_buffer, const Vector2i& max_r
 		m_nerf.show_accel,
 		m_nerf.cone_angle_constant,
 		render_mode,
-        m_camera_mode,
+		m_camera_mode,
 		stream
 	);
 
@@ -2445,7 +2445,7 @@ void Testbed::Nerf::Training::export_camera_extrinsics(const std::string& filena
 		trajectory.emplace_back(frame);
 	}
 	std::ofstream file(filename);
-    file << std::setw(2) << trajectory << std::endl;
+	file << std::setw(2) << trajectory << std::endl;
 }
 
 Eigen::Matrix<float, 3, 4> Testbed::Nerf::Training::get_camera_extrinsics(int frame_idx) {
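
Note (illustration only, not part of the patch): the camera mode threaded through these hunks is an ordinary public member of Testbed, and the pybind11 code above mirrors it to Python as testbed.camera_mode together with the CameraMode enum (e.g. pyngp.CameraMode.Orthographic). A minimal hypothetical C++ sketch, assuming the usual Testbed constructor from this repository:

#include <neural-graphics-primitives/common.h>
#include <neural-graphics-primitives/testbed.h>

int main() {
	// Construct a NeRF testbed and select the camera model added by this fork;
	// the member name matches the declaration in testbed.h above.
	ngp::Testbed testbed{ngp::ETestbedMode::Nerf};
	testbed.m_camera_mode = ngp::ECameraMode::Environment; // or Orthographic / Perspective
	return 0;
}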