1 Single-Frame Denoising
In this part, you need to denoise the noisy input image \(\tilde C\) with a joint bilateral filter kernel \(J\), producing the denoised image \(\overline C\). Our joint bilateral filter kernel is defined as follows (reconstructed here from the weight terms in the code below, where \(p\) denotes world position):
\[
J(i,j)=\exp\!\left(-\frac{\|p(i)-p(j)\|^2}{2\sigma_p^2}-\frac{\|\tilde C(i)-\tilde C(j)\|^2}{2\sigma_c^2}-\frac{D_{normal}(i,j)^2}{2\sigma_n^2}-\frac{D_{plane}(i,j)^2}{2\sigma_{plane}^2}\right)
\]
For the \(D_{normal}(i,j)\) term, consider the following case: on a cube, we do not want any two adjacent faces to contribute to each other. We therefore define \(D_{normal}\) as the angle (in radians) between the two normals:
\[
D_{normal}(i,j)=\arccos\bigl(n(i)\cdot n(j)\bigr)
\]
For the \(D_{plane}(i,j)\) term, consider the following case: a book lies on a table, and the book's surface is exactly parallel to the table's. We certainly do not want pixels on the book and on the table to contribute to each other. We therefore define this term as the dot product of the unit vector from point i to point j with the normal at i:
\[
D_{plane}(i,j)=n(i)\cdot\frac{p(j)-p(i)}{\|p(j)-p(i)\|}
\]
Moreover, the \(D_{plane}(i,j)\) term is a better indicator than simply taking the difference of two depths. Consider the Cornell box scene: the left and right walls are almost parallel to the view direction, so depth changes rapidly across them. A plain depth difference would then prevent many pixels on the same wall from contributing to that wall itself.
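To make the two geometry terms concrete, here is a minimal, self-contained sketch; the Vec3 type and its helpers are illustrative stand-ins (not the skeleton's Float3), and the numbers reproduce the grazing-wall case: the depth difference between the two samples is large, while \(D_{plane}\) and \(D_{normal}\) are both zero, so the kernel still lets them contribute to each other.
#include <algorithm>
#include <cmath>
#include <cstdio>

// Illustrative stand-ins for the skeleton's vector helpers.
struct Vec3 { float x, y, z; };
static float dot(Vec3 a, Vec3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
static Vec3 sub(Vec3 a, Vec3 b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static float length(Vec3 v) { return std::sqrt(dot(v, v)); }
static Vec3 scale(Vec3 v, float s) { return {v.x * s, v.y * s, v.z * s}; }

// D_normal: angle (radians) between two unit normals. The dot product is
// clamped so floating-point error cannot push acos outside its domain.
static float DNormal(Vec3 ni, Vec3 nj) {
    return std::acos(std::min(1.0f, std::max(dot(ni, nj), 0.0f)));
}

// D_plane: unit vector from i to j, dotted with the normal at i.
// Zero when j lies in i's tangent plane; near +/-1 for stacked planes.
static float DPlane(Vec3 ni, Vec3 pi, Vec3 pj) {
    float len = length(sub(pj, pi));
    return len != 0.0f ? dot(ni, scale(sub(pj, pi), 1.0f / len)) : 0.0f;
}

int main() {
    // A wall almost parallel to the view direction (+z): two samples on it
    // differ a lot in depth but share one plane and one normal.
    Vec3 n = {1.0f, 0.0f, 0.0f};                           // wall normal
    Vec3 pi = {0.0f, 0.0f, 1.0f}, pj = {0.0f, 0.0f, 5.0f};
    std::printf("depth diff = %.2f\n", pj.z - pi.z);       // 4.00 -> large
    std::printf("D_plane    = %.2f\n", DPlane(n, pi, pj)); // 0.00 -> same plane
    std::printf("D_normal   = %.2f\n", DNormal(n, n));     // 0.00 -> same normal
    return 0;
}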
Here you need to complete the function Denoiser::Filter, which takes the current frame's information as input and returns the denoised image.
code:
Buffer2D<Float3> Denoiser::Filter(const FrameInfo &frameInfo) {
    int height = frameInfo.m_beauty.m_height;
    int width = frameInfo.m_beauty.m_width;
    Buffer2D<Float3> filteredImage = CreateBuffer2D<Float3>(width, height);
    const int kernelRadius = 16;
#pragma omp parallel for
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            Float3 rgb = Float3();
            float w_sum = 0.0f;
            for (int offsetX = -kernelRadius; offsetX <= kernelRadius; offsetX++) {
                for (int offsetY = -kernelRadius; offsetY <= kernelRadius; offsetY++) {
                    int x_ = x + offsetX;
                    int y_ = y + offsetY;
                    if (x_ < 0 || x_ >= width || y_ < 0 || y_ >= height)
                        continue;
                    Float3 p1 = frameInfo.m_position(x, y);
                    Float3 p2 = frameInfo.m_position(x_, y_);
                    Float3 c1 = frameInfo.m_beauty(x, y);
                    Float3 c2 = frameInfo.m_beauty(x_, y_);
                    Float3 n1 = frameInfo.m_normal(x, y);
                    Float3 n2 = frameInfo.m_normal(x_, y_);
                    // D_normal: angle between the two normals. Clamp the dot
                    // product to [0, 1] so floating-point error cannot push
                    // acos out of its domain and produce NaN.
                    float dn = acos(std::min(1.0f, std::max(Dot(n1, n2), 0.0f)));
                    // Background pixels carry zero-length normals; disable the
                    // normal term for them.
                    if (Length(n1) <= 0 || Length(n2) <= 0)
                        dn = 0.0f;
                    // D_plane: unit vector from i to j dotted with the normal
                    // at i; defined as 0 when the two positions coincide.
                    float len = Length(p2 - p1);
                    float dp = len != 0 ? Dot(n1, Normalize(p2 - p1)) : 0.0f;
                    // The four Gaussian exponents of the joint bilateral kernel.
                    float p_d = -SqrDistance(p1, p2) / (2 * m_sigmaCoord * m_sigmaCoord);
                    float c_d = -SqrDistance(c1, c2) / (2 * m_sigmaColor * m_sigmaColor);
                    float n_d = -dn * dn / (2 * m_sigmaNormal * m_sigmaNormal);
                    float plane_d = -dp * dp / (2 * m_sigmaPlane * m_sigmaPlane);
                    float w = exp(p_d + c_d + n_d + plane_d);
                    rgb += c2 * w;
                    w_sum += w;
                }
            }
            // Fall back to the noisy input if every weight underflowed to zero.
            filteredImage(x, y) =
                w_sum == 0.0f ? frameInfo.m_beauty(x, y) : rgb / w_sum;
        }
    }
    return filteredImage;
}
2 Projecting the Previous Frame's Result
In this part, you need to compute, for every pixel of the current frame, its corresponding point in the previous frame, and project the previous frame's result onto the current frame.
We use the known geometry to find the corresponding previous-frame pixel:
\[
Screen_{i-1} = P_{i-1} V_{i-1} M_{i-1} M_i^{-1}\, World_i
\]
where the subscript i denotes the i-th frame, M is the object-to-world matrix, V the world-to-camera matrix, and P the camera-to-screen matrix.
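Reading the chain right to left, the reprojection proceeds in three steps:
\[
Local = M_i^{-1}\,World_i,\qquad World_{i-1} = M_{i-1}\,Local,\qquad Screen_{i-1} = P_{i-1}V_{i-1}\,World_{i-1}
\]
that is, the current world position is pulled back into the object's local frame, pushed forward with the object's previous-frame model matrix, and then projected with the previous frame's camera.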
After finding the corresponding pixel, we need to check whether it is valid. Here we use two simple tests:
- Whether the corresponding point was inside the screen in the previous frame.
- Whether the object IDs in the previous frame and the current frame match.
Here you need to complete the function Denoiser::Reprojection, which takes the current frame's information as input. It projects the previous frame's result (stored in m_accColor) onto the current frame (stored back into m_accColor), and records whether each projection is valid in m_valid, for use when accumulating multiple frames.
code:
void Denoiser::Reprojection(const FrameInfo &frameInfo) {
    int height = m_accColor.m_height;
    int width = m_accColor.m_width;
    // By convention, the last two matrices of a frame are world-to-screen
    // and world-to-camera.
    Matrix4x4 preWorldToScreen =
        m_preFrameInfo.m_matrix[m_preFrameInfo.m_matrix.size() - 1];
    Matrix4x4 preWorldToCamera =
        m_preFrameInfo.m_matrix[m_preFrameInfo.m_matrix.size() - 2];
#pragma omp parallel for
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            m_valid(x, y) = false;
            m_misc(x, y) = Float3(0.f);
            int objId = (int)frameInfo.m_id(x, y);
            if (objId == -1)
                continue; // background pixel: nothing to reproject
            // World (frame i) -> object local -> world (frame i-1) -> screen.
            auto local2World = frameInfo.m_matrix[objId];
            auto worldPos = frameInfo.m_position(x, y);
            auto localPos = Inverse(local2World)(worldPos, Float3::EType::Point);
            auto preLocal2World = m_preFrameInfo.m_matrix[objId];
            auto preWorldPos = preLocal2World(localPos, Float3::EType::Point);
            auto preScreen = preWorldToScreen(preWorldPos, Float3::EType::Point);
            // Check 1: the reprojected point must have been on screen.
            if (preScreen.x < 0 || preScreen.x >= width || preScreen.y < 0 ||
                preScreen.y >= height)
                continue;
            int preX = (int)preScreen.x;
            int preY = (int)preScreen.y;
            // Check 2: the object id must match; otherwise this point was
            // occluded (or a different object) in the previous frame.
            if ((int)m_preFrameInfo.m_id(preX, preY) != objId)
                continue;
            m_valid(x, y) = true;
            m_misc(x, y) = m_accColor(preX, preY);
        }
    }
    // m_misc now holds the reprojected history; publish it via the swap.
    std::swap(m_misc, m_accColor);
}
3 Accumulating Multiple Frames
In this part, you need to combine the denoised current frame \(\overline C_i\) with the denoised previous frame \(\overline C_{i-1}\):
\[
\overline C_i \leftarrow \alpha\,\overline C_i + (1-\alpha)\,\mathrm{Clamp}\bigl(\overline C_{i-1}\bigr)
\]
As for the choice of α: when no valid corresponding point was found in the previous frame, we set α to 1.
For the Clamp step, we first compute the mean µ and variance σ of \(\overline C_i\) over a 7×7 neighborhood, then clamp the previous frame's color \(\overline C_{i-1}\) into the range (µ − kσ, µ + kσ).
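Written out per color channel over the 7×7 window Ω of N valid pixels, exactly as the code below computes it:
\[
\mu = \frac{1}{N}\sum_{j\in\Omega}\overline C_i(j),\qquad \sigma = \frac{1}{N}\sum_{j\in\Omega}\bigl(\overline C_i(j)-\mu\bigr)^2
\]
\[
\overline C_{i-1} \leftarrow \min\bigl(\max\bigl(\overline C_{i-1},\ \mu-k\sigma\bigr),\ \mu+k\sigma\bigr)
\]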
Here you need to complete the function Denoiser::TemporalAccumulation, which takes the denoised current-frame image as input and stores the final result in m_accColor; this is our final denoised output.
code:
void Denoiser::TemporalAccumulation(const Buffer2D<Float3> &curFilteredColor) {
    int height = m_accColor.m_height;
    int width = m_accColor.m_width;
    int kernelRadius = 3; // 7x7 neighborhood
#pragma omp parallel for
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            // Reprojected previous-frame color (filled by Reprojection).
            Float3 color = m_accColor(x, y);
            // Compute the per-channel mean and variance of the current frame
            // over the 7x7 neighborhood.
            Float3 mean = Float3(), sigma = Float3();
            int count = 0;
            for (int offsetX = -kernelRadius; offsetX <= kernelRadius; offsetX++) {
                for (int offsetY = -kernelRadius; offsetY <= kernelRadius; offsetY++) {
                    int x_ = x + offsetX;
                    int y_ = y + offsetY;
                    if (x_ >= 0 && x_ < width && y_ >= 0 && y_ < height) {
                        count++;
                        mean += curFilteredColor(x_, y_);
                    }
                }
            }
            mean /= (float)count;
            for (int offsetX = -kernelRadius; offsetX <= kernelRadius; offsetX++) {
                for (int offsetY = -kernelRadius; offsetY <= kernelRadius; offsetY++) {
                    int x_ = x + offsetX;
                    int y_ = y + offsetY;
                    if (x_ >= 0 && x_ < width && y_ >= 0 && y_ < height) {
                        auto diff = curFilteredColor(x_, y_) - mean;
                        sigma += diff * diff;
                    }
                }
            }
            sigma /= (float)count;
            // Clamp the history color into (mean - k*sigma, mean + k*sigma).
            const float k = 3.0f;
            Float3 clamped = Max(Min(color, mean + sigma * k), mean - sigma * k);
            // Exponential moving average. An invalid reprojection means no
            // usable history, which is equivalent to alpha = 1.
            if (!m_valid(x, y)) {
                m_misc(x, y) = curFilteredColor(x, y);
            } else {
                float alpha = 0.33f;
                m_misc(x, y) = Lerp(clamped, curFilteredColor(x, y), alpha);
            }
        }
    }
    std::swap(m_misc, m_accColor);
}
Buffer2D<Float3> Denoiser::ProcessFrame(const FrameInfo &frameInfo) {
    // Filter the current frame
    Buffer2D<Float3> filteredColor = Filter(frameInfo);
    // Reproject the previous frame's color onto the current frame
    if (m_useTemportal) {
        Reprojection(frameInfo);
        TemporalAccumulation(filteredColor);
    } else {
        // First frame: initialize the accumulation buffers
        Init(frameInfo, filteredColor);
    }
    // Maintain
    Maintain(frameInfo);
    // Temporal accumulation kicks in from the second frame on
    m_useTemportal = true;
    return m_accColor;
}
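For context, a hypothetical driver loop; LoadFrameInfo and SaveImage are illustrative placeholders (not confirmed skeleton functions), as are the directory names and frame count, but the calling pattern — one ProcessFrame per frame, in order — is what the temporal history requires:
#include <string>

// Hypothetical driver; LoadFrameInfo and SaveImage are illustrative
// placeholders, not confirmed skeleton functions.
int main() {
    const std::string inputDir = "examples/box/input";   // assumed layout
    const std::string outputDir = "examples/box/output"; // assumed layout
    const int frameNum = 20;                             // assumed frame count
    Denoiser denoiser;
    for (int i = 0; i < frameNum; i++) {
        // Frames must be processed in order so m_accColor / m_preFrameInfo
        // carry valid history from frame i-1 into frame i.
        FrameInfo frameInfo = LoadFrameInfo(inputDir, i);
        Buffer2D<Float3> result = denoiser.ProcessFrame(frameInfo);
        SaveImage(outputDir + "/result_" + std::to_string(i) + ".exr", result);
    }
    return 0;
}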
4 Results
Before denoising:
After denoising: