Computing the Gini coefficient used in the paper:
import math
import torch

def cal_sparsity(x):
    # Gini sparsity index of a 1-D tensor x: 0 for a constant vector, close to 1 for a very sparse one
    n = x.shape[0]
    x = x.abs()
    x, _ = x.sort()                      # sort ascending
    Gx = 0
    for k in range(n):
        # k is 0-based, so the weight (n - k - 0.5) corresponds to the (N - k + 1/2) term of the 1-based Gini formula
        Gx += x[k] * (n - k - 0.5)
    if x.sum() == 0:
        Gx = 0
    else:
        Gx *= 2 / (n * x.sum())
        Gx = 1 - Gx
    if math.isinf(Gx):
        Gx = 1
    return Gx
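A quick sanity check of cal_sparsity (illustrative values only, not from the experiments): a constant vector should score 0 and a 1-sparse vector close to 1.

# Illustrative sanity check
dense = torch.ones(64)                   # every entry equal -> Gini 0
sparse = torch.zeros(64)
sparse[10] = 5.0                         # single spike -> Gini 1 - 1/64
print(cal_sparsity(dense))               # ~0
print(cal_sparsity(sparse))              # ~0.98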
I spent a whole evening figuring out how to convert a linear program with absolute values in the objective into standard form, and finally found a proof in the article "目标函数带绝对值号的特殊非线性规划问题" (a special nonlinear program whose objective contains absolute-value terms).
Then I discovered that cvxpy handles absolute values directly = =, so cp.pnorm(D @ x, p=1) is all that is needed.
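For reference, the reformulation from that article can also be written out explicitly in cvxpy (a minimal sketch with made-up sizes, not the problem used below): minimizing ||Dx||_1 subject to Ax = b is equivalent to introducing auxiliary variables t >= |Dx| elementwise and minimizing sum(t).

import cvxpy as cp
import numpy as np

# Illustrative sizes only
m_, n_, k_ = 3, 5, 11
A_ = np.random.randn(m_, n_)
b_ = A_ @ np.random.randn(n_)
D_ = np.random.randn(k_, n_)

x_ = cp.Variable(n_)
t_ = cp.Variable(k_)                                   # auxiliary variables, t >= |D x| elementwise
lp = cp.Problem(cp.Minimize(cp.sum(t_)),
                [A_ @ x_ == b_, D_ @ x_ <= t_, -t_ <= D_ @ x_])
lp.solve()                                             # same optimum as minimizing pnorm(D_ @ x_, 1)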
Defining the problem and the corresponding solver:
import cvxpy as cp
import torch
from cvxpylayers.torch import CvxpyLayer

# size, patternnum, light_pattern and noise_intensity are defined elsewhere in the script
n, m = size[2], patternnum      # number of unknown pixels, number of measurements
k = 2 * n + 1

x = cp.Variable(n)
A = cp.Parameter((m, n))
b = cp.Parameter(m)
D = cp.Parameter((k, n))
constraints = [A @ x - b == 0]
objective = cp.Minimize(cp.pnorm(D @ x, p=1))
problem = cp.Problem(objective, constraints)
assert problem.is_dpp()
cvxpylayer = CvxpyLayer(problem, parameters=[A, b, D], variables=[x])

# D stacks an identity block (L1 penalty on x itself) on top of a lamda-scaled
# finite-difference block with boundary terms, i.e. a TV-like penalty
D_tch = torch.zeros(k, n)
A_tch = light_pattern.to(dtype=torch.float32)
lamda = 0.8471 / 0.8588
for i in range(n):
    D_tch[i][i] = 1
D_tch[n][0] = lamda
for i in range(n + 1, k - 1):
    D_tch[i][i - n - 1] = lamda
    D_tch[i][i - n] = -1 * lamda
D_tch[k - 1][n - 1] = lamda
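To make the structure of D_tch concrete, running the same loops for a toy n = 3 with lamda = 1 gives the matrix below (illustrative only; the real shapes come from size and patternnum):

# Toy check of the D construction (n = 3, k = 7, lamda = 1 for readability)
n_demo, k_demo = 3, 2 * 3 + 1
D_demo = torch.zeros(k_demo, n_demo)
for i in range(n_demo):
    D_demo[i][i] = 1
D_demo[n_demo][0] = 1
for i in range(n_demo + 1, k_demo - 1):
    D_demo[i][i - n_demo - 1] = 1
    D_demo[i][i - n_demo] = -1
D_demo[k_demo - 1][n_demo - 1] = 1
print(D_demo)
# tensor([[ 1.,  0.,  0.],    identity block
#         [ 0.,  1.,  0.],
#         [ 0.,  0.,  1.],
#         [ 1.,  0.,  0.],    boundary term x0
#         [ 1., -1.,  0.],    first differences x0-x1, x1-x2
#         [ 0.,  1., -1.],
#         [ 0.,  0.,  1.]])   boundary term x2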
Using the solver directly:
import torch.nn.functional as F

# valid_datasets, n_valid, size, noise_intensity, A_tch, D_tch, cvxpylayer as defined above
loss_avg = 0
# Gx = 0
# Gdx = 0
for i in range(n_valid):
    volume = valid_datasets[i]
    volume = torch.tensor(volume, dtype=torch.float32)
    volume_pred = torch.zeros(volume.shape)
    for x in range(size[0]):
        for y in range(size[1]):
            x_gt = volume[x][y]
            # Gx += cal_sparsity(x_gt)
            # dx = torch.zeros(size[2] - 1)
            # for z in range(size[2] - 1):
            #     dx[z] = x_gt[z] - x_gt[z + 1]
            # Gdx += cal_sparsity(dx)
            measurement = A_tch @ x_gt
            measurement = measurement + measurement * noise_intensity * torch.randn(measurement.shape)
            b_tch = measurement
            # solve the problem
            solution, = cvxpylayer(A_tch, b_tch, D_tch)
            volume_pred[x][y] = solution
    loss = F.mse_loss(volume_pred, volume)
    loss_avg += loss.item()              # accumulate the per-volume loss for the average
    print(loss)
print("avg:", loss_avg / n_valid)
# print("Gx:", Gx / (n_valid * size[1] * size[0]))
# print("Gdx:", Gdx / (n_valid * size[1] * size[0]))
Using the solver as part of a network:
import torch.nn as nn

class MyNet(nn.Module):
    def __init__(self, pattern_num, density_shape, cuda):
        super(MyNet, self).__init__()
        self.device = cuda
        self.density_shape = density_shape
        # other parameters omitted
        self.light_pattern = torch.randint(2, [pattern_num, density_shape[2]])
        self.noise_intensity = 0.1
        n, m = density_shape[2], pattern_num   # number of unknown pixels, number of measurements
        k = 2 * n + 1
        x = cp.Variable(n)
        A = cp.Parameter((m, n))
        b = cp.Parameter(m)
        D = cp.Parameter((k, n))
        constraints = [A @ x - b == 0]
        objective = cp.Minimize(cp.pnorm(D @ x, p=1))
        problem = cp.Problem(objective, constraints)
        assert problem.is_dpp()
        self.cvxpylayer = CvxpyLayer(problem, parameters=[A, b, D], variables=[x])
        self.D_tch = torch.zeros(k, n).to(device=self.device)
        self.A_tch = self.light_pattern.to(dtype=torch.float32, device=self.device)
        lamda = 0.8471 / 0.8588
        for i in range(n):
            self.D_tch[i][i] = 1
        self.D_tch[n][0] = lamda
        for i in range(n + 1, k - 1):
            self.D_tch[i][i - n - 1] = lamda
            self.D_tch[i][i - n] = -1 * lamda
        self.D_tch[k - 1][n - 1] = lamda

    def forward(self, volume):
        volume = encode(..., volume)   # encoding step; the other encode() arguments are omitted here
        batch_size = volume.shape[0]
        # flatten the (batch, H, W) pixel columns into one batch dimension for the layer
        x_gt = volume.reshape(batch_size * self.density_shape[0] * self.density_shape[1], self.density_shape[2])
        measurement = torch.einsum('BN,MN->BM', x_gt, self.A_tch)
        measurement = measurement + measurement * self.noise_intensity * torch.randn(measurement.shape).to(device=self.device)
        b_tch = measurement
        # solve the problem
        solution, = self.cvxpylayer(self.A_tch, b_tch, self.D_tch)
        volume_pred = solution.reshape(volume.shape)
        return volume_pred
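A minimal way to check the constructed problem data (hypothetical sizes; the real pattern_num and density_shape come from the project configuration, and running forward additionally needs the omitted encode() step):

# Hypothetical smoke test: instantiate with toy sizes and inspect the problem matrices
device = torch.device('cpu')
net = MyNet(pattern_num=8, density_shape=(4, 4, 16), cuda=device)
print(net.A_tch.shape)   # torch.Size([8, 16])  -> m x n measurement matrix
print(net.D_tch.shape)   # torch.Size([33, 16]) -> k x n regularization matrix, k = 2*16 + 1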
The layer does accept a whole batch of problems in one call, but it does not actually solve them in parallel: after increasing the batch size, the average solve time per instance stays the same... I have not dug into the cvxpylayers internals to find out why.
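A quick way to measure this yourself (a sketch reusing m, A_tch, D_tch and cvxpylayer from the script above; it assumes m < n so that random right-hand sides stay feasible, and the timings are whatever your machine produces):

import time

# Compare the average per-instance solve time for two batch sizes
for B in (1, 64):
    b_batch = torch.randn(B, m)                        # random (generically feasible) right-hand sides
    start = time.time()
    cvxpylayer(A_tch, b_batch, D_tch)
    print(f"batch {B}: {(time.time() - start) / B:.4f} s per instance")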