# admmGSLOPE.R
# Written by Michał Makowski
# TODO
# Checks
# Lambda default value
# install.packages("SLOPE")
require(SLOPE)
source("OWL1prox.R")
# sampleCovariance - sample covariance matrix
# lambda - sequence of lambda regularizers (ordered L1 norm weights)
# penalizeDiagonal - should the diagonal entries be penalized as well
# mu - augmented Lagrangian parameter
# Y - optional starting value for the split variable Y
# maxIter - maximum number of iterations
# absoluteEpsilon - used in the residual stopping criterion
# verbose - console output (progress bar)
# Returns a list with the estimated precision matrix, its inverse, the final
# residuals and the number of iterations.
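#
# The routine estimates a sparse precision matrix by ADMM applied to the
# graphical SLOPE objective (as suggested by the updates below):
#     minimize  -log det(X) + tr(sampleCovariance %*% X) + J_lambda(X)
# where J_lambda is the ordered (sorted) L1 norm of the entries of X with
# weight sequence lambda; X and its copy Y are split and coupled through the
# dual variable Z.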
gslopeADMM <- function(sampleCovariance,
                       lambda = NULL,
                       penalizeDiagonal = FALSE,
                       mu = 1.1,
                       Y = NULL,
                       maxIter = 1e5,
                       absoluteEpsilon = 1e-4,
                       # relativeEpsilon = 1e-4,
                       verbose = TRUE)
{
    # Console output
    if(verbose)
    {
        cat("Starting ADMM gSLOPE procedure...")
        progressBar <- txtProgressBar(min = 0, max = 1/absoluteEpsilon, style = 3)
        setTxtProgressBar(progressBar, 0)
    }
    p <- ncol(sampleCovariance)

    # Number of penalized entries: p*(p+1)/2 with the diagonal, p*(p-1)/2 without
    entriesNumber <- sum(1:(p-!penalizeDiagonal))
    # Lambda preparation
    lambda <- sort(lambda, decreasing = TRUE)
    if(length(lambda) == p^2)
    {
        # A full p^2-long sequence was supplied: keep one weight per unique
        # entry of the symmetric matrix
        if(penalizeDiagonal)
        {
            lambda <- c(lambda[1:p], lambda[seq(from = p+1, to = length(lambda), by = 2)])
        } else
        {
            lambda <- lambda[seq(from = p+1, to = length(lambda), by = 2)]
        }
    } else if(length(lambda) < entriesNumber)
    {
        # Too few weights: pad with zeros (unpenalized entries)
        lambda <- c(lambda, rep(0, times = entriesNumber - length(lambda)))
    } else if(length(lambda) > entriesNumber)
    {
        # Too many weights: keep the entriesNumber largest
        lambda <- lambda[1:entriesNumber]
    }
    # Initialization
    Z <- sampleCovariance*0 # Initialize the Lagrangian (dual) variable at zero (seems to work well)
    if(is.null(Y))
        Y <- Z
    X <- diag(nrow = p)
    # ADMM algorithm
    for(n in 1:maxIter)
    {
        # Solve the X sub-problem
        Ctilde <- Y - Z - sampleCovariance/mu
        Ceigen <- eigen(Ctilde, symmetric = TRUE)
        CeigenVal <- Ceigen$values
        CeigenVec <- Ceigen$vectors
        Fmu <- 1/2*diag(CeigenVal + sqrt(CeigenVal*CeigenVal + 4/mu))
        X <- CeigenVec %*% Fmu %*% t(CeigenVec)
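        # (The spectral map above is the closed-form minimizer of
        #  -log det(X) + (mu/2)*||X - Ctilde||_F^2: each eigenvalue d of Ctilde
        #  is sent to (d + sqrt(d^2 + 4/mu))/2, which is strictly positive, so
        #  the iterate X stays positive definite.)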
        # Solve the Y sub-problem: proximal operator of the ordered L1 norm
        Yold <- Y
        # Y <- softThresholding(X+Z, lambda/mu)
        Y <- matrixOWL1prox(X+Z, lambda/mu, penalizeDiagonal)
        # Update the dual (Lagrangian) variable
        Z <- Z + mu*(X-Y)
        # Residuals
        primalResidual <- norm(X-Y, type = "F")
        dualResidual <- norm(mu*(Y-Yold), type = "F")

        # Stopping criteria
        primalEpsilon <- absoluteEpsilon # + relativeEpsilon*max(l2norm(X), l2norm(Y))
        # dualEpsilon <- absoluteEpsilon # + relativeEpsilon*l2norm(Z)

        if(verbose)
            setTxtProgressBar(progressBar, min(1/primalResidual, 1/dualResidual, 1/absoluteEpsilon))

        if(primalResidual < primalEpsilon && dualResidual < primalEpsilon)
            break
    }
    # Threshold near-zero entries of the estimated precision matrix
    X[abs(X) < absoluteEpsilon] <- 0

    if(verbose)
        close(progressBar)

    return(list(sampleCovariance = sampleCovariance,
                lambda = lambda,
                lagrangianParameter = mu,
                diagonalPenalization = penalizeDiagonal,
                precisionMatrix = X,
                covarianceMatrix = solve(X),
                residuals = c(primalResidual, dualResidual),
                iterations = n,
                epsilon = absoluteEpsilon))
}
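
# --- Usage sketch (illustrative, not part of the original routine) ----------
# A minimal example of calling gslopeADMM(), assuming OWL1prox.R (providing
# matrixOWL1prox) is available in the working directory. The data and the
# decreasing lambda sequence below are arbitrary placeholders, not a
# recommended weight choice.
if(FALSE)
{
    set.seed(1)
    p <- 10
    n <- 200
    dataMatrix <- matrix(rnorm(n * p), nrow = n)           # illustrative Gaussian data
    S <- cov(dataMatrix)                                    # sample covariance
    k <- p * (p - 1) / 2                                    # off-diagonal entries penalized
    lambdaSeq <- seq(from = 1, to = 0.1, length.out = k)    # arbitrary decreasing weights

    fit <- gslopeADMM(S, lambda = lambdaSeq, verbose = FALSE)
    round(fit$precisionMatrix, 3)
    fit$iterations
}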