Reputation: 55
I am working on implementing an algorithm where I try to find 5 vectors out of 20 which are "furthest apart", using some measure. To do that I use combnPrim, which gives me a list-matrix of some 77,000 entries representing all combinations grouped as 5 vectors. Each vector has around 25 elements.
To parallelize the loops below, I tried the doParallel library, but I keep messing it up somehow and get -Inf as a result. I read the doParallel documentation and could not apply what I saw there to my case; it is highly probable that my lack of knowledge of R makes the problem seem harder than it actually is.
# df2 can be thought of as (thanks to @Oliver):
df2 <- as.data.frame(replicate(20, rnorm(10)))
names(df2) <- LETTERS[1:20]
# combnPrim() (gRbase) over the 20 columns yields a 5 x choose(20, 5)
# list-matrix: each column holds one combination of 5 column-vectors.
comb <- combnPrim(df2, 5)
# NOTE: `range` shadows base::range(); ncol(comb) states the intent more
# directly than length(comb)/5 (same value for a matrix).
range <- ncol(comb)
result_vector <- vector(mode = "list", length = range)
for (i in seq_len(range)) {
  total <- 0
  # Sum the squared relative differences over all 10 unordered pairs (j, k).
  for (j in seq_len(4)) {
    for (k in seq(j + 1, 5)) {
      x <- unlist(comb[, i][j])
      y <- unlist(comb[, i][k])
      # mapply('/', a, b) was just elementwise a / b for numeric vectors;
      # plain vectorized division gives the same result without the
      # per-pair mapply() overhead.
      diff <- sum(((x - y) / ((x + y) / 2))^2)
      total <- total + diff
    }
  }
  result_vector[[i]] <- total
}
So the question is how I could approach this problem to make this computation run faster. My approach was to parallelize the outermost loop, where the range
variable is ~15000. All threads would need access to comb and share the variable result_vector
. I believe my approach is not impossible, but I would need some guidance.
Upvotes: 2
Views: 120
Reputation: 12569
I tested two new variants (the functions iloop()
and cloop()
):
# https://www.bioconductor.org/packages/release/bioc/html/RBGL.html
# if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
#
# BiocManager::install("RBGL")
# BiocManager::install("gRbase")
library("gRbase")          ## provides combnPrim()
library("proxy") ## proxy::dist()
library("microbenchmark")  ## for timing the variants below
# df2 can be thought of as (thanks to @Oliver):
df2 <- as.data.frame(replicate(20, rnorm(10)))
names(df2) <- LETTERS[1:20]
# comb is a 5 x choose(20, 5) list-matrix; each cell is one numeric
# column-vector of df2, each column one 5-vector combination.
comb <- combnPrim(df2,5, simplify = TRUE)
# Benchmark baseline: the OP's original implementation, kept verbatim so the
# reported relative timings remain meaningful.
# For each column i of `comb` (5 x m list-matrix; each cell a numeric vector)
# it sums, over all 10 unordered pairs (j, k), the squared relative
# differences sum(((x - y) / ((x + y) / 2))^2).  mapply('/', a, b) is just
# elementwise a / b here, which is why dropping it (nomapply) is faster.
ori <- function(comb) {
range <- length(comb)/5
result_vector <- vector(mode="list",length = range )
for (i in seq(range))
{
# accumulated distance for combination i
total <- as.numeric(0)
for ( j in seq(4))
{
for ( k in seq(j+1,5))
{
diff <- sum( ( mapply( '/',unlist( comb[,i][j] ) - unlist( comb[,i][k] ), ( unlist(comb[,i][j] ) + unlist( comb[,i][k] )) / 2 )^2))
total = total + diff
}
}
result_vector[[i]] <- total
}
return(result_vector)
}
# Variant of ori() with plain vectorized division instead of the per-pair
# mapply() call.
#
# BUG FIX: the rewrite had dropped the parentheses around the mean and
# computed (x - y) / (x + y) / 2, i.e. (x - y) / (2 * (x + y)), instead of
# the intended (x - y) / ((x + y) / 2); every pair distance was off by a
# constant factor of 16 after squaring, so its results did not match ori().
#
# @param comb 5 x m list-matrix of combinations (each cell a numeric vector).
# @return list of length m with the total pairwise distance per combination.
nomapply <- function(comb) {
  range <- ncol(comb) ## length(comb)/5
  result_vector <- vector(mode = "list", length = range)
  for (i in seq_len(range)) {
    total <- 0
    for (j in seq_len(4)) for (k in seq(j + 1, 5)) {
      x <- unlist(comb[, i][j])
      y <- unlist(comb[, i][k])
      # squared relative difference, summed over the vector elements
      total <- total + sum(((x - y) / ((x + y) / 2))^2)
    }
    result_vector[[i]] <- total
  }
  return(result_vector)
}
# Variant of nomapply() that indexes the list-matrix directly as comb[j, i]
# instead of comb[, i][j].
#
# BUG FIXES: the numerator subtracted comb[j, i] from itself, so every
# distance was 0 and every total came out 0 (which is also why this variant
# looked fast in the benchmark), and the parentheses around the mean
# (x + y) / 2 were missing, as in nomapply().
#
# @param comb 5 x m list-matrix of combinations (each cell a numeric vector).
# @return list of length m with the total pairwise distance per combination.
ind <- function(comb) {
  range <- ncol(comb) ## length(comb)/5
  result_vector <- vector(mode = "list", length = range)
  for (i in seq_len(range)) {
    total <- 0
    for (j in seq_len(4)) for (k in seq(j + 1, 5)) {
      x <- unlist(comb[j, i])
      y <- unlist(comb[k, i])
      total <- total + sum(((x - y) / ((x + y) / 2))^2)
    }
    result_vector[[i]] <- total
  }
  return(result_vector)
}
# Variant of ind() that extracts each cell with [[1]] instead of unlist().
#
# BUG FIXES: as in ind(), the numerator subtracted comb[j, i][[1]] from
# itself (always 0, so every total was 0), and the mean in the denominator
# lacked its parentheses.
#
# @param comb 5 x m list-matrix of combinations (each cell a numeric vector).
# @return list of length m with the total pairwise distance per combination.
nounlist <- function(comb) {
  range <- ncol(comb) ## length(comb)/5
  result_vector <- vector(mode = "list", length = range)
  for (i in seq_len(range)) {
    total <- 0
    for (j in seq_len(4)) for (k in seq(j + 1, 5)) {
      x <- comb[j, i][[1]]
      y <- comb[k, i][[1]]
      total <- total + sum(((x - y) / ((x + y) / 2))^2)
    }
    result_vector[[i]] <- total
  }
  return(result_vector)
}
range <- ncol(comb) ## length(comb)/5; NOTE: shadows base::range()
# Squared relative-difference distance: sum of ((x - y) / mean(x, y))^2
# over the vector elements.
fn_dist <- function(x, y) {
  rel <- (x - y) / ((x + y) / 2)
  sum(rel * rel)
}
# Total pairwise fn_dist() distance over the 5 vectors stored in column i
# of the global list-matrix `comb`.
iloop <- function(i) {
  total <- 0
  for (j in 1:4) {
    for (k in (j + 1):5) {
      total <- total + fn_dist(comb[j, i][[1]], comb[k, i][[1]])
    }
  }
  total
}
# result_vector <- sapply(1:range, iloop)
# Total pairwise fn_dist() distance over one combination `ci`, a list of 5
# numeric vectors (one column of `comb`); pairs enumerated via combn(),
# which visits the same (j, k) order as the nested loops.
cloop <- function(ci) {
  pair_dist <- function(p) fn_dist(ci[[p[1]]], ci[[p[2]]])
  sum(combn(5, 2, FUN = pair_dist))
}
# result_vector <- apply(comb, 2, cloop)
# r <- apply(comb,2, function(l) sum(proxy::dist(method = fn_dist, do.call(rbind,l))))
# Compare all variants; with unit = "relative" each timing is reported
# relative to the fastest expression (here orig4 / nounlist).
microbenchmark(orig=ori(comb), orig2=nomapply(comb), orig3=ind(comb), orig4=nounlist(comb),
iloop=sapply(1:range, iloop), cloop=apply(comb, 2, cloop), unit = "relative",
proxy=apply(comb,2, function(l) sum(proxy::dist(method = fn_dist, do.call(rbind,l)))),
times=10)
These are the results:
# > microbenchmark(orig=ori(comb), orig2=nomapply(comb), orig3=ind(comb), orig4=nounlist(comb),
# + iloop=sapply(1:range, iloop), cloop=apply(comb, 2, cloop), unit = "relative",
# + proxy=apply(comb,2, function(l) sum(proxy::dist(method = fn_dist, do.call(rbind,l)))),
# + times=10)
# Unit: relative
# expr min lq mean median uq max neval cld
# orig 8.647526 8.648012 8.429268 8.597876 8.551316 7.1967369 10 e
# orig2 2.613248 2.627175 2.564267 2.612007 2.633428 2.1851621 10 d
# orig3 1.949486 1.969982 1.911910 1.933789 1.963484 1.6318174 10 b
# orig4 1.000000 1.000000 1.000000 1.000000 1.000000 1.0000000 10 a
# iloop 1.127511 1.146384 1.118755 1.149810 1.140409 0.9477470 10 a
# cloop 1.137061 1.154385 1.128315 1.149292 1.143234 0.9702812 10 a
# proxy 2.142964 2.127388 2.078447 2.100761 2.067607 1.9183790 10 c
The small changes in the inner loop gave the most performance gain: using `/` directly on the vectors (no `mapply()`) and `[[1]]` instead of `unlist()`. For clear code I would prefer the variant `cloop()` or using `proxy::dist()`.
Upvotes: 1
Reputation: 11255
This approach relies on creating a helper function and then doing the inner loop using the base combn()
function.
# Squared relative difference between numeric vectors x and y, summed over
# all elements; used below as a custom distance measure.
fn_dist <- function(x, y) {
  mid <- (x + y) / 2
  sum(((x - y) / mid)^2)
}
# Inner pair loop done by combn(5, 2, FUN = ...); comb_i is one column of
# `comb`, i.e. a list of 5 numeric vectors.
system.time({
result_vector3 <- apply(comb, 2, function(comb_i) sum(combn(5, 2, FUN = function(x) fn_dist(comb_i[[x[1]]], comb_i[[x[2]]]))))
})
# user system elapsed
# 1.12 0.00 1.15
The use of apply
was intentional as future_apply
is very easy to use. Unfortunately, it performs worse for my 2-core machine:
library(future.apply)
# NOTE(review): plan(multiprocess) is deprecated in recent versions of the
# future package; prefer plan(multisession) -- confirm against your version.
plan(multiprocess)
# Same computation as result_vector3, but columns dispatched to workers;
# parallel overhead outweighs the gain on a 2-core machine here.
system.time({
result_vector_future <- future_apply(comb, 2, function(comb_i) sum(combn(5, 2, FUN = function(x) fn_dist(comb_i[[x[1]]], comb_i[[x[2]]]))))
})
# user system elapsed
# 1.59 0.03 1.92
If you prefer a for loop, these small changes make it similar in performance to the regular apply
statement:
# for-loop variant: hoist comb[, i] out of the inner loops and call
# fn_dist() on the extracted vectors.  Assumes `range`, `comb`, `fn_dist`
# and a preallocated `result_vector` already exist (see the question).
system.time({
for (i in seq(range)){
total <- as.numeric(0)
# extract the column once instead of re-subsetting 10x in the inner loops
comb_i <- comb[, i]
for ( j in seq(4))
{
for ( k in seq(j+1,5))
{
diff <- fn_dist(comb_i[[j]], comb_i[[k]])
# diff <- sum( ( (unlist( comb[,i][j] ) - unlist( comb[,i][k] )) / (( unlist(comb[,i][j]) + unlist( comb[,i][k] ) ) / 2 ) )^2 )
total = total + diff
}
}
result_vector[[i]] <- total
}
})
# user system elapsed
# 1.24 0.05 1.32
For reference, using @jogo's suggestion and removing just the mapply
helps a lot but these workarounds help out a bit more.
# Reference timing: the loop with only the mapply() removed (per @jogo),
# still re-extracting comb[,i] inside the inner loops each iteration.
system.time({
for (i in seq(range)){
total <- as.numeric(0)
# comb_i <- comb[, i]
for ( j in seq(4))
{
for ( k in seq(j+1,5))
{
# diff <- fn_dist(comb_i[[j]], comb_i[[k]])
diff <- sum( ( (unlist( comb[,i][j] ) - unlist( comb[,i][k] )) / (( unlist(comb[,i][j]) + unlist( comb[,i][k] ) ) / 2 ) )^2 )
total = total + diff
}
}
result_vector[[i]] <- total
}
})
# user system elapsed
# 2.40 0.06 2.50
And finally, this is very similar to dist
. If you are comfortable with the default methods, you could use:
# stats::dist() with its default (Euclidean) method -- a different measure
# than fn_dist, but much faster since it runs in compiled code.
system.time({
results_different_method <- apply(comb,2, function(l) sum(stats::dist(do.call(rbind,l))))
})
# user system elapsed
# 0.70 0.00 0.74
library(proxy)
# proxy::dist() accepts a custom method, so this reproduces the OP's measure.
system.time({
result_same_as_OP <- apply(comb, 2, function (l) sum(proxy::dist(do.call(rbind, l), method = fn_dist)))
})
# user system elapsed
# 1.58 0.05 1.67
And I tried to get it down to a one liner but it was slower:
# One-liner over column indices of df2; slower because each of the
# choose(20, 5) calls subsets and transposes a data.frame.
system.time({
result_final <- combn(ncol(df2), 5, FUN = function(cols) sum(proxy::dist(t(df2[, cols]), method = fn_dist)))
})
# user system elapsed
# 3.71 0.08 3.80
I'll organize these thoughts more later.
Upvotes: 2