Changho Kim

Reputation: 7

removing for loop using apply with custom function in R

I'm trying to run t-tests automatically for all the variables, for each pair of groups.

However, a for-loop on large data seems to overwhelm my computer, and it simply stops working.

Would there be a way to remove the for-loop and make the code run faster?

The sample code below calculates a t-test for every possible combination of variable and group pair in the given data, using a for-loop and combn

(i.e. Sepal.Width, Sepal.Length, Petal.Length, Petal.Width for setosa_vs_versicolor, setosa_vs_virginica, versicolor_vs_virginica),

then saves every t-test result into a blank matrix (4 rows, one per variable, and 3 columns, one per comparison).

The data set I'm actually trying to use this code on has 36 groups and 103 variables.
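
For scale, if every pair of groups is compared the way the sample code below does it, that works out to:

choose(36, 2)        # 630 group pairs per variable
choose(36, 2) * 103  # 64890 t-tests in total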

I'm aiming for a complete overhaul of the following code, which is a mess of multiple for-loops and seems to take forever on such data: https://github.com/CHKim5/LMSstat/blob/master/R/Allstats.R

system.time({
  data(iris)

  Test <- as.data.frame(matrix(data = NA, nrow = 4, ncol = 3))

  for (t in 1:(ncol(iris) - 1)) {
    Test[t, ] <- combn(as.character(unique(iris$Species)), 2,
                       function(x) t.test(
                         x = iris[iris$Species == x[1], ][, t],
                         y = iris[iris$Species == x[2], ][, t])[["p.value"]])
  }
})

Upvotes: 0

Views: 170

Answers (2)

David

Reputation: 10162

If you want the fastest time possible, use a benchmark (below I use bench::mark()).

There are many improvements that can be made. When the goal is speed, the data.table package is usually a good starting point.

Additionally, you want to make sure that all redundant computation is taken out of the hot code (the code inside your loop).
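
A rough sketch of what that can look like here (the fully worked-out versions are base_function() and split_function() in the benchmark below): the group labels and the per-group subsets do not depend on the loop variable, so they can be computed once up front.

# hoisted out of the loop: computed once, reused for every column
groups   <- as.character(unique(iris$Species))
by_group <- split(iris[-ncol(iris)], iris$Species)

# inside the loop only the cheap, column-specific work remains, e.g.
t.test(by_group[[groups[1]]][["Sepal.Length"]],
       by_group[[groups[2]]][["Sepal.Length"]])$p.value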

The fastest code I was able to come up with is this:

library(data.table)
iris <- as.data.table(iris)

# split the dataset by your target group
species_split <- split(iris, iris$Species)

# create a wrapper for the t-test
split_t_test <- function(x, i) t.test(species_split[[x[1]]][[i]],
                                      species_split[[x[2]]][[i]])[["p.value"]]

# iterate over the columns and compute the t-tests
res <- lapply(seq_len(ncol(iris) - 1),
              function(i) as.list(combn(names(species_split), 2, split_t_test, i = i)))

# combine the results
df <- rbindlist(res)

This is around 10 times faster than your original code.
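
Note that this fast version drops the labels. If you need to know which row belongs to which variable and which column to which pair of species, one way to add them back (reusing df and species_split from the snippet above, and assuming the grouping column is the last one, as in iris) is:

pair_names <- combn(names(species_split), 2, paste, collapse = "_vs_")
setnames(df, pair_names)                                 # label the comparisons
df[, variable := names(iris)[seq_len(ncol(iris) - 1)]]  # label the variables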

Detailed Benchmark

1) Define functions

original_function <- function(data) {
  Test <- as.data.frame(matrix(data = NA, nrow = 4, ncol = 3))
  for (t in 1:(ncol(data) - 1)) {
    Test[t, ] <- combn(
      as.character(unique(data$Species)),
      2,
      function(x) {
        t.test(
          x = data[data$Species == x[1], ][, t],
          y = data[data$Species == x[2], ][, t]
        )[["p.value"]]
      }
    )
  }
  return(Test)
}

# take out as much as possible from the loop
base_function <- function(data) {
  unique_species <- as.character(unique(data$Species))
  
  t_test_function <- function(x, i) 
    t.test(data[data$Species == x[1], ][, i],
           data[data$Species == x[2], ][, i])[["p.value"]]
  
  res <- lapply(seq_len(ncol(data) - 1),
                function(i) {
                  combn(unique_species, 2, t_test_function, i = i)
                })
  
  return(do.call(rbind, res))
}

# split the dataset first to avoid the lookup for the Species in the loop
split_function <- function(data) {
  species_split <- base::split(data, data$Species)
  split_t_test <- function(x, i) 
    t.test(species_split[[x[1]]][, i],
           species_split[[x[2]]][, i])[["p.value"]]
  res <- lapply(seq_len(ncol(data) - 1),
                function(i) combn(names(species_split), 2, split_t_test, i = i))
  
  return(do.call(rbind, res))
}

# use data.table
datatable_version <- function(data) {
  unique_species <- as.character(data[, unique(Species)])
  
  dt_t_test <- function(x, i) 
    t.test(data[Species == x[1]][[i]], data[Species == x[2]][[i]])[["p.value"]]
  
  rbindlist(lapply(seq_len(ncol(data) - 1), 
                   function(i) as.list(combn(unique_species, 2, dt_t_test, i = i))))
}

# combine the split and data.table
dt_split <- function(data) {
  species_split <- split(data, data$Species)
  split_t_test <- function(x, i) 
    t.test(species_split[[x[1]]][[i]],
           species_split[[x[2]]][[i]])[["p.value"]]
  res <- lapply(seq_len(ncol(data) - 1),
                function(i) as.list(combn(names(species_split), 2, split_t_test, i = i)))
  
  return(rbindlist(res))
}

2) Compute Benchmarks

library(data.table)
iris_dt <- as.data.table(iris)

bench::mark(
  original = original_function(iris),
  base = base_function(iris),
  split = split_function(iris),
  datatable = datatable_version(iris_dt),
  dt_split = dt_split(iris_dt),
  check = FALSE # datatable returns a data.table not a data.frame
)
#> # A tibble: 5 x 13
#>   expression      min   median `itr/sec` mem_alloc `gc/sec` n_itr  n_gc total_time result memory          time         gc           
#>   <bch:expr> <bch:tm> <bch:tm>     <dbl> <bch:byt>    <dbl> <int> <dbl>   <bch:tm> <list> <list>          <list>       <list>       
#> 1 original     7.52ms   11.1ms      51.4   245.5KB     0       26     0      505ms <NULL> <Rprofmem [605~ <bench_tm [~ <tibble [26 ~
#> 2 base         6.75ms   9.38ms     102.    243.6KB     0       51     0      500ms <NULL> <Rprofmem [603~ <bench_tm [~ <tibble [51 ~
#> 3 split        2.56ms   3.48ms     216.     44.3KB     2.57    84     1      389ms <NULL> <Rprofmem [167~ <bench_tm [~ <tibble [85 ~
#> 4 datatable     7.9ms   9.99ms      83.7   562.2KB     2.15    39     1      466ms <NULL> <Rprofmem [439~ <bench_tm [~ <tibble [40 ~
#> 5 dt_split     2.74ms   3.32ms     277.    161.6KB     0      139     0      502ms <NULL> <Rprofmem [660~ <bench_tm [~ <tibble [139~

Compute the benchmarks on a larger data set with 100,000 observations.

set.seed(15212)
idx <- sample.int(nrow(iris), 1e5, replace = TRUE)
large_iris <- iris[idx, ]
large_iris_dt <- iris_dt[idx, ]

bench::mark(
  original = original_function(large_iris),
  base = base_function(large_iris),
  split = split_function(large_iris),
  datatable = datatable_version(large_iris_dt),
  dt_split = dt_split(large_iris_dt),
  check = FALSE, # datatable returns a data.table not a data.frame
  min_time = 2
)
#> # A tibble: 5 x 13
#>   expression      min   median `itr/sec` mem_alloc `gc/sec` n_itr  n_gc total_time result memory           time        gc           
#>   <bch:expr> <bch:tm> <bch:tm>     <dbl> <bch:byt>    <dbl> <int> <dbl>   <bch:tm> <list> <list>           <list>      <list>       
#> 1 original    158.4ms  179.2ms      5.49   147.8MB     9.99    11    20         2s <NULL> <Rprofmem [617 ~ <bench_tm ~ <tibble [11 ~
#> 2 base        145.3ms  167.9ms      5.84   146.8MB    10.2     12    21      2.05s <NULL> <Rprofmem [793 ~ <bench_tm ~ <tibble [12 ~
#> 3 split        19.8ms   23.7ms     31.9     25.2MB    11.0     64    22      2.01s <NULL> <Rprofmem [146 ~ <bench_tm ~ <tibble [64 ~
#> 4 datatable    49.9ms   72.4ms     12.7       68MB    10.3     26    21      2.04s <NULL> <Rprofmem [392 ~ <bench_tm ~ <tibble [26 ~
#> 5 dt_split     17.4ms   18.7ms     38.8       23MB    10.9     78    22      2.01s <NULL> <Rprofmem [174 ~ <bench_tm ~ <tibble [78 ~

Upvotes: 1

iago

Reputation: 3256

An option would be the following:

library(tidyverse)
# Or, instead of loading the whole tidyverse, just these three:
# library(purrr)
# library(dplyr)
# library(tidyr)

combn(unique(iris$Species), 2, simplify = FALSE) %>% 
  structure(names = sapply(., function(.x) paste0(.x, collapse = "-"))) %>% 
  map_dfr(~iris %>% 
             pivot_longer(cols = -Species) %>% 
             pivot_wider(id_cols = name, names_from = Species, values_from = value) %>% 
             unnest(cols = c(-name))  %>% 
             nest_by(name) %>% 
             mutate(tt = list(t.test(data[[.x[[1]]]], data[[.x[[2]]]]))) %>% 
             summarise(across(-data, broom::tidy), .groups = "drop") %>% 
             mutate(across(c(-name), ~.$p.value)), 
          .id = "comb") %>% 
  pivot_wider(names_from = comb, values_from = tt)

# A tibble: 4 × 4
  name         `setosa-versicolor` `setosa-virginica` `versicolor-virginica`
  <chr>                      <dbl>              <dbl>                  <dbl>
1 Petal.Length            9.93e-46           9.27e-50               4.90e-22
2 Petal.Width             2.72e-47           2.44e-48               2.11e-25
3 Sepal.Length            3.75e-17           3.97e-25               1.87e- 7
4 Sepal.Width             2.48e-15           4.57e- 9               1.82e- 3

Upvotes: 0
