Carolin

Reputation: 549

R // count rows and sum a column's values when multiple conditions in other columns of a data.table are met // efficient & fast data.table solution

I have a large data table (~41 million rows, 20+ columns) and want to do a row-wise calculation conditioned on values in other rows of the table. Specifically, for each purchase transaction (a row with buyer ID and timestamp), I want to calculate (1) the number of sales transactions that ID has made before that point (rows where the ID appears in from_ID with an earlier timestamp), and (2) the total sales volume (sum of Value over those rows) that ID has made up to that point.

I have a solution that works; however, it is quite inefficient and slow. I am wondering whether there is a faster, more efficient (probably data.table-based) solution to the problem.

Here is my reproducible example, the columns I want to calculate are "prior sales" and "prior sales amount":

Data

timestamp = c(
  "2018-04-04 00:39:02", "2018-06-04 00:50:22", "2018-09-04 03:07:29", 
  "2018-12-04 02:15:57", "2018-08-04 02:15:57", "2018-09-04 02:15:57", 
  "2018-10-04 02:15:57", "2018-12-20 02:15:57"
) 
ID = as.character(c(1,1,1,1,10,9,8,7))
from_ID = as.character(c(4,5,4,8,1,1,1,1))
Value = c(100,150,50,200,50,100,150,40)
data_sample = data.frame(timestamp, ID, from_ID, Value, stringsAsFactors = FALSE)

data_sample$timestamp = as.POSIXct(data_sample$timestamp)

# Approach: row-by-row loop (works, but slow on large data)
prior_sales = data.frame()
prior_sales_amount = data.frame()

for (i in 1:nrow(data_sample)) {
  row = data_sample[i, ]
  # all sales made by this row's buyer strictly before this purchase
  sales = subset(data_sample, from_ID == row$ID & timestamp < row$timestamp)

  prior_s = nrow(sales)
  prior_sales = rbind(prior_sales, prior_s)

  prior_s_a = ifelse(prior_s == 0, 0, sum(sales$Value))
  prior_sales_amount = rbind(prior_sales_amount, prior_s_a)
}

data_sample = cbind(data_sample, prior_sales, prior_sales_amount)
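
For reference, a slightly less wasteful variant of the same loop preallocates the result vectors instead of growing data.frames with rbind() (which copies all accumulated rows on every iteration); sum() over zero rows is already 0, so the ifelse() is not needed. It still scans the whole table once per row, though:

# same logic, but with preallocated result vectors instead of rbind()
n <- nrow(data_sample)
prior_sales <- integer(n)
prior_sales_amount <- numeric(n)

for (i in seq_len(n)) {
  row <- data_sample[i, ]
  sales <- data_sample[data_sample$from_ID == row$ID &
                         data_sample$timestamp < row$timestamp, ]
  prior_sales[i] <- nrow(sales)              # count of earlier sales
  prior_sales_amount[i] <- sum(sales$Value)  # sum() over zero rows is 0
}

data_sample$prior_sales <- prior_sales
data_sample$prior_sales_amount <- prior_sales_amount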

Upvotes: 2

Views: 203

Answers (3)

chinsoon12

Reputation: 25225

Here is another option using a rolling join:

# small tweak to timestamp to take care of < rather than <=
lu <- data_sample[, .(timestamp = timestamp + 1, x1 = 1L:.N, x2 = cumsum(Value)), from_ID]

# rolling join to look up the latest figures
data_sample[, c("x1", "x2") := lu[.SD, on = .(from_ID = ID, timestamp), roll = Inf,
    .(fcoalesce(x1, 0L), fcoalesce(x2, 0))]]
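
How it works: within each from_ID, x1 = 1L:.N is the running sale count and x2 = cumsum(Value) the running sales total; the roll = Inf join then picks, for each purchase, the most recent of those figures with a (shifted) timestamp at or before the purchase time. Adding 1 to timestamp in lu excludes a sale occurring at exactly the purchase time, making the match strictly earlier. Note this presumes the rows are in chronological order within each from_ID when lu is built (e.g. after setorder(data_sample, timestamp)); otherwise the cumulative figures won't line up.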

timing code:

library(data.table)
set.seed(0L)
nr <- 1e6
nID <- 1e4
data_sample <- data.table(ID=sample(nID, nr, TRUE), from_ID=sample(nID, nr, TRUE), 
    timestamp=1522773542 + sample(nr*10, nr), Value=rnorm(nr))
setorder(data_sample, ID, timestamp, from_ID)
DT0 <- copy(data_sample)
DT1 <- copy(data_sample)

mtd0 <- function() {
    DT0[, c("x1", "x2") := 
        .SD[.SD, on = .(from_ID = ID, timestamp < timestamp), 
            .(X0L = .N, X0 = sum(Value, na.rm = TRUE)), 
            by = .EACHI
        ][, .(X0L, X0)]]    
}

mtd1 <- function() {
    lu <- DT1[, .(timestamp=timestamp+1, x1=1L:.N, x2=cumsum(Value)), from_ID]
    DT1[, c("x1", "x2") := lu[.SD, on=.(from_ID=ID, timestamp), roll=Inf,
        .(fcoalesce(x1, 0L), fcoalesce(x2, 0))]]
}

fsetequal(DT0, DT1)
#[1] TRUE

microbenchmark::microbenchmark(mtd0(), mtd1(), times=1L)

timings:

Unit: milliseconds
   expr      min       lq     mean   median       uq      max neval
 mtd0() 1468.356 1468.356 1468.356 1468.356 1468.356 1468.356     1
 mtd1()  248.641  248.641  248.641  248.641  248.641  248.641     1
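
The gap is expected: the non-equi join in mtd0 has to count and sum the matching rows for every query row (by = .EACHI), while mtd1 computes the cumulative figures once and then does a single rolling lookup per row.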

Upvotes: 0

jay.sf

Reputation: 72593

In base R you could do this:

data_sample <- cbind(data_sample, t(apply(data_sample, 1, function(x) {
  # all rows where this row's ID appears as the seller, strictly earlier in time
  r <- data_sample[data_sample$from_ID == x[["ID"]] &
                     data_sample$timestamp < x[["timestamp"]], ]
  c(x1 = NROW(r), x2 = sum(r$Value))
})))

data_sample
#             timestamp ID from_ID Value x1  x2
# 1 2018-04-04 00:39:02  1       4   100  0   0
# 2 2018-06-04 00:50:22  1       5   150  0   0
# 3 2018-09-04 03:07:29  1       4    50  2 150
# 4 2018-12-04 02:15:57  1       8   200  3 300
# 5 2018-08-04 02:15:57 10       1    50  0   0
# 6 2018-09-04 02:15:57  9       1   100  0   0
# 7 2018-10-04 02:15:57  8       1   150  0   0
# 8 2018-12-20 02:15:57  7       1    40  0   0
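
A caveat: apply() coerces the data frame to a character matrix (the timestamp comparison still works because R coerces the character value back to POSIXct), and the scan is still quadratic like the loop in the question, so this mainly avoids the rbind() overhead rather than changing the complexity.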

Upvotes: 0

s_baldur

Reputation: 33498

Here is something using a non-equi self-join:

library(data.table)
setDT(data_sample)
data_sample[, c("X0L", "X0") := data_sample[.SD,
                                            on = .(from_ID = ID, timestamp < timestamp),
                                            .(X0L = .N, X0 = sum(Value, na.rm = TRUE)),
                                            by = .EACHI
                                            ][, .(X0L, X0)]]


             timestamp ID from_ID Value X0L  X0
1: 2018-04-04 00:39:02  1       4   100   0   0
2: 2018-06-04 00:50:22  1       5   150   0   0
3: 2018-09-04 03:07:29  1       4    50   2 150
4: 2018-12-04 02:15:57  1       8   200   3 300
5: 2018-08-04 02:15:57 10       1    50   0   0
6: 2018-09-04 02:15:57  9       1   100   0   0
7: 2018-10-04 02:15:57  8       1   150   0   0
8: 2018-12-20 02:15:57  7       1    40   0   0
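
For each row of .SD (here the table joining to itself), the non-equi condition collects the rows whose from_ID equals that row's ID with a strictly earlier timestamp; by = .EACHI then aggregates per query row, so .N is the number of prior sales and sum(Value, na.rm = TRUE) their total (both 0 when nothing matches).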

Upvotes: 2
