stats_noob

Reputation: 5897

Randomly Replacing Values in a Data Frame

I am working with the R programming language. Suppose I have the following data frame:

a = rnorm(100,10,1)
b = rnorm(100,10,5)
c = rnorm(100,10,10)
d = as.factor(sample( LETTERS[1:3], 100, replace=TRUE, prob=c(0.5, 0.3, 0.2) ))

my_data = data.frame(a,b,c,d)

 head(my_data)
          a         b          c d
1 12.433326 10.573004   2.586044 A
2  9.985524  8.903590  25.806358 C
3  9.538077 13.875609 -11.572231 C
4  9.342444  6.483715   4.056420 B
5  8.825197  8.633457   6.357470 A
6  9.121292  7.988194  15.999959 B

My Question: For any row, depending on the value of d, I would like to randomly replace the values of a, b, and c with 0, where the replacement probability differs by column and by level of d.

I could do this using base R in a very inefficient way:

A <- my_data[which(my_data$d == "A"), ]
B <- my_data[which(my_data$d == "B"), ]
C <- my_data[which(my_data$d == "C"), ]

A$a_new <- sample( LETTERS[1:2], nrow(A), replace=TRUE, prob=c(0.2, 0.8) )
A$b_new <- sample( LETTERS[1:2], nrow(A), replace=TRUE, prob=c(0.3, 0.7) )
A$c_new <- sample( LETTERS[1:2], nrow(A), replace=TRUE, prob=c(0.5, 0.5) )

A$a_new2 = ifelse(A$a_new == "A", A$a, 0)
A$b_new2 = ifelse(A$b_new == "B", A$b, 0)
A$c_new2 = ifelse(A$c_new == "C", A$c, 0)

B$a_new <- sample( LETTERS[1:2], nrow(B), replace=TRUE, prob=c(0.5, 0.5) )
B$b_new <- sample( LETTERS[1:2], nrow(B), replace=TRUE, prob=c(0.6, 0.4) )
B$c_new <- sample( LETTERS[1:2], nrow(B), replace=TRUE, prob=c(0.5, 0.5) )

B$a_new2 = ifelse(B$a_new == "A", B$a, 0)
B$b_new2 = ifelse(B$b_new == "B", B$b, 0)
B$c_new2 = ifelse(B$c_new == "C", B$c, 0)

C$a_new <- sample( LETTERS[1:2], nrow(C), replace=TRUE, prob=c(0.2, 0.8) )
C$b_new <- sample( LETTERS[1:2], nrow(C), replace=TRUE, prob=c(0.15, 0.85) )
C$c_new <- sample( LETTERS[1:2], nrow(C), replace=TRUE, prob=c(0.8, 0.2) )

C$a_new2 = ifelse(C$a_new == "A", C$a, 0)
C$b_new2 = ifelse(C$b_new == "B", C$b, 0)
C$c_new2 = ifelse(C$c_new == "C", C$c, 0)

final = rbind(A,B,C)

head(final)
           a         b         c d a_new b_new c_new   a_new2    b_new2 c_new2
1  12.433326 10.573004  2.586044 A     A     B     B 12.43333 10.573004      0
5   8.825197  8.633457  6.357470 A     B     B     B  0.00000  8.633457      0
7   9.594164 10.600787 27.190108 A     B     A     B  0.00000  0.000000      0
10  8.441369  1.944389 11.250866 A     B     A     B  0.00000  0.000000      0
11  9.192280 13.970166 -2.829124 A     B     B     A  0.00000 13.970166      0
12  9.916996 12.970319  3.472191 A     B     A     A  0.00000  0.000000      0

Does anyone know if there is a more efficient way to solve this problem? Perhaps it could be done with the dplyr library and the mutate() function?

Thanks!

Upvotes: 0

Views: 848

Answers (2)

r2evans

Reputation: 160407

This is similar to the other answer, but without nesting or the need for mapping.

Reproducible random data:

set.seed(42)
my_data <- local({
  a = rnorm(100,10,1)
  b = rnorm(100,10,5)
  c = rnorm(100,10,10)
  d = as.factor(sample( LETTERS[1:3], 100, replace=TRUE, prob=c(0.5, 0.3, 0.2) ))
  data.frame(a,b,c,d)
})
head(my_data)
#           a         b          c d
# 1 11.370958 16.004827 -10.009292 A
# 2  9.435302 15.223755  13.337772 A
# 3 10.363128  4.983957  21.713251 B
# 4 10.632863 19.242410  30.595392 A
# 5 10.404268  6.666133  -3.768616 B
# 6  9.893875 10.527569  -1.508556 A

Produce another frame with, for each group of d, the proportion of each column's values to replace with 0:

otherframe <- data.frame(d = c("A", "B", "C"), prob_a = c(0.2, 0.5, 0.2), prob_b = c(0.3, 0.6, 0.15), prob_c = c(0.5, 0.5, 0.2))
otherframe
#   d prob_a prob_b prob_c
# 1 A    0.2   0.30    0.5
# 2 B    0.5   0.60    0.5
# 3 C    0.2   0.15    0.2

Now to use it. First, I'll (re)use a helper function from another answer:

# adapted from https://stackoverflow.com/a/70402493/3358272
# replaces a proportion `prop` of the elements of `x` with `def` (0 by default)
my_func2 <- function(x, prop, def = 0) replace(x, sample(length(x), size = ceiling(prop * length(x)), replace = FALSE), def)
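
As a quick illustration (my addition here, not part of the linked answer), my_func2 zeroes out a given proportion of a vector's elements:

my_func2(1:10, 0.3)
# ceiling(0.3 * 10) = 3 of the ten values, chosen at random, are replaced with 0

With that helper in place, join the per-group proportions onto the data and zero out each column within its group: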

library(dplyr)

out <- left_join(my_data, otherframe, by = "d") %>%
  group_by(d) %>%
  mutate(
    # prob_* is constant within each group, so take its first element
    a = my_func2(a, prob_a[1]),
    b = my_func2(b, prob_b[1]),
    c = my_func2(c, prob_c[1])
  ) %>%
  ungroup() %>%
  select(-prob_a, -prob_b, -prob_c)
out
# # A tibble: 100 x 4
#        a     b     c d
#    <dbl> <dbl> <dbl> <chr>
#  1 11.4   0     0    A
#  2  9.44 15.2   0    A
#  3 10.4   4.98 21.7  B
#  4 10.6   0     0    A
#  5  0     0     0    B
#  6  0    10.5   0    A
#  7  0     7.89  2.94 B
#  8  9.91  9.39  0    A
#  9 12.0  10.9   3.54 A
# 10  9.94 10.6   0    B
# # ... with 90 more rows

And validation that the proportion of 0s makes sense.

out %>%
  group_by(d) %>%
  summarize(across(a:c, ~ sum(abs(.) < 1e-12) / n()))
# # A tibble: 3 x 4
#   d         a     b     c
#   <chr> <dbl> <dbl> <dbl>
# 1 A     0.208 0.302 0.509
# 2 B     0.517 0.621 0.517
# 3 C     0.222 0.167 0.222

Upvotes: 1

kybazzi

Reputation: 1030

Here is a solution using the tidyverse (in particular, purrr and dplyr). My strategy:

  1. Define the replacement probabilities for each value of d.

  2. Split the data based on the value of d.

  3. Use purrr::map2() to iterate over each sub-data-frame and its replacement probability vector, and perform the appropriate replacements. Caution: you must make sure the ordering is consistent throughout - that is, the order of a, b, and c must match the order of the replacement probabilities, and the order of the split data in step 2 must match the order of the probability vectors in step 1 (a quick check is sketched right after this list).

  4. Merge the results back into a single data frame.
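
To guard against the ordering caveat in step 3, one quick sanity check (my own illustrative addition, not part of the original answer) is to confirm that the names produced by split() line up with the names of the probability list:

# split() orders its pieces by the factor levels of d ("A", "B", "C"),
# so replacement_probs (defined just below) should be named and ordered the same way
stopifnot(identical(names(split(my_data, my_data$d)), names(replacement_probs)))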

library(tidyverse)

replacement_probs = list(
  A = c(0.2, 0.4, 0.5),
  B = c(0.5, 0.6, 0.5),
  C = c(0.2, 0.15, 0.2)
)

updated_data = my_data %>% 
  split(.$d) %>%                # one sub-data-frame per level of d, in level order A, B, C
  map2(replacement_probs, function(sub_data, probs) {
    d = sub_data$d
    map2_df(
      select(sub_data, a, b, c),
      probs,
      function(x, p) {
        # set a proportion p of this column's values to 0
        x[sample(length(x), length(x) * p)] = 0
        x
      }
    ) %>% 
      mutate(d = d)             # reattach the grouping column
  }) %>%
  reduce(bind_rows)

The output:

# A tibble: 1,000 x 4
       a     b     c d    
   <dbl> <dbl> <dbl> <fct>
 1  9.05  0     0    A    
 2 11.6   7.15  0    A    
 3  8.65  7.31  0    A    
 4  0     0     8.26 A    
 5  0     4.61  0    A    
 6 10.3   0    21.4  A    
 7 11.9  19.1   0    A    
 8  9.19  9.42  0    A    
 9  9.54 13.5   0    A    
10  9.34  7.08  0    A    
# ... with 990 more rows

We can verify our work by splitting the data by d again and computing the proportion of zeros in each column. Note that I changed the sample size to 1000 to make it clearer that it worked.

updated_data %>% 
  split(.$d) %>% 
  map(~ map_dbl(.x, ~ sum(.x == 0) / length(.x)))

# Output
$A
        a         b         c         d 
0.1983806 0.3987854 0.5000000 0.0000000 

$B
        a         b         c         d 
0.4983389 0.5980066 0.4983389 0.0000000 

$C
        a         b         c         d 
0.2000000 0.1463415 0.2000000 0.0000000 

Upvotes: 2
