amitkb3
amitkb3

Reputation: 313

Speedup R code to remove stop words from string by vectorizing

I have a successfully running code listed below to removes stop words from text and also corresponding Part of speech[POS]. But it takes a while to run on large volume around 4 hours. I was thinking if i get rid of the for loops by vectoring, it would speed. But i don't know if its possible or if its useful. I needed help in speeding up code by using a better way.

I can remove stop words using tm packge R tm removeWords stopwords is not removing stopwords but i need to remove the corresponding POS tag, which is not possible in tm package.

Note: I have been able to parallelize the outermost for loop using foreeach to run on 12 cores.

code:

# Reproducible data
# id is to identify the source
# phrase contains original string
# modifiedphrase contains string with stop words removved

id <- c(1,2,3)
phrase <- c("choice_for_selection","accordingly_choices_for_selection", "only_top_selection")
pos <-  c("NN JJ NN","NN JJ NN NN", "NNS NN NNS") #fake part of speech
df <- as.data.frame(cbind(id,phrase,pos))
df<-cbind(df,df$phrase) # creating copy of the phrase to modify it 
df<-cbind(df,df$pos) # creating copy of the pos to modify it
colnames(df) <- c("id","phrase","pos","modifiedphrase","modpos")
df$modifiedphrase<-as.character(df$modifiedphrase)
df$modpos<-as.character(df$modpos)

# stop words list
library(tm)
SWList<-  stopwords(kind = "SMART")

library(stringr)

#Code to remove stop words in strings
# the first outermost for loop i am able to parallelize using foreach
for(i in 1:length(df[,1])){
  tokensplit<-str_split(df[i,"phrase"],"_")[[1]]
  possplit<-str_split(df[i,"pos"]," ")[[1]]
   change=0
  forremoval=NULL
  for(j in 1:length(tokensplit)){
    if(tokensplit[j] %in% SWList){
      change=1
      forremoval<-append(forremoval,j)
      tmppos<-paste(possplit[-forremoval],collapse=" ")
    }
  }
  if(change==1){
    tmp<-paste(tokensplit[-forremoval],collapse="_")
    if(length(tmp)==0){
      tmp=""
      tmppos=""
    }
  df[i,"modifiedphrase"]=tmp
  df[i,"modpos"]=tmppos
  }
}

# Final output
print(df)

  id                            phrase         pos    modifiedphrase modpos
1  1              choice_for_selection    NN JJ NN  choice_selection  NN NN
2  2 accordingly_choices_for_selection NN JJ NN NN choices_selection  JJ NN
3  3                only_top_selection  NNS NN NNS     top_selection NN NNS

> 

Upvotes: 1

Views: 544

Answers (1)

nfmcclure
nfmcclure

Reputation: 3141

Here's an sapply version:

# stop words list
library(tm)
SWList <- stopwords(kind = "SMART")

df$modpos <- apply(df[,c('phrase', 'pos')], 1, function(x){
    paste(strsplit(x[2],' ')[[1]][!((strsplit(x[1],'_')[[1]])%in%SWList)], collapse=" ")
})

df$modifiedphrase <- sapply(df$modified, function(x) {
    paste(setdiff(strsplit(x,"_")[[1]],SWList),collapse="_")
})

I know it's fake data, but you might also want to consider removing the apostrophes in your stopwords:

SWList = gsub('\'','',SWList)

Update

Efficiency check:

(1) Setup data function: So we can setup data before each efficiency check.

setup_data = function(){
  id <- c(1,2,3)
  phrase <- c("choice_for_selection","accordingly_choices_for_selection", "only_top_selection")
  pos <-  c("NN JJ NN","NN JJ NN NN", "NNS NN NNS") #fake part of speech
  df <- as.data.frame(cbind(id,phrase,pos))
  df<-cbind(df,df$phrase) # creating copy of the phrase to modify it 
  df<-cbind(df,df$pos) # creating copy of the pos to modify it
  colnames(df) <- c("id","phrase","pos","modifiedphrase","modpos")
  df$modifiedphrase<-as.character(df$modifiedphrase)
  df$modpos<-as.character(df$modpos)
  return(df)
}

(2) The original For-loop method:

forloop_method = function(){
    for(i in 1:length(df[,1])){
    tokensplit<-str_split(df[i,"phrase"],"_")[[1]]
    possplit<-str_split(df[i,"pos"]," ")[[1]]
    change=0
    forremoval=NULL
    for(j in 1:length(tokensplit)){
      if(tokensplit[j] %in% SWList){
        change=1
        forremoval<-append(forremoval,j)
        tmppos<-paste(possplit[-forremoval],collapse=" ")
      }
    }
    if(change==1){
      tmp<-paste(tokensplit[-forremoval],collapse="_")
      if(length(tmp)==0){
        tmp=""
        tmppos=""
      }
      df[i,"modifiedphrase"]=tmp
      df[i,"modpos"]=tmppos
    }
  }
}

(3) Apply method:

apply_method = function(){
  df$modpos <- apply(df[,c('phrase', 'pos')], 1, function(x){
    paste(strsplit(x[2],' ')[[1]][!((strsplit(x[1],'_')[[1]])%in%SWList)], collapse=" ")
  })

  df$modifiedphrase <- sapply(df$modified, function(x) {
    paste(setdiff(strsplit(x,"_")[[1]],SWList),collapse="_")
  })
}

(4) Efficiency in Microseconds using the 'microbenchmark' package:

library(microbenchmark)
df = setup_data()
microbenchmark(forloop_method(), unit='us')

Unit: microseconds
           expr     min       lq     mean  median      uq      max neval
 forloop_method 884.229 965.2805 1050.775 992.224 1032.69 2680.374   100

df = setup_data()
microbenchmark(apply_method, unit='us')

Unit: microseconds
         expr   min    lq    mean median    uq    max neval
 apply_method 0.018 0.025 0.49948  0.026 0.027 45.379   100

1050.775/0.49948 = 2103.738 times speedup on my system.

Upvotes: 1

Related Questions