Oren
Oren

Reputation: 437

How to read only a subset columns in a CSV file using Spring batch FlatFileItemReader?

I have a reader who knows how to read a CSV file with 14 columns, i would like it to be able to receive a file with many columns (~500) and read only these 14 columns, i know that the solution should include FieldSetMapper (according to this question: read only selective columns from csv file using spring batch )but i couldn't find a proper example for it. here is my current reader:

@Bean
public FlatFileItemReader<RowInput> csvRowsReader() {
    FlatFileItemReader<RowInput> reader = new FlatFileItemReader<>();
    Resource resource = new FileSystemResource(new File(FileManager.getInstance().getInputFileLocation()));
    reader.setResource(resource);

    reader.setLinesToSkip(1);
    reader.setLineMapper(new DefaultLineMapper<RowInput>(){{
        setLineTokenizer(new DelimitedLineTokenizer(){{
            setNames(new String[]{"Field_1", "Field_2", "Field_3", "Field_4", "Field_5",
                    "Field_6", "Field_7", "Field_8", "Field_9", "Field_10", "Field_11",
                    "Field_12", "Field_13", "Field_14"});
        }});
        setFieldSetMapper(new BeanWrapperFieldSetMapper<RowInput>(){{
            setTargetType(RowInput.class);
        }});
    }});


    reader.setLinesToSkip(1);
    return reader;
}

And the exception is:

Caused by: org.springframework.batch.item.file.transform.IncorrectTokenCountException: Incorrect number of tokens found in record: expected 14 actual 544

FieldSetMapper that i tried to use:

    public class InputFieldSetMapper implements FieldSetMapper<RowInput>{

    public RowInput mapFieldSet(FieldSet fs) {

        if (fs == null) {
            return null;
        }

        RowInput input = new RowInput();
        input.setField1(fs.readString("Field_1"));
        input.setField2(fs.readString("Field_2"));
        // and so on...

        return input;
    }
}

Upvotes: 2

Views: 4202

Answers (1)

Mahmoud Ben Hassine
Mahmoud Ben Hassine

Reputation: 31600

You need to set the includedFields property on the LineTokenizer to specify which fields to include when parsing the input file. In your case, it should be something like:

@Bean
public FlatFileItemReader<RowInput> csvRowsReader() {
    FlatFileItemReader<RowInput> reader = new FlatFileItemReader<>();
    Resource resource = new FileSystemResource(new File(FileManager.getInstance().getInputFileLocation()));
    reader.setResource(resource);

    reader.setLinesToSkip(1);
    reader.setLineMapper(new DefaultLineMapper<RowInput>(){{
        setLineTokenizer(new DelimitedLineTokenizer(){{
            setNames(new String[]{"Field_1", "Field_2", "Field_3", "Field_4", "Field_5",
                    "Field_6", "Field_7", "Field_8", "Field_9", "Field_10", "Field_11",
                    "Field_12", "Field_13", "Field_14"});
            setIncludedFields(0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10 ,11 ,12 ,13);
        }});
        setFieldSetMapper(new BeanWrapperFieldSetMapper<RowInput>(){{
            setTargetType(RowInput.class);
        }});
    }});

    return reader;
}

EDIT: add an example with non sequential fields

import org.springframework.batch.core.Job;
import org.springframework.batch.core.JobParameters;
import org.springframework.batch.core.Step;
import org.springframework.batch.core.configuration.annotation.EnableBatchProcessing;
import org.springframework.batch.core.configuration.annotation.JobBuilderFactory;
import org.springframework.batch.core.configuration.annotation.StepBuilderFactory;
import org.springframework.batch.core.launch.JobLauncher;
import org.springframework.batch.item.ItemWriter;
import org.springframework.batch.item.file.FlatFileItemReader;
import org.springframework.batch.item.file.builder.FlatFileItemReaderBuilder;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.ClassPathResource;

@Configuration
@EnableBatchProcessing
public class MyJob {

    @Autowired
    private JobBuilderFactory jobs;

    @Autowired
    private StepBuilderFactory steps;

    @Bean
    public FlatFileItemReader<Person> itemReader() {
        return new FlatFileItemReaderBuilder<Person>()
                .name("personItemReader")
                .resource(new ClassPathResource("persons.csv"))
                .delimited()
                .includedFields(new Integer[] {0, 2})
                .names(new String[] {"id", "lastName"})
                .targetType(Person.class)
                .build();
    }

    @Bean
    public ItemWriter<Person> itemWriter() {
        return items -> {
            for (Person item : items) {
                System.out.println("person = " + item);
            }
        };
    }

    @Bean
    public Step step() {
        return steps.get("step")
                .<Person, Person>chunk(1)
                .reader(itemReader())
                .writer(itemWriter())
                .build();
    }

    @Bean
    public Job job() {
        return jobs.get("job")
                .start(step())
                .build();
    }

    public static void main(String[] args) throws Exception {
        ApplicationContext context = new AnnotationConfigApplicationContext(MyJob.class);
        JobLauncher jobLauncher = context.getBean(JobLauncher.class);
        Job job = context.getBean(Job.class);
        jobLauncher.run(job, new JobParameters());
    }

    public static class Person {
        String id;
        String firstName;
        String lastName;
        int age;

        public Person() {
        }

        public String getId() {
            return id;
        }

        public void setId(String id) {
            this.id = id;
        }

        public String getFirstName() {
            return firstName;
        }

        public void setFirstName(String firstName) {
            this.firstName = firstName;
        }

        public String getLastName() {
            return lastName;
        }

        public void setLastName(String lastName) {
            this.lastName = lastName;
        }

        public int getAge() {
            return age;
        }

        public void setAge(int age) {
            this.age = age;
        }

        @Override
        public String toString() {
            return "Person{" +
                    "id='" + id + '\'' +
                    ", firstName='" + firstName + '\'' +
                    ", lastName='" + lastName + '\'' +
                    ", age=" + age +
                    '}';
        }
    }

}

The input file persons.csv is the following:

1,foo1,bar1,10
2,foo2,bar2,20

The example shows how to map only the id and lastName fields.

Hope this helps.

Upvotes: 5

Related Questions