Reputation: 65
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.Date;
import java.util.HashMap;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class StubMapper extends Mapper<LongWritable, Text, Text, MinMaxCountTuple> {
private Text outUserId = new Text();
private MinMaxCountTuple outTuple = new MinMaxCountTuple();
private final static SimpleDateFormat frmt =
new SimpleDateFormat("yyyy-MM--dd'T'HH:mm:ss.SSS");
// public static HashMap<String, String> getMapFromCSV(String filePath) throws IOException
// {
// HashMap<String, String> words = new HashMap<String, String>();
//
// /*BufferedReader in = new BufferedReader(new FileReader(filePath));
// String line;
// while ((line = in.readLine()) != null) {
// String columns[] = line.split(",");
// if (!words.containsKey(columns[1])) {
// words.put(columns[1], columns[6]);
// }
// }
// return words;
// */
//
// String line=filePath;
// while(line!=null){
// String columns[] = line.split(",");
// if (columns.length>6){
// if (!words.containsKey(columns[1])) {
// words.put(columns[1], columns[6]);
// }
// }
// }
// return words;
// }
@Override
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
// HashMap<String, String> parsed = getMapFromCSV(value.toString());
//String columns[] = value.toString().split("\t");
// String strDate = parsed.get("CheckoutDateTime");
//String userId = columns[1];
//String strDate = columns[6];
if(value.toString().startsWith("BibNumber"))
{
return;
}
// String userId = parsed.get("BibNumber");
String data[] = value.toString().split(",",-1);
String userId = data[0];
String DateTime = data[5];
try {
Date creationDate = frmt.parse(DateTime);
outTuple.setMin(creationDate);
outTuple.setMax(creationDate);
outTuple.setCount(1);
outUserId.set(userId);
context.write(outUserId, outTuple);
} catch (ParseException e) {
// a record whose date fails to parse is only logged, not emitted
e.printStackTrace();
}
}
}
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.hadoop.io.Writable;
public class MinMaxCountTuple implements Writable{
private Date min = new Date();
private Date max = new Date();
private long count = 0;
private final static SimpleDateFormat frmt = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS");
public Date getMin()
{
return min;
}
public void setMin(Date min)
{
this.min = min;
}
public Date getMax()
{
return max;
}
public void setMax(Date max)
{
this.max = max;
}
public long getCount()
{
return count;
}
public void setCount(long count)
{
this.count = count;
}
@Override
public void write(DataOutput out) throws IOException {
// serialize the two dates as epoch milliseconds, then the count
out.writeLong(min.getTime());
out.writeLong(max.getTime());
out.writeLong(count);
}
public String toString()
{
return frmt.format(min) + "\t" + frmt.format(max) + "\t" + count;
}
@Override
public void readFields(DataInput in) throws IOException {
// read the fields back in the same order they were written: min, max, count
min = new Date(in.readLong());
max = new Date(in.readLong());
count = in.readLong();
}
}
These two classes are the mapper and the min/max tuple class, which find the minimum and maximum of CheckoutDateTime. Basically, what I am trying to get is output showing on which dates a book is mostly checked out, so I used the userId (BibNumber) as the key and the CheckoutDateTime from the CSV file as the value. The job ran without errors, but the problem is that while the map input counter shows the size of the data, the map output is 0, which means the mapper did not produce any output from the input. I am not seeing which part is wrong. I have put up a screenshot of my CSV file. Please enlighten me; it would be really appreciated. Thanks. If you need more information about my code, just let me know and I will post more.
18/03/30 01:38:41 INFO mapred.JobClient: Map input records=3794727
18/03/30 01:38:41 INFO mapred.JobClient: Map output records=0
18/03/30 01:38:41 INFO mapred.JobClient: Map output bytes=0
18/03/30 01:38:41 INFO mapred.JobClient: Input split bytes=416
18/03/30 01:38:41 INFO mapred.JobClient: Combine input records=0
18/03/30 01:38:41 INFO mapred.JobClient: Combine output records=0
18/03/30 01:38:41 INFO mapred.JobClient: Reduce input groups=0
18/03/30 01:38:41 INFO mapred.JobClient: Reduce shuffle bytes=24
18/03/30 01:38:41 INFO mapred.JobClient: Reduce input records=0
18/03/30 01:38:41 INFO mapred.JobClient: Reduce output records=0
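For reference, here is a rough standalone sketch of how one data line is meant to flow through map() above; the record and its field values are made up, since the actual CSV is only shown in the screenshot:
import java.text.SimpleDateFormat;
import java.util.Date;
public class MapFlowSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical CSV record: only columns 0 (BibNumber) and 5
        // (CheckoutDateTime) matter to the mapper; the real values will differ.
        String line = "1234567,0010035421123,acbk,namys,FICTION,2017-05-01T13:45:00.000";
        String[] data = line.split(",", -1);
        String userId = data[0];    // becomes the output key
        String dateTime = data[5];  // becomes the min/max date in the tuple
        // The pattern must match the date text in the CSV exactly; if it does
        // not, parse() throws ParseException and the record is never emitted.
        SimpleDateFormat frmt = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS");
        Date creationDate = frmt.parse(dateTime);
        System.out.println(userId + "\t" + frmt.format(creationDate) + "\t1");
    }
}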
Upvotes: 0
Views: 183
Reputation: 331
The Mapper code looks fine. Have you explicitly set the map output key and output value classes in the Driver?
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(MinMaxCountTuple.class);
You can try adding these if they are not already set in the driver.
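For example, a minimal driver sketch along those lines (the class name MinMaxCountDriver and the argument handling are just illustrative, and it assumes the Hadoop 2.x Job API; on 1.x you would use new Job(conf, name) instead of Job.getInstance):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class MinMaxCountDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "checkout min/max count");
        job.setJarByClass(MinMaxCountDriver.class);
        job.setMapperClass(StubMapper.class);
        // job.setCombinerClass(...) / job.setReducerClass(...) go here if used
        // Declare the mapper's output types explicitly; otherwise the framework
        // falls back to the job-level output classes.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(MinMaxCountTuple.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(MinMaxCountTuple.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}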
Upvotes: 1