SortedWordCount Source Code and Process Analysis
Run screenshot: (screenshot omitted)
Code logic:
Sort.java
// Sort.java -- goal: sort keys in descending order
package com;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
public class Sort {
    public static class SimpleMapper
            extends Mapper<IntWritable, Text, RevertKey, Text> {
        @Override
        public void map(IntWritable key, Text value, Context context
                /* the incoming key is the word count, the value is the word itself */
                ) throws IOException, InterruptedException {
            /* Goal: sort keys in descending order. Hadoop sorts IntWritable
             * ascending by default, so the map output key is wrapped in a
             * custom type named RevertKey, which inverts the comparison. */
            RevertKey newkey = new RevertKey(key);
            context.write(newkey, value);
        }
    }
    public static class SimpleReducer
            extends Reducer<RevertKey, Text, Text, IntWritable> {
        @Override
        public void reduce(RevertKey key, Iterable<Text> values,
                Context context
                ) throws IOException, InterruptedException {
            for (Text val : values) { // iterate over all words with this count
                context.write(val, key.getKey()); // emit (word, count)
            }
        }
    }
    public static class RevertKey
            implements WritableComparable<RevertKey> {
        private IntWritable key; // the wrapped count

        public RevertKey() {
            key = new IntWritable();
        }
        public RevertKey(IntWritable key) {
            this.key = key;
        }
        public IntWritable getKey() {
            return key;
        }
        @Override
        public int compareTo(RevertKey other) {
            // Negating the natural (ascending) comparison yields descending order.
            return -key.compareTo(other.getKey());
        }
        @Override
        public boolean equals(Object o) {
            return o instanceof RevertKey && key.equals(((RevertKey) o).key);
        }
        @Override
        public int hashCode() {
            // Without this, HashPartitioner would use Object.hashCode and equal
            // counts could land on different reducers when more than one is used.
            return key.hashCode();
        }
        @Override
        public void readFields(DataInput in) throws IOException {
            key.readFields(in);
        }
        @Override
        public void write(DataOutput out) throws IOException {
            key.write(out);
        }
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        // The two arguments are the HDFS input and output paths.
        if (otherArgs.length != 2) {
            System.err.println("Usage: sort <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "Sort"); // Job(Configuration conf, String jobName) sets the job name
        job.setJarByClass(Sort.class);
        job.setMapperClass(SimpleMapper.class); // Mapper class for the job
        job.setReducerClass(SimpleReducer.class); // Reducer class for the job
        job.setMapOutputKeyClass(RevertKey.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class); // final output key type
        job.setOutputValueClass(IntWritable.class); // final output value type
        // Read the SequenceFile produced by the WordCount job.
        job.setInputFormatClass(SequenceFileInputFormat.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0])); // input path
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1])); // output path
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
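To sanity-check the inverted comparator, a minimal standalone sketch (the demo class name is illustrative, not part of the original code) can sort a few wrapped counts with Collections.sort and print them; the output comes out in descending order:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.io.IntWritable;
import com.Sort.RevertKey;

public class RevertKeyDemo {
    public static void main(String[] args) {
        List<RevertKey> keys = new ArrayList<RevertKey>();
        for (int n : new int[]{3, 17, 5}) {
            keys.add(new RevertKey(new IntWritable(n)));
        }
        Collections.sort(keys); // uses RevertKey.compareTo
        for (RevertKey k : keys) {
            System.out.println(k.getKey()); // prints 17, 5, 3
        }
    }
}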
WordCount.java
// WordCount.java: the final output maps each word count to the word itself
package com;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
public class WordCount {
    /**
     * Background notes (these describe the old org.apache.hadoop.mapred API;
     * the code below uses the newer org.apache.hadoop.mapreduce API):
     * MapReduceBase: a base class implementing the Mapper and Reducer
     *   interfaces (its methods are empty stubs that merely satisfy the interface).
     * WritableComparable: classes implementing it can be compared with one
     *   another; every class used as a key should implement this interface.
     * Reporter: can report the running progress of the whole application;
     *   not used in this example.
     */
    public static class TokenizerMapper
            extends Mapper<Object, Text, Text, IntWritable> {
        /**
         * LongWritable, IntWritable and Text are Hadoop wrapper classes for
         * Java data types. They implement WritableComparable, so they can be
         * serialized for data exchange in a distributed environment; think of
         * them as replacements for long, int and String respectively.
         */
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text(); // Text extends BinaryComparable, so it can serve as a key

        /**
         * The map method (old-API signature shown for reference):
         * void map(K1 key, V1 value, OutputCollector<K2,V2> output, Reporter reporter)
         * maps a single input k/v pair to intermediate k/v pairs. The output
         * pairs need not have the same types as the input pair, and one input
         * pair may map to zero or more output pairs. In the old API,
         * OutputCollector.collect(k, v) adds a (k,v) pair to the output.
         */
        public void map(Object key, Text value, Context context) throws IOException,
                InterruptedException {
            // Split the line into whitespace-separated tokens.
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one); // emit (word, 1)
            }
        }
    }
    /* Note the type parameters: this reducer swaps key and value, emitting
     * (count, word) so that its output can feed the Sort job. */
    public static class IntSumReducer extends Reducer<Text, IntWritable, IntWritable, Text> {
        private IntWritable result = new IntWritable();
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException,
                InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(result, key); // emit (count, word)
        }
    }
    /* Combiner (it makes the intermediate data harder to inspect, but shrinks
     * the data flowing between map and reduce, so the reducers fetch less and
     * the job runs faster). A combiner's input and output types must match. */
    public static class IntSumCombiner
            extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();
        public void reduce(Text key, Iterable<IntWritable> values,
                Context context
                ) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }
    public static void main(String[] args) throws Exception {
        /**
         * JobConf is the old map/reduce job configuration class that describes
         * the map-reduce work to the Hadoop framework (constructors include
         * JobConf(), JobConf(Class exampleClass) and JobConf(Configuration conf));
         * this code uses the newer Job class instead.
         */
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        // The two arguments are the HDFS input and output paths.
        if (otherArgs.length != 2) {
            System.err.println("Usage: wordcount <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "word count"); // Job(Configuration conf, String jobName) sets the job name
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class); // Mapper class for the job
        job.setCombinerClass(IntSumCombiner.class); // Combiner class for the job
        job.setReducerClass(IntSumReducer.class); // Reducer class for the job
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class); /* declare the map output <key,value> types; if undeclared they default to the final output types */
        job.setOutputKeyClass(IntWritable.class); // final output key type: key and value are swapped relative to the classic wordcount output so the result can serve as input to the Sort job
        job.setOutputValueClass(Text.class); // final output value type
        /* SequenceFileOutputFormat makes the output convenient as input for the
         * second job. SequenceFile is a binary file format provided by the
         * Hadoop API that serializes <key,value> pairs directly into the file.
         * It is also commonly used to merge small files into one large file,
         * with each file name as a key and the file contents as the value. */
        job.setOutputFormatClass(SequenceFileOutputFormat.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0])); // input path
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1])); // output path
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
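Since Sort consumes the SequenceFile that WordCount writes, the two jobs normally run back to back. A minimal driver sketch (the class name SortedWordCountDriver and the three-argument layout are illustrative, not part of the original code) could chain them like this:

package com;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

// Hypothetical driver: runs WordCount first, then Sort on its output.
public class SortedWordCountDriver {
    public static void main(String[] args) throws Exception {
        if (args.length != 3) {
            System.err.println("Usage: driver <in> <tmp> <out>");
            System.exit(2);
        }
        Configuration conf = new Configuration();

        Job count = new Job(conf, "word count");
        count.setJarByClass(WordCount.class);
        count.setMapperClass(WordCount.TokenizerMapper.class);
        count.setCombinerClass(WordCount.IntSumCombiner.class);
        count.setReducerClass(WordCount.IntSumReducer.class);
        count.setMapOutputKeyClass(Text.class);
        count.setMapOutputValueClass(IntWritable.class);
        count.setOutputKeyClass(IntWritable.class);
        count.setOutputValueClass(Text.class);
        count.setOutputFormatClass(SequenceFileOutputFormat.class);
        FileInputFormat.addInputPath(count, new Path(args[0]));
        FileOutputFormat.setOutputPath(count, new Path(args[1]));
        if (!count.waitForCompletion(true)) {
            System.exit(1); // stop if the first job failed
        }

        Job sort = new Job(conf, "Sort");
        sort.setJarByClass(Sort.class);
        sort.setMapperClass(Sort.SimpleMapper.class);
        sort.setReducerClass(Sort.SimpleReducer.class);
        sort.setMapOutputKeyClass(Sort.RevertKey.class);
        sort.setMapOutputValueClass(Text.class);
        sort.setOutputKeyClass(Text.class);
        sort.setOutputValueClass(IntWritable.class);
        sort.setInputFormatClass(SequenceFileInputFormat.class);
        FileInputFormat.addInputPath(sort, new Path(args[1])); // WordCount output
        FileOutputFormat.setOutputPath(sort, new Path(args[2]));
        System.exit(sort.waitForCompletion(true) ? 0 : 1);
    }
}

Packaged into a jar, it would run as something like hadoop jar sortedwordcount.jar com.SortedWordCountDriver <in> <tmp> <out>, with the intermediate directory holding the SequenceFile output.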
// The "Shuffle Errors" block in the job output comes from counters.
You can check the Hadoop source code; I read the CDH version: hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
These built-in counters are defined in properties files, which can be found at:
./hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/org/apache/hadoop/mapreduce/lib/output/FileOutputFormatCounter.properties
./hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/org/apache/hadoop/mapreduce/lib/input/FileInputFormatCounter.properties
./hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/org/apache/hadoop/mapreduce/TaskCounter.properties
./hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/org/apache/hadoop/mapreduce/JobCounter.properties
./hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/org/apache/hadoop/mapreduce/FileSystemCounter.properties
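The same counters that the console prints can also be read programmatically once a job has finished. A minimal sketch (the helper class CounterDump is hypothetical; it assumes a Job object whose waitForCompletion has already returned):

package com;

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskCounter;

// Hypothetical helper: prints a few built-in counters of a finished job.
public class CounterDump {
    public static void dump(Job job) throws Exception {
        Counters counters = job.getCounters();
        // Built-in task counters are declared in org.apache.hadoop.mapreduce.TaskCounter.
        Counter mapIn = counters.findCounter(TaskCounter.MAP_INPUT_RECORDS);
        Counter redOut = counters.findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS);
        System.out.println(mapIn.getDisplayName() + " = " + mapIn.getValue());
        System.out.println(redOut.getDisplayName() + " = " + redOut.getValue());
    }
}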
// Also note that the output of SequenceFileOutputFormat is not human-readable!
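Because of that, the intermediate output cannot be inspected with a plain hadoop fs -cat. A minimal reader sketch (the class name SeqFileDump is illustrative) that decodes the <IntWritable, Text> pairs written by the WordCount job:

package com;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Hypothetical inspector for the WordCount job's SequenceFile output.
public class SeqFileDump {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path(args[0]); // e.g. a part-r-00000 file from the WordCount output
        SequenceFile.Reader reader =
                new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
        try {
            IntWritable key = new IntWritable();
            Text value = new Text();
            while (reader.next(key, value)) {
                System.out.println(key.get() + "\t" + value);
            }
        } finally {
            reader.close();
        }
    }
}

From the command line, hadoop fs -text <path> decodes SequenceFiles the same way.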
// "Shuffle Errors" tallies error conditions during the shuffle; my output shows that nothing went wrong between map and reduce.
The "false" means the job did not run in uber mode. Uber mode is a special execution mode in MapReduce 2.x that runs the map/reduce tasks inside the ApplicationMaster instead of distributing them; it is used for jobs with very small datasets or for testing.
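Uber mode can be switched on through the job configuration; a minimal sketch (the threshold values shown are, to my knowledge, the defaults):

// Inside main(), before constructing the Job:
conf.setBoolean("mapreduce.job.ubertask.enable", true); // run the whole job in the ApplicationMaster
conf.setInt("mapreduce.job.ubertask.maxmaps", 9);       // job must have at most 9 map tasks
conf.setInt("mapreduce.job.ubertask.maxreduces", 1);    // and at most 1 reduce task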