package cagy.mapreduce.wordcount;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.StringUtils;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        // split the line into words
        String[] words = StringUtils.split(line, ' ');
        // emit (word, 1) for every word
        for (String word : words) {
            context.write(new Text(word), new LongWritable(1));
        }
    }
}
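For example, given the input line "hello world hello", this mapper emits the pairs (hello, 1), (world, 1) and (hello, 1); the framework then groups the pairs by key during the shuffle before they reach the reducer.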
package cagy.mapreduce.wordcount;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {
        long count = 0;
        // sum up all the 1s emitted for this word
        for (LongWritable value : values) {
            count += value.get();
        }
        // write (word, total count)
        context.write(key, new LongWritable(count));
    }
}
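Continuing the example above, the reducer receives (hello, [1, 1]) and (world, [1]) and writes (hello, 2) and (world, 1) to the output.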
package cagy.mapreduce.wordcount;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Sets up the word-count job and submits it to Hadoop.
 *
 * @author Administrator
 */
public class WordCountRunner {

    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job wcjob = Job.getInstance(conf);
        wcjob.setJarByClass(WordCountRunner.class); // jar that contains the job classes
        wcjob.setMapperClass(WordCountMapper.class);
        wcjob.setReducerClass(WordCountReducer.class);
        // key/value types of the map output
        wcjob.setMapOutputKeyClass(Text.class);
        wcjob.setMapOutputValueClass(LongWritable.class);
        // key/value types of the reducer output
        wcjob.setOutputKeyClass(Text.class);
        wcjob.setOutputValueClass(LongWritable.class);
        // HDFS input path
        FileInputFormat.setInputPaths(wcjob, "hdfs://192.168.0.109:9000/wc/data");
        // HDFS output path for the results
        FileOutputFormat.setOutputPath(wcjob, new Path("hdfs://192.168.0.109:9000/wc/output1"));
        boolean res = wcjob.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
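One thing to watch: Hadoop refuses to submit the job if the output directory already exists. A minimal sketch of clearing it inside main() before submission (it reuses the NameNode address and output path hard-coded above and additionally needs imports for java.net.URI and org.apache.hadoop.fs.FileSystem):

// delete a leftover output directory so the job can be resubmitted
FileSystem fs = FileSystem.get(URI.create("hdfs://192.168.0.109:9000"), conf);
Path output = new Path("/wc/output1");
if (fs.exists(output)) {
    fs.delete(output, true); // true = delete recursively
}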
Finally, export the project as a JAR file and upload it to the Hadoop cluster.
Run the job with:
hadoop jar wc.jar cagy.mapreduce.wordcount.WordCountRunner
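Assuming the job completes successfully, the results can be inspected directly on HDFS, for example:

hadoop fs -ls /wc/output1
hadoop fs -cat /wc/output1/part-r-00000

Each line of part-r-00000 contains a word, a tab, and its total count.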