MapReduce Key Concepts by Example --------- Simple Sorting

Copyright notice: original work; please credit the source when reposting! https://blog.csdn.net/Z_Date/article/details/83863932
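The idea behind this example is that the MapReduce framework already sorts map output by key during the shuffle phase. To sort a file of integers it is therefore enough for the Mapper to emit each number as an IntWritable key with an empty value; no custom Reducer is required, and with the default single reduce task the output comes back globally sorted in ascending order. The full code follows.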
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer; // only needed if the optional MyReducer below is enabled
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * 
 * @author lyd
 * Simple sort
 *
 * Input data (one integer per line):
 *
 * 899
 * 45
 * 654564
 * 432
 * 45236
 * 76
 * 654
 * 32
 * 643
 * 45
 * 754
 * 34
 *
 * Follow-up exercise: count word frequencies, sort by count in descending order,
 * and take the top three. For the line
 * hello qianfeng hello qianfeng world hello hadoop hello qianfeng hadoop
 * the expected result is:
 * hello 4
 * qianfeng 3
 * hadoop 2
 */
public class SortSample {
	// Custom Mapper: emits each input number as an IntWritable key
	public static class MyMapper extends Mapper<LongWritable, Text, IntWritable, Text>{
		// Runs only once, before any map() calls
		@Override
		protected void setup(Context context)
				throws IOException, InterruptedException {
		}

		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			// Emit the number as the key; the shuffle phase sorts keys in ascending order
			String line = value.toString().trim();
			if (line.isEmpty()) {
				return; // skip blank lines to avoid a NumberFormatException
			}
			context.write(new IntWritable(Integer.parseInt(line)), new Text(""));
		}
		
		// Runs only once, after all map() calls have finished
		@Override
		protected void cleanup(Context context)
				throws IOException, InterruptedException {
		}
	}
	
	/*// Custom Reducer (optional; its types must match the Mapper's output: IntWritable key, Text value)
	public static class MyReducer extends Reducer<IntWritable, Text, IntWritable, Text>{
		// Runs only once, before any reduce() calls
		@Override
		protected void setup(Context context)
				throws IOException, InterruptedException {
		}

		@Override
		protected void reduce(IntWritable key, Iterable<Text> values, Context context)
				throws IOException, InterruptedException {
			// Write the key once per value so duplicate numbers are kept in the output
			for (Text v : values) {
				context.write(key, v);
			}
		}
		// Runs only once, after all reduce() calls have finished
		@Override
		protected void cleanup(Context context)
				throws IOException, InterruptedException {
		}
	}*/
	
	/**
	 * Driver method: configures and submits the job
	 * @param args
	 */
	public static void main(String[] args) {
		try {
			// 1. Get the Configuration and point it at HDFS
			Configuration conf = new Configuration();
			conf.set("fs.defaultFS", "hdfs://hadoop01:9000");
			// 2. Create the job
			Job job = Job.getInstance(conf, "model01");
			// 3. Set the class that carries the job's jar
			job.setJarByClass(SortSample.class);
			// 4. Set the map-side properties
			job.setMapperClass(MyMapper.class);
			job.setMapOutputKeyClass(IntWritable.class);
			job.setMapOutputValueClass(Text.class);
			FileInputFormat.addInputPath(job, new Path(args[0]));
			
			// 5. Reduce-side properties
			/*job.setReducerClass(MyReducer.class);
			job.setOutputKeyClass(IntWritable.class);
			job.setOutputValueClass(Text.class);*/
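			// Note: with no Reducer class set, Hadoop uses the default identity Reducer,
			// which writes the sorted IntWritable keys through unchanged; the default
			// single reduce task therefore produces one globally sorted output file.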
			// If the output directory already exists, delete it
			FileSystem fs = FileSystem.get(conf);
			if(fs.exists(new Path(args[1]))){
				fs.delete(new Path(args[1]), true);
			}
			FileOutputFormat.setOutputPath(job, new Path(args[1]));
			
			// 6. Submit the job and wait for it to finish
			int isok = job.waitForCompletion(true) ? 0 : 1;
			
			// Exit with the job's status code
			System.exit(isok);
			
		} catch (IOException | ClassNotFoundException | InterruptedException e) {
			e.printStackTrace();
		}
	}
}
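To try it out, package the class into a jar and run it with the input and output paths as arguments, for example (the jar name and HDFS paths here are placeholders, not from the original post): hadoop jar sortsample.jar SortSample /input/numbers.txt /output/sort. With the default TextOutputFormat, part-r-00000 should then contain the input numbers in ascending order, one per line (duplicates such as 45 appear twice).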
