Getting the name of the file that contains the record being processed, at map time
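The whole trick is one lookup inside map(): for the standard file-based input formats, the task's InputSplit is a FileSplit, and its Path carries the name of the file the current record came from. A minimal sketch of just that step (the instanceof guard is a defensive addition not present in the full listing below; it matters if you ever switch to a combining input format, whose splits are not FileSplits):

    InputSplit split = context.getInputSplit();
    if (split instanceof FileSplit) {
        // FileInputFormat-based jobs hand each map task a FileSplit;
        // getPath() points at the underlying HDFS file.
        String fileName = ((FileSplit) split).getPath().getName();
    }

The full, runnable job follows.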
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 *
 * @author lyd
 *
 * Input data:
 * chinese.txt:
 * 小明 78
 * 小红 80
 * 小白 79
 *
 * math.txt:
 * 小明 68
 * 小红 70
 * 小白 69
 *
 * english.txt:
 * 小明 88
 * 小红 90
 * 小白 89
 *
 * Output (one average per file, keyed by file name):
 * chinese.txt 79.0
 * english.txt 89.0
 * math.txt    69.0
 *
 * Key code, in the MyMapper class:
 * InputSplit is = context.getInputSplit();
 * String fileName = ((FileSplit)is).getPath().getName();
 */
public class AvgDemo {
//Custom Mapper
public static class MyMapper extends Mapper<LongWritable, Text, Text, Text>{
//Runs exactly once, before the first call to map().
@Override
protected void setup(Context context)
throws IOException, InterruptedException {
}
@Override
protected void map(LongWritable key, Text value,Context context)
throws IOException, InterruptedException {
String line = value.toString();
String[] lines = line.split(" ");
//Use the name of the file this split came from as the output key
InputSplit is = context.getInputSplit();
String fileName = ((FileSplit) is).getPath().getName();
context.write(new Text(fileName), new Text(lines[1]));
}
//Runs exactly once, after the last call to map().
@Override
protected void cleanup(Context context)
throws IOException, InterruptedException {
}
}
//Custom Reducer
public static class MyReducer extends Reducer<Text, Text, Text, Text>{
//Runs exactly once, before the first call to reduce().
@Override
protected void setup(Context context)
throws IOException, InterruptedException {
}
@Override
protected void reduce(Text key, Iterable<Text> value,Context context)
throws IOException, InterruptedException {
double total = 0;   //sum of all scores for this file
int count = 0;      //number of records seen
for (Text t : value) {
total += Double.parseDouble(t.toString());
count++;
}
context.write(key, new Text((total / count) + ""));
}
//Runs exactly once, after the last call to reduce().
@Override
protected void cleanup(Context context)
throws IOException, InterruptedException {
}
}
/**
 * Driver: configures and submits the job
 * @param args args[0] = input path, args[1] = output path
 */
public static void main(String[] args) {
try {
//1. Create the Configuration
Configuration conf = new Configuration();
conf.set("fs.defaultFS", "hdfs://hadoop01:9000");
//2. Create the job
Job job = Job.getInstance(conf, "model01");
//3. Set the class whose jar will be shipped with the job
job.setJarByClass(AvgDemo.class);
//4. Configure the map side
job.setMapperClass(MyMapper.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
FileInputFormat.addInputPath(job, new Path(args[0]));
//5. Configure the reduce side
job.setReducerClass(MyReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
//Delete the output directory if it already exists (the job would otherwise fail)
FileSystem fs = FileSystem.get(conf);
if(fs.exists(new Path(args[1]))){
fs.delete(new Path(args[1]), true);
}
FileOutputFormat.setOutputPath(job, new Path(args[1]));
//6. Submit the job and wait for it to finish
int isok = job.waitForCompletion(true) ? 0 : 1;
//Exit with the job's status code
System.exit(isok);
} catch (IOException | ClassNotFoundException | InterruptedException e) {
e.printStackTrace();
}
}
}
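To run it, package the class into a jar (the jar name and HDFS paths below are illustrative, not from the original post) and pass the input and output directories as the two arguments:

    hadoop jar avg-demo.jar AvgDemo /scores/input /scores/output

Put chinese.txt, math.txt and english.txt in the input directory; with the single default reducer, the per-file averages land in part-r-00000 under the output directory.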