Merging many small files into one large file with MapReduce

Posted by fan_rockrock on 2015-11-19

1. Approach:

See http://blog.yfteach.com/?p=815. Note that the original article contains an error: FileInputFormat does not provide a createRecordReader implementation; the method is found in TextInputFormat, not in "textFileInputFormat" as that article states.

2. Code:

Step 1: Write a class that treats an entire file as a single record, i.e., extend FileInputFormat.

Note: FileInputFormat itself has many subclasses implementing different input formats, such as TextInputFormat, KeyValueTextInputFormat, SequenceFileInputFormat, and NLineInputFormat.


Of particular note: KeyValueTextInputFormat feeds each input line to the mapper as a <key, value> pair, with the Tab character as the default separator. For example, the WordCount program can be modified to use KeyValueTextInputFormat, as sketched right below.
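
Here is a minimal sketch of such a WordCount variant. It is not part of the original post: the class name KeyValueWordCount is made up here, and the separator property name assumes the Hadoop 2.x mapreduce API.

package com.SmallFilesToSequenceFileConverter.hadoop;

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// Illustrative sketch only: WordCount driven by KeyValueTextInputFormat.
public class KeyValueWordCount {

	// With KeyValueTextInputFormat each record arrives as <Text, Text>:
	// the text before the separator (Tab by default) is the key, the rest is the value.
	public static class KVMapper extends Mapper<Text, Text, Text, IntWritable> {
		private static final IntWritable ONE = new IntWritable(1);
		private Text word = new Text();

		@Override
		protected void map(Text key, Text value, Context context)
				throws IOException, InterruptedException {
			// Count the words that appear in the value part of each line.
			StringTokenizer itr = new StringTokenizer(value.toString());
			while (itr.hasMoreTokens()) {
				word.set(itr.nextToken());
				context.write(word, ONE);
			}
		}
	}

	public static class SumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
		@Override
		protected void reduce(Text key, Iterable<IntWritable> values, Context context)
				throws IOException, InterruptedException {
			int sum = 0;
			for (IntWritable v : values) {
				sum += v.get();
			}
			context.write(key, new IntWritable(sum));
		}
	}

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		// The separator defaults to Tab; this property (Hadoop 2.x name) overrides it.
		conf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator", "\t");
		Job job = Job.getInstance(conf, "KeyValueWordCount");
		job.setJarByClass(KeyValueWordCount.class);
		job.setInputFormatClass(KeyValueTextInputFormat.class);
		job.setMapperClass(KVMapper.class);
		job.setReducerClass(SumReducer.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);
		FileInputFormat.addInputPath(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}

The code for step 1, WholeFileInputFormat, follows.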

package com.SmallFilesToSequenceFileConverter.hadoop;

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

// InputFormat that treats each whole file as a single record
public class WholeFileInputFormat extends FileInputFormat<NullWritable,Text>{

	// Ensure files are not split
	@Override
	protected boolean isSplitable(JobContext context, Path filename) {
		return false;
	}

	@Override
	public RecordReader<NullWritable, Text> createRecordReader(
			InputSplit split, TaskAttemptContext context) throws IOException,
			InterruptedException {
		WholeRecordReader reader=new WholeRecordReader();
		reader.initialize(split, context);
		return reader;
	}
  
}

Step 2: Implement a RecordReader that serves the custom InputFormat.

package com.SmallFilesToSequenceFileConverter.hadoop;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

// RecordReader that serves the custom InputFormat above
public class WholeRecordReader extends RecordReader<NullWritable,Text>{

	private FileSplit fileSplit;
	private Configuration conf;
	private Text value=new Text();
	private boolean processed = false; // whether the single record has already been emitted
	@Override
	public NullWritable getCurrentKey() throws IOException,
			InterruptedException {
		return NullWritable.get();
	}

	@Override
	public Text getCurrentValue() throws IOException,
			InterruptedException {
		return value;
	}

	@Override
	public float getProgress() throws IOException, InterruptedException {
		 return processed? 1.0f : 0.0f;
	}
	@Override
	public void initialize(InputSplit split, TaskAttemptContext context)
			throws IOException, InterruptedException {
		this.fileSplit=(FileSplit)split;
	    this.conf=context.getConfiguration();
	}

	@Override
	public boolean nextKeyValue() throws IOException, InterruptedException {
		// Read the whole file into memory in one shot and emit it as a single record.
		if (!processed) {
			byte[] contents = new byte[(int) fileSplit.getLength()];
			Path file = fileSplit.getPath();
			FileSystem fs = file.getFileSystem(conf);
			FSDataInputStream in = null;
			try {
				in = fs.open(file);
				IOUtils.readFully(in, contents, 0, contents.length);
				value.set(contents, 0, contents.length);
			} finally {
				IOUtils.closeStream(in);
			}
			processed = true;
			return true;
		}
		return false;
	}

	

	@Override
	public void close() throws IOException {
		// Nothing to close here: the input stream is opened and closed inside nextKeyValue().
	}

}

Step 3: Write the driver class.

package com.SmallFilesToSequenceFileConverter.hadoop;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SmallFilesToSequenceFileConverter{
	private static class SequenceFileMapper extends Mapper<NullWritable,Text,Text,Text>
	{
		private Text filenameKey;
		// setup() runs once before map() is called; it initializes filenameKey with the input file's path
		
		@Override
		protected void setup(Context context)
				throws IOException, InterruptedException {
			InputSplit split=context.getInputSplit();
		    Path path=((FileSplit)split).getPath();
		    filenameKey=new Text(path.toString());
		}
		
		@Override
		protected void map(NullWritable key,Text value,Context context)
				throws IOException, InterruptedException {
			context.write(filenameKey, value);
		}
	}
	
	public static void main(String[] args) throws Exception {
		Configuration conf=new Configuration();
		Job job=Job.getInstance(conf,"SmallFilesToSequenceFileConverter");
		
		job.setJarByClass(SmallFilesToSequenceFileConverter.class);
		
		job.setInputFormatClass(WholeFileInputFormat.class);
		//job.setOutputFormatClass(TextOutputFormat.class);
		
		
		job.setMapperClass(SequenceFileMapper.class);
		
		// set the final output key/value classes
	    job.setOutputKeyClass(Text.class);
	    job.setOutputValueClass(Text.class);
	    
		FileInputFormat.addInputPath(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job,new Path(args[1]));
	
	    System.exit(job.waitForCompletion(true) ? 0:1);
	}
}
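
As written, the driver leaves the output format at its default (TextOutputFormat), which is why the test below produces plain text lines. If the goal is an actual SequenceFile, as the class name suggests, the driver can instead use SequenceFileOutputFormat. The following is only a sketch of that variant, not code from the original post; the class name SmallFilesToSequenceFileDriver is made up, and the mapper is repeated from above so the sketch compiles on its own.

package com.SmallFilesToSequenceFileConverter.hadoop;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

// Sketch only: same job as above, but the output is written as a SequenceFile.
public class SmallFilesToSequenceFileDriver {

	public static class SequenceFileMapper extends Mapper<NullWritable, Text, Text, Text> {
		private Text filenameKey;

		@Override
		protected void setup(Context context) throws IOException, InterruptedException {
			// Use the path of the file backing this split as the output key.
			InputSplit split = context.getInputSplit();
			Path path = ((FileSplit) split).getPath();
			filenameKey = new Text(path.toString());
		}

		@Override
		protected void map(NullWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			context.write(filenameKey, value);
		}
	}

	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		Job job = Job.getInstance(conf, "SmallFilesToSequenceFile");

		job.setJarByClass(SmallFilesToSequenceFileDriver.class);

		// Each small file becomes one <NullWritable, Text> input record.
		job.setInputFormatClass(WholeFileInputFormat.class);
		// Store the <file name, file contents> pairs in a binary SequenceFile.
		job.setOutputFormatClass(SequenceFileOutputFormat.class);

		job.setMapperClass(SequenceFileMapper.class);

		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(Text.class);

		FileInputFormat.addInputPath(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}

With this change, each small file's name and contents are packed into one binary <key, value> record, which is a common way to work around the small-files problem on HDFS.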


3. Testing

Input files: the input directory contains five files named a, b, c, d, and e.

The data in file a is:

a a a a a
a a a a a

The other files follow the same pattern.

In the final result, each file's name and its data appear together on one line, and all of them are stored in the same output file!
