Hadoop MapReduce: watch the field order when defining a custom data type, or you will get errors.

When you define a custom data type by implementing the Writable interface and overriding the write and readFields methods, the fields must be handled in the same order in both methods: if write writes the id field first, then readFields must also read id first. Otherwise deserialization fails:

package com.my.hadoop;

import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Objects;

public class PairWritable implements Writable {

    private int id;
    private String name;

    public PairWritable() {
    }

    public PairWritable(int id, String name) {
        set(id, name);
    }

    public void set(int id, String name) {
        this.setId(id);
        this.setName(name);
    }

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    @Override
    public String toString() {
        return id + "\t" + name;
    }

    /**
     * Fields must be written and read in the same order: if write()
     * writes id first, then readFields() must read id first. Otherwise
     * the bytes are misinterpreted on deserialization.
     * @param dataOutput
     * @throws IOException
     */
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeInt(id);   // 1. write id first
        dataOutput.writeUTF(name); // 2. then name
    }

    @Override
    public void readFields(DataInput dataInput) throws IOException {
        this.id = dataInput.readInt();   // 1. read id first -- same order as write()
        this.name = dataInput.readUTF(); // 2. then name
    }

    @Override
    public int hashCode() {
        // Base the hash on the fields so equal pairs hash alike
        // (the default HashPartitioner relies on this for keys).
        return Objects.hash(id, name);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof PairWritable)) {
            return false;
        }
        PairWritable other = (PairWritable) obj;
        return id == other.id && Objects.equals(name, other.name);
    }
}
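
To make the ordering rule concrete, here is a minimal round-trip sketch (the harness class PairWritableRoundTrip and its main method are illustrative additions, not part of the original post). It serializes a PairWritable through write and restores it through readFields using plain Java streams, which is essentially what Hadoop does when shuffling map output.

package com.my.hadoop;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative round-trip check for PairWritable (hypothetical harness).
public class PairWritableRoundTrip {

    public static void main(String[] args) throws IOException {
        PairWritable written = new PairWritable(1, "hadoop");

        // Serialize: writeInt(id) first, then writeUTF(name).
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        written.write(new DataOutputStream(buffer));

        // Deserialize in the same order: readInt() first, then readUTF().
        PairWritable read = new PairWritable();
        read.readFields(new DataInputStream(
                new ByteArrayInputStream(buffer.toByteArray())));

        System.out.println(read); // prints: 1	hadoop

        // If readFields called readUTF() before readInt(), the first two
        // bytes of the serialized int would be misread as a UTF length,
        // typically producing garbage fields or an EOFException.
    }
}

Inside a real job the same mismatch surfaces during the shuffle, when Hadoop deserializes the map output with readFields.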
