/** * 普通事务Spout */ public class MyTxSpout implements ITransactionalSpout<MyMata>{ private static final long serialVersionUID = 1L; /** * 数据源 */ Map<Long, String> dbMap = null; public MyTxSpout() { Random random = new Random(); dbMap = new HashMap<Long, String> (); String[] hosts = { "www.taobao.com" }; String[] session_id = { "ABYH6Y4V4SCVXTG6DPB4VH9U123", "XXYH6YCGFJYERTT834R52FDXV9U34", "BBYH61456FGHHJ7JL89RG5VV9UYU7", "CYYH6Y2345GHI899OFG4V9U567", "VVVYH6Y4V4SFXZ56JIPDPB4V678" }; String[] time = { "2014-01-07 08:40:50", "2014-01-07 08:40:51", "2014-01-07 08:40:52", "2014-01-07 08:40:53", "2014-01-07 09:40:49", "2014-01-07 10:40:49", "2014-01-07 11:40:49", "2014-01-07 12:40:49" }; for (long i = 0; i < 100; i++) { dbMap.put(i,hosts[0]+"\t"+session_id[random.nextInt(5)]+"\t"+time[random.nextInt(8)]); } } public void declareOutputFields(OutputFieldsDeclarer declarer) { declarer.declare(new Fields("tx","log")); } public Map<String, Object> getComponentConfiguration() { return null; } public org.apache.storm.transactional.ITransactionalSpout.Coordinator<MyMata> getCoordinator(Map conf, TopologyContext context) { /** * 发射该metadata(事务tuple)到“batch emit”流 */ return new MyCoordinator(); } public org.apache.storm.transactional.ITransactionalSpout.Emitter<MyMata> getEmitter(Map conf, TopologyContext context) { /** * 逐个发射实际batch的tuple */ return new MyEmitter(dbMap); } }
2、事务Spout创建一个新的事务(元数据)metadata
2、1元数据定义
/**
 * Batch metadata for the transactional spout. Records the replay point
 * (first record id of the batch) and the batch size. Storm persists this
 * object in ZooKeeper (serialized/deserialized via Kryo) so that a failed
 * batch can be replayed from the same point.
 */
public class MyMata implements Serializable {

    private static final long serialVersionUID = 1L;

    private long beginPoint; // first record id of this batch (replay point)
    private int num;         // number of tuples in this batch

    @Override
    public String toString() {
        return String.format("%d----%d", beginPoint, num);
    }

    public long getBeginPoint() {
        return beginPoint;
    }

    public void setBeginPoint(long beginPoint) {
        this.beginPoint = beginPoint;
    }

    public int getNum() {
        return num;
    }

    public void setNum(int num) {
        this.num = num;
    }
}
2、2 获得(元数据)metadata,逐个发射实际batch的tuple
public class MyEmitter implements ITransactionalSpout.Emitter<MyMata> { Map<Long, String> dbMap = null; public MyEmitter(Map<Long, String> dbMap) { this.dbMap = dbMap; } //逐个发射实际batch的tuple public void emitBatch(TransactionAttempt tx, MyMata coordinatorMeta, BatchOutputCollector collector) { long beginPoint = coordinatorMeta.getBeginPoint();// 从上一个批次获得开始位置 int num = coordinatorMeta.getNum();// 从批次中获取批次数量 for (long i = beginPoint; i < num + beginPoint; i++) { if (dbMap.get(i) == null) { continue; } collector.emit(new Values(tx, dbMap.get(i))); } } public void cleanupBefore(BigInteger txid) { } public void close() { } }
------------------------spout与上篇相同-----------------------------------------
3、按天统计数据的事务Bolt:从Emitter接收数据并逐条处理,批次处理完成后,再由finishBatch方法统一提交结果。
public class MyDailyBatchBolt implements IBatchBolt<TransactionAttempt> { /** * 按天统计数据 */ private static final long serialVersionUID = 1L; Map<String, Integer> countMap = new HashMap<String, Integer>(); BatchOutputCollector collector ; Integer count = null; String today = null; TransactionAttempt tx = null; @Override public void execute(Tuple tuple) { // TODO Auto-generated method stub String log = tuple.getString(1); tx = (TransactionAttempt)tuple.getValue(0); if (log != null && log.split("\\t").length >=3 ) { today = DateFmt.getCountDate(log.split("\\t")[2], DateFmt.date_short) ; count = countMap.get(today); if(count == null) { count = 0; } count ++ ; countMap.put(today, count); } } @Override public void finishBatch() { collector.emit(new Values(tx,today,count)); } @Override public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt id) { // TODO Auto-generated method stub this.collector = collector; } @Override public void declareOutputFields(OutputFieldsDeclarer declarer) { // TODO Auto-generated method stub declarer.declare(new Fields("tx","date","count")); } @Override public Map<String, Object> getComponentConfiguration() { // TODO Auto-generated method stub return null; } }
4、接收统计数据,累加汇总MyDailyCommitterBolt,batch之间强制按照顺序进行提交
public class MyDailyCommitterBolt extends BaseTransactionalBolt implements ICommitter{ /** * 接收统计数据,累加汇总 */ private static final long serialVersionUID = 1L; public static final String GLOBAL_KEY = "GLOBAL_KEY"; public static Map<String, DbValue> dbMap = new HashMap<String, DbValue>() ; Map<String, Integer> countMap = new HashMap<String, Integer>(); TransactionAttempt id ; BatchOutputCollector collector; String today = null; @Override public void execute(Tuple tuple) { today = tuple.getString(1) ; Integer count = tuple.getInteger(2); id = (TransactionAttempt)tuple.getValue(0); if (today !=null && count != null) { Integer batchCount = countMap.get(today) ; if (batchCount == null) { batchCount = 0; } batchCount += count ; countMap.put(today, batchCount); } } @Override public void finishBatch() { // TODO Auto-generated method stub if (countMap.size() > 0) { DbValue value = dbMap.get(GLOBAL_KEY); DbValue newValue ; if (value == null || !value.txid.equals(id.getTransactionId())) { //更新数据库 newValue = new DbValue(); newValue.txid = id.getTransactionId() ; newValue.dateStr = today; if (value == null) { newValue.count = countMap.get(today) ; }else { newValue.count = value.count + countMap.get("2014-01-07") ; } dbMap.put(GLOBAL_KEY, newValue); }else { newValue = value; } } System.out.println("total==========================:"+dbMap.get(GLOBAL_KEY).count); // collector.emit(tuple) } @Override public void prepare(Map conf, TopologyContext context, BatchOutputCollector collector, TransactionAttempt id) { // TODO Auto-generated method stub this.id = id ; this.collector = collector; } @Override public void declareOutputFields(OutputFieldsDeclarer declarer) { // TODO Auto-generated method stub } public static class DbValue { BigInteger txid; int count = 0; String dateStr; } }
5、topo类
public class MyDailyTopo { /** * @param args */ public static void main(String[] args) { // TODO Auto-generated method stub TransactionalTopologyBuilder builder = new TransactionalTopologyBuilder("ttbId","spoutid",new MyTxSpout(),1); builder.setBolt("bolt1", new MyDailyBatchBolt(),3).shuffleGrouping("spoutid"); builder.setBolt("committer", new MyDailyCommitterBolt(),1).shuffleGrouping("bolt1") ; Config conf = new Config() ; conf.setDebug(true); if (args.length > 0) { try { StormSubmitter.submitTopology(args[0], conf, builder.buildTopology()); } catch (AlreadyAliveException e) { e.printStackTrace(); } catch (InvalidTopologyException e) { e.printStackTrace(); } }else { LocalCluster localCluster = new LocalCluster(); localCluster.submitTopology("mytopology", conf, builder.buildTopology()); } } }
6、测试结果
引用
启动一个事务:0----10
total==========================:10
启动一个事务:10----10
total==========================:20
启动一个事务:20----10
total==========================:30
total==========================:10
启动一个事务:10----10
total==========================:20
启动一个事务:20----10
total==========================:30