Kafka + Storm + HBase Architecture Design

Kafka + Storm + HBase architecture design: Kafka is a distributed, real-time messaging system with producers and consumers; Storm is a real-time big-data processing system; HBase is the Apache Hadoop database, with efficient read and write performance.
Here, data produced into Kafka is consumed as the source of a Storm spout; after the bolts process it, the results are saved to HBase.

Prerequisites (their setup is not covered here):
a Hadoop cluster (with ZooKeeper)
a Kafka cluster
a Storm cluster

1. Kafka test API (producer and consumer)

Producer:
import java.util.Properties;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
public class Producer extends Thread {
    private final kafka.javaapi.producer.Producer<Integer, String> producer;
    private final String topic;
    private final Properties props = new Properties();

    public Producer(String topic) {
        props.put("serializer.class", "kafka.serializer.StringEncoder");    
        props.put("metadata.broker.list","192.168.80.20:9092,192.168.80.21:9092,192.168.80.22:9092");
        producer = new kafka.javaapi.producer.Producer<Integer, String>(new ProducerConfig(props));
        this.topic = topic;
    }

    public void run() {
        for (int i = 0; i < 2000; i++) {
            String messageStr = new String("Message_" + i);
            System.out.println("product:"+messageStr);
            producer.send(new KeyedMessage<Integer, String>(topic, messageStr));
        }

    }

    public static void main(String[] args) {
        Producer producerThread = new Producer(KafkaProperties.topic);
        producerThread.start();
    }
}
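
The producer above uses the old Scala-based API (kafka.javaapi.producer.Producer), which fits the Kafka 0.8.x era of this post but is deprecated in newer Kafka releases. For reference only, here is a minimal sketch of the same test producer written against the newer Java client; it assumes kafka-clients 0.9+ on the classpath and is not part of the original project:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class NewApiProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.80.20:9092,192.168.80.21:9092,192.168.80.22:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
        for (int i = 0; i < 2000; i++) {
            // send() is asynchronous; the client batches and flushes messages in the background
            producer.send(new ProducerRecord<String, String>("test", "Message_" + i));
        }
        producer.close(); // flush any pending messages before exiting
    }
}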



2. Consumer test API:
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
public class Consumer extends Thread {
    private final ConsumerConnector consumer;
    private final String topic;

    public Consumer(String topic) {
        consumer = kafka.consumer.Consumer
                .createJavaConsumerConnector(createConsumerConfig());
        this.topic = topic;
    }

    private static ConsumerConfig createConsumerConfig() {
        Properties props = new Properties();
        props.put("zookeeper.connect", KafkaProperties.zkConnect);
        props.put("group.id", KafkaProperties.groupId);
        //props.put("zookeeper.session.timeout.ms", "400");
        //props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "60000");//

        return new ConsumerConfig(props);

    }
// Push-style consumption: the high-level consumer streams messages as the server delivers them; the alternative is to pull actively.
    public void run() {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, new Integer(1));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer
                .createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
        ConsumerIterator<byte[], byte[]> it = stream.iterator();
        
        while (it.hasNext()){
            // business logic goes here
            System.out.println("consumer:"+new String(it.next().message()));
            
        }
            
    }

    public static void main(String[] args) {
        Consumer consumerThread = new Consumer(KafkaProperties.topic);
        consumerThread.start();
    }
}
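
Like the producer, this consumer uses the old high-level API (ConsumerConnector), which is deprecated in newer Kafka releases. A minimal sketch of the equivalent loop with the newer Java consumer, again assuming kafka-clients 0.9+ and not part of the original code:

import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class NewApiConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.80.20:9092,192.168.80.21:9092,192.168.80.22:9092");
        props.put("group.id", "group");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList("test"));
        while (true) {
            // poll() returns whatever arrived since the last call, waiting up to 100 ms
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("consumer:" + record.value());
            }
        }
    }
}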



3. Constants used by the Kafka consumer:
public interface KafkaProperties
{
  final static String zkConnect = "192.168.80.20:2181,192.168.80.21:2181,192.168.80.22:2181"; // one entry per ZooKeeper node
  final static String groupId = "group";
  final static String topic = "test";
}



4. Some HBase helper classes, prepared before starting on the project:


import java.util.List;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
public interface HBaseDAO {

    public void save(Put put, String tableName);
    public void insert(String tableName, String rowKey, String family, String qualifier, String value);
    public void save(List<Put> puts, String tableName);
    public Result getOneRow(String tableName, String rowKey);
    public List<Result> getRows(String tableName, String rowKeyLike);
    public List<Result> getRows(String tableName, String rowKeyLike, String[] cols);
    public List<Result> getRows(String tableName, String startRow, String stopRow);
}




import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PrefixFilter;
// HBaseDAO (defined above) is assumed to be in the same package

public class HBaseDAOImp implements HBaseDAO{

    HConnection hTablePool = null;
    public HBaseDAOImp()
    {
        Configuration conf = HBaseConfiguration.create(); // loads hbase-default.xml/hbase-site.xml; a bare Configuration would miss the HBase defaults
        conf.set("hbase.zookeeper.quorum","192.168.80.20,192.168.80.21,192.168.80.22");
        conf.set("hbase.rootdir", "hdfs://cluster/hbase");
        try {
            hTablePool = HConnectionManager.createConnection(conf) ;
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    @Override
    public void save(Put put, String tableName) {
        // TODO Auto-generated method stub
        HTableInterface table = null;
        try {
            table = hTablePool.getTable(tableName) ;
            table.put(put) ;
            
        } catch (Exception e) {
            e.printStackTrace() ;
        }finally{
            try {
                table.close() ;
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
    @Override
    public void insert(String tableName, String rowKey, String family,
            String qualifier, String value) {
        // TODO Auto-generated method stub
        HTableInterface table = null;
        try {
            table = hTablePool.getTable(tableName) ;
            Put put = new Put(rowKey.getBytes());
            put.add(family.getBytes(), qualifier.getBytes(), value.getBytes());
            table.put(put);
        } catch (Exception e) {
            e.printStackTrace();
        }finally
        {
            try {
                table.close() ;
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
    
    @Override
    public void save(List<Put> puts, String tableName) {
        // TODO Auto-generated method stub
        HTableInterface table = null;
        try {
            table = hTablePool.getTable(tableName) ;
            table.put(puts);
        }
        catch (Exception e) {
            e.printStackTrace(); // don't silently swallow write failures
        }finally
        {
            try {
                table.close() ;
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        
    }


    @Override
    public Result getOneRow(String tableName, String rowKey) {
        // TODO Auto-generated method stub
        HTableInterface table = null;
        Result rsResult = null;
        try {
            table = hTablePool.getTable(tableName) ;
            Get get = new Get(rowKey.getBytes()) ;
            rsResult = table.get(get) ;
        } catch (Exception e) {
            e.printStackTrace() ;
        }
        finally
        {
            try {
                table.close() ;
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return rsResult;
    }

    @Override
    public List<Result> getRows(String tableName, String rowKeyLike) {
        // TODO Auto-generated method stub
        HTableInterface table = null;
        List<Result> list = null;
        try {
            table = hTablePool.getTable(tableName) ;
            PrefixFilter filter = new PrefixFilter(rowKeyLike.getBytes());
            Scan scan = new Scan();
            scan.setFilter(filter);
            ResultScanner scanner = table.getScanner(scan) ;
            list = new ArrayList<Result>() ;
            for (Result rs : scanner) {
                list.add(rs) ;
            }
        } catch (Exception e) {
            e.printStackTrace() ;
        }
        finally
        {
            try {
                table.close() ;
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return list;
    }
    
    public List<Result> getRows(String tableName, String rowKeyLike ,String cols[]) {
        // TODO Auto-generated method stub
        HTableInterface table = null;
        List<Result> list = null;
        try {
            table = hTablePool.getTable(tableName) ;
            PrefixFilter filter = new PrefixFilter(rowKeyLike.getBytes());
            Scan scan = new Scan();
            for (int i = 0; i < cols.length; i++) {
                scan.addColumn("cf".getBytes(), cols[i].getBytes()) ;
            }
            scan.setFilter(filter);
            ResultScanner scanner = table.getScanner(scan) ;
            list = new ArrayList<Result>() ;
            for (Result rs : scanner) {
                list.add(rs) ;
            }
        } catch (Exception e) {
            e.printStackTrace() ;
        }
        finally
        {
            try {
                table.close() ;
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return list;
    }
    public List<Result> getRows(String tableName,String startRow,String stopRow)
    {
        HTableInterface table = null;
        List<Result> list = null;
        try {
            table = hTablePool.getTable(tableName) ;
            Scan scan = new Scan() ;
            scan.setStartRow(startRow.getBytes()) ;
            scan.setStopRow(stopRow.getBytes()) ;
            ResultScanner scanner = table.getScanner(scan) ;
            list = new ArrayList<Result>() ;
            for (Result rsResult : scanner) {
                list.add(rsResult) ;
            }
            
        }catch (Exception e) {
            e.printStackTrace() ;
        }
        finally
        {
            try {
                table.close() ;
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return list;
    }
    
    public static void main(String[] args) {
        // TODO Auto-generated method stub
        HBaseDAO dao = new HBaseDAOImp();
        List<Put> list = new ArrayList<Put>();
        Put put = new Put("aa".getBytes());
        put.add("cf".getBytes(), "name".getBytes(), "zhangsan".getBytes()) ;
        list.add(put) ;
//        dao.save(put, "test") ;
        put.add("cf".getBytes(), "addr".getBytes(), "beijing".getBytes()) ;
        list.add(put) ;
        put.add("cf".getBytes(), "age".getBytes(), "30".getBytes()) ;
        list.add(put) ;
        put.add("cf".getBytes(), "tel".getBytes(), "13567882341".getBytes()) ;
        list.add(put) ;
        
        dao.save(list, "test");
//        dao.save(put, "test") ;
//        dao.insert("test", "testrow", "cf", "age", "35") ;
//        dao.insert("test", "testrow", "cf", "cardid", "12312312335") ;
//        dao.insert("test", "testrow", "cf", "tel", "13512312345") ;
        
    }

}
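
The DAO above writes to the table "test", and the topology later writes its aggregates to the table "area_order"; both use the single column family "cf". Those tables have to exist before anything is inserted. Here is a minimal sketch of creating them from Java with the same generation of the HBase client API; the table and family names come from this post, everything else (and whether you prefer the hbase shell instead) depends on your environment:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CreateTables {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.80.20,192.168.80.21,192.168.80.22");
        HBaseAdmin admin = new HBaseAdmin(conf);
        for (String name : new String[] { "test", "area_order" }) {
            if (!admin.tableExists(name)) {
                HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name));
                desc.addFamily(new HColumnDescriptor("cf")); // the only column family used in this post
                admin.createTable(desc);
            }
        }
        admin.close();
    }
}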



Now for the actual (simplified) project code.
1) The Kafka producer:
import java.util.Properties;
import java.util.Random;
// DateFmt (shown in the next section) and KafkaProperties are assumed to be in the same package
import backtype.storm.utils.Utils;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class Producer extends Thread {
    private final kafka.javaapi.producer.Producer<Integer, String> producer;
    private final String topic;
    private final Properties props = new Properties();

    public Producer(String topic) {
        props.put("serializer.class", "kafka.serializer.StringEncoder");// 字符串消息
        props.put("metadata.broker.list", "192.168.80.20:9092,192.168.80.21:9092,192.168.80.22:9092");
        producer = new kafka.javaapi.producer.Producer<Integer, String>( new ProducerConfig(props));
        this.topic = topic;
    }

    public void run() {
        // order_id,order_amt,create_time,area_id
        Random random = new Random();
        String[] order_amt = { "10.10", "20.10", "30.10","40.0", "60.10" };
        String[] area_id = { "1","2","3","4","5" };
        
        int i =0 ;
        while(true) {
            i ++ ;
            String messageStr = i+"\t"+order_amt[random.nextInt(5)]+"\t"+DateFmt.getCountDate(null, DateFmt.date_long)+"\t"+area_id[random.nextInt(5)] ;
            System.out.println("product:"+messageStr);
            producer.send(new KeyedMessage<Integer, String>(topic, messageStr));
            //Utils.sleep(1000) ;

        }

    }

    public static void main(String[] args) {
        Producer producerThread = new Producer(KafkaProperties.topic);
        producerThread.start();
    }
}



2) A date-conversion utility class used above:
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;

public class DateFmt {

    public static final String date_long = "yyyy-MM-dd HH:mm:ss" ;
    public static final String date_short = "yyyy-MM-dd" ;
    
    public static SimpleDateFormat sdf = new SimpleDateFormat(date_short);
    
    public static String getCountDate(String date, String pattern)
    {
        SimpleDateFormat sdf = new SimpleDateFormat(pattern);
        Calendar cal = Calendar.getInstance(); 
        if (date != null) {
            try {
                cal.setTime(sdf.parse(date)) ;
            } catch (ParseException e) {
                e.printStackTrace();
            }
        }
        return sdf.format(cal.getTime());
    }
    
    public static String getCountDate(String date, String pattern, int step)
    {
        SimpleDateFormat sdf = new SimpleDateFormat(pattern);
        Calendar cal = Calendar.getInstance(); 
        if (date != null) {
            try {
                cal.setTime(sdf.parse(date)) ;
            } catch (ParseException e) {
                e.printStackTrace();
            }
        }
        cal.add(Calendar.DAY_OF_MONTH, step) ;
        return sdf.format(cal.getTime());
    }
    
    public static Date parseDate(String dateStr) throws Exception
    {
        return sdf.parse(dateStr);
    }
    
    public static void main(String[] args) throws Exception {
        System.out.println(DateFmt.getCountDate(null, DateFmt.date_short));
        //System.out.println(DateFmt.getCountDate("2014-03-01 12:13:14", DateFmt.date_short));
        //System.out.println(parseDate("2014-05-02").after(parseDate("2014-05-01")));
    }

}
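
One caveat about DateFmt: SimpleDateFormat is not thread-safe, and the static sdf field can be hit concurrently by several spout and bolt tasks running inside one worker JVM. A minimal sketch of a thread-safe variant of parseDate, offered only as an optional hardening and not part of the original code:

import java.text.SimpleDateFormat;
import java.util.Date;

public class DateFmtSafe {
    // one SimpleDateFormat per thread, because the class itself is not thread-safe
    private static final ThreadLocal<SimpleDateFormat> SHORT_FMT = new ThreadLocal<SimpleDateFormat>() {
        @Override
        protected SimpleDateFormat initialValue() {
            return new SimpleDateFormat("yyyy-MM-dd");
        }
    };

    public static Date parseDate(String dateStr) throws Exception {
        return SHORT_FMT.get().parse(dateStr);
    }
}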



3) The project's Kafka consumer.

The consumer stores everything it reads in a FIFO queue, which later serves as the data source for the Storm spout. This queue is the bridge between Kafka and Storm, so it is the important part here.
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.productor.KafkaProperties;

public class OrderConsumer extends Thread {
    private final ConsumerConnector consumer;
    private final String topic;

    private Queue<String> queue = new ConcurrentLinkedQueue<String>(); // FIFO queue shared with the spout
    
    public OrderConsumer(String topic) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());
        this.topic = topic;
    }

    private static ConsumerConfig createConsumerConfig() {
        Properties props = new Properties();
        props.put("zookeeper.connect", KafkaProperties.zkConnect);
        props.put("group.id", KafkaProperties.groupId);
        props.put("zookeeper.session.timeout.ms", "400");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");//zookeeper offset偏移量

        return new ConsumerConfig(props);

    }
// Push-style consumption: the high-level consumer streams messages as the server delivers them; the alternative is to pull actively.
    public void run() {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, new Integer(1));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer
                .createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
        ConsumerIterator<byte[], byte[]> it = stream.iterator();
        
        while (it.hasNext()) {
            // read the message once, log it, and hand it to the spout via the queue
            String msg = new String(it.next().message());
            System.out.println("consumer:" + msg);
            queue.add(msg);
            System.err.println("queue ----->" + queue);
        }
            
    }

    public Queue<String> getQueue()
    {
        return queue ;
    }
    
    public static void main(String[] args) {
        OrderConsumer consumerThread = new OrderConsumer(KafkaProperties.topic);
        consumerThread.start();
      
    }
}
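
Note that ConcurrentLinkedQueue is unbounded: if the spout drains the queue more slowly than Kafka delivers, it simply grows until the worker runs out of memory. A minimal sketch of the same hand-off with a bounded queue, where the consumer thread backs off briefly instead of piling messages up; this is a suggestion, not part of the original code, and the capacity of 10000 is an arbitrary example:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class BoundedHandOff {
    // the capacity caps memory use when the spout falls behind
    private final LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<String>(10000);

    // called from the Kafka consumer thread
    public void onMessage(String msg) throws InterruptedException {
        // waits up to 100 ms for free space instead of growing without limit
        queue.offer(msg, 100, TimeUnit.MILLISECONDS);
    }

    // called from the spout's nextTuple()
    public String nextOrNull() {
        return queue.poll();
    }
}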



4) Now the Storm part: the spout and the bolts.
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import kafka.consumers.OrderConsumer;
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.IRichSpout;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;

public class OrderBaseSpout implements IRichSpout {

    String topic = null;
    public OrderBaseSpout(String topic)
    {
        this.topic = topic ;
    }
    /**
     * Common base spout: drains the consumer's queue and emits each message as a tuple.
     */
    private static final long serialVersionUID = 1L;
    Integer TaskId = null;
    SpoutOutputCollector collector = null;
    Queue<String> queue = new ConcurrentLinkedQueue<String>() ;
    
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // TODO Auto-generated method stub

        declarer.declare(new Fields("order")) ;
    }

    public void nextTuple() {
        // TODO Auto-generated method stub
        if (queue.size() > 0) {
            String str = queue.poll() ;
            // data filtering could be done here before emitting
            System.err.println("TaskId:"+TaskId+";  str="+str);
            collector.emit(new Values(str)) ;
        }
    }

    public void open(Map conf, TopologyContext context,
            SpoutOutputCollector collector) {
        this.collector = collector ;
        TaskId = context.getThisTaskId() ;
//        Thread.currentThread().getId()
        OrderConsumer consumer = new OrderConsumer(topic) ;
        consumer.start() ;
        queue = consumer.getQueue() ;
    }

    
    public void ack(Object msgId) {
        // TODO Auto-generated method stub
        
    }

    
    public void activate() {
        // TODO Auto-generated method stub
        
    }

    
    public void close() {
        // TODO Auto-generated method stub
        
    }

    
    public void deactivate() {
        // TODO Auto-generated method stub
        
    }

    
    public void fail(Object msgId) {
        // TODO Auto-generated method stub
        
    }

    
    public Map<String, Object> getComponentConfiguration() {
        // TODO Auto-generated method stub
        return null;
    }
}
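
This hand-rolled spout is fine for a demo, but it does no ack/fail handling or offset management of its own. The storm-kafka module ships a ready-made KafkaSpout that does; a minimal sketch of wiring it in instead of OrderBaseSpout (class names are from storm-kafka 0.9.x, while the zkRoot and spout id are arbitrary example values):

import backtype.storm.spout.SchemeAsMultiScheme;
import backtype.storm.topology.TopologyBuilder;
import storm.kafka.KafkaSpout;
import storm.kafka.SpoutConfig;
import storm.kafka.StringScheme;
import storm.kafka.ZkHosts;

public class KafkaSpoutWiring {
    public static void main(String[] args) {
        ZkHosts zkHosts = new ZkHosts("192.168.80.20:2181,192.168.80.21:2181,192.168.80.22:2181");
        // topic "test"; the ZooKeeper root and consumer id are example values
        SpoutConfig spoutConfig = new SpoutConfig(zkHosts, "test", "/kafka-storm", "order-spout");
        // each Kafka message is emitted as one string field named "str";
        // AreaFilterBolt reads the field by index, so it works unchanged
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", new KafkaSpout(spoutConfig), 5);
        // ... add the bolts exactly as in MYTopology below
    }
}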




Now that Storm has the source data, how should it be processed? That depends on your own business logic; the processing here is deliberately simple, just enough to complete the end-to-end flow.
There are three bolts: AreaFilterBolt splits each raw order line and emits (area_id, order_amt, order_date); AreaAmtBolt keeps a running per-day, per-area order total (initialized from HBase, and fed via fieldsGrouping so the same area always lands on the same task); AreaRsltBolt periodically writes the totals back to the HBase table area_order.


import java.util.Map;
// DateFmt is assumed to be in the same package
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.IBasicBolt;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

public class AreaFilterBolt implements IBasicBolt {

    private static final long serialVersionUID = 1L;

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("area_id","order_amt","order_date"));
        
    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        // TODO Auto-generated method stub
        return null;
    }

    @Override
    public void prepare(Map stormConf, TopologyContext context) {
        // TODO Auto-generated method stub
        
    }

    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        String order = input.getString(0);
        if(order != null){
            String[] orderArr = order.split("\\t");
            // emit (area_id, order_amt, order_date); the raw line is order_id \t order_amt \t create_time \t area_id
            collector.emit(new Values(orderArr[3],orderArr[1],DateFmt.getCountDate(orderArr[2], DateFmt.date_short)));
        System.out.println("--------------》"+orderArr[3]+orderArr[1]);
        }
        
    }

    @Override
    public void cleanup() {
        // TODO Auto-generated method stub
        
    }

}
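
To make the field indices concrete, this is what the bolt does with one sample line from the producer (a standalone illustration, not part of the topology):

public class AreaFilterExample {
    public static void main(String[] args) {
        // producer line format: order_id \t order_amt \t create_time \t area_id
        String order = "1\t10.10\t2014-05-01 12:13:14\t3";
        String[] orderArr = order.split("\\t");
        // the bolt emits (area_id, order_amt, order_date)
        System.out.println(orderArr[3] + ", " + orderArr[1] + ", " + orderArr[2].substring(0, 10));
        // prints: 3, 10.10, 2014-05-01
    }
}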






import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.IBasicBolt;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
// HBaseDAO, HBaseDAOImp and DateFmt are assumed to be in the same package

public class AreaAmtBolt  implements IBasicBolt{


    private static final long serialVersionUID = 1L;
    Map <String,Double> countsMap = null ;
    String today = null;
    HBaseDAO dao = null;
    
    @Override
    public void cleanup() {
        // drop the in-memory totals when the bolt shuts down
        countsMap.clear();
    }
    
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("date_area","amt")) ;    
        
    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        return null;
    }

    @Override
    public void prepare(Map stormConf, TopologyContext context) {
        countsMap = new HashMap<String, Double>() ;
        dao = new HBaseDAOImp() ;
        // initialize countsMap from the totals already stored in HBase
        today = DateFmt.getCountDate(null, DateFmt.date_short);
        countsMap = this.initMap(today, dao);
        for(String key:countsMap.keySet())
        {
            System.err.println("key:"+key+"; value:"+countsMap.get(key));
        }
    }

    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        if (input != null) {
            String area_id = input.getString(0) ;
            double order_amt = 0.0;
            //order_amt = input.getDouble(1) ;
            try {
                order_amt = Double.parseDouble(input.getString(1)) ;
            } catch (Exception e) {
                System.out.println(input.getString(1)+":---------------------------------");
                e.printStackTrace() ;
            }
            
            String order_date = input.getStringByField("order_date") ;
            
            if (!order_date.equals(today)) {
                // day rollover: reset the running totals and start aggregating for the new day
                countsMap.clear();
                today = order_date;
            }
            
            Double count = countsMap.get(order_date+"_"+area_id) ;
            if (count == null) {
                count = 0.0 ;
            }
            count += order_amt ;
            countsMap.put(order_date+"_"+area_id, count) ;
            System.err.println("areaAmtBolt:"+order_date+"_"+area_id+"="+count);
            collector.emit(new Values(order_date+"_"+area_id,count)) ;
            System.out.println("***********"+order_date+"_"+area_id+count);
        }
        
    }
    
    public Map<String, Double> initMap(String rowKeyDate, HBaseDAO dao)
    {
        Map <String,Double> countsMap = new HashMap<String, Double>() ;
        List<Result> list = dao.getRows("area_order", rowKeyDate, new String[]{"order_amt"});
        for(Result rsResult : list)
        {
            String rowKey = new String(rsResult.getRow());
            for(KeyValue keyValue : rsResult.raw())
            {
                if("order_amt".equals(new String(keyValue.getQualifier())))
                {
                    countsMap.put(rowKey, Double.parseDouble(new String(keyValue.getValue()))) ;
                    break;
                }
            }
        }
        
        return countsMap;
        
    }
    



}




import java.util.HashMap;
import java.util.Map;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.IBasicBolt;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.tuple.Tuple;
// HBaseDAO and HBaseDAOImp are assumed to be in the same package
public class AreaRsltBolt implements IBasicBolt
{


    private static final long serialVersionUID = 1L;
    Map <String,Double> countsMap = null ;
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        
    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        return null;
    }

    @Override
    public void prepare(Map stormConf, TopologyContext context) {
        dao = new HBaseDAOImp() ;
        countsMap = new HashMap<String, Double>() ;
    }

    HBaseDAO dao = null;
    long beginTime = System.currentTimeMillis() ;
    long endTime = 0L ;
    
    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        String date_areaid = input.getString(0);
        double order_amt = input.getDouble(1) ;
        countsMap.put(date_areaid, order_amt) ;
        endTime = System.currentTimeMillis() ;
        if (endTime - beginTime >= 5 * 1000) {
            for (String key : countsMap.keySet()) {
                // flush the aggregated results to HBase roughly every 5 seconds
                dao.insert("area_order", key, "cf", "order_amt", countsMap.get(key) + "");
                System.err.println("rsltBolt put hbase: key=" + key + "; order_amt=" + countsMap.get(key));
            }
            beginTime = endTime; // restart the 5-second window so the flush stays periodic
        }
    }

    @Override
    public void cleanup() {
        
    }

}

Finally, the main class that wires up the topology:

import kafka.productor.KafkaProperties;
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.generated.AlreadyAliveException;
import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.tuple.Fields;
// AreaAmtBolt, AreaFilterBolt, AreaRsltBolt and OrderBaseSpout are assumed to be in the same package

public class MYTopology {

    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", new OrderBaseSpout(KafkaProperties.topic), 5);
        builder.setBolt("filterblot", new AreaFilterBolt() , 5).shuffleGrouping("spout") ;
        builder.setBolt("amtbolt", new AreaAmtBolt() , 2).fieldsGrouping("filterblot", new Fields("area_id")) ;
        builder.setBolt("rsltolt", new AreaRsltBolt(), 1).shuffleGrouping("amtbolt");
        
        
        Config conf = new Config() ;
        conf.setDebug(false);
        if (args.length > 0) {
            try {
                StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
            } catch (AlreadyAliveException e) {
                e.printStackTrace();
            } catch (InvalidTopologyException e) {
                e.printStackTrace();
            }
        }else {
            // local test mode
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("mytopology", conf, builder.createTopology());
        }
        
        
    }

}
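
The local branch above keeps running until you kill the JVM. If you prefer a local test that stops on its own, here is a minimal sketch of a self-terminating variant of the main method (same classes, reduced parallelism; the one-minute runtime is an arbitrary example):

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.tuple.Fields;
import backtype.storm.utils.Utils;
import kafka.productor.KafkaProperties;

public class LocalTestMain {
    public static void main(String[] args) {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("spout", new OrderBaseSpout(KafkaProperties.topic), 1);
        builder.setBolt("filterblot", new AreaFilterBolt(), 1).shuffleGrouping("spout");
        builder.setBolt("amtbolt", new AreaAmtBolt(), 1).fieldsGrouping("filterblot", new Fields("area_id"));
        builder.setBolt("rsltolt", new AreaRsltBolt(), 1).shuffleGrouping("amtbolt");

        Config conf = new Config();
        conf.setDebug(false);

        LocalCluster localCluster = new LocalCluster();
        localCluster.submitTopology("mytopology", conf, builder.createTopology());
        Utils.sleep(60 * 1000);                  // let the topology run for one minute
        localCluster.killTopology("mytopology"); // then stop it
        localCluster.shutdown();                 // and tear down the embedded cluster
    }
}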






At this point the architecture is essentially complete. Learning Kafka, Storm, or HBase on its own is not hard; the real difference lies in wiring them together.

My blog also covers the basics of Kafka, Storm, and HBase; you're welcome to have a look.