Data Warehouse Project

Technology selection for the warehouse build
1. Java        1.8
2. Hadoop      2.7.2
3. Hive        1.2.1
4. Sqoop       1.4.6
5. Flume       1.7.0
6. Kafka       0.11.0.2
7. Azkaban     2.5.0
8. Zookeeper   3.4.10
Server selection:
1. Physical machine: about 40,000 RMB, 128 GB RAM, 20-core CPU with 40 threads, 8 TB HDD plus 2 TB SSD, roughly a 5-year lifespan
2. Alibaba Cloud at an equivalent spec: about 50,000 RMB per year
Cluster planning (assume 8 TB of disk and 128 GB of RAM per server):
1. 1,000,000 daily active users, on average 100 log lines per user per day: 100 × 1,000,000 = 100,000,000 lines per day
2. Each log line is roughly 1 KB, so 100,000,000 lines/day × 1 KB ≈ 100 GB/day
   100 GB/day × 30 ≈ 3 TB/month ≈ 36 TB/year
3. Keep 3 HDFS replicas: 36 TB × 3 = 108 TB
   Reserve 20-30% headroom: 108 TB / 0.7 ≈ 150 TB
4. 150 TB / 8 TB per server ≈ 18 servers
Also allow for the extra storage used by the data warehouse layers: plan to scale the cluster out by another 1-2x. (The sizing arithmetic is sketched in code right after this list.)
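The same back-of-envelope arithmetic as a small Java sketch; the daily active users, the 100 lines per user and the 1 KB line size are the assumptions listed above, and the rounding follows the notes:

public class ClusterSizing {
    public static void main(String[] args) {
        long eventsPerDay = 1_000_000L * 100;           // 1M daily active users x 100 log lines each
        double gbPerDay   = 100;                        // ~1 KB per line x 1e8 lines ≈ 100 GB/day
        double tbPerYear  = gbPerDay * 30 * 12 / 1000;  // ~3 TB/month -> ~36 TB/year
        double replicated = tbPerYear * 3;              // 3 HDFS replicas -> ~108 TB
        double withSpare  = replicated / 0.7;           // keep 20-30% free -> ~150 TB
        double servers    = withSpare / 8;              // 8 TB usable per server -> ~18-19 servers
        System.out.printf("lines/day=%d, %.0f TB/year raw, %.0f TB replicated, %.0f TB total, %.1f servers%n",
                eventsPerDay, tbPerYear, replicated, withSpare, servers);
    }
}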
Machine installation
HDFS: hadoop203, hadoop204, hadoop205
Component deployment plan per server (service name / sub-service / hadoop203 / hadoop204 / hadoop205); the actual placement is listed under "1. Build the cluster" below.
Basic format of the tracking-point (埋点) data
Common fields: fields that essentially every Android phone reports
Business fields: fields reported by the tracking points, tied to a specific business event type
Event log data
Part 1: generate the corresponding data
pom:
Part 2: create the bean objects
Start from Main()
1. Build the cluster
Install Hadoop: NameNode on hadoop203, SecondaryNameNode on hadoop205, DataNodes on hadoop203/204/205
ResourceManager on hadoop204, NodeManagers on hadoop203/204/205

2. Configure multiple data disks (check the mounted disks with df -h), e.g. in hdfs-site.xml:
<property>
  <name>dfs.datanode.data.dir</name>
  <value>file:///${hadoop.tmp.dir}/dfs/data1,file:///hd2/dfs/data2,file:///hd3/dfs/data3,file:///hd4/dfs/data4</value>
</property>
Sample df -h output:
/dev/mapper/centos-root   17G  6.8G   11G   40% /
/dev/sda1               1014M  186M  829M   19% /boot
3. Enable LZO support
cp hadoop-lzo-0.4.20.jar /opt/module/hadoop-2.7.2/share/hadoop/common/
xsync hadoop-lzo-0.4.20.jar
Then register the codecs in core-site.xml:
<property>
  <name>io.compression.codecs</name>
  <value>
    org.apache.hadoop.io.compress.GzipCodec,
    org.apache.hadoop.io.compress.DefaultCodec,
    org.apache.hadoop.io.compress.BZip2Codec,
    org.apache.hadoop.io.compress.SnappyCodec,
    com.hadoop.compression.lzo.LzoCodec,
    com.hadoop.compression.lzo.LzopCodec
  </value>
</property>
<property>
  <name>io.compression.codec.lzo.class</name>
  <value>com.hadoop.compression.lzo.LzoCodec</value>
</property>

Benchmark HDFS write performance (write 10 files of 128 MB each):
hadoop jar /opt/module/hadoop-2.7.2/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.2-tests.jar TestDFSIO -write -nrFiles 10 -fileSize 128MB
Read test: read the 10 x 128 MB files back from the HDFS cluster, then clean up the test data:
hadoop jar /opt/module/hadoop-2.7.2/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.7.2-tests.jar TestDFSIO -clean

1) HDFS parameter tuning (hdfs-site.xml)
(1) dfs.namenode.handler.count = 20 × log2(cluster size); for an 8-node cluster that works out to 60. The NameNode has a worker thread pool that handles the concurrent heartbeats from DataNodes and the concurrent metadata operations from clients. For large clusters, or clusters with many clients, the default value of 10 usually has to be raised; the general rule is 20 × log2(N), where N is the cluster size.
(2) Keep the edit-log directory (dfs.namenode.edits.dir) and the fsimage directory (dfs.namenode.name.dir) on separate disks where possible, to minimise write latency.
2) YARN parameter tuning (yarn-site.xml)

Install Zookeeper 3.4.10
1. Write a ZK group start/stop script.
2. Problem: commands run over ssh could not see the login environment; fix:
cat /etc/profile >> ~/.bashrc
zk.sh start|status|stop

Log collection: configured on hadoop203 and hadoop205
log-collector-1.0-SNAPSHOT-jar-with-dependencies.jar
java -classpath log-collector-1.0-SNAPSHOT-jar-with-dependencies.jar com.atguigu.appclient.AppMain > /opt/module/test.log

The data warehouse (DataWarehouse) is the output of the log collection system and the input of the reporting system and of user profiling.

Flume
1. Install Flume 1.7.0 on hadoop203 and hadoop204.
2. Tune the Flume heap size.
3. Install Ganglia.
Will Flume lose collected data? No: the Channel can be backed by files (FileChannel), and the transfer itself is transactional.
FileChannel optimisation: point dataDirs at several paths, each on a different disk, to increase Flume throughput.
Taildir Source: resumable (position-file based) reads over multiple directories. Before Flume 1.6 you had to write a custom Source that recorded the read offset of each file to get resumable reads.
With events of about 1 KB, a batch size of 500-1000 is appropriate (the default is 100).
Channel: use the Kafka Channel, which removes the need for a separate Sink and improves efficiency.

Implementation
Log files /tmp/logs/app***.log ==== Flume ====> Kafka topic_start | topic_event

file-flume-kafka.conf:

a1.sources=r1
a1.channels=c1 c2

# configure source

a1.sources.r1.type = TAILDIR
a1.sources.r1.positionFile = /opt/module/flume/test/log_position.json
a1.sources.r1.filegroups = f1
a1.sources.r1.filegroups.f1 = /tmp/logs/app.+
a1.sources.r1.fileHeader = true
a1.sources.r1.channels = c1 c2

#interceptor
a1.sources.r1.interceptors = i1 i2
a1.sources.r1.interceptors.i1.type = com.atguigu.flume.interceptor.LogETLInterceptor$Builder
a1.sources.r1.interceptors.i2.type = com.atguigu.flume.interceptor.LogTypeInterceptor$Builder

a1.sources.r1.selector.type = multiplexing
a1.sources.r1.selector.header = topic
a1.sources.r1.selector.mapping.topic_start = c1
a1.sources.r1.selector.mapping.topic_event = c2

# configure channel

a1.channels.c1.type = org.apache.flume.channel.kafka.KafkaChannel
a1.channels.c1.kafka.bootstrap.servers = hadoop203:9092,hadoop204:9092,hadoop205:9092
a1.channels.c1.kafka.topic = topic_start
a1.channels.c1.parseAsFlumeEvent = false
a1.channels.c1.kafka.consumer.group.id = flume-consumer

a1.channels.c2.type = org.apache.flume.channel.kafka.KafkaChannel
a1.channels.c2.kafka.bootstrap.servers = hadoop203:9092,hadoop204:9092,hadoop205:9092
a1.channels.c2.kafka.topic = topic_event
a1.channels.c2.parseAsFlumeEvent = false
a1.channels.c2.kafka.consumer.group.id = flume-consumer
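Once this agent is running (the start command appears further below), one quick way to confirm that start logs actually reach Kafka is to read a few records back from topic_start. A minimal sketch against the plain kafka-clients consumer API; the bootstrap servers and topic come from the config above, while the class name, package, group id and poll timeout are made up:

package com.atguigu.check;   // hypothetical package, not part of the collector

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class TopicStartCheck {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "hadoop203:9092,hadoop204:9092,hadoop205:9092");
        props.put("group.id", "topic-start-check");   // throwaway group, only for this check
        props.put("auto.offset.reset", "earliest");   // read from the beginning of the topic
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("topic_start"));
            // poll once; with parseAsFlumeEvent=false each record value is the raw log line
            ConsumerRecords<String, String> records = consumer.poll(5000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.value());
            }
        }
    }
}

The stock kafka-console-consumer.sh --bootstrap-server hadoop203:9092 --topic topic_start does the same job from the shell.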

Write the custom Flume interceptors
1. The log-type (classification) interceptor
package com.atguigu.flume.interceptor;

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.interceptor.Interceptor;

import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class LogTypeInterceptor implements Interceptor {

    @Override
    public void initialize() {
    }

    // A JSON log line comes in; classify it as a start log or an event log
    // by putting "topic_start" / "topic_event" into the event header.
    @Override
    public Event intercept(Event event) {
        byte[] body = event.getBody();
        String log = new String(body, Charset.forName("UTF-8"));
        Map<String, String> headers = event.getHeaders();
        // add the topic marker to the header
        if (log.contains("start")) {
            headers.put("topic", "topic_start");
        } else {
            headers.put("topic", "topic_event");
        }
        return event;
    }

    @Override
    public List<Event> intercept(List<Event> events) {
        ArrayList<Event> interceptors = new ArrayList<>();
        for (Event event : events) {
            Event intercepted = intercept(event);
            interceptors.add(intercepted);
        }
        return interceptors;
    }

    @Override
    public void close() {
    }

    // Static builder class that Flume uses to instantiate the interceptor
    public static class Builder implements Interceptor.Builder {
        @Override
        public Interceptor build() {
            return new LogTypeInterceptor();
        }

        @Override
        public void configure(Context context) {
        }
    }
}
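A quick local check of the interceptor (not part of the project code): build two fake events with Flume's own EventBuilder, run them through intercept(), and look at the headers. The demo class name and the sample bodies are made up.

package com.atguigu.flume.interceptor;

import java.nio.charset.Charset;
import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.interceptor.Interceptor;

public class LogTypeInterceptorDemo {
    public static void main(String[] args) {
        Interceptor interceptor = new LogTypeInterceptor.Builder().build();

        // a fake start log (contains "start") and a fake event log
        Event startLog = EventBuilder.withBody("{\"en\":\"start\"}", Charset.forName("UTF-8"));
        Event eventLog = EventBuilder.withBody("1615146496588|{\"en\":\"ad\"}", Charset.forName("UTF-8"));

        System.out.println(interceptor.intercept(startLog).getHeaders()); // {topic=topic_start}
        System.out.println(interceptor.intercept(eventLog).getHeaders()); // {topic=topic_event}
    }
}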

2. The ETL (validation) interceptor
package com.atguigu.flume.interceptor;

import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.interceptor.Interceptor;

import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;

public class LogETLInterceptor implements Interceptor {

    @Override
    public void initialize() {
    }

    // ETL: drop malformed log lines
    @Override
    public Event intercept(Event event) {
        // 1. get the raw data
        byte[] body = event.getBody();
        String log = new String(body, Charset.forName("UTF-8"));
        // 2. validate start logs and event logs separately
        if (log.contains("start")) {
            if (LogUtils.valueIsStart(log)) {
                return event;
            }
        } else {
            if (LogUtils.valueIsEvent(log)) {
                return event;
            }
        }
        // invalid lines are dropped
        return null;
    }

    @Override
    public List<Event> intercept(List<Event> events) {
        // keep only the events that pass validation
        ArrayList<Event> interceptors = new ArrayList<>();
        for (Event event : events) {
            Event intercepted = intercept(event);
            if (intercepted != null) {
                interceptors.add(intercepted);
            }
        }
        return interceptors;
    }

    @Override
    public void close() {
    }

    // Static inner builder class so that Flume can instantiate this interceptor
    public static class Builder implements Interceptor.Builder {
        @Override
        public Interceptor build() {
            return new LogETLInterceptor();
        }

        @Override
        public void configure(Context context) {
        }
    }
}

The LogUtils validation helper:
package com.atguigu.flume.interceptor;

import org.apache.commons.lang.math.NumberUtils;

public class LogUtils {

    // Simple sanity check for start logs (plain JSON); no heavy parsing here
    public static Boolean valueIsStart(String log) {
        if (log == null) {
            return false;
        }
        // must look like a JSON object: starts with { and ends with }
        if (!log.trim().startsWith("{") || !log.trim().endsWith("}")) {
            return false;
        }
        return true;
    }

    // Event logs have the shape: timestamp|json
    public static Boolean valueIsEvent(String log) {
        if (log == null) {
            return false;
        }
        // 1. split on the | separator
        String[] logContents = log.split("\\|");
        // 2. the line must have exactly two parts
        if (logContents.length != 2) {
            return false;
        }
        // 3. the timestamp must be a 13-digit (millisecond) number
        if (logContents[0].length() != 13 || !NumberUtils.isNumber(logContents[0])) {
            return false;
        }
        // 4. the JSON part must start with { and end with }
        if (!logContents[1].trim().startsWith("{") || !logContents[1].trim().endsWith("}")) {
            return false;
        }
        return true;
    }
}
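A few throwaway cases that exercise the validation rules above (13-digit numeric timestamp, a single '|' separator, a body wrapped in braces); the demo class name and sample strings are made up:

package com.atguigu.flume.interceptor;

public class LogUtilsDemo {
    public static void main(String[] args) {
        // start logs: only the JSON shape is checked
        System.out.println(LogUtils.valueIsStart("{\"en\":\"start\"}"));            // true
        System.out.println(LogUtils.valueIsStart("not json"));                       // false

        // event logs: timestamp|json
        System.out.println(LogUtils.valueIsEvent("1615146496588|{\"en\":\"ad\"}"));  // true
        System.out.println(LogUtils.valueIsEvent("123|{\"en\":\"ad\"}"));            // false, timestamp is not 13 digits
        System.out.println(LogUtils.valueIsEvent("1615146496588|no braces"));        // false, body is not wrapped in {}
    }
}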

Package the interceptors and copy the jar into flume/lib, then start the agent:
bin/flume-ng agent --name a1 --conf-file conf/file-flume-kafka.conf &
Flume start/stop script:
#! /bin/bash

case $1 in
"start"){
        for i in hadoop203 hadoop204
        do
                echo " -------- starting collection flume on $i --------"
                ssh $i "nohup /opt/module/flume/bin/flume-ng agent --conf-file /opt/module/flume/conf/file-flume-kafka.conf --name a1 -Dflume.root.logger=INFO,LOGFILE >/dev/null 2>&1 &"
        done
};;
"stop"){
        for i in hadoop203 hadoop204
        do
                echo " -------- stopping collection flume on $i --------"
                ssh $i "ps -ef | grep file-flume-kafka | grep -v grep | awk '{print \$2}' | xargs kill"
        done
};;
esac
ps -ef | grep Application | grep -v grep
ps -ef | grep Application | grep -v grep | awk '{print $2}'
kill -9 $(ps -ef | grep Application | grep -v grep | awk '{print $2}')
ps -ef | grep Application | grep -v grep | awk '{print $2}' | xargs kill
Install Kafka on hadoop203, hadoop204, hadoop205
1. Set a distinct broker.id on each node
2. Kafka group start/stop script:
#! /bin/bash

case $1 in
"start"){
        for i in hadoop203 hadoop204 hadoop205
        do
                echo " -------- starting Kafka on $i --------"
                # export JMX_PORT so that KafkaManager can monitor the broker
                ssh $i "export JMX_PORT=9988 && /opt/module/kafka/bin/kafka-server-start.sh -daemon /opt/module/kafka/config/server.properties"
        done
};;
"stop"){
        for i in hadoop203 hadoop204 hadoop205
        do
                echo " -------- stopping Kafka on $i --------"
                ssh $i "/opt/module/kafka/bin/kafka-server-stop.sh stop"
        done
};;
esac

Sample start logs:
{"action":"1","ar":"MX","ba":"Huawei","detail":"325","en":"start","entry":"3","extend1":"","g":"O0WLLEJW@gmail.com","hw":"1080*1920","l":"en","la":"-51.5","ln":"-104.9","loading_time":"8","md":"Huawei-17","mid":"956","nw":"3G","open_ad_type":"1","os":"8.1.5","sr":"B","sv":"V2.8.1","t":"1615146496588","uid":"956","vc":"0","vn":"1.2.7"}
{"action":"1","ar":"MX","ba":"Huawei","detail":"","en":"start","entry":"3","extend1":"","g":"82636O12@gmail.com","hw":"750*1134","l":"es","la":"5.5","ln":"-49.6","loading_time":"12","md":"Huawei-18","mid":"957","nw":"3G","open_ad_type":"1","os":"8.0.0","sr":"S","sv":"V2.4.9","t":"1615143435851","uid":"957","vc":"10","vn":"1.2.1"}

Install kafka-manager-1.3.3.22
nohup bin/kafka-manager -Dhttp.port=7456 >/opt/module/kafka-manager-1.3.3.22/start.log 2>&1 &
http://hadoop203:7456/
Write a kafka-manager start script
Number of Kafka machines (rule of thumb) = 2 * (peak production speed * replicas / 100) + 1
For example, a stress test shows a single broker can write about 10 MB/s, the peak business data rate is 50 MB/s, and the replication factor is 2:
Number of Kafka machines = 2 * (50 * 2 / 100) + 1 = 3
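The same rule of thumb in code; the 50 MB/s peak and the replication factor of 2 are the numbers quoted above, and 100 is the constant from the formula:

public class KafkaSizing {
    public static void main(String[] args) {
        double peakProduceMBps = 50;   // measured peak producer traffic, MB/s
        int replicas = 2;              // topic replication factor
        // brokers = 2 * (peak speed * replicas / 100) + 1
        int brokers = (int) (2 * (peakProduceMBps * replicas / 100) + 1);
        System.out.println("Kafka brokers needed: " + brokers);  // 2 * (50 * 2 / 100) + 1 = 3
    }
}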
Part 4: Flume consumes the data from Kafka
Deployed on hadoop205.
topic_start =============> hdfs /origin_data/gmall/log/topic_start
topic_event =============> hdfs /origin_data/gmall/log/topic_event

The agent configuration, kafka-flume-hdfs.conf:

a1.sources=r1 r2
a1.channels=c1 c2
a1.sinks=k1 k2

## source1

a1.sources.r1.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.r1.batchSize = 5000
a1.sources.r1.batchDurationMillis = 2000
a1.sources.r1.kafka.bootstrap.servers = hadoop203:9092,hadoop204:9092,hadoop205:9092
a1.sources.r1.kafka.topics=topic_start

## source2

a1.sources.r2.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.r2.batchSize = 5000
a1.sources.r2.batchDurationMillis = 2000
a1.sources.r2.kafka.bootstrap.servers = hadoop203:9092,hadoop204:9092,hadoop205:9092
a1.sources.r2.kafka.topics=topic_event

## channel1

a1.channels.c1.type = file
a1.channels.c1.checkpointDir = /opt/module/flume/checkpoint/behavior1
a1.channels.c1.dataDirs = /opt/module/flume/data/behavior1/
a1.channels.c1.maxFileSize = 2146435071
a1.channels.c1.capacity = 1000000
a1.channels.c1.keep-alive = 6

## channel2

a1.channels.c2.type = file
a1.channels.c2.checkpointDir = /opt/module/flume/checkpoint/behavior2
a1.channels.c2.dataDirs = /opt/module/flume/data/behavior2/
a1.channels.c2.maxFileSize = 2146435071
a1.channels.c2.capacity = 1000000
a1.channels.c2.keep-alive = 6

## sink1

a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = /origin_data/gmall/log/topic_start/%Y-%m-%d
a1.sinks.k1.hdfs.filePrefix = logstart-
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 10
a1.sinks.k1.hdfs.roundUnit = second

##sink2
a1.sinks.k2.type = hdfs
a1.sinks.k2.hdfs.path = /origin_data/gmall/log/topic_event/%Y-%m-%d
a1.sinks.k2.hdfs.filePrefix = logevent-
a1.sinks.k2.hdfs.round = true
a1.sinks.k2.hdfs.roundValue = 10
a1.sinks.k2.hdfs.roundUnit = second

Avoid producing large numbers of small files:

a1.sinks.k1.hdfs.rollInterval = 10
a1.sinks.k1.hdfs.rollSize = 134217728
a1.sinks.k1.hdfs.rollCount = 0

a1.sinks.k2.hdfs.rollInterval = 10
a1.sinks.k2.hdfs.rollSize = 134217728
a1.sinks.k2.hdfs.rollCount = 0

Control the output file type (compressed stream with LZO here):

a1.sinks.k1.hdfs.fileType = CompressedStream
a1.sinks.k2.hdfs.fileType = CompressedStream

a1.sinks.k1.hdfs.codeC = lzop
a1.sinks.k2.hdfs.codeC = lzop

Wire the sources, channels and sinks together:

a1.sources.r1.channels = c1
a1.sinks.k1.channel= c1

a1.sources.r2.channels = c2
a1.sinks.k2.channel= c2
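To check the Kafka to Flume to HDFS leg end to end, one option is to push a hand-written record into topic_start and then look for a logstart- file under /origin_data/gmall/log/topic_start/ on HDFS. A minimal producer sketch against the kafka-clients API; the class, package and record body are made up:

package com.atguigu.check;   // hypothetical package, not part of the project

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class TopicStartProducerDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "hadoop203:9092,hadoop204:9092,hadoop205:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            String fakeStartLog = "{\"en\":\"start\",\"mid\":\"test\"}";  // made-up record, shape only
            producer.send(new ProducerRecord<>("topic_start", fakeStartLog));
            producer.flush();   // make sure the record is sent before the JVM exits
        }
    }
}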

Flume memory: the agent can run out of memory (OOM) with the default heap; in flume-env.sh set, for example,
export JAVA_OPTS="-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote"
(around 4 GB is recommended in production).
The default settings of the three parameters hdfs.rollInterval, hdfs.rollSize and hdfs.rollCount produce lots of small files on HDFS.
With hdfs.rollInterval=3600, hdfs.rollSize=134217728, hdfs.rollCount=0, hdfs.roundValue=10 and hdfs.roundUnit=second acting together, the effect is:
(1) a tmp file is rolled into a final file once it reaches 128 MB;
(2) a tmp file is rolled into a final file once its roll/round time window has elapsed.
Example: if the sink receives data at 2018-01-01 05:23, a tmp file such as
/atguigu/20180101/atguigu.201801010520.tmp
is created; even if its content never reaches 128 MB, it is rolled into a final file at 05:33.

Group start/stop script for the whole pipeline. Mind the delays: if ZK is stopped before Kafka has finished shutting down, the Kafka servers cannot shut down.

#!/bin/bash
case $1 in
"start"){
        echo "----------- starting the Hadoop cluster ----------"
        ssh hadoop203 "/opt/module/hadoop-2.7.2/sbin/start-dfs.sh"
        ssh hadoop204 "/opt/module/hadoop-2.7.2/sbin/start-yarn.sh"
        echo "----------- starting the ZK cluster ----------"
        /opt/module/jiaoben/zk.sh start
        sleep 5s
        echo "----------- starting flume-kafka ----------"
        /opt/module/jiaoben/f1.sh start
        echo "----------- starting the Kafka cluster ----------"
        /opt/module/jiaoben/kf.sh start
        sleep 5s
        echo "----------- starting Kafka Manager ----------"
        /opt/module/jiaoben/km.sh start
        echo "----------- starting flume-kafka-hdfs ----------"
        /opt/module/jiaoben/f2.sh start
};;
"stop"){
        echo "----------- stopping flume-kafka-hdfs ----------"
        /opt/module/jiaoben/f2.sh stop
        echo "----------- stopping Kafka Manager ----------"
        /opt/module/jiaoben/km.sh stop
        echo "----------- stopping the Kafka cluster ----------"
        /opt/module/jiaoben/kf.sh stop
        sleep 5s
        echo "----------- stopping flume-kafka ----------"
        /opt/module/jiaoben/f1.sh stop
        echo "----------- stopping the ZK cluster ----------"
        /opt/module/jiaoben/zk.sh stop
        echo "----------- stopping the Hadoop cluster ----------"
        ssh hadoop203 "/opt/module/hadoop-2.7.2/sbin/stop-dfs.sh"
        ssh hadoop204 "/opt/module/hadoop-2.7.2/sbin/stop-yarn.sh"
};;
esac
The cluster setup is complete.
1. Generate data. Sample event logs:

1549902643927|{"cm":{"ln":"-58.1","sv":"V2.1.6","os":"8.1.6","g":"56CP2931@gmail.com","mid":"982","nw":"4G","l":"en","vc":"16","hw":"640*1136","ar":"MX","uid":"982","t":"1549826442022","la":"-29.2","md":"HTC-16","vn":"1.0.3","ba":"HTC","sr":"N"},"ap":"app","et":[{"ett":"1549869285780","en":"newsdetail","kv":{"entry":"2","goodsid":"229","news_staytime":"2","loading_time":"2","action":"4","showtype":"0","category":"63","type1":""}},{"ett":"1549828760263","en":"notification","kv":{"ap_time":"1549876245798","action":"1","type":"1","content":""}},{"ett":"1549880333687","en":"active_foreground","kv":{"access":"","push_id":"1"}},{"ett":"1549836950363","en":"error","kv":{"errorDetail":"java.lang.NullPointerException\n at cn.lift.appIn.web.AbstractBaseController.validInbound(AbstractBaseController.java:72)\n at cn.lift.dfdf.web.AbstractBaseController.validInbound","errorBrief":"at cn.lift.dfdf.web.AbstractBaseController.validInbound(AbstractBaseController.java:72)"}},{"ett":"1549881120351","en":"praise","kv":{"target_id":4,"id":1,"type":2,"add_time":"1549807824976","userid":0}}]}
1549902643927|{"cm":{"ln":"-39.8","sv":"V2.0.7","os":"8.0.4","g":"QIK8A883@gmail.com","mid":"983","nw":"4G","l":"en","vc":"7","hw":"640*1136","ar":"MX","uid":"983","t":"1549837747050","la":"-44.3","md":"HTC-19","vn":"1.3.8","ba":"HTC","sr":"D"},"ap":"app","et":[{"ett":"1549889480459","en":"notification","kv":{"ap_time":"1549890212066","action":"2","type":"2","content":""}},{"ett":"1549847125655","en":"active_foreground","kv":{"access":"1","push_id":"1"}},{"ett":"1549831536809","en":"active_background","kv":{"active_source":"1"}},{"ett":"1549838876809","en":"favorites","kv":{"course_id":2,"id":0,"add_time":"1549896143594","userid":9}},{"ett":"1549860136010","en":"praise","kv":{"target_id":0,"id":6,"type":1,"add_time":"1549830544931","userid":7}}]}
1549902643927|{"cm":{"ln":"-41.7","sv":"V2.1.4","os":"8.2.3","g":"J972PF2V@gmail.com","mid":"986","nw":"WIFI","l":"pt","vc":"10","hw":"640*960","ar":"MX","uid":"986","t":"1549884866751","la":"-49.3","md":"Huawei-16","vn":"1.3.3","ba":"Huawei","sr":"Z"},"ap":"app","et":[{"ett":"1549811862044","en":"display","kv":{"goodsid":"229","action":"1","extend1":"1","place":"0","category":"8"}},{"ett":"1549836797775","en":"newsdetail","kv":{"entry":"2","goodsid":"230","news_staytime":"14","loading_time":"0","action":"1","showtype":"0","category":"70","type1":""}},{"ett":"1549802701039","en":"loading","kv":{"extend2":"","loading_time":"8","action":"2","extend1":"","type":"1","type1":"","loading_way":"1"}},{"ett":"1549877764403","en":"ad","kv":{"entry":"1","show_style":"2","action":"5","detail":"102","source":"2","behavior":"1","content":"1","newstype":"6"}},{"ett":"1549832892399","en":"notification","kv":{"ap_time":"1549851095037","action":"3","type":"1","content":""}},{"ett":"1549876317806","en":"active_foreground","kv":{"access":"","push_id":"1"}},{"ett":"1549831636696","en":"comment","kv":{"p_comment_id":3,"addtime":"1549868532256","praise_count":877,"other_id":5,"comment_id":9,"reply_count":195,"userid":6,"content":"急滁缨陨那唇违共珠媒歇蜜"}},{"ett":"1549844802304","en":"favorites","kv":{"course_id":6,"id":0,"add_time":"1549844718243","userid":7}}]}
1549902643927|{"cm":{"ln":"-57.3","sv":"V2.3.5","os":"8.2.0","g":"H2NN3E5B@gmail.com","mid":"987","nw":"3G","l":"es","vc":"2","hw":"640*960","ar":"MX","uid":"987","t":"1549809849246","la":"-15.2","md":"sumsung-3","vn":"1.2.0","ba":"Sumsung","sr":"H"},"ap":"app","et":[{"ett":"1549879055654","en":"newsdetail","kv":{"entry":"3","goodsid":"230","news_staytime":"5","loading_time":"6","action":"3","showtype":"0","category":"29","type1":""}},{"ett":"1549849060923","en":"loading","kv":{"extend2":"","loading_time":"4","action":"1","extend1":"","type":"1","type1":"542","loading_way":"2"}},{"ett":"1549855848751","en":"active_background","kv":{"active_source":"2"}}]}
1549902643928|{"cm":{"ln":"-55.2","sv":"V2.4.7","os":"8.1.7","g":"3FVU1Z77@gmail.com","mid":"988","nw":"WIFI","l":"es","vc":"4","hw":"640*960","ar":"MX","uid":"988","t":"1549871003066","la":"-30.1","md":"HTC-13","vn":"1.0.2","ba":"HTC","sr":"R"},"ap":"app","et":[{"ett":"1549902469864","en":"newsdetail","kv":{"entry":"1","goodsid":"230","news_staytime":"4","loading_time":"0","action":"4","showtype":"5","category":"53","type1":"542"}},{"ett":"1549831629665","en":"ad","kv":{"entry":"3","show_style":"1","action":"1","detail":"","source":"2","behavior":"1","content":"2","newstype":"8"}},{"ett":"1549902124987","en":"notification","kv":{"ap_time":"1549804940010","action":"3","type":"3","content":""}},{"ett":"1549819269944","en":"comment","kv":{"p_comment_id":1,"addtime":"1549867523907","praise_count":351,"other_id":1,"comment_id":5,"reply_count":47,"userid":6,"content":"壹斑忽"}}]}
1549902643928|{"cm":{"ln":"-51.0","sv":"V2.0.6","os":"8.1.8","g":"8F4E2BP2@gmail.com","mid":"992","nw":"WIFI","l":"es","vc":"17","hw":"640*1136","ar":"MX","uid":"992","t":"1549858734443","la":"18.6","md":"Huawei-11","vn":"1.3.7","ba":"Huawei","sr":"V"},"ap":"app","et":[{"ett":"1549806163266","en":"newsdetail","kv":{"entry":"2","goodsid":"230","news_staytime":"54","loading_time":"40","action":"3","showtype":"5","category":"67","type1":""}},{"ett":"1549901829982","en":"notification","kv":{"ap_time":"1549835153268","action":"2","type":"2","content":""}},{"ett":"1549891176437","en":"error","kv":{"errorDetail":"at cn.lift.dfdfdf.control.CommandUtil.getInfo(CommandUtil.java:67)\n at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n at java.lang.reflect.Method.invoke(Method.java:606)\n","errorBrief":"at cn.lift.appIn.control.CommandUtil.getInfo(CommandUtil.java:67)"}},{"ett":"1549837281167","en":"comment","kv":{"p_comment_id":2,"addtime":"1549891030899","praise_count":538,"other_id":1,"comment_id":0,"reply_count":63,"userid":3,"content":"勇爽吵眨译亭象铲肪滴"}},{"ett":"1549860365321","en":"praise","kv":{"target_id":4,"id":4,"type":3,"add_time":"1549875493366","userid":7}}]}
1549902643929|{"cm":{"ln":"-76.5","sv":"V2.5.0","os":"8.0.5","g":"LZQ9P642@gmail.com","mid":"993","nw":"4G","l":"en","vc":"7","hw":"750*1134","ar":"MX","uid":"993","t":"1549823412444","la":"-44.1","md":"sumsung-12","vn":"1.1.7","ba":"Sumsung","sr":"J"},"ap":"app","et":[{"ett":"1549872552042","en":"display","kv":{"goodsid":"230","action":"1","extend1":"2","place":"0","category":"96"}},{"ett":"1549901751459","en":"newsdetail","kv":{"entry":"1","goodsid":"231","news_staytime":"4","loading_time":"3","action":"2","showtype":"1","category":"60","type1":""}},{"ett":"1549875575406","en":"loading","kv":{"extend2":"","loading_time":"45","action":"2","extend1":"","type":"2","type1":"433","loading_way":"2"}},{"ett":"1549809217124","en":"notification","kv":{"ap_time":"1549823874466","action":"1","type":"2","content":""}},{"ett":"1549897001108","en":"active_background","kv":{"active_source":"3"}},{"ett":"1549828044324","en":"error","kv":{"errorDetail":"java.lang.NullPointerException\n at cn.lift.appIn.web.AbstractBaseController.validInbound(AbstractBaseController.java:72)\n at cn.lift.dfdf.web.AbstractBaseController.validInbound","errorBrief":"at cn.lift.dfdf.web.AbstractBaseController.validInbound(AbstractBaseController.java:72)"}}]}

Sample start logs:
{"action":"1","ar":"MX","ba":"HTC","detail":"433","en":"start","entry":"3","extend1":"","g":"JB7Z784Q@gmail.com","hw":"640*1136","l":"es","la":"-14.8","ln":"-103.2","loading_time":"1","md":"HTC-0","mid":"979","nw":"4G","open_ad_type":"2","os":"8.0.2","sr":"I","sv":"V2.0.4","t":"1549810118868","uid":"979","vc":"9","vn":"1.2.3"}
{"action":"1","ar":"MX","ba":"Huawei","detail":"","en":"start","entry":"5","extend1":"","g":"01337MK7@gmail.com","hw":"640*1136","l":"en","la":"-23.3","ln":"-78.5","loading_time":"15","md":"Huawei-16","mid":"980","nw":"3G","open_ad_type":"1","os":"8.0.5","sr":"X","sv":"V2.8.0","t":"1549887629407","uid":"980","vc":"10","vn":"1.3.3"}
{"action":"1","ar":"MX","ba":"Huawei","detail":"","en":"start","entry":"4","extend1":"","g":"O3U34IBG@gmail.com","hw":"640*960","l":"pt","la":"-11.9","ln":"-51.8","loading_time":"19","md":"Huawei-1","mid":"984","nw":"3G","open_ad_type":"2","os":"8.0.1","sr":"W","sv":"V2.7.5","t":"1549871898939","uid":"984","vc":"4","vn":"1.2.1"}
{"action":"1","ar":"MX","ba":"HTC","detail":"201","en":"start","entry":"2","extend1":"","g":"4288DG7Z@gmail.com","hw":"640*960","l":"en","la":"-6.9","ln":"-60.1","loading_time":"0","md":"HTC-8","mid":"985","nw":"4G","open_ad_type":"2","os":"8.0.0","sr":"B","sv":"V2.9.5","t":"1549871730956","uid":"985","vc":"14","vn":"1.0.9"}
{"action":"1","ar":"MX","ba":"Huawei","detail":"201","en":"start","entry":"4","extend1":"","g":"ER773960@gmail.com","hw":"640*960","l":"pt","la":"-1.2","ln":"-60.1","loading_time":"12","md":"Huawei-11","mid":"989","nw":"WIFI","open_ad_type":"1","os":"8.2.8","sr":"O","sv":"V2.5.7","t":"1549845824363","uid":"989","vc":"0","vn":"1.0.7"}
