# Environment prep: stop the firewall and start MySQL (the Hive metastore backend).
sudo service iptables stop
sudo service mysqld start
# List background jobs with their PIDs, then kill the stale one.
jobs -l
kill -9 4500   # NOTE(review): PID is session-specific — substitute the actual PID shown by `jobs -l`
# Typo fixed: "haddop" -> "hadoop".
hadoop fs -ls /
# "nohup" comes from PATH, it is not a script in the current directory — no "./" prefix.
nohup ./hive --service metastore &
./hive
-- Hive demo: create a table, load a local file, insert one row, then drop it.
-- ("--" is the valid SQL/HiveQL line comment; "//" is not.)
create table table_0406 (
id int,
name string,
age int)
row format delimited fields terminated by ','
stored as textfile;
-- Load a local file into the table (moves/copies it into the warehouse dir).
load data local inpath '/home/hadoop/hadoop_home/apache-hive-1.2.1-bin/test.txt' into table table_0406;
insert into table_0406(id, name,age) values(200,'lisi',20);
-- Shell command (run OUTSIDE the Hive CLI); fixed broken flag "- put" -> "-put".
-- NOTE(review): target dir says table_0506 but the table above is table_0406 — confirm intended path.
hadoop fs -put test.log /hive/warehousedir/table_0506/
-- drop table
drop table table_0406;
# Spark: restart the cluster daemons, then relaunch the Hive metastore and CLI.
# ("#" is the shell comment marker; "//" is not.)
stop-all.sh
start-all.sh
cd hadoop-home/bin
# "nohup" comes from PATH, it is not a script in the current directory — no "./" prefix.
nohup ./hive --service metastore &
./hive
# Kafka distributed setup (ActiveMQ is the comparable MQ alternative).
# Prompt prefixes "[hadoop@teradata1 bin]$" stripped so the lines are runnable;
# pasted command output preserved below each command as "# expected output" comments.
# Create a topic with a single partition and a single replica.
./kafka-topics.sh --create --zookeeper teradata1:2181 --replication-factor 1 --partitions 1 --topic kafka_test
# List topics; fixed "-zookeeper" -> "--zookeeper" (long options need a double dash).
./kafka-topics.sh --list --zookeeper teradata1:2181
# expected output:
#   kafka_test
# Describe the topic; fixed "-zookeeper" -> "--zookeeper" and added the --topic
# argument the pasted output corresponds to.
./kafka-topics.sh --describe --zookeeper teradata1:2181 --topic kafka_test
# expected output:
#   Topic:kafka_test PartitionCount:1 ReplicationFactor:1 Configs:
#   Topic: kafka_test Partition: 0 Leader: 0 Replicas: 0 Isr: 0
# Console producer (talks to the broker) and consumer (via zookeeper, pre-0.9 style);
# run these in separate terminals.
./kafka-console-producer.sh --broker-list teradata1:9092 --topic kafka_test
./kafka-console-consumer.sh --zookeeper teradata1:2181 --topic kafka_test --from-beginning
# On the second node (Centos2): run the Spark Streaming Kafka word-count example.
cd /spark_home/spark-1.6.0...../
bin/run-example org.apache.spark.examples.streaming.KafkaWordCount teradata1 my-consumer-group kafka_test 1