# Extract the Sqoop distribution. Single step: -z gunzips on the fly,
# which also leaves the original .tar.gz intact (the old two-step
# gunzip + tar deleted the .gz and needed an intermediate .tar).
tar -xzvf sqoop-1.4.7.bin__hadoop-2.6.0.tar.gz
# Normalize the awkward double-underscore directory name.
mv sqoop-1.4.7.bin__hadoop-2.6.0 sqoop-1.4.7.bin-hadoop-2.6.0
## Installation
Sqoop ships as one binary package that incorporates two separate parts — a client and a server.
Server: install the server on a single node in your cluster. That node then serves as the entry point for all Sqoop clients.
Client: clients can be installed on any number of machines.
(NOTE: this client/server architecture describes Sqoop2; Sqoop 1.4.7 — installed above — is the classic client-only Sqoop1. Confirm which documentation applies.)
### Server installation
Copy the Sqoop artifact to the machine where you want to run the Sqoop server. The Sqoop server acts as a Hadoop client, so the Hadoop binaries and configuration must be available on that machine.
# Run on the master node: configure Hadoop proxy-user settings so that
# user 'root' may impersonate other users (Sqoop jobs submitted as root
# rely on this). '*' allows any host / any group — NOTE(review): wide
# open; restrict to specific hosts/groups in production.
# > master
cd /opt/bigdata/hadoop-2.7.7/etc/hadoop
# Add the following two properties inside <configuration> in core-site.xml:
vi core-site.xml
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
# Enter the renamed Sqoop directory and put the MySQL JDBC driver on
# Sqoop's classpath. The jar is assumed to be in the current working
# directory — TODO confirm where it was downloaded to.
cd sqoop-1.4.7.bin-hadoop-2.6.0
cp mysql-connector-java-5.1.26.jar lib
# Append the following environment variables to /etc/profile:
vi /etc/profile
export HADOOP_HOME=/opt/bigdata/hadoop-2.7.7
export SQOOP_HOME=/opt/bigdata/sqoop-1.4.7.bin-hadoop-2.6.0
export PATH=$SQOOP_HOME/bin:$PATH
# Reload the profile so the new variables take effect in this shell.
source /etc/profile
# Create Sqoop's environment file from the shipped template and point it
# at the local Hadoop / Hive / ZooKeeper installations.
cd conf
cp sqoop-env-template.sh sqoop-env.sh
# Add (or uncomment) the following lines in sqoop-env.sh:
vi sqoop-env.sh
export HADOOP_COMMON_HOME=/opt/bigdata/hadoop-2.7.7
export HIVE_HOME=/opt/bigdata/apache-hive-3.1.1-bin
export ZOOCFGDIR=/opt/zookeeper-3.4.13/conf
# Smoke test: import MySQL table 'kafka_sink' from the 'test' database
# into HDFS. The allow_text_splitter property is required because
# --split-by uses 'topic', a text column.
# Security: use -P to prompt for the password interactively instead of
# the previous plaintext "--password 123456", which leaked the secret
# via shell history and `ps`. (--password-file is the non-interactive
# alternative.)
#test
- hdfs
sqoop import "-Dorg.apache.sqoop.splitter.allow_text_splitter=true" --connect jdbc:mysql://master/test --username root -P --table kafka_sink --split-by topic --target-dir /tmp/sqoop/kafka_sink/20190927
- hive
TODO: Hive import example to be added.