miniconda部署
- 安装包(北外 BFSU 镜像，由清华 TUNA 协会维护) https://mirrors.bfsu.edu.cn/anaconda/miniconda/
sh /software/package/Miniconda3-py37_4.9.2-Linux-x86_64.sh
==========稍等一下下=============
- 断开 SSH 连接后重新连接，命令提示符前面会出现
(base)
- 切换国内源,创建
~/.condarc
vim ~/.condarc
==============空文件、新增如下内容=================
# ~/.condarc — route conda to the TUNA (Tsinghua) mirror.
channels:
  - defaults
show_channel_urls: true
default_channels:
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/r
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/msys2
# Entries must be nested (indented) under custom_channels; the original flat
# layout makes conda treat each channel name as an unknown top-level key.
custom_channels:
  conda-forge: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud
  msys2: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud
  bioconda: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud
  menpo: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud
  pytorch: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud
  simpleitk: https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud
# Install Jupyter and the Python data/Spark stack from the TUNA PyPI mirror.
# pyspark is pinned to 2.4.5 to match the Spark 2.4.5 distribution installed below.
pip install jupyter pandas scikit-learn pyspark==2.4.5 -i https://pypi.tuna.tsinghua.edu.cn/simple
# Generate the default notebook config, then set a login password (stored hashed).
jupyter notebook --generate-config
jupyter notebook password
# Edit the generated config; the options to uncomment/change are listed below.
vim /root/.jupyter/jupyter_notebook_config.py
==============查找 yy复制 p粘贴 i写入 去掉'#'并修改即可=================
# Settings for /root/.jupyter/jupyter_notebook_config.py (uncomment and edit in place):
c.NotebookApp.allow_remote_access = True  # accept connections from other hosts
c.NotebookApp.open_browser = False        # headless server: do not try to launch a browser
c.NotebookApp.ip = '*'                    # listen on all interfaces; NOTE(review): '0.0.0.0' is the commonly documented form — confirm this Jupyter version accepts '*'
c.NotebookApp.allow_root = True           # the notebook server is run as root in this setup
c.NotebookApp.port = 8888                 # HTTP port to serve on
=====================================
spark部署
# Remove the distro-packaged OpenJDK RPMs so they cannot shadow the JDK tarball
# installed below (--nodeps skips RPM dependency checks).
rpm -e --nodeps java-1.8.0-openjdk-1.8.0.262.b10-1.el7.x86_64
rpm -e --nodeps java-1.7.0-openjdk-1.7.0.261-2.6.22.2.el7_8.x86_64
rpm -e --nodeps java-1.7.0-openjdk-headless-1.7.0.261-2.6.22.2.el7_8.x86_64
rpm -e --nodeps java-1.8.0-openjdk-headless-1.8.0.262.b10-1.el7.x86_64
# Unpack JDK 8u221, Hadoop 2.7.7 and Spark 2.4.5 under /software/server.
mkdir /software/server/java
tar -zxvf /software/package/jdk-8u221-linux-x64.tar.gz -C /software/server/java
mkdir /software/server/hadoop
tar -zxvf /software/package/hadoop-2.7.7.tar.gz -C /software/server/hadoop
mkdir /software/server/spark
tar -zxvf /software/package/spark-2.4.5-bin-hadoop2.7.tgz -C /software/server/spark
# Append the environment variables shown below to the end of /etc/profile.
vi /etc/profile
==============末尾添加如下内容=================
# Installation roots — must match the tar -C target directories used above.
export JAVA_HOME=/software/server/java/jdk1.8.0_221
export HADOOP_HOME=/software/server/hadoop/hadoop-2.7.7
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export SPARK_HOME=/software/server/spark/spark-2.4.5-bin-hadoop2.7
# Python interpreter used by PySpark.
# NOTE(review): assumes Miniconda was installed to /software/server/miniconda3 —
# confirm the prefix chosen when the installer was run above.
export PYSPARK_PYTHON=/software/server/miniconda3/bin/python3.7
# $SPARK_HOME/bin is added so pyspark/spark-submit work from any directory
# (the original PATH omitted it, which is why the notes cd into Spark's bin dir).
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$SPARK_HOME/bin:$JAVA_HOME/bin
==========================
# Reload the profile so the new environment variables take effect in this shell.
source /etc/profile
# Launch the PySpark shell in local mode using all available cores.
cd /software/server/spark/spark-2.4.5-bin-hadoop2.7/bin
./pyspark --master local[*]
=============等待、直到出现">>>"、然后输入如下内容=================
sc.parallelize([1,2,3,4,5]).map(lambda x:x+10).collect()
# Create a working directory with a sample text file, then start Jupyter there
# (the Python snippet below is run inside a notebook).
mkdir ~/test-2023-2
cd ~/test-2023-2
echo "hello world" > word.txt
jupyter notebook
# Run inside a Jupyter notebook started from ~/test-2023-2 (so ./word.txt resolves).
from pyspark import SparkContext, SparkConf
# Local-mode Spark context using every available core.
conf = SparkConf().setMaster("local[*]").setAppName("test01")
sc = SparkContext(conf=conf)
sc.parallelize([1,2,3,4,5]).map(lambda x:x+10).collect()  # expected: [11, 12, 13, 14, 15]
# Splits each line of word.txt on spaces; with "hello world" this yields ['hello', 'world']
sc.textFile("./word.txt").flatMap(lambda x: x.split(" ")).collect()