Arch安装
Win设置
关闭安全启动
- 重启进入固件设置,关闭安全启动(一般按`ESC`会出现选择界面)
关闭休眠与快速启动
# 以管理员启动cmd
powercfg -h off
硬件时间设置
在 Windows 中设置:将 BIOS 硬件时间视为 UTC 时间(避免双系统时间不一致)
reg add "HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\TimeZoneInformation" /v RealTimeIsUniversal /d 1 /t REG_DWORD /f
安装系统
改tty
字体
# 1920x1080
setfont ter-132b
验证引导模式
cat /sys/firmware/efi/fw_platform_size
# 64/32为UEFI, 否则为BIOS
连接互联网
# 关闭reflector服务
systemctl stop reflector
# 启动audit审计服务(可选,遇到一直刷audit)
systemctl start auditd
# 查看网络状态
ip link
# 连接无线网络
iwctl
station wlan0 connect CMCC-AH6G # wlan0是设备名 CMCC-AH6G是 wifi名
quit
# 测试
ping -c 4 www.baidu.com
更新系统时间
# 联网之后会自动同步时间
timedatectl #查看是否正确 UTC时间
分区与挂载
disk | 挂载点 | 大小 |
---|---|---|
/dev/nvme0n1p2 | /boot/efi | 100M |
/dev/nvme0n1p5 | / | 40G |
/dev/sda3 | /opt | 100G |
/dev/sda4 | /usr/local | 50G |
/dev/sda5 | /var | 50G |
/dev/sda6 | swap | 16G |
/dev/sda7 | /home | 剩余全部 |
lsblk #查看磁盘信息
# 分区
cfdisk /dev/nvme0n1
cfdisk /dev/sda
# 格式化(双系统不能格式化EFI分区)
mkfs.ext4 /dev/nvme0n1p5
# mkfs.fat -F 32 /dev/nvme0n1p2
mkfs.ext4 /dev/sda3
mkfs.ext4 /dev/sda4
mkfs.ext4 /dev/sda5
mkswap /dev/sda6
mkfs.ext4 /dev/sda7
# 挂载(先挂载根分区)
mount --mkdir /dev/nvme0n1p5 /mnt
mount --mkdir /dev/nvme0n1p2 /mnt/boot/efi
mount --mkdir /dev/sda3 /mnt/opt
mount --mkdir /dev/sda4 /mnt/usr/local
mount --mkdir /dev/sda5 /mnt/var
mount --mkdir /dev/sda7 /mnt/home
# 启动交换空间
swapon /dev/sda6
lsblk # 最后查看是否有误
选择镜像站
# 选择镜像站 选择ustc源移至顶部
vim /etc/pacman.d/mirrorlist
输入 /ustc 后按 Enter 键 #查找
dd # 剪切
gg # 返回顶部
p # 粘贴
35dd #剪切35行
安装基础包
# 安装必需软件包(CPU微码:intel-ucode或amd-ucode)
pacstrap -K /mnt base linux linux-firmware intel-ucode base-devel linux-headers
生成fstab
# 生成fstab
genfstab -U /mnt >> /mnt/etc/fstab
cat /mnt/etc/fstab
配置系统
进入新系统
# 以 arch-chroot模式 进入系统
arch-chroot /mnt
安装补充包
# vim
pacman -S vim
# man工具
pacman -S man-db man-pages texinfo
# 文件系统支持fat32 ntfs (按需)
pacman -S dosfstools ntfs-3g
设置时区,同步硬件时钟
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
hwclock --systohc
区域与本地化设置
# 区域语言支持(英+中)
vim /etc/locale.gen
en_US.UTF-8 UTF-8
zh_CN.UTF-8 UTF-8
locale-gen
# 本地化设置
echo 'LANG=en_US.UTF-8' > /etc/locale.conf
用户配置
# 设置root密码
passwd
# 创建用户ruoli
useradd -m -G wheel ruoli
passwd ruoli
# 给予sudo无密码权限
EDITOR=vim visudo
%wheel ALL=(ALL:ALL) NOPASSWD: ALL #取消该行注释
网络配置
# 主机配置
echo 'ruoli-arch' > /etc/hostname
vim /etc/hosts
127.0.0.1 localhost
::1 localhost
127.0.0.1 ruoli-arch.localdomain ruoli-arch
# 网络管理器
pacman -S networkmanager
systemctl enable NetworkManager
引导配置
# os-prober可发现其他系统
pacman -S grub efibootmgr os-prober
# efi-directory是EFI分区挂载点
grub-install --target=x86_64-efi --efi-directory=/boot/efi --bootloader-id=ArchLinux
# 修改grub配置
vim /etc/default/grub
GRUB_DISABLE_OS_PROBER=false
# 生成grub.cfg (可能识别不到windows,之后修复)
grub-mkconfig -o /boot/grub/grub.cfg
重启
#退出chroot模式
exit
# 卸载分区
umount -R /mnt
swapoff /dev/sda6 # 关闭交换空间
reboot
#移除安装介质,测试 ruoli 和 root 用户能否登录
安装之后
设置tty字体
setfont LatGrkCyr-12x22
持久化tty配置
vim /etc/vconsole.conf
KEYMAP=us
FONT=LatGrkCyr-12x22
修补grub.cfg
grub-mkconfig -o /boot/grub/grub.cfg
组件与服务
# 联网(终端UI界面)
nmtui
# 添加32位库
vim /etc/pacman.conf
[multilib]
Include = /etc/pacman.d/mirrorlist
pacman -Syu
# 字体
pacman -S noto-fonts-cjk wqy-microhei
# 常用工具
pacman -S openssh git wget curl tree
systemctl enable sshd
安装显卡驱动
参考wiki:Intel、Nvidia、NVIDIA_Optimus 版本查询:Intel显卡支持、Nvidia显卡型号
# 查询型号
lscpu | grep -E CPU # 处理器
lspci | grep -E VGA # 显卡
# 本机配置
- CPU 8代i5
Intel(R) Core(TM) i5-8300H CPU @ 2.30GHz
- GPU
Intel Corporation CoffeeLake-H GT2 [UHD Graphics 630]
NVIDIA Corporation GP107M [GeForce GTX 1050 Ti Mobile] (rev a1)
# 安装(不全,详情见Arch优化)
# Intel集显
pacman -S mesa lib32-mesa
# Nvidia独显
pacman -S nvidia nvidia-utils lib32-nvidia-utils
驱动配置
# Intel配置(可选)
vim /etc/mkinitcpio.conf
MODULES=(... i915 ...) # 添加模块
mkinitcpio -P
# Nvidia配置
#1 移除kms钩子
vim /etc/mkinitcpio.conf
HOOKS=(... kms ...) # 去掉kms
mkinitcpio -P
#2 启用DRM KMS
vim /etc/default/grub
GRUB_CMDLINE_LINUX_DEFAULT="... nvidia_drm.modeset=1" # 添加参数
grub-mkconfig -o /boot/grub/grub.cfg
#3 kms早启动
vim /etc/mkinitcpio.conf
MODULES=(... nvidia nvidia_modeset nvidia_uvm nvidia_drm ...) # 添加模块
mkinitcpio -P
桌面环境(自选)
# 显示服务 + 显示管理器 + 桌面环境/桌面管理器
pacman -S xorg-server
pacman -S wayland xorg-xwayland
# sddm + kde (自用,推荐)
pacman -S sddm
pacman -S plasma-meta konsole dolphin # 元软件包(荐)/软件包组
systemctl enable --now sddm
# lightdm + cinnamon
pacman -S lightdm lightdm-gtk-greeter
pacman -S cinnamon gnome-terminal
systemctl enable --now lightdm
基础软件包(可选)
- 基于KDE(其他环境慎用)
sudo pacman -S firefox-developer-edition # Firefox开发者版
sudo pacman -S firefox-developer-edition-i18n-zh-cn # 中文支持
sudo pacman -S kate #文件编辑器*
sudo pacman -S gwenview #图片浏览工具*
sudo pacman -S okular #通用文档查看器*
sudo pacman -S ark #压缩管理工具*
sudo pacman -S ksystemlog #系统日志查看*
sudo pacman -S partitionmanager #分区管理器*
sudo pacman -S kdeconnect #设备连接工具*
sudo pacman -S kcalc #计算器* kcalc
sudo pacman -S spectacle #屏幕截图工具* spectacle
sudo pacman -S vlc #本地媒体播放器*
sudo pacman -S elisa #本地音乐播放器*
sudo pacman -S gimp #图像编辑*
sudo pacman -S kdenlive #视频剪辑软件*
sudo pacman -S timeshift #系统备份*
sudo pacman -S neofetch #系统信息* neofetch
sudo pacman -S htop #进程查看器* htop
sudo pacman -S yakuake #下拉式终端*
sudo pacman -S unzip unrar #压缩支持*
sudo pacman -S flameshot #火焰截图*
Arch优化
查看电脑设备
# CPU设备
lscpu | grep -E CPU
# PCI设备
# 显卡
lspci | grep -E VGA
# 声卡
lspci | grep -E Audio
# RAM
lspci | grep -E RAM
# 网卡
lspci | grep -E Network
软件包管理
Pacman配置
# 颜色输出 版本对比 并行下载
sudo vim /etc/pacman.conf
Color
VerbosePkgLists
ParallelDownloads = 3
添加archlinuxcn库
sudo vim /etc/pacman.conf
[archlinuxcn]
Server = https://mirrors.ustc.edu.cn/archlinuxcn/$arch
sudo pacman -Syu
sudo pacman-key --lsign-key "farseerfc@archlinux.org"
sudo pacman -S archlinuxcn-keyring
安装AUR工具
sudo pacman -S yay
sudo pacman -S paru # 建议
sudo vim /etc/paru.conf
BottomUp
Vim优化
vim
sudo pacman -S vim
vim ~/.vimrc
set nocompatible " 禁用与vi的兼容性
filetype on " 探测文件类型
filetype plugin on " 按照文件类型加载插件
filetype indent on " 按照文件类型设置缩进
syntax on " 打开语法高亮
set mouse=a " 鼠标支持
set number " 显示行号
set relativenumber " 显示相对行号
set cursorline " 高亮当前行
"set cursorcolumn " 高亮当前列
set shiftwidth=4 " 设置位移宽度为4
set tabstop=4 " 设置缩进宽度为4
set expandtab " 将缩进替换为空格
set nobackup " 不生成backup文件
set scrolloff=10 " 设置滚动时始终显示上下10行
set nowrap " 禁止折行
set incsearch " 增量式搜索
set ignorecase " 搜索时大小写不敏感
set smartcase " 搜索时对首字母大小写敏感
set showcmd " 显示键入的命令前缀
set showmode " 显示当前模式(插入、可视等)
set showmatch " 在搜索过程中显示匹配的单词
set hlsearch " 高亮搜索结果
set history=1000 " 设置命令历史记录为1000
set wildmenu " 设置tab补全
set wildmode=list:longest " 使tab补全类似于Bash
set encoding=utf-8 " 设置编码方式为UTF-8
neovim
sudo pacman -S neovim
# nvim工作空间(在此自定义配置,或者使用已有方案)
mkdir -p ~/.config/nvim
lazyvim方案
官网 LazyVim
# 安装Nerd Font字体, 并设置终端字体(不配的话,图标不全)
ark JetBrainsMono.zip #解压
sudo mv JetBrainsMono /usr/share/fonts/
# 备份旧配置(建议)
mv ~/.config/nvim ~/.config/nvim.bak
git clone https://github.com/LazyVim/starter ~/.config/nvim
cd ~/.config/nvim
nvim
#首次进入会自动下载,等待其完成
#后续依旧在这里 更新等操作
nvim
Zsh优化
别名alias
# 写入 .bashrc | .zshrc | ...
vim ~/.zshrc
alias pacman='sudo pacman'
alias ll='ls -lh'
alias vim='sudo env HOME=$HOME vim'
alias nvim='sudo env HOME=$HOME nvim'
source ~/.zshrc
安装zsh
sudo pacman -S zsh
# 更改默认Shell(重启生效)
chsh -s /usr/bin/zsh
reboot
OhMyZsh
官网 OhMyZsh
cd ~/Documents/github #留个备份
git clone https://github.com/ohmyzsh/ohmyzsh.git
# 执行安装脚本
~/Documents/github/ohmyzsh/tools/install.sh
tree -L 1 .oh-my-zsh # ohmyzsh工作空间
zsh主题
- ohmyzsh自带的主题足够丰富,当然也可以去下载
vim ~/.zshrc
# 推荐:frisk candy
ZSH_THEME="frisk"
source ~/.zshrc
zsh插件
git clone https://github.com/zsh-users/zsh-autosuggestions.git ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting
vim ~/.zshrc
# 注:语法高亮插件放在最后
plugins=(
git
zsh-autosuggestions
zsh-syntax-highlighting
)
source ~/.zshrc
显卡驱动
Intel驱动
Archwiki:Intel_graphics 型号查询:Intel显卡支持
Generation | Chipset | OpenGL | OpenGL ES | OpenCL | VAAPI | Vulkan | VIDEO_CARDS |
---|---|---|---|---|---|---|---|
Gen 8 | Broadwell, Cherryview | 4.6 | 3.2 | 3.0 | Yes | 1.1 | intel |
Intel(R) Core(TM) i5-8300H CPU @ 2.30GHz
Intel Corporation CoffeeLake-H GT2 [UHD Graphics 630]
# Intel(任何Intel显卡包括核显都不建议安装xf86-video-intel)
sudo pacman -S mesa lib32-mesa
sudo pacman -S vulkan-intel lib32-vulkan-intel
sudo pacman -S intel-compute-runtime
#sudo pacman -S intel-media-driver # 9代开始有视频硬解支持
# Intel集显配置
- kms早启动
sudo vim /etc/mkinitcpio.conf
MODULES=(... i915 ...) # 添加i915
sudo mkinitcpio -P
Nvidia驱动
Archwiki:NVIDIA 型号查询:Nvidia显卡型号
Code name | Official Name | Nvidia 3D object codename |
---|---|---|
NV137 (GP107) | GeForce GTX (1050, 1050 Ti) | Pascal |
NVIDIA Corporation GP107M [GeForce GTX 1050 Ti Mobile] (rev a1)
# Nvidia闭源驱动
sudo pacman -S nvidia
sudo pacman -S nvidia-utils lib32-nvidia-utils
sudo pacman -S opencl-nvidia lib32-opencl-nvidia
# Nvidia独显配置
- 1.移除kms (防止initramfs包含nouveau模块,以确保内核在早启动阶段不会加载它)
sudo vim /etc/mkinitcpio.conf
HOOKS=(... kms ...) # 去掉kms
sudo mkinitcpio -P
- 2.启用DRM KMS
sudo vim /etc/default/grub
GRUB_CMDLINE_LINUX_DEFAULT="... nvidia_drm.modeset=1"
sudo grub-mkconfig -o /boot/grub/grub.cfg
- 3.kms早启动(建议,因为DRM KMS只包含基础功能)
sudo vim /etc/mkinitcpio.conf
MODULES=(... nvidia nvidia_modeset nvidia_uvm nvidia_drm ...) # 添加模块
sudo mkinitcpio -P
- 4.pacman钩子(防止更新nvidia驱动时,忘记更新initramfs)
sudo mkdir /etc/pacman.d/hooks
sudo vim /etc/pacman.d/hooks/nvidia.hook
nvidia.hook
[Trigger]
Operation=Install
Operation=Upgrade
Operation=Remove
Type=Package
Target=nvidia
Target=linux
[Action]
Description=Update Nvidia module in initcpio
Depends=mkinitcpio
When=PostTransaction
NeedsTargets
Exec=/bin/sh -c 'while read -r trg; do case $trg in linux*) exit 0; esac; done; /usr/bin/mkinitcpio -P'
双显卡方案
使用具有 Intel 和 NVIDIA 双显卡的笔记本, 请参考 NVIDIA Optimus 页面
# prime方案 使用方式:prime-run 程序 &
sudo pacman -S nvidia-prime
# 系统默认使用 集显
glxinfo | grep "OpenGL renderer"
OpenGL renderer string: Mesa Intel(R) UHD Graphics 630 (CFL GT2)
# 测试 使用独显
prime-run glxinfo | grep "OpenGL renderer"
OpenGL renderer string: NVIDIA GeForce GTX 1050 Ti/PCIe/SSE2
# 使用nvidia 运行firefox(& 表示后台运行)
prime-run firefox-developer-edition &
# 打印状态
nvidia-smi
# 持续监视
watch -n 1 nvidia-smi
prime-run google-chrome-stable &
prime-run steam &
系统中文化
本地Locale
# 安装locale
sudo vim /etc/locale.gen
en_US.UTF-8 UTF-8
zh_CN.UTF-8 UTF-8
locale-gen
# 1.全局配置英文locale
sudo vim /etc/locale.conf
LANG=en_US.UTF-8
# 2.图形界面单独配置中文locale (KDE可直接设置:区域和语言)
#例:在~/.bashrc或~/.xinitrc或~/.xprofile (终端/x窗口系统/GDM等显示管理器)
export LANG=zh_CN.UTF-8
export LANGUAGE=zh_CN:en_US
中文字体
sudo pacman -S noto-fonts-cjk wqy-microhei
sudo pacman -S adobe-source-han-sans-cn-fonts adobe-source-han-serif-cn-fonts
中文输入法
fcitx5 框架(必须)
# fcitx5基础框架,还需安装 引擎 + 词库
sudo pacman -S fcitx5-im
Pinyin(建议)
sudo pacman -S fcitx5-chinese-addons fcitx5-pinyin-zhwiki
- 1.环境变量配置 `GTK_IM_MODULE`、`QT_IM_MODULE`、`XMODIFIERS`
- 2.系统设置 > 虚拟键盘, 选择 `Fcitx5`(Wayland需配置该项)
- 3.系统设置 > 输入法, 添加输入法-拼音
# fcitx5环境变量
sudo vim /etc/environment
GTK_IM_MODULE=fcitx
QT_IM_MODULE=fcitx
XMODIFIERS=@im=fcitx
# fcitx5主题与外观
sudo pacman -S fcitx5-breeze
- Fcitx5设置 > 配置附加组件 > 经典用户界面 > 主题
# fcitx5诊断(输入法有问题执行该命令)
fcitx5-diagnose
**`kcmshell5` 未找到.**
**`fcitx5-qt4-immodule-probing` 未找到.**
**无法找到 Qt4 的 fcitx5 输入法模块。**
Rime(自用)
- 需要自定义配置,或者下载好用的方案
sudo pacman -S fcitx5-rime rime-pinyin-zhwiki
- 系统设置 > 输入法, 添加输入法-中州韵
# 雾凇拼音方案(F4快速切换方案)
sudo pacman -S rime-ice-git
# rime全局配置
vim ~/.local/share/fcitx5/rime/default.custom.yaml
# 方案专有配置: <方案标识>.custom.yaml
vim ~/.local/share/fcitx5/rime/rime_ice.custom.yaml
default.custom.yaml
patch:
# 仅使用「雾凇拼音」的默认配置,配置此行即可
__include: rime_ice_suggestion:/
# 自定义配置(针对对应方案定制条目,请使用<recipe>.custom.yaml 中配置)
__patch:
key_binder/bindings/+:
# 开启逗号句号翻页
- { when: paging, accept: comma, send: Page_Up }
- { when: has_menu, accept: period, send: Page_Down }
# 候选词个数
menu/page_size: 7
# 默认英文
switches/@0/reset: 1
安装 Windows 字体
# 从本地安装
sudo mkdir /usr/share/fonts/WindowsFonts
# 进入 c:/Windows/Fonts (先挂载)
cd /run/media/ruoli/C24A038B4A037C03/Windows/Fonts
sudo cp ./* /usr/share/fonts/WindowsFonts
# 设置权限
sudo chmod 644 /usr/share/fonts/WindowsFonts/*
# 刷新字体
fc-cache -vf
系统服务
文件索引和搜索
- 大部分发行版都提供了`locate`命令进行快速文件搜索
- Arch建议安装 mlocate包,并执行`updatedb`建立文件系统索引
sudo pacman -S mlocate
sudo updatedb
打印
sudo pacman -S cups
sudo systemctl enable --now cups
本地邮件服务
防火墙
sudo pacman -S firewalld
sudo systemctl enable --now firewalld
蓝牙
sudo pacman -S bluez bluez-utils
sudo systemctl enable --now bluetooth
声音(按需)
- 一般无需配置,默认就能正常工作
内核声卡驱动提供了声音:
- 高级 Linux 声音体系(ALSA) 是Linux内核组件,推荐使用。只需要解除静音,安装alsa-utils包软件包,它包含了
alsamixer
工具 - 如果 Alsa 不能工作,可以试试OSS
对于高级声音需求, 可浏览 专业音频。例如:PipeWire
# 内有工具如:amixer 、alsamixer (UI界面)
sudo pacman -S alsa-utils
# ALSA固件
sudo pacman -S alsa-firmware
# 高质量重采样,上混和缩混
sudo pacman -S alsa-plugins
# 测试声卡是否正常工作
speaker-test -c 2
固件更新
sudo pacman -S fwupd
fwupdmgr get-updates # 列出任何设备可用更新
# 保证fwupd服务在运行
systemctl status fwupd
sudo fwupdmgr refresh # 刷新元数据
sudo fwupdmgr update # 更新所有指定的设备
Grub优化
添加重启/关机选项
sudo vim /boot/grub/custom.cfg
menuentry "System shutdown" {
echo "System shutting down..."
halt
}
menuentry "System restart" {
echo "System rebooting..."
reboot
}
Grub 主题
主题推荐Grub-theme-vimix
cd ~/Documents/tools/grub
tar -xvf Vimix-1080p.tar.xz
sudo mv Vimix-1080p/Vimix /boot/grub/themes/
# 配置theme
sudo vim /etc/default/grub
GRUB_THEME="/boot/grub/themes/Vimix/theme.txt"
sudo grub-mkconfig -o /boot/grub/grub.cfg
Grub UI
- UI界面配置Grub,有需求可安装
sudo pacman -S grub-customizer
系统管理与维护
清理包缓存
sudo pacman -S pacman-contrib
# 从 /var/cache/pacman/pkg/ 删除不需要的 .pkg
paccache -r
删除不需要的包
# 删除不需要的软件包
pacman -Qtdq | sudo pacman -Rns - # 查询(-Q)无需root权限
错误:有参数 '-' 但标准输入流为空 #说明没有孤立包
删除旧配置文件
ls ~/.config/ # 软件保存配置文件的地方
ls ~/.cache/ # 程序缓存
ls ~/.local/share/ # 可能有旧文件
ls ~/.local/state/ # 可能有旧文件
破损的软链接
# 列出有问题的软链接,可以检查并删除(不能盲目删除)
sudo find / -xtype l -print
透明代理(未完善)
V2rayA
sudo pacman -S v2raya
Bug修复
# Failed to load module "appmenu-gtk-module"
sudo pacman -S appmenu-gtk-module
# hostname command not found
sudo pacman -S inetutils
# Flatpak packages support
sudo pacman -S flatpak
ACPI BIOS Error (bug): Failure creating named object [\_SB.PCI0.XHC.RHUB.SS10._PLD], AE_ALREADY_EXISTS (20230628/dswload2-326)
ACPI Error: AE_ALREADY_EXISTS, During name lookup/catalog (20230628/psobject-220)
pcieport 0000:00:1d.7: DPC: error containment capabilities: Int Msg #0, RPExt+ PoisonedTLP+ SwTrigger+ RP PIO Log 4, DL_ActiveErr+
platform regulatory.0: Direct firmware load for regulatory.db failed with error -2
ucsi_acpi USBC000:00: error -ETIMEDOUT: PPM init failed
warning: `kdeconnectd' uses wireless extensions which will stop working for Wi-Fi 7 hardware; use nl80211
KDE优化
系统设置
输入和输出
鼠标和触摸板
1.禁用触摸板设备(笔记本打字总误触)
键盘
1.虚拟键盘 设置为 Fcitx5 (wayland下输入法需要)
显示和监视器
1.缩放率 wayland默认125,x11默认100(可改为125)
互联网
Wi-FI和网络
1.设置防火墙
2.设置代理服务器
外观和视觉风格
颜色和主题
1.全局主题 设置为 Breeze微风(推荐浅色)
2.登录屏幕 设置为 Breeze微风
。。。
壁纸
1.添加本地壁纸,并设置
应用和窗口
默认应用程序
1.设置默认浏览器
2.设置默认编辑器
。。。
窗口管理
桌面特效
1.设置最小化过渡动画(神灯)
。。。
虚拟桌面
1.添加桌面2
。。。
工作区
常规行为
1.单击文件或文件夹时 设置为 选中
语言和时间
区域和语言
1.语言 设置为 简体中文,重启
输入法
1.见 中文输入法
系统
软件更新
1.通知频率 设置为 不通知
2.应用系统更新 设置为 重新开机之后应用
自动启动
1.添加 Yakuake下拉式终端
会话
后台服务
1.取消 欢迎中心启动程序
2.取消 Plasma浏览器集成程序安装提醒
桌面会话
1.设置 启动为空会话
桌面面板
效果图
配置
- 左边:默认面板(自带)(面板宽度36,非悬浮,避开窗口/总是显示)
- 应用程序启动器(更改图标)
- 图标任务管理器
- 边距分割符
- 回收站(增)
- 暂时显示桌面
- 上边:默认面板(添加)(面板宽度30,非悬浮,总是显示)
- 虚拟桌面切换器(配置-常规-文本显示设置为桌面编号)
- 全局菜单(增)
- 面板间隙(增:面板设置-添加间隙)
- 图标任务管理器(配置-行为-新任务出现在左侧)
- 边距分割符(增)
- 总CPU使用情况(增)
- 内存使用情况(增)
- 边距分割符
- 系统托盘
- 数字时钟
- 暂时显示桌面
终端美化
效果图
左边: Konsole 右边: Yakuake
配置方案
- 1. 新增方案 `ruoli-zsh`
- 2. 初始终端尺寸 `100x28` `92x28`(推荐)
- 3. 字体为`JetBrainsMono Nerd Font`,大小为 `10pt`
- 4. 背景色透明度 在`15-30` 之间,推荐20
SDDM优化
软件优化
Konsole
1.设置-显示工具栏
取消主工具栏
取消会话工具栏
Yakuake
1.宽度51 高度60
2.位置设置为最右
3.安装皮肤TabsOnly,并使用
Spectacle
1.配置-快捷键
启动Spectacle:设置全局 为 Print键
Arch软件
软件规范
- 1.个人规范
# 软件安装目录
mkdir ~/Documents/software/
# 软件快捷方式目录
mkdir ~/.local/share/applications
# 软件自启目录
mkdir ~/.config/autostart/
- 2.系统规范
# 快捷方式
ls /usr/share/applications/
ls /usr/local/share/applications/
Typora(例)
# 1.解压安装包
tar -zxvf typora-0-11-18.tar.gz
mv typora ~/Documents/software/typora # 此时可重命名
# 2.快捷启动
vim ~/.local/share/applications/typora.desktop
# 3.软件自启(演示)
cp ~/.local/share/applications/typora.desktop ~/.config/autostart/
typora.desktop
[Desktop Entry]
# 名称(必须)
Name=Typora
# 描述
GenericName=Markdown Editor
# 注释
Comment=A minimal Markdown reading & writing app
# 程序(必须)
Exec=/home/ruoli/Documents/software/typora/Typora
# 图标
Icon=/home/ruoli/Documents/software/typora/resources/assets/icon/icon_256x256@2x.png
# 类型(必须)
Type=Application
# 分类
Categories=Office;WordProcessor;Development;
# 支持的MIME类型
MimeType=text/markdown;text/x-markdown;
日常
# 谷歌浏览器
paru -S google-chrome
# Code-OSS
paru -S code
# QQ
paru -S linuxqq
# 微信
paru -S wechat-universal-bwrap
# 网易云音乐
# paru -S netease-cloud-music
paru -S yesplaymusic # 第三方(y)
# 百度网盘
paru -S baidunetdisk-bin
# Steam(字体太小解决方案:进入大屏幕模式再退出)
paru -S steam
# 迅雷(无法正常退出,下载功能正常)
paru -S xunlei-bin
###############################################
# Ventoy(启动盘制作工具)
paru -S ventoy-bin
# 我的世界
paru -S hmcl
WPS
paru -S wps-office wps-office-mui-zh-cn ttf-wps-fonts
# BUG 字体太粗
paru -S freetype2-wps
Sublime Text
官网 Sublime Text
curl -O https://download.sublimetext.com/sublimehq-pub.gpg && sudo pacman-key --add sublimehq-pub.gpg && sudo pacman-key --lsign-key 8A8F901A && rm sublimehq-pub.gpg
echo -e "\n[sublime-text]\nServer = https://download.sublimetext.com/arch/stable/x86_64" | sudo tee -a /etc/pacman.conf
sudo pacman -Sy sublime-text
开发环境
环境目录
sudo mkdir /opt/module
sudo chown ruoli:ruoli /opt/module
JDK
## OpenJDK(建议装上)
pacman -S jdk-openjdk
# 配置
archlinux-java
## OracleJDK
tar -zxvf jdk-8u201-linux-x64.tar.gz
mv jdk1.8.0_201 /opt/module/jdk
# 环境变量
sudo vim /etc/environment
JAVA_HOME=/opt/module/jdk
sudo vim /etc/profile.d/myenv.sh
# JDK
export JAVA_HOME=/opt/module/jdk
export PATH=$PATH:$JAVA_HOME/bin
source /etc/profile
java -version
Maven
tar -zxvf apache-maven-3.9.6-bin.tar.gz
mv apache-maven-3.9.6 /opt/module/maven
# 环境变量
sudo vim /etc/environment
M2_HOME=/opt/module/maven
sudo vim /etc/profile.d/myenv.sh
# Maven
export M2_HOME=/opt/module/maven
export PATH=$PATH:$M2_HOME/bin
source /etc/profile
# 软件配置
mkdir ~/.m2
cp $M2_HOME/conf/settings.xml ~/.m2/settings.xml # 文件名必须为settings.xml,Maven才会读取
vim ~/.m2/settings.xml
mvn -v
settings.xml
<localRepository>/home/ruoli/.m2/repository</localRepository>
<mirror>
<id>nexus-aliyun</id>
<mirrorOf>central</mirrorOf>
<name>公共仓库</name>
<url>http://maven.aliyun.com/nexus/content/groups/public</url>
</mirror>
MySQL
# 注:保证环境干净再安装
sudo pacman -S mysql
# 初始化
sudo mysqld --initialize --user=mysql --basedir=/usr --datadir=/var/lib/mysql
# 启动服务
systemctl enable --now mysqld
mysql -u root -p
ALTER USER 'root'@'localhost' IDENTIFIED BY 'root';
FLUSH PRIVILEGES;
quit;
mysql --version
IDEA
# 删除旧 JetBrains 应用数据
rm -rf .config/JetBrains/
rm -rf .cache/JetBrains/
rm -rf .local/share/JetBrains/
# 解压方式安装
tar -zxvf ideaIU-2023.3.6.tar.gz
mv idea-IU-233.15026.9 ~/Documents/software/idea2023
# 快捷方式
vim ~/.local/share/applications/idea.desktop
# 破解
- 1.下载破解文件jetbra
- 2.vmoptions文件中引用破解文件
- 3.启动应用,输入注册码(详见www.exception.site)
cp -r jetbra /home/ruoli/Documents/software/
# 方式1(推荐)
vim ~/Documents/software/idea2023/bin/idea64.vmoptions
--add-opens=java.base/jdk.internal.org.objectweb.asm=ALL-UNNAMED
--add-opens=java.base/jdk.internal.org.objectweb.asm.tree=ALL-UNNAMED
-javaagent:/home/ruoli/Documents/software/jetbra/ja-netfilter.jar=jetbrains
# 方式2:自动脚本(不推荐,改动较大)
sh ~/Documents/software/jetbra/scripts/uninstall.sh
sh ~/Documents/software/jetbra/scripts/install.sh
reboot # 重启使环境变量生效
idea.desktop
[Desktop Entry]
Name=IntelliJ IDEA
GenericName=IntelliJ IDEA
Exec=/home/ruoli/Documents/software/idea2023/bin/idea.sh
Icon=/home/ruoli/Documents/software/idea2023/bin/idea.png
Type=Application
Categories=Development;
DataGrip
tar -zxvf datagrip-2023.3.4.tar.gz
mv DataGrip-2023.3.4 ~/Documents/software/datagrip2023
vim ~/.local/share/applications/datagrip.desktop
vim ~/Documents/software/datagrip2023/bin/datagrip64.vmoptions
--add-opens=java.base/jdk.internal.org.objectweb.asm=ALL-UNNAMED
--add-opens=java.base/jdk.internal.org.objectweb.asm.tree=ALL-UNNAMED
-javaagent:/home/ruoli/Documents/software/jetbra/ja-netfilter.jar=jetbrains
reboot
datagrip.desktop
[Desktop Entry]
Name=DataGrip
GenericName=DataGrip
Exec=/home/ruoli/Documents/software/datagrip2023/bin/datagrip.sh
Icon=/home/ruoli/Documents/software/datagrip2023/bin/datagrip.png
Type=Application
Categories=Development;
WebStorm
tar -zxvf WebStorm-2023.3.6.tar.gz
mv WebStorm-233.15026.13 ~/Documents/software/webstorm2023
vim ~/.local/share/applications/webstorm.desktop
vim ~/Documents/software/webstorm2023/bin/webstorm64.vmoptions
--add-opens=java.base/jdk.internal.org.objectweb.asm=ALL-UNNAMED
--add-opens=java.base/jdk.internal.org.objectweb.asm.tree=ALL-UNNAMED
-javaagent:/home/ruoli/Documents/software/jetbra/ja-netfilter.jar=jetbrains
reboot
webstorm.desktop
[Desktop Entry]
Name=WebStorm
GenericName=WebStorm
Exec=/home/ruoli/Documents/software/webstorm2023/bin/webstorm.sh
Icon=/home/ruoli/Documents/software/webstorm2023/bin/webstorm.png
Type=Application
Categories=Development;
Arch命令
常用命令
# 解压 tar(-x 解压 -vf) unzip unrar 7z
tar -zxvf test.tar.gz # -z gzip -j bzip2
tar -xvf test.tar.xz
tar -xvf test.tar
unzip test.zip
unrar test.rar
7z x test.7z # x 表示解压并保留目录结构
# 压缩 参数 -c
tar -zcvf test.tar.gz test/
# 查看文件 less(行) more(页) head tail
less ~/.zshrc # h 帮助 q 退出
more ~/.zshrc # h 帮助 q 退出
head ~/.zshrc # 查看文件头
tail ~/.zshrc # 查看文件尾
# 文件系统(空间) df
df -h # 默认所有
df -h /opt # 磁盘名 或 挂载点
df -h /dev/sda3
# 文件(夹)大小 du
sudo du -h -d 0 /usr
# 树型展示 tree
tree -L 1 /usr
# 持续查看输出 watch
watch -n 1 nvidia-smi
pacman
# 查询
pacman -Ss git
# 安装
pacman -S git
# 查看所安装包详细信息
pacman -Qi git
# 删除包及其依赖,配置一并删除
pacman -Rsn git
git
# git 配置
git config --global user.name "yuruoli"
git config --global user.email "484158890@qq.com"
git config --list
# git 命令
git init
git add Readme.txt
git commit -a -m "Commit message"
git remote add origin https://gitee.com/yuruoli/gitwork.git
git push -u origin "master"
# git 远程仓库
git remote add https://github.com/ohmyzsh/ohmyzsh.git
# 远程仓库信息
git remote -v
origin https://github.com/ohmyzsh/ohmyzsh.git (fetch)
origin https://github.com/ohmyzsh/ohmyzsh.git (push)
# 拉取
git fetch
git fetch origin
# 拉取并合并(fetch + merge)
git pull
git pull origin xf:xf # xf分支并与本地xf分支合并
# 推送 默认 origin master
git push
git push origin xf
# PS:push代码前最好先用pull更新本地代码。
Hadoop环境搭建
SSH无密码
# .ssh目录自动生成
ssh-keygen -t rsa
ssh-copy-id ruoli-arch
Hadoop
安装(伪集群)
# 1.解压安装
tar -zxvf hadoop-3.3.4.tar.gz
mv hadoop-3.3.4 /opt/module/hadoop
# 2.环境变量
sudo vim /etc/environment
HADOOP_HOME=/opt/module/hadoop
sudo vim /etc/profile.d/myenv.sh
# Hadoop
export HADOOP_HOME=/opt/module/hadoop
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
source /etc/profile
# 3.软件配置
cd $HADOOP_HOME/etc/hadoop
vim core-site.xml
vim hdfs-site.xml
vim yarn-site.xml
vim mapred-site.xml
vim workers
vim capacity-scheduler.xml
# 4.初始化
hdfs namenode -format
常用命令
#启动dfs
start-dfs.sh
#启动yarn
start-yarn.sh
#启动历史服务器
mapred --daemon start historyserver
################### hdfs ##########################
# Usage: hadoop fs [generic options]
hadoop fs # 查看帮助
hdfs dfs # 同上
#创建目录
hadoop fs -mkdir -p /input
# 上传
hadoop fs -put word.txt /input
#追加写
hadoop fs -appendToFile word2.txt /input/word.txt
# 下载
hadoop fs -get /input/word.txt
# 查看 类似Linux命令 ls mv cp cat rm ...
hadoop fs -ls /input
hadoop fs -cat /input/word.txt
hadoop fs -chmod 777 /input/word.txt
hadoop fs -chown yuruoli:yuruoli /input/word.txt
hadoop fs -rm -r /input
#执行 mapreduce应用 /input /output 都是hdfs目录
hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-3.3.4.jar wordcount /input /output
core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- 指定NameNode的地址 -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://ruoli-arch:8020</value>
</property>
<!-- 指定hadoop数据的存储目录 -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/module/hadoop/data</value>
</property>
<!-- 配置HDFS网页登录使用的静态用户为ruoli -->
<property>
<name>hadoop.http.staticuser.user</name>
<value>ruoli</value>
</property>
<!-- 配置ruoli(superUser)允许通过代理访问的主机节点 -->
<property>
<name>hadoop.proxyuser.ruoli.hosts</name>
<value>*</value>
</property>
<!-- 配置ruoli(superUser)允许通过代理用户所属组 -->
<property>
<name>hadoop.proxyuser.ruoli.groups</name>
<value>*</value>
</property>
<!-- 配置ruoli(superUser)允许通过代理的用户-->
<property>
<name>hadoop.proxyuser.ruoli.users</name>
<value>*</value>
</property>
</configuration>
hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- nn web端访问地址-->
<property>
<name>dfs.namenode.http-address</name>
<value>ruoli-arch:9870</value>
</property>
<!-- 2nn web端访问地址-->
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>ruoli-arch:9868</value>
</property>
<!-- 测试环境指定HDFS副本的数量1 -->
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
yarn-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- 指定MR走shuffle -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- 指定ResourceManager的地址-->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>ruoli-arch</value>
</property>
<!-- 环境变量的继承 -->
<property>
<name>yarn.nodemanager.env-whitelist</name>
<value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
</property>
<!--yarn单个容器允许分配的最大最小内存 -->
<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>512</value>
</property>
<property>
<name>yarn.scheduler.maximum-allocation-mb</name>
<value>6144</value>
</property>
<!--yarn单个容器允许分配的最大cpu核数 -->
<property>
<name>yarn.scheduler.maximum-allocation-vcores</name>
<value>8</value>
</property>
<!-- yarn容器允许管理的物理内存大小 -->
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>6144</value>
</property>
<!-- yarn容器允许管理的CPU核数 -->
<property>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>8</value>
</property>
<!-- 关闭yarn对物理内存和虚拟内存的限制检查 -->
<property>
<name>yarn.nodemanager.pmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.vmem-check-enabled</name>
<value>false</value>
</property>
<!-- 开启日志聚集功能 -->
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<!-- 设置日志聚集服务器地址 -->
<property>
<name>yarn.log.server.url</name>
<value>http://ruoli-arch:19888/jobhistory/logs</value>
</property>
<!-- 设置日志保留时间为7天 -->
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>604800</value>
</property>
</configuration>
mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- 指定MapReduce程序运行在Yarn上 -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<!-- 单个Map Task申请的container容器 -->
<property>
<name>mapreduce.map.memory.mb</name>
<value>1024</value>
</property>
<property>
<name>mapreduce.map.cpu.vcores</name>
<value>1</value>
</property>
<!-- 单个Reduce Task申请的container容器 -->
<property>
<name>mapreduce.reduce.memory.mb</name>
<value>1024</value>
</property>
<property>
<name>mapreduce.reduce.cpu.vcores</name>
<value>1</value>
</property>
<!-- 历史服务器端地址 -->
<property>
<name>mapreduce.jobhistory.address</name>
<value>ruoli-arch:10020</value>
</property>
<!-- 历史服务器web端地址 -->
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>ruoli-arch:19888</value>
</property>
</configuration>
workers
ruoli-arch
capacity-scheduler.xml
<property>
<name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
<value>0.8</value>
</property>
hdp.sh 启停
vim ~/bin/hdp.sh
chmod +x ~/bin/hdp.sh # 自己的脚本加执行权限即可,无需sudo与777
#!/bin/bash
# hdp.sh - start/stop the single-node (pseudo-distributed) Hadoop cluster:
#          HDFS, YARN, and the MapReduce job history server, via ssh.
# Usage: hdp.sh start|stop
if [ $# -lt 1 ]; then
    echo "No Args Input..." >&2
    # A usage error must exit non-zero (the original `exit ;` returned 0).
    exit 1
fi

case "$1" in
"start")
    echo " =================== 启动 hadoop集群 ==================="
    echo " --------------- 启动 hdfs ---------------"
    ssh ruoli-arch "/opt/module/hadoop/sbin/start-dfs.sh"
    echo " --------------- 启动 yarn ---------------"
    ssh ruoli-arch "/opt/module/hadoop/sbin/start-yarn.sh"
    echo " --------------- 启动 historyserver ---------------"
    ssh ruoli-arch "/opt/module/hadoop/bin/mapred --daemon start historyserver"
    ;;
"stop")
    # Stop in reverse order of startup.
    echo " =================== 关闭 hadoop集群 ==================="
    echo " --------------- 关闭 historyserver ---------------"
    ssh ruoli-arch "/opt/module/hadoop/bin/mapred --daemon stop historyserver"
    echo " --------------- 关闭 yarn ---------------"
    ssh ruoli-arch "/opt/module/hadoop/sbin/stop-yarn.sh"
    echo " --------------- 关闭 hdfs ---------------"
    ssh ruoli-arch "/opt/module/hadoop/sbin/stop-dfs.sh"
    ;;
*)
    echo "Input Args Error..." >&2
    exit 1
    ;;
esac
Zookeeper
安装(Standalone)
# 1.解压安装
tar -zxvf apache-zookeeper-3.7.1-bin.tar.gz
mv apache-zookeeper-3.7.1-bin/ /opt/module/zookeeper
# 2.环境变量
sudo vim /etc/environment
ZOOKEEPER_HOME=/opt/module/zookeeper
sudo vim /etc/profile.d/myenv.sh
# Zookeeper
export ZOOKEEPER_HOME=/opt/module/zookeeper
export PATH=$PATH:$ZOOKEEPER_HOME/bin
source /etc/profile
# 3.配置文件
cd /opt/module/zookeeper
mkdir zkData
echo '1' > zkData/myid
mv conf/zoo_sample.cfg conf/zoo.cfg
vim /opt/module/zookeeper/conf/zoo.cfg
dataDir=/opt/module/zookeeper/zkData
clientPort=2181
常用命令
#往往是集群。需要在每个服务器都启动。一般写成脚本启动
zkServer.sh start
zkServer.sh stop
zkServer.sh status
#命令行客户端
zkCli.sh
-server hadoop102:2181
help
#查看所有znode
ls /
-s #详细
-w #监听 注册一次。监听一次
# create path value value默认为null
create /sanguo
create /sanguo/shuguo "liubei"
create /sanguo/weiguo "caocao"
create -s /sanguo/weiguo/zhangliao "zhangliao" #带序号从0开始递增
create -s /sanguo/weiguo/zhangliao "zhangliao" # 1
create -e /sanguo/weiguo/zhangliao "zhangliao2"
-s #带序号
-e #短暂存在
# 查看
get /sanguo/shuguo
-s #详细
-w #监听 注册一次。监听一次
# 修改
set /sanguo/weiguo "simayi"
# 删除
delete /sanguo/shuguo
deleteall /sanguo
zoo.cfg
#修改
dataDir=/opt/module/zookeeper/zkData
clientPort=2181
# 2182 2183
#添加
####################### cluster ##########################
server.2=ruoli-arch:2888:3888
server.3=ruoli-arch:2889:3889
server.4=ruoli-arch:2890:3890
zk.sh 启停
vim ~/bin/zk.sh
chmod +x ~/bin/zk.sh # 加执行权限即可,无需sudo与777
#!/bin/bash
# zk.sh - start/stop/query Zookeeper on every host listed in `hosts`, via ssh.
# Usage: zk.sh start|stop|status
hosts=(ruoli-arch)

case "$1" in
"start")
    for i in "${hosts[@]}"; do
        echo "---------- zookeeper $i 启动 ------------"
        ssh "$i" "/opt/module/zookeeper/bin/zkServer.sh start"
    done
    ;;
"stop")
    for i in "${hosts[@]}"; do
        echo "---------- zookeeper $i 停止 ------------"
        ssh "$i" "/opt/module/zookeeper/bin/zkServer.sh stop"
    done
    ;;
"status")
    for i in "${hosts[@]}"; do
        echo "---------- zookeeper $i 状态 ------------"
        ssh "$i" "/opt/module/zookeeper/bin/zkServer.sh status"
    done
    ;;
*)
    # The original script silently succeeded on an unknown argument.
    echo "Usage: $0 start|stop|status" >&2
    exit 1
    ;;
esac
Kafka
安装 standalone
# 1.解压安装
tar -zxvf kafka_2.12-3.3.1.tgz
mv kafka_2.12-3.3.1/ /opt/module/kafka
# 2.环境变量
sudo vim /etc/environment
KAFKA_HOME=/opt/module/kafka
sudo vim /etc/profile.d/myenv.sh
# Kafka
export KAFKA_HOME=/opt/module/kafka
export PATH=$PATH:$KAFKA_HOME/bin
source /etc/profile
# 3.配置文件
vim /opt/module/kafka/config/server.properties
# broker的全局唯一编号
broker.id=0
# broker对外暴露的IP和端口
advertised.listeners=PLAINTEXT://ruoli-arch:9092
# kafka运行日志(数据)存放的路径
log.dirs=/opt/module/kafka/datas
# partitions个数,一般配置和cpu核心数相同
num.partitions=8
# 配置连接Zookeeper集群地址(在zk根目录下创建/kafka,方便管理)
zookeeper.connect=ruoli-arch:2181/kafka
常用命令
# 主题
kafka-topics.sh --bootstrap-server ruoli-arch:9092 --topic test
--create
--delete
--alter
--list
--describe
--partitions #分区数量
--replication-factor #副本数量
kafka-topics.sh --bootstrap-server ruoli-arch:9092 --list
kafka-topics.sh --bootstrap-server ruoli-arch:9092 --create --topic test
kafka-topics.sh --bootstrap-server ruoli-arch:9092 --alter --topic test --partitions 8
kafka-topics.sh --bootstrap-server ruoli-arch:9092 --delete --topic test
#生产者
kafka-console-producer.sh --bootstrap-server ruoli-arch:9092 --topic test
#消费者
kafka-console-consumer.sh --bootstrap-server ruoli-arch:9092 --topic test
--group #消费者组id
--partition 0
--offset earliest
kf.sh 启停
vim ~/bin/kf.sh
chmod +x ~/bin/kf.sh # 加执行权限即可,无需sudo与777
#!/bin/bash
# kf.sh - start/stop the Kafka broker on every host listed in `hosts`, via ssh.
# Usage: kf.sh start|stop
hosts=(ruoli-arch)

case "$1" in
"start")
    for i in "${hosts[@]}"; do
        echo " --------启动 $i Kafka-------"
        ssh "$i" "/opt/module/kafka/bin/kafka-server-start.sh -daemon /opt/module/kafka/config/server.properties"
    done
    ;;
"stop")
    for i in "${hosts[@]}"; do
        echo " --------停止 $i Kafka-------"
        ssh "$i" "/opt/module/kafka/bin/kafka-server-stop.sh"
    done
    ;;
*)
    # The original script silently succeeded on an unknown argument.
    echo "Usage: $0 start|stop" >&2
    exit 1
    ;;
esac
Hive
安装
# 1.解压安装
tar -zxvf hive-3.1.3.tar.gz
mv apache-hive-3.1.3-bin/ /opt/module/hive
# 2.环境变量
sudo vim /etc/environment
HIVE_HOME=/opt/module/hive
sudo vim /etc/profile.d/myenv.sh
# Hive
export HIVE_HOME=/opt/module/hive
export PATH=$PATH:$HIVE_HOME/bin
source /etc/profile
# 3.配置文件
mv $HIVE_HOME/lib/log4j-slf4j-impl-2.17.1.jar $HIVE_HOME/lib/log4j-slf4j-impl-2.17.1.jar.bak
cp mysql/mysql-connector-j-8.0.31.jar $HIVE_HOME/lib/
cd /opt/module/hive/conf
vim hive-site.xml
mv hive-log4j2.properties.template hive-log4j2.properties
vim hive-log4j2.properties
mysql -uroot -proot
######## mysql #########
create database metastore;
quit;
#初始化Hive元数据库
schematool -initSchema -dbType mysql -verbose
mysql -uroot -proot
######## mysql #########
use metastore;
alter table COLUMNS_V2 modify column COMMENT varchar(256) character set utf8;
alter table TABLE_PARAMS modify column PARAM_VALUE mediumtext character set utf8;
quit;
常用命令
# 进入命令行客户端
hive
hive (default)>
show databases;
create table student(id int, name string);
insert into student values(1, "zhangsan");
create database bigdata;
hive -e "select * from student;"
hive -f student.sql > result.txt
Hive SQL
--创建数据库
CREATE DATABASE [IF NOT EXISTS] database_name
[COMMENT database_comment]
[LOCATION hdfs_path]
[WITH DBPROPERTIES (property_name=property_value, ...)];
--创建表
CREATE [TEMPORARY] [EXTERNAL] TABLE [IF NOT EXISTS] [db_name.]table_name
[(col_name data_type [COMMENT col_comment], ...)]
[COMMENT table_comment]
[PARTITIONED BY (col_name data_type [COMMENT col_comment], ...)]
[CLUSTERED BY (col_name, col_name, ...)
[SORTED BY (col_name [ASC|DESC], ...)] INTO num_buckets BUCKETS]
[ROW FORMAT row_format]
[STORED AS file_format]
[LOCATION hdfs_path]
[TBLPROPERTIES (property_name=property_value, ...)]
--Load
LOAD DATA [LOCAL] INPATH 'filepath' [OVERWRITE] INTO TABLE tablename [PARTITION (partcol1=val1, partcol2=val2 ...)];
--Insert
INSERT (INTO | OVERWRITE) TABLE tablename [PARTITION (partcol1=val1, partcol2=val2 ...)] select_statement;
--导出
EXPORT TABLE tablename TO 'export_target_path'
--导入
IMPORT [EXTERNAL] TABLE new_or_original_tablename FROM 'source_path' [LOCATION 'import_target_path']
--查询
SELECT [ALL | DISTINCT] select_expr, select_expr, ...
FROM table_reference -- 从什么表查
[WHERE where_condition] -- 过滤
[GROUP BY col_list] -- 分组查询
[HAVING col_list] -- 分组后过滤
[ORDER BY col_list] -- 排序
[CLUSTER BY col_list | [DISTRIBUTE BY col_list] [SORT BY col_list]]
[LIMIT number] -- 限制输出的行数
hive-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!--配置Hive保存元数据信息所需的 MySQL URL地址-->
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://ruoli-arch:3306/metastore?useSSL=false&amp;useUnicode=true&amp;characterEncoding=UTF-8&amp;allowPublicKeyRetrieval=true</value>
</property>
<!--配置Hive连接MySQL的驱动全类名-->
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.cj.jdbc.Driver</value>
</property>
<!--配置Hive连接MySQL的用户名 -->
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<!--配置Hive连接MySQL的密码 -->
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>root</value>
</property>
<!-- Hive默认在HDFS的工作目录 -->
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/hive/warehouse</value>
</property>
<property>
<name>hive.metastore.schema.verification</name>
<value>false</value>
</property>
<!-- 指定hiveserver2连接的host -->
<property>
<name>hive.server2.thrift.bind.host</name>
<value>ruoli-arch</value>
</property>
<!-- 指定hiveserver2连接的端口号 -->
<property>
<name>hive.server2.thrift.port</name>
<value>10000</value>
</property>
<property>
<name>hive.metastore.event.db.notification.api.auth</name>
<value>false</value>
</property>
<!-- Hive客户端显示当前库和表头 -->
<property>
<name>hive.cli.print.header</name>
<value>true</value>
</property>
<property>
<name>hive.cli.print.current.db</name>
<value>true</value>
</property>
<!--Hive执行引擎-->
<property>
<name>hive.execution.engine</name>
<value>mr</value>
</property>
</configuration>
hive-log4j2.properties
property.hive.log.dir=/opt/module/hive/logs
myhive.sh 启停
vim ~/bin/myhive.sh
sudo chmod 777 ~/bin/myhive.sh
#!/bin/bash
# myhive.sh — start/stop/restart/status wrapper for the Hive Metastore
# (port 9083) and HiveServer2 (port 10000) services.
# Usage: myhive.sh start|stop|restart|status
# Requires $HIVE_HOME to be set; logs go to $HIVE_HOME/logs.
HIVE_LOG_DIR=$HIVE_HOME/logs
if [ ! -d $HIVE_LOG_DIR ]
then
mkdir -p $HIVE_LOG_DIR
fi
# Check whether a service is running.
#   $1 = process name (case-insensitive match against ps output)
#   $2 = listening port
# Prints the pid(s) matched by name (word-split onto one line by the
# unquoted echo). Returns 0 only when the pid owning the port (taken from
# netstat) is non-empty AND appears among the pids matched by name.
function check_process()
{
pid=$(ps -ef 2>/dev/null | grep -v grep | grep -i $1 | awk '{print $2}')
ppid=$(netstat -nltp 2>/dev/null | grep $2 | awk '{print $7}' | cut -d '/' -f 1)
echo $pid
[[ "$pid" =~ "$ppid" ]] && [ "$ppid" ] && return 0 || return 1
}
# Start Metastore then HiveServer2, each only if not already running
# (check_process prints nothing when the service is down).
function hive_start()
{
metapid=$(check_process HiveMetastore 9083)
cmd="nohup hive --service metastore >$HIVE_LOG_DIR/metastore.log 2>&1 &"
# eval is needed so the redirections and trailing & inside $cmd take effect.
[ -z "$metapid" ] && eval $cmd || echo "Metastroe服务已启动"
server2pid=$(check_process HiveServer2 10000)
cmd="nohup hive --service hiveserver2 >$HIVE_LOG_DIR/hiveServer2.log 2>&1 &"
[ -z "$server2pid" ] && eval $cmd || echo "HiveServer2服务已启动"
}
# Stop HiveServer2 first, then the Metastore it depends on.
function hive_stop()
{
server2pid=$(check_process HiveServer2 10000)
[ "$server2pid" ] && kill $server2pid || echo "HiveServer2服务未启动"
metapid=$(check_process HiveMetastore 9083)
[ "$metapid" ] && kill $metapid || echo "Metastore服务未启动"
}
case $1 in
"start")
hive_start
;;
"stop")
hive_stop
;;
"restart")
hive_stop
# Brief pause so the old processes release their ports before restart.
sleep 2
hive_start
;;
"status")
check_process HiveMetastore 9083 >/dev/null && echo "Metastore服务运行正常" || echo "Metastore服务运行异常"
check_process HiveServer2 10000 >/dev/null && echo "HiveServer2服务运行正常" || echo "HiveServer2服务运行异常"
;;
*)
echo Invalid Args!
echo 'Usage: '$(basename $0)' start|stop|restart|status'
;;
esac
Spark
安装
# 1.解压安装
tar -zxvf spark-3.3.1-bin-hadoop3.tgz
mv spark-3.3.1-bin-hadoop3/ /opt/module/spark
tar -zxvf spark-3.3.1-bin-without-hadoop.tgz
mv spark-3.3.1-bin-without-hadoop /opt/module/spark-without-hadoop
# 2.环境变量
sudo vim /etc/environment
SPARK_HOME=/opt/module/spark
sudo vim /etc/profile.d/myenv.sh
# Spark
export SPARK_HOME=/opt/module/spark
export PATH=$PATH:$SPARK_HOME/bin
source /etc/profile
# 3.配置文件
# Spark on Yarn
hadoop fs -mkdir -p /spark/history
cd $SPARK_HOME/conf
mv spark-env.sh.template spark-env.sh
mv spark-defaults.conf.template spark-defaults.conf
vim spark-env.sh
vim spark-defaults.conf
# Hive on Spark
hadoop fs -mkdir -p /spark/spark-jars
hadoop fs -put /opt/module/spark-without-hadoop/jars/* /spark/spark-jars
vim $HIVE_HOME/conf/spark-defaults.conf
spark.eventLog.enabled true
spark.eventLog.dir hdfs://ruoli-arch:8020/spark/history
spark.yarn.historyServer.address ruoli-arch:18080
spark.history.ui.port 18080
vim $HIVE_HOME/conf/hive-site.xml
<!-- Hive执行引擎 -->
<property>
<name>hive.execution.engine</name>
<value>spark</value>
</property>
<!-- Spark依赖位置 -->
<property>
<name>spark.yarn.jars</name>
<value>hdfs://ruoli-arch:8020/spark/spark-jars/*</value>
</property>
常用命令
cd /opt/module/spark
# 启停历史服务器
sbin/start-history-server.sh
sbin/stop-history-server.sh
# Scala命令行 | local模式 | yarn模式
# 1.scala命令行(默认读取hdfs路径)
hadoop fs -mkdir -p /user/ruoli/input
hadoop fs -put ~/input/word.txt /user/ruoli/input
spark-shell
scala> sc.textFile("input/word.txt").flatMap(_.split(" ")).map((_,1)).reduceByKey(_+_).collect
# 2.本地模式 local
spark-submit --class org.apache.spark.examples.SparkPi --master local ./examples/jars/spark-examples_2.12-3.3.1.jar 10
# 3.Yarn模式 cluster/client
# client: Driver程序运行在客户端,适用于测试环境
spark-submit --class org.apache.spark.examples.SparkPi --master yarn --deploy-mode client ./examples/jars/spark-examples_2.12-3.3.1.jar 10
spark-submit --class org.apache.spark.examples.SparkPi --master yarn --deploy-mode cluster ./examples/jars/spark-examples_2.12-3.3.1.jar 10
# cluster: Driver程序运行在由ResourceManager启动的 AppMaster,适用于生产环境
# Bug
Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
Spark SQL
-- SQL风格语法(主要)
-- DSL风格语法(次要)
spark-env.sh
YARN_CONF_DIR=/opt/module/hadoop/etc/hadoop
export SPARK_HISTORY_OPTS="
-Dspark.history.ui.port=18080
-Dspark.history.fs.logDirectory=hdfs://ruoli-arch:8020/spark/history
-Dspark.history.retainedApplications=30"
spark-defaults.conf
# spark on yarn
spark.eventLog.enabled true
spark.eventLog.dir hdfs://ruoli-arch:8020/spark/history
spark.driver.memory 4g
spark.executor.memory 2g
spark.yarn.historyServer.address ruoli-arch:18080
spark.history.ui.port 18080
Flink
安装
# 1.解压安装
tar -zxvf flink-1.17.1-bin-scala_2.12.tgz
mv flink-1.17.1/ /opt/module/flink
# 2.配置文件
cd /opt/module/flink/
vim conf/flink-conf.yaml
vim conf/masters
vim conf/workers
# 3.环境变量(Flink on Yarn)
sudo vim /etc/environment
FLINK_HOME=/opt/module/flink
sudo vim /etc/profile.d/myenv.sh
# Flink
export FLINK_HOME=/opt/module/flink
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HADOOP_CLASSPATH=`hadoop classpath`
source /etc/profile
常用命令
cd /opt/module/flink
#启动集群 standalone会话模式需要。yarn模式不需要
bin/start-cluster.sh
#开启历史服务器
bin/historyserver.sh start
#################### YARN(推荐使用) ####################
# 会话模式(适合多个小任务)
bin/yarn-session.sh -d -nm test
flink run -c com.ruoli.flink.learn.app.wc.SocketStreamWordCount /home/ruoli/input/flink-learn-1.0.jar
# 单作业模式
flink run -d -t yarn-per-job -c com.ruoli.flink.learn.app.wc.SocketStreamWordCount /home/ruoli/input/flink-learn-1.0.jar
# 应用模式(常用)
flink run-application -t yarn-application -c com.ruoli.flink.learn.app.wc.SocketStreamWordCount /home/ruoli/input/flink-learn-1.0.jar
#################### Standalone(了解) ####################
# 会话模式
bin/start-cluster.sh
flink run -m ruoli-arch:8081 -c com.ruoli.flink.learn.app.wc.SocketStreamWordCount /home/ruoli/input/flink-learn-1.0.jar
# 单作业模式
Standalone不支持
## 应用模式
cp ~/input/flink-learn-1.0.jar lib/
#启动JobManager
bin/standalone-job.sh start --job-classname com.ruoli.flink.learn.app.wc.SocketStreamWordCount
#启动TaskManager
bin/taskmanager.sh start
#跑完任务需要关闭 taskmanager 和 jobmanager
bin/taskmanager.sh stop
bin/standalone-job.sh stop
Flink SQL
动态表-持续查询
#基于yarn-session模式启动Flink
bin/yarn-session.sh -d
#Sql客户端
bin/sql-client.sh embedded -s yarn-session
#从初始化文件启动。Flink中表是一种动态表(持续查询)。停则无
bin/sql-client.sh embedded -s yarn-session -i conf/sql-client-init.sql
#### sql-client-init.sql ####
SET sql-client.execution.result-mode=tableau;
CREATE DATABASE mydatabase;
Flink SQL>quit;
配置相关
# 默认table,还可以设置为tableau、changelog
SET sql-client.execution.result-mode=tableau;
# 默认streaming,也可以设置batch
SET execution.runtime-mode=streaming;
# 并行度
SET parallelism.default=1;
# TTL
SET table.exec.state.ttl=1000;
DDL
-- 创建数据库
CREATE DATABASE [IF NOT EXISTS] [catalog_name.]db_name
[COMMENT database_comment]
WITH (key1=val1, key2=val2, ...)
ALTER DATABASE [catalog_name.]db_name SET (key1=val1, key2=val2, ...)
DROP DATABASE [IF EXISTS] [catalog_name.]db_name [ (RESTRICT | CASCADE) ]
USE database_name;
-- 创建表
CREATE TABLE [IF NOT EXISTS] [catalog_name.][db_name.]table_name
(
{ <physical_column_definition> | <metadata_column_definition> | <computed_column_definition> }[ , ...n]
[ <watermark_definition> ]
[ <table_constraint> ][ , ...n]
)
[COMMENT table_comment]
[PARTITIONED BY (partition_column_name1, partition_column_name2, ...)]
WITH (key1=val1, key2=val2, ...)
[ LIKE source_table [( <like_options> )] | AS select_query ]
-- 一般 with 中的配置项由 Flink SQL 的 Connector(链接外部存储的连接器) 来定义,每种 Connector 提供的with 配置项都是不同的
-- metadata_column_definition
-- 元数据列是 SQL 标准的扩展,允许访问数据源本身具有的一些元数据
`record_time` TIMESTAMP_LTZ(3) METADATA FROM 'timestamp'
-- watermark_definition 水位线定义
-- 1.严格升序:
WATERMARK FOR rowtime_column AS rowtime_column
-- 2.递增:
WATERMARK FOR rowtime_column AS rowtime_column - INTERVAL '0.001' SECOND
-- 3.有界无序:可以用于设置最大乱序时间,一般都用这种 Watermark 生成策略。迟到5s
WATERMARK FOR rowtime_column AS rowtime_column - INTERVAL '5' timeUnit
-- table_constraint 支持主键
PRIMARY KEY(user_id) NOT ENFORCED
ALTER TABLE table_name RENAME TO new_table_name
ALTER TABLE table_name SET (key1=val1, key2=val2, ...)
DROP TABLE [IF EXISTS] table_name
-- 事件时间 + WATERMARK
CREATE TABLE EventTable(
user STRING,
url STRING,
ts TIMESTAMP(3),
WATERMARK FOR ts AS ts - INTERVAL '5' SECOND
) WITH (
...
);
-- 处理时间 AS PROCTIME()
CREATE TABLE ProcessTable(
user STRING,
url STRING,
ts AS PROCTIME()
) WITH (
...
);
-------------------- with connector 案例 --------------------
-- kafka
-- 普通Kafka表
CREATE TABLE t1(
`event_time` TIMESTAMP(3) METADATA FROM 'timestamp',
--列名和元数据名一致可以省略 FROM 'xxxx', VIRTUAL表示只读
`partition` BIGINT METADATA VIRTUAL,
`offset` BIGINT METADATA VIRTUAL,
id int,
ts bigint ,
vc int
) WITH (
'connector' = 'kafka',
'properties.bootstrap.servers' = 'hadoop103:9092',
'properties.group.id' = 'yuruoli',
-- 'earliest-offset', 'latest-offset', 'group-offsets', 'timestamp' and 'specific-offsets'
'scan.startup.mode' = 'earliest-offset',
-- fixed为flink实现的分区器,一个并行度只写往kafka一个分区
'sink.partitioner' = 'fixed',
'topic' = 'ws1',
'format' = 'json'
)
-- upsert-kafka表
CREATE TABLE t2(
id int ,
sumVC int ,
primary key (id) NOT ENFORCED
)
WITH (
'connector' = 'upsert-kafka',
'properties.bootstrap.servers' = 'hadoop102:9092',
'topic' = 'ws2',
'key.format' = 'json',
'value.format' = 'json'
)
-- file
CREATE TABLE t3( id int, ts bigint , vc int )
WITH (
'connector' = 'filesystem',
'path' = 'hdfs://hadoop102:8020/data/t3',
'format' = 'csv'
)
--jdbc
--MySQL表
CREATE TABLE `ws2` (
`id` int(11) NOT NULL,
`ts` bigint(20) DEFAULT NULL,
`vc` int(11) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8
CREATE TABLE t4(
id INT,
ts BIGINT,
vc INT,
PRIMARY KEY (id) NOT ENFORCED
) WITH (
'connector'='jdbc',
'url' = 'jdbc:mysql://hadoop102:3306/test?useUnicode=true&characterEncoding=UTF-8',
'username' = 'root',
'password' = 'root',
'connection.max-retry-timeout' = '60s',
'table-name' = 'ws2',
'sink.buffer-flush.max-rows' = '500',
'sink.buffer-flush.interval' = '5s',
'sink.max-retries' = '3',
'sink.parallelism' = '1'
);
DQL
Flink SQL查询
流 => 表
- 更新查询:select count(*) from test;
- 追加查询:select * from test;
表 => 流
- 仅追加(Append-only)流
- 撤回(Retract)流
- 更新插入(Upsert)流
DQL
可以先去案例创表。方便测试
select * from source;
INSERT INTO sink select * from source; --启动了一个job
select * from sink;
---------------------- 分组窗口聚合(已被TVF取代) ----------------------
-- SQL中只支持基于时间的窗口,不支持基于元素个数的窗口。
TUMBLE(time_attr, interval) --滚动窗口
HOP(time_attr, interval, interval) --滑动窗口:滑动步长 窗口长度
SESSION(time_attr, interval) --会话窗口
select
id,
TUMBLE_START(et, INTERVAL '5' SECOND) wstart,
TUMBLE_END(et, INTERVAL '5' SECOND) wend,
sum(vc) sumVc
from ws
group by id, TUMBLE(et, INTERVAL '5' SECOND);
-- 窗口表值函数(TVF)聚合
FROM TABLE(
窗口类型(TABLE 表名, DESCRIPTOR(时间字段),INTERVAL时间…)
)
GROUP BY [window_start,][window_end,] --可选
SELECT
window_start,
window_end,
id,
SUM(vc) sumVC
FROM TABLE(
TUMBLE(TABLE ws, DESCRIPTOR(et), INTERVAL '5' SECONDS)
--HOP(TABLE ws, DESCRIPTOR(et), INTERVAL '5' SECONDS , INTERVAL '10' SECONDS)
-- 累积窗口 6s的窗口 每2s计算一次
-- CUMULATE(TABLE ws, DESCRIPTOR(et), INTERVAL '2' SECONDS , INTERVAL '6' SECONDS)
)
GROUP BY window_start, window_end, id;
---------------------- OVER ----------------------
SELECT
agg_func(agg_col) OVER (
[PARTITION BY col1[, col2, ...]]
ORDER BY time_col
range_definition),
...
FROM ...
-- 基于时间
SELECT
id, et, vc,
count(vc) OVER (
PARTITION BY id
ORDER BY et
RANGE BETWEEN INTERVAL '10' SECOND PRECEDING AND CURRENT ROW
) AS cnt
FROM ws
-- window 子句
SELECT
id, et, vc,
count(vc) OVER w AS cnt,
sum(vc) OVER w AS sumVC
FROM ws
WINDOW w AS (
PARTITION BY id
ORDER BY et
RANGE BETWEEN INTERVAL '10' SECOND PRECEDING AND CURRENT ROW
)
-- 基于行数
SELECT
id, et, vc,
avg(vc) OVER w AS avgVC,
count(vc) OVER w AS cnt
FROM ws
WINDOW w AS (
PARTITION BY id
ORDER BY et
ROWS BETWEEN 5 PRECEDING AND CURRENT ROW
)
---------------------- TOP-N ----------------------
SELECT [column_list]
FROM (
SELECT [column_list],
ROW_NUMBER() OVER ([PARTITION BY col1[, col2...]]
ORDER BY col1 [asc|desc][, col2 [asc|desc]...]) AS rownum
FROM table_name)
WHERE rownum <= N [AND conditions]
select
id, et, vc, rownum
from (
select
id, et, vc,
row_number() over(
partition by id
order by vc desc
) as rownum
from ws
)
where rownum<=3;
---------------------- Deduplication去重 ----------------------
SELECT [column_list]
FROM (
SELECT [column_list],
ROW_NUMBER() OVER ([PARTITION BY col1[, col2...]] ORDER BY time_attr [asc|desc]) AS rownum
FROM table_name
)
WHERE rownum = 1
select
id, et, vc,
rownum
from (
select id,et,vc,
row_number() over(
partition by id,vc
order by et
) as rownum
from ws
)
where rownum=1;
-------------------------- JOIN --------------------------
------ 常规联结查询 Regular Join ------
-- 等值内联结(INNER Equi-JOIN)
SELECT ws.id, ws.vc, ws1.id, ws1.vc
FROM ws
INNER JOIN ws1
ON ws.id = ws1.id
-- 等值外联结(OUTER Equi-JOIN)
SELECT ws.id, ws.vc, ws1.id, ws1.vc
FROM ws
LEFT JOIN ws1
ON ws.id = ws1.id
SELECT ws.id, ws.vc, ws1.id, ws1.vc
FROM ws
RIGHT JOIN ws1
ON ws.id = ws1.id
SELECT ws.id, ws.vc, ws1.id, ws1.vc
FROM ws
FULL OUTER JOIN ws1
ON ws.id = ws1.id
------ 间隔联结查询 Interval Join ------
-- 不使用Join关键字
SELECT *
FROM ws,ws1
WHERE ws.id = ws1.id
AND ws.et BETWEEN ws1.et - INTERVAL '2' SECOND AND ws1.et + INTERVAL '2' SECOND
------ 维表联结查询 Lookup Join ------
-- Lookup Join 是流与 Redis,Mysql,HBase 这种外部存储介质的 Join。仅支持处理时间字段。
表A
JOIN 维度表名 FOR SYSTEM_TIME AS OF 表A.proc_time AS 别名
ON xx.字段=别名.字段
CREATE TABLE Customers (
id INT,
name STRING,
country STRING,
zip STRING
) WITH (
'connector' = 'jdbc',
'url' = 'jdbc:mysql://hadoop102:3306/customerdb',
'table-name' = 'customers'
);
-- order表每来一条数据,都会去mysql的customers表查找维度数据
SELECT o.order_id, o.total, c.country, c.zip
FROM Orders AS o
JOIN Customers FOR SYSTEM_TIME AS OF o.proc_time AS c
ON o.customer_id = c.id;
-- order by
-- 实时任务中,Order By 子句中必须要有时间属性字段,并且必须写在最前面且为升序。
SELECT *
FROM ws
ORDER BY et, id desc
-- UNION 和 UNION ALL
-- UNION:将集合合并并且去重
-- UNION ALL:将集合合并,不做去重。
(SELECT id FROM ws) UNION (SELECT id FROM ws1);
(SELECT id FROM ws) UNION ALL (SELECT id FROM ws1);
-- Intersect 和 Intersect All
-- Intersect:交集并且去重
-- Intersect ALL:交集不做去重
(SELECT id FROM ws) INTERSECT (SELECT id FROM ws1);
(SELECT id FROM ws) INTERSECT ALL (SELECT id FROM ws1);
-- Except 和 Except All
-- Except:差集并且去重
-- Except ALL:差集不做去重
(SELECT id FROM ws) EXCEPT (SELECT id FROM ws1);
(SELECT id FROM ws) EXCEPT ALL (SELECT id FROM ws1);
DQL案例
CREATE TABLE source (
id INT,
ts BIGINT,
vc INT
) WITH (
'connector' = 'datagen',
'rows-per-second'='1',
'fields.id.kind'='random',
'fields.id.min'='1',
'fields.id.max'='10',
'fields.ts.kind'='sequence',
'fields.ts.start'='1',
'fields.ts.end'='1000000',
'fields.vc.kind'='random',
'fields.vc.min'='1',
'fields.vc.max'='100'
);
CREATE TABLE sink (
id INT,
ts BIGINT,
vc INT
) WITH (
'connector' = 'print'
);
CREATE TABLE ws (
id INT,
vc INT,
pt AS PROCTIME(), --处理时间
et AS cast(CURRENT_TIMESTAMP as timestamp(3)), --事件时间
WATERMARK FOR et AS et - INTERVAL '5' SECOND --watermark
) WITH (
'connector' = 'datagen',
'rows-per-second' = '10',
'fields.id.min' = '1',
'fields.id.max' = '3',
'fields.vc.min' = '1',
'fields.vc.max' = '100'
);
CREATE TABLE ws1 (
id INT,
vc INT,
pt AS PROCTIME(), --处理时间
et AS cast(CURRENT_TIMESTAMP as timestamp(3)), --事件时间
WATERMARK FOR et AS et - INTERVAL '0.001' SECOND --watermark
) WITH (
'connector' = 'datagen',
'rows-per-second' = '1',
'fields.id.min' = '3',
'fields.id.max' = '5',
'fields.vc.min' = '1',
'fields.vc.max' = '100'
);
flink-conf.yaml
#修改
# JobManager节点地址.
jobmanager.rpc.address: ruoli-arch
jobmanager.bind-host: 0.0.0.0
rest.address: ruoli-arch
rest.bind-address: 0.0.0.0
# TaskManager节点地址.需要配置为当前机器名
taskmanager.bind-host: 0.0.0.0
taskmanager.host: ruoli-arch
taskmanager.numberOfTaskSlots: 8
classloader.check-leaked-classloader: false
############### 历史服务器 ##############
jobmanager.archive.fs.dir: hdfs://ruoli-arch:8020/flink/history
historyserver.web.address: ruoli-arch
historyserver.web.port: 8082
historyserver.archive.fs.dir: hdfs://ruoli-arch:8020/flink/history
historyserver.archive.fs.refresh-interval: 5000
############### 状态后端 ##############
state.backend.type: hashmap
state.checkpoints.dir: hdfs://ruoli-arch:8020/flink/checkpoints
state.savepoints.dir: hdfs://ruoli-arch:8020/flink/savepoints
workers
ruoli-arch
masters
ruoli-arch:8081
Hbase
安装
# 1.解压安装
tar -zxvf hbase-2.4.11-bin.tar.gz
mv hbase-2.4.11 /opt/module/hbase
# 2.环境变量
sudo vim /etc/environment
HBASE_HOME=/opt/module/hbase
sudo vim /etc/profile.d/myenv.sh
# HBase
export HBASE_HOME=/opt/module/hbase
export PATH=$PATH:$HBASE_HOME/bin
source /etc/profile
# 3.配置文件
cd /opt/module/hbase/
vim conf/hbase-env.sh
vim conf/hbase-site.xml
vim conf/regionservers
# 日志包冲突
mv $HBASE_HOME/lib/client-facing-thirdparty/slf4j-reload4j-1.7.33.jar $HBASE_HOME/lib/client-facing-thirdparty/slf4j-reload4j-1.7.33.jar.bak
常用命令
#启动
start-hbase.sh
#关闭
stop-hbase.sh
################## HBase客户端命令 #################
hbase shell
help #列出帮助信息
# 列出所有命名空间
list_namespace
# 创建命名空间
create_namespace 'gmall'
# 列出所有表
list
# 列出命名空间为gmall的所有表
list_namespace_tables 'gmall'
# 展示数据
scan 'gmall:student'
# 删除表需要先disable
disable 'gmall:student'
drop 'gmall:student'
hbase-env.sh
export HBASE_MANAGES_ZK=false
hbase-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>hbase.zookeeper.quorum</name>
<value>ruoli-arch</value>
</property>
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>2181</value>
</property>
<property>
<name>hbase.rootdir</name>
<value>hdfs://ruoli-arch:8020/hbase</value>
</property>
<property>
<name>hbase.wal.provider</name>
<value>filesystem</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.tmp.dir</name>
<value>./tmp</value>
</property>
<property>
<name>hbase.unsafe.stream.capability.enforce</name>
<value>false</value>
</property>
</configuration>
regionservers
ruoli-arch
Redis
安装
sudo pacman -S redis
sudo systemctl start redis
sudo vim /etc/redis/redis.conf #默认配置文件
常用命令
redis-cli
127.0.0.1:6379>
ping
shutdown
redis 启停
sudo systemctl start redis
sudo systemctl stop redis
Doris
安装
# 永久(貌似无效)
sudo vim /etc/sysctl.conf
vm.max_map_count=2000000
# 临时
sudo sysctl -w vm.max_map_count=2000000
cd doris/x64_avx2
chmod 777 install.sh
chmod 777 start.sh
vim install.sh
./install.sh #安装
cp -r start.sh ~/bin/doris.sh
vim ~/bin/doris.sh
# 临时修改,稍后注释掉
vim /etc/hosts
#127.0.0.1 ruoli-arch
doris.sh start # 启动
doris.sh stop # 停止
# 登录用户为root root。即内置mysql登录用户
install.sh 安装脚本
脚本与安装包目录一致
#!/bin/bash
# install.sh — wipe any previous Doris deployment on each host, then unpack
# and configure FE/BE from the tarballs in the current directory.
# NOTE(review): the tar/mv commands rely on glob patterns (*fe*, *be*,
# *dependencies*) matching exactly one archive each — run from a clean
# directory containing only the Doris release tarballs.
#hosts=(hadoop102 hadoop103 hadoop104)
hosts=(ruoli-arch)
# Remove any previous Doris installation before installing.
for host in ${hosts[*]} ; do
ssh $host " /opt/module/doris/be/bin/stop_be.sh ;\
/opt/module/doris/fe/bin/stop_fe.sh ;\
rm -rf /opt/module/doris ; \
sudo rm -rf /etc/doris
"
done
# Create the Doris install directory.
mkdir -p /opt/module/doris
# Unpack the FE (frontend).
tar -xvf *fe* -C /opt/module/doris
mv /opt/module/doris/*fe* /opt/module/doris/fe
# Unpack the BE (backend).
tar -xvf *be* -C /opt/module/doris
mv /opt/module/doris/*be* /opt/module/doris/be
# Unpack the dependencies bundle.
tar -xvf *dependencies* -C /opt/module/doris
mv /opt/module/doris/*dependencies* /opt/module/doris/dependencies
cp /opt/module/doris/dependencies/java-udf-jar-with-dependencies.jar /opt/module/doris/be/lib
# Configure the FE (bind network + non-default HTTP port 7030).
echo 'priority_networks = 192.168.10.0/24' >> /opt/module/doris/fe/conf/fe.conf
echo 'http_port = 7030' >> /opt/module/doris/fe/conf/fe.conf
# Configure the BE (bind network, webserver port 7040, 10% memory limit).
echo 'priority_networks = 192.168.10.0/24' >> /opt/module/doris/be/conf/be.conf
echo 'webserver_port = 7040' >> /opt/module/doris/be/conf/be.conf
echo 'mem_limit = 10%' >> /opt/module/doris/be/conf/be.conf
#rsync -rvl /opt/module/doris ruoli@hadoop103:/opt/module
#rsync -rvl /opt/module/doris ruoli@hadoop104:/opt/module
doris.sh 启停
#!/bin/bash
# doris.sh — start/stop the Doris cluster (FE + BE).
# First start (marker file /etc/doris absent): boot the FE, wait for it to
# come up, set the root password, register the BE via the FE's MySQL
# protocol (port 9030), start the BE, then create the marker file.
# Subsequent starts just launch FE and BE on each host.
source /etc/profile
# Visual countdown: $1 = seconds, $2 = message suffix (printed in red,
# overwriting the same terminal line each second).
function my_sleep(){
for(( sec=$1;sec>=0;sec-- ))
do
echo -ne "\e[1;31m $sec $2\e[0m"
echo -ne "\r"
sleep 1
done
echo ''
}
case $1 in
"start")
if [ ! -f "/etc/doris" ]; then
echo "第一次启动 doris 集群, 时间会久一些..."
echo "在 ruoli-arch 启动 fe"
/opt/module/doris/fe/bin/start_fe.sh --daemon
# Set the root login password to 'root' once the FE is reachable.
my_sleep 30 "秒后增加 1 个be 节点"
mysql -h ruoli-arch -uroot -P 9030 2>/dev/null -e "SET PASSWORD FOR 'root' = PASSWORD('root');"
mysql -h ruoli-arch -uroot -P 9030 -proot 2>/dev/null -e "ALTER SYSTEM ADD BACKEND 'ruoli-arch:9050';"
for host in ruoli-arch ; do
echo "在 $host 启动 be"
ssh $host "/opt/module/doris/be/bin/start_be.sh --daemon"
done
# Marker file: presence means the one-time bootstrap has been done.
sudo touch /etc/doris
else
echo "不是第一次启动 doris 集群, 正常启动..."
for host in ruoli-arch ; do
echo "========== 在 $host 上启动 fe ========="
ssh $host "source /etc/profile; /opt/module/doris/fe/bin/start_fe.sh --daemon"
done
for host in ruoli-arch ; do
echo "========== 在 $host 上启动 be ========="
ssh $host "source /etc/profile; /opt/module/doris/be/bin/start_be.sh --daemon"
done
fi
;;
"stop")
for host in ruoli-arch ; do
echo "========== 在 $host 上停止 fe ========="
ssh $host "source /etc/profile; /opt/module/doris/fe/bin/stop_fe.sh"
done
for host in ruoli-arch ; do
echo "========== 在 $host 上停止 be ========="
ssh $host "source /etc/profile; /opt/module/doris/be/bin/stop_be.sh"
done
;;
*)
echo " start 启动doris集群"
echo " stop 停止stop集群"
;;
esac
常用命令
#启停fe
/opt/module/doris/fe/bin/start_fe.sh --daemon
/opt/module/doris/fe/bin/stop_fe.sh
#连接mysql
mysql -h ruoli-arch -P 9030 -uroot -p
SET PASSWORD FOR 'root' = PASSWORD('root');
#添加be
ALTER SYSTEM ADD BACKEND "ruoli-arch:9050";
#查看be状态
SHOW PROC '/backends'\G
# fe状态
SHOW PROC '/frontends'\G
#启停be
/opt/module/doris/be/bin/start_be.sh --daemon
/opt/module/doris/be/bin/stop_be.sh
DolphinScheduler
安装
tar -zxvf apache-dolphinscheduler-2.0.5-bin.tar.gz
mysql -uroot -proot
# mysql
CREATE DATABASE dolphinscheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
CREATE USER 'dolphinscheduler'@'%' IDENTIFIED BY 'dolphinscheduler';
GRANT ALL PRIVILEGES ON dolphinscheduler.* TO 'dolphinscheduler'@'%';
flush privileges;
quit;
cd apache-dolphinscheduler-2.0.5-bin/
# 安装配置,详见word文档
vim conf/config/install_config.conf
cp ../mysql/mysql-connector-j-8.0.31.jar lib
#初始化数据库
script/create-dolphinscheduler.sh
hdp.sh start
zk.sh start
#一键部署并启动DolphinScheduler
./install.sh
# UI地址为 http://ruoli-arch:12345/dolphinscheduler
# 用户名:admin 密码:dolphinscheduler123
常用命令
## 单机模式 ##
bin/dolphinscheduler-daemon.sh start standalone-server
bin/dolphinscheduler-daemon.sh stop standalone-server
## 集群模式 ##
1)一键启停所有服务
bin/start-all.sh
bin/stop-all.sh
- 注意同Hadoop的启停脚本进行区分。
2)启停 Master
bin/dolphinscheduler-daemon.sh start master-server
bin/dolphinscheduler-daemon.sh stop master-server
3)启停 Worker
bin/dolphinscheduler-daemon.sh start worker-server
bin/dolphinscheduler-daemon.sh stop worker-server
4)启停 Api
bin/dolphinscheduler-daemon.sh start api-server
bin/dolphinscheduler-daemon.sh stop api-server
5)启停 Logger
bin/dolphinscheduler-daemon.sh start logger-server
bin/dolphinscheduler-daemon.sh stop logger-server
6)启停 Alert
bin/dolphinscheduler-daemon.sh start alert-server
bin/dolphinscheduler-daemon.sh stop alert-server
install_config.conf
ips="ruoli-arch"
# 将要部署任一 DolphinScheduler 服务的服务器主机名或 ip 列表
masters="ruoli-arch"
# master 所在主机名列表,必须是 ips 的子集
workers="ruoli-arch:default"
# worker主机名及队列,此处的 ip 必须在 ips 列表中
alertServer="ruoli-arch"
# 告警服务所在服务器主机名
apiServers="ruoli-arch"
# api服务所在服务器主机名
# pythonGatewayServers="ds1"
# 不需要的配置项,可以保留默认值,也可以用 # 注释
installPath="/opt/module/dolphinscheduler"
# DS 安装路径,如果不存在会创建
deployUser="ruoli"
# 部署用户,任务执行服务是以 sudo -u {linux-user} 切换不同 Linux 用户的方式来实现多租户运行作业,因此该用户必须有免密的 sudo 权限。
dataBasedirPath="/tmp/dolphinscheduler"
# 前文配置的所有节点的本地数据存储路径,需要确保部署用户拥有该目录的读写权限
javaHome="/opt/module/jdk"
# JAVA_HOME 路径
DATABASE_TYPE=${DATABASE_TYPE:-"mysql"}
# 数据库类型
SPRING_DATASOURCE_URL=${SPRING_DATASOURCE_URL:-"jdbc:mysql://ruoli-arch:3306/dolphinscheduler?useUnicode=true&allowPublicKeyRetrieval=true&characterEncoding=UTF-8"}
# 数据库 URL
SPRING_DATASOURCE_USERNAME=${SPRING_DATASOURCE_USERNAME:-"dolphinscheduler"}
# 数据库用户名
SPRING_DATASOURCE_PASSWORD=${SPRING_DATASOURCE_PASSWORD:-"dolphinscheduler"}
# 数据库密码
registryPluginName="zookeeper"
# 注册中心插件名称,DS 通过注册中心来确保集群配置的一致性
registryServers="ruoli-arch:2181"
# 注册中心地址,即 Zookeeper 集群的地址
registryNamespace="dolphinscheduler"
# DS 在 Zookeeper 的结点名称
resourceStorageType="HDFS"
# 资源存储类型
resourceUploadPath="/dolphinscheduler"
# 资源上传路径
defaultFS="hdfs://ruoli-arch:8020"
# 默认文件系统
resourceManagerHttpAddressPort="8088"
# yarn RM http 访问端口
yarnHaIps=
# Yarn RM 高可用 ip,若未启用 RM 高可用,则将该值置空
singleYarnIp="ruoli-arch"
# Yarn RM 主机名,若启用了 HA 或未启用 RM,保留默认值
hdfsRootUser="ruoli"
# 拥有 HDFS 根目录操作权限的用户