查询命令对照表
打印/etc/passwd中第10行的内容
sed -n '10p' /etc/passwd
打印/etc/passwd中第8行开始,到第15行结束的内容
sed -n '8,15p' /etc/passwd
打印/etc/passwd中从第8行开始,然后+5行结束的内容
sed -n '8,+5p' /etc/passwd
打印/etc/passwd中开头匹配nginx字符的内容
sed -n '/^nginx/p' /etc/passwd
打印/etc/passwd中开头为root的行开始,到ftp开头的行结束的内容
sed -n '/^root/,/^ftp/p' /etc/passwd
打印/etc/passwd中第8行开始,到包含有/sbin/nologin的内容结束内容
sed -n '8,/\/sbin\/nologin/p' /etc/passwd
打印/etc/passwd中第一个包含/bin/bash内容的行开始,到第5行结束的内容
sed -n '/\/bin\/bash/,5p' /etc/passwd
练习脚本
需求描述:处理一个类似MYSQL配置文件my.cnf的文本
示例如下:
编写脚本实现以下功能:输出文件有几个段,并且针对每个段可以统计配置参数总个数
预想输出结果:
1: client 2
2: server 12
3: mysqld 12
4: mysqld_safe 7
5: embedded 8
6: mysqld-5.5 9
[client]
port= 3306
socket= /tmp/mysql.sock
# The MySQL server
[mysqld]
user=mysql
port= 3306
bind-address = 0.0.0.0
socket= /tmp/mysql.sock
datadir= /data/mysql_data
pid-file = /data/mysql_data/mysql.pid
basedir= /usr/local/mysql
tmpdir= /tmp
#此开关默认为NULL,即不允许导入导出。
#secure-file-priv = /opt/upload
#-------------------------------gobal variables------------------------#
#默认关闭,涉及到timestamp类型的列自动更新的问题
explicit_defaults_for_timestamp= 1
###transaction_write_set_extraction=XXHASH64 #以便在server收集写集合的同时将其记录到二进制日志。并且是行更改后的唯一标识此标识将用于检测冲突。
###loose-group_replication_group_name = 'ce9be252-2b71-11e6-b8f4-00212889f856'#组的名字可以随便起,但不能用主机的GTID
###loose-group_replication_start_on_boot =off #为了避免每次启动自动引导具有相同名称的第二个组,所以设置为OFF。
###loose-group_replication_bootstrap_group =off #同上
###loose-group_replication_local_address = '192.168.1.88:33071'#写自己主机所在IP
###loose-group_replication_group_seeds ='192.168.1.88:33071,192.168.1.89:33071,192.168.1.90:33071'
###loose-group_replication_single_primary_mode =off #关闭单主模式的参数
###loose-group_replication_enforce_update_everywhere_checks =on #开启多主模式的参数
skip-external-locking
skip-name-resolve
skip-ssl
#memory is 16G
key_buffer_size=32M
table_open_cache= 2048
table_definition_cache= 1024
sort_buffer_size=4M
net_buffer_length=32K
read_buffer_size=4M
read_rnd_buffer_size=16M
open_files_limit= 10000
thread_cache_size= 400
query_cache_type= 0
query_cache_size=32M
max_write_lock_count= 300
wait_timeout= 120
interactive_timeout= 120
net_read_timeout= 120
net_write_timeout= 120
max_connections= 800
max_user_connections= 750
max_connect_errors= 10000
max_allowed_packet=512M
back_log= 2048
log_timestamps=system
performance_schema=OFF
character_set_server=utf8mb4
##当链接数耗尽后,通过设置别用端口,让root可以登录
extra_max_connections= 2
extra_port= 13306
###让mysql不区分大小写敏感
lower_case_table_names= 1
#explicit_defaults_for_timestamp= 1
#----------------Myisam--------------------------------#
myisam_recover_options=DEFAULT
bulk_insert_buffer_size=64M
myisam_sort_buffer_size=128M
myisam_max_sort_file_size=512M
myisam_repair_threads= 1
#if the query is exec time great than 2 seconds, the query will log to slow log if slowlog is enabled.
long_query_time= 1
slow_query_log=On
slow-query-log-file = /data/mysql_data/slow.log
show_compatibility_56=on
# Don't listen on a TCP/IP port at all. This can be a security enhancement,
# if all processes that need to connect to mysqld run on the same host.
# All interaction with mysqld must be made via Unix sockets or named pipes.
# Note that using this option without enabling named pipes on Windows
# (via the "enable-named-pipe" option) will render mysqld useless!
#skip-networking
#----------------------------MySQL Log----------------#
# Replication Master Server (default)
# binary logging is required for replication
log-bin = mysql-bin
expire_logs_days= 20
log_error=error.log
log_error_verbosity= 1
log_warnings= 1
# binary logging format-mixed recommended
binlog_format=row
relay-log = mysql-relay-bin
relay-log-index =relay.index
# required unique id between 1 and 2^32 - 1
server-id = 1
#sql-mode = "NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION"
#sql-mode = "NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION"
sync_binlog= 1
log_slave_updates= 1
#binlog_checksum=NONE
#------------------------------replicate--------------#
#排除不需要同步的库表
#replicate-ignore-db =mysql
#replicate-ignore-db =sys
replicate-ignore-db =information_schema
replicate-ignore-db =performance_schema
replicate-ignore-db =undolog
replicate-ignore-db =for_nagios
replicate-ignore-db =undolog
#replicate_wild_ignore_table= mysql.%
#replicate_wild_ignore_table= sys.%
replicate_wild_ignore_table= information_schema.%
replicate_wild_ignore_table= performance_schema.%
replicate_wild_ignore_table= undolog.%
replicate_wild_ignore_table= for_nagios.%
replicate_wild_ignore_table= undolog.%
#主主复制需要开启
#auto_increment_offset= 2
#auto_increment_increment= 2
#GTID模式复制,需要开启如下
gtid_mode=ON
enforce_gtid_consistency=ON
#并发复制
slave-parallel-type =LOGICAL_CLOCK
slave-parallel-workers = 2
master_info_repository=TABLE
relay_log_info_repository=TABLE
relay_log_recovery=ON
#跳过slave进程启动参数
skip-slave-start
#如果实例为从库,则需要设置为on
#read_only=on
#skip-grant-tables
#--------------------------------------------------------innoDB------------#
innodb_rollback_on_timeout
# Uncomment the following if you are using InnoDB tables
innodb_data_home_dir= /data/mysql_data
innodb_data_file_path=ibdata1:1G;ibdata2:1G:autoextend
innodb_log_group_home_dir= /data/mysql_data
innodb_undo_directory= /data/mysql_data/undolog/
innodb_undo_logs= 128
innodb_undo_tablespaces= 3
# You can set .._buffer_pool_size up to 50 - 80 %
#innodb_use_sys_malloc= 0
#innodb_page_size= 8192
innodb_buffer_pool_size=3G
innodb_buffer_pool_instances= 1
#innodb_additional_mem_pool_size=8M
# Set .._log_file_size to 25 % of buffer pool size
innodb_log_file_size=256M
innodb_log_buffer_size=64M
innodb_log_files_in_group= 3
#每次事务提交时MySQL都会把log buffer的数据写入log file,并且flush(刷到磁盘)中去,该模式为系统默认
innodb_flush_log_at_trx_commit= 1
innodb_lock_wait_timeout= 120
#启用独立表空间
innodb_file_per_table= 1
#CPU是1颗8核的,那么可以设置
innodb_read_io_threads= 8
innodb_write_io_threads= 8
#默认是0,则表示没有并发线程数限制,所有请求都会直接请求线程执行,当并发用户线程数量小于64,建议设置innodb_thread_concurrency=0,在大多数情况下,最佳的值是小于并接近虚拟CPU的个数
innodb_thread_concurrency= 12
innodb_max_dirty_pages_pct= 75
innodb_flush_method=O_DIRECT
innodb_purge_threads= 10
innodb_large_prefix= 1
#参数待测试
#innodb_io_capacity= 20000
#innodb_io_capacity_max= 40000
#根据CPU核心数来设定
thread_pool_size= 8
#thread_handling= pool-of-threads
thread_pool_oversubscribe= 24
#thread_handling= pool-of-threads
thread_pool_stall_limit= 100
thread_pool_max_threads= 30
#解释: 在启动时把热数据加载到内存。
innodb_buffer_pool_load_at_startup= 1
#解释: 在关闭时把热数据dump到本地磁盘
innodb_buffer_pool_dump_at_shutdown= 1
##默认是8M, 如果一次insert数据量比较多的话, 可以适当增加
innodb_autoextend_increment= 32

[mysqldump]
quick
max_allowed_packet=512M
[mysql]
no-auto-rehash
# Remove the next comment character if you are not familiar with SQL
#safe-updates
[myisamchk]
#key_buffer_size=20M
#sort_buffer_size=20M
key_buffer_size=200M
sort_buffer_size=200M
read_buffer=2M
write_buffer=2M
[mysqld_safe]
#控制文件打开数的 show global status like 'open%file%'; 比较合适的设置: Open_files / open_files_limit * 100% <= 75%
open-files-limit = 65535
log-error = /data/mysql_data/error.log
[mysqlhotcopy]
interactive-timeout
my.cnf
根据需求需要编写 2 个函数
function get_all_segment 获取所有配置项的段名
function count_items_in_segment 获取所有段名的配置个数
get_all_segment 函数实现思路
找到所有段名
sed -n '/\[.*\]/p' my.cnf | sed -e 's/\[//g' -e 's/\]//g'
.代表任意一个字符,*代表0个或多个字符,.*合起来就是任意字符(包括空) ,-e 可以连着使用多次
mysql_process.sh
#!/bin/bash
#
# Demo: print every section name ([xxx] header) found in a my.cnf-style file.
FILE_NAME=/home/roo/Desktop/shell_code/day6/my.cnf

# 获取所有的片段 — print all section names, one per line.
# A single anchored sed both selects full "[name]" header lines and strips
# the brackets.  (The original global 's/\[//g; s/\]//g' would also corrupt
# any line that merely *contains* brackets, e.g. a value with "[x]" in it.)
function get_all_segments
{
    sed -n 's/^\[\([^]]*\)\]$/\1/p' "$FILE_NAME"
}

for item in $(get_all_segments)
do
    echo "$item"
done
count_items_in_segment 函数实现思路
找到指定段中的内容
找到 [client] 段 过滤掉以 # 开头的行和空行 然后过滤掉 [.*] 剩下的就是中间的内容了
sed -n '/\[client\]/,/\[.*\]/p' my.cnf |grep -v '^#' | grep -v '^$' | grep -v '\[.*\]'
统计函数
sed -n '/\[client\]/,/\[.*\]/p' my.cnf |grep -v '^#' | grep -v '^$' | grep -v '\[.*\]' | wc -l
完整的脚本
mysql_process.sh
#!/bin/bash
#
# mysql_process.sh — report every section of a my.cnf-style file together
# with the number of configuration items it contains.
FILE_NAME=/home/roo/Desktop/shell_code/day6/my.cnf

# 获取所有的片段 — print all section names, one per line.
# The anchored substitution both matches full "[name]" header lines and
# strips the brackets in one pass; a global bracket-strip would also mangle
# lines that merely contain brackets somewhere in a value.
function get_all_segments
{
    sed -n 's/^\[\([^]]*\)\]$/\1/p' "$FILE_NAME"
}
# 统计单个片段 配置行数 — count the configuration lines inside one section.
#   $1 - section name (as produced by get_all_segments)
# The sed range runs from the "[$1]" header up to the next "[xxx]" header
# (or to EOF for the last section); comment lines, blank lines and the
# header lines themselves are filtered out, and grep -c counts what is left
# (replaces the extra `| wc -l` process).
function count_items_in_segment
{
    local items
    items=$(sed -n "/\[$1\]/,/\[.*\]/p" "$FILE_NAME" \
        | grep -v '^#' \
        | grep -v '^$' \
        | grep -c -v '\[.*\]')
    echo "$items"
}
# Walk every section and print "<index>: <name> <item count>".
number=0
for segment in $(get_all_segments)
do
    number=$((number + 1))   # arithmetic expansion instead of forking `expr`
    # 将每个片段当做参数传进来计算 — pass each section name in as the argument.
    items_count=$(count_items_in_segment "$segment")
    echo "$number: $segment $items_count"
done