330pics shell scripts_sixth

***@@@self-copy.sh@@@!!!************************************************************************************
#!/bin/bash
# self-copy.sh

# This script copies itself.

file_subscript=copy

dd if=$0 of=$0.$file_subscript 2>/dev/null
# Suppress messages from dd:   ^^^^^^^^^^^

exit $?
%%%&&&self-copy.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@self-destruct.sh@@@!!!************************************************************************************
#!/bin/bash
# self-destruct.sh

kill $$  # Script kills its own process here.
         # Recall that "$$" is the script's PID.

echo "This line will not echo."
# Instead, the shell sends a "Terminated" message to stdout.

exit 0

#  After the script terminates its own process,
#+ what exit status does it return?
#
# sh self-destruct.sh
# echo $?
# 143
#
# 143 = 128 + 15
#             termination signal (SIGTERM)
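#
#  A quick way to confirm this mapping (a sketch, using the Bash builtin 'kill'):
#      kill -l 15     # prints TERM
#      kill -l 143    # also prints TERM  (128 + signal number)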
%%%&&&self-destruct.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@self-document.sh@@@!!!************************************************************************************
#!/bin/bash
# self-document.sh: self-documenting script
# Modification of "colm.sh".

DOC_REQUEST=70

if [ "$1" = "-h"  -o "$1" = "--help" ]     # 请求帮助.
then
  echo; echo "Usage: $0 [directory-name]"; echo
  sed --silent -e '/DOCUMENTATIONXX$/,/^DOCUMENTATIONXX$/p' "$0" |
  sed -e '/DOCUMENTATIONXX$/d'; exit $DOC_REQUEST; fi
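
#  Invoking the script with -h or --help prints the usage line plus the
#+ here-document block below as the help text, e.g.:
#      sh self-document.sh --help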


: <<DOCUMENTATIONXX
List the statistics of a specified directory in tabular format.
---------------------------------------------------------------
The command line parameter gives the directory to be listed.
If no directory specified or directory specified cannot be read,
then list the current working directory.

DOCUMENTATIONXX

if [ -z "$1" -o ! -r "$1" ]
then
  directory=.
else
  directory="$1"
fi 

echo "Listing of "$directory":"; echo
(printf "PERMISSIONS LINKS OWNER GROUP SIZE MONTH DAY HH:MM PROG-NAME/n" /
; ls -l "$directory" | sed 1d) | column -t

exit 0
%%%&&&self-document.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@self-exec.sh@@@!!!************************************************************************************
#!/bin/bash
# self-exec.sh

echo

echo "This line appears ONCE in the script, yet it keeps echoing."
echo "The PID of this instance of the script is still $$."
#     The above line demonstrates that a subshell is not forked off.

echo "==================== Hit Ctl-C to exit ===================="

sleep 1

exec $0   #  Spawns another instance of this same script,
          #+ replacing the previous instance.

echo "This line will never echo!"  # 为什么不是这样?

exit 0
%%%&&&self-exec.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@self-mailer.sh@@@!!!************************************************************************************
#!/bin/sh
# self-mailer.sh: Self-mailing script.

adr=${1:-`whoami`}     # Default to current user, if not specified otherwise.
#  Typing 'self-mailer.sh wiseguy@superdupergenius.com'
#+ sends this script to that addressee.
#  Just 'self-mailer.sh' (no argument) sends the script
#+ to the person invoking it, for example, bozo@localhost.localdomain.
#
#  For more on the ${parameter:-default} construct,
#+ see the "Parameter Substitution" section
#+ of the "Variables Revisited" chapter.

# ============================================================================
  cat $0 | mail -s "Script \"`basename $0`\" has mailed itself to you." "$adr"
# ============================================================================

# --------------------------------------------
#  Greetings from the self-mailing script.
#  A mischievous person has run this script,
#+ which has caused it to mail itself to you.
#  Apparently, some people really have nothing better
#+ to do with their time.
# --------------------------------------------

echo "At `date`, script /"`basename $0`/" mailed to "$adr"."

exit 0
%%%&&&self-mailer.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@self-source.sh@@@!!!************************************************************************************
#!/bin/bash
# self-source.sh: a script sourcing itself "recursively."
# From "Stupid Script Tricks," Volume II.

MAXPASSCNT=100    # Maximum number of execution passes.

echo -n  "$pass_count  "
#  At the first execution pass, this just echoes two blank spaces,
#+ since $pass_count is not yet initialized.

let "pass_count += 1"
#  假定这个未初始化的变量$pass_count
#+ 可以在第一次运行的时候+1.
#  这句可以正常工作在Bash和pdksh下, 但是
#+ 它依赖于不可移植(并且可能危险)的行为.
#  更好的方法是在使用$pass_count之前,先把这个变量初始化为0.

while [ "$pass_count" -le $MAXPASSCNT ]
do
  . $0   # Script "sources" itself, rather than invoking itself.
         # ./$0 (true recursion) doesn't work here. Why not?
done 

#  What occurs here is not actually recursion,
#+ since the script effectively "expands" itself, i.e.,
#+ generates a new section of code
#+ with each pass through the 'source' line.
#
#  Of course, the script interprets each newly 'sourced' "#!" line
#+ as a comment, and not as the start of a new script.

echo

exit 0   # The net effect is counting from 1 to 100.
         # Very impressive.

# Exercise:
# --------
# Write a script that uses this trick to actually do something useful.
%%%&&&self-source.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@setnew-passwd.sh@@@!!!************************************************************************************
#!/bin/bash
#  setnew-password.sh: For demonstration purposes only.
#                      Not a good idea to actually run this script.
#  This script must be run as root.

ROOT_UID=0         # Root has $UID 0.
E_WRONG_USER=65    # Not root?

E_NOSUCHUSER=70
SUCCESS=0


if [ "$UID" -ne "$ROOT_UID" ]
then
  echo; echo "Only root can run this script."; echo
  exit $E_WRONG_USER
else
  echo
  echo "You should know better than to run this script, root."
  echo "Even root users get the blues... "
  echo
fi 


username=bozo
NEWPASSWORD=security_violation

# Check if bozo is here.
grep -q "$username" /etc/passwd
if [ $? -ne $SUCCESS ]
then
  echo "User $username does not exist."
  echo "No password changed."
  exit $E_NOSUCHUSER
fi 

echo "$NEWPASSWORD" | passwd --stdin "$username"
#  The '--stdin' option to 'passwd' permits
#+ getting a new password from stdin (or a pipe).

echo; echo "User $username's password changed!"

# Using the 'passwd' command in a script is dangerous.

exit 0
%%%&&&setnew-passwd.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@set-pos.sh@@@!!!************************************************************************************
#!/bin/bash

variable="one two three four five"

set -- $variable
# Sets the positional parameters to the contents of "$variable".

first_param=$1
second_param=$2
shift; shift        # Shift out the first two positional parameters.
remaining_params="$*"

echo
echo "first parameter = $first_param"             # one
echo "second parameter = $second_param"           # two
echo "remaining parameters = $remaining_params"   # three four five

echo; echo

# Again.
set -- $variable
first_param=$1
second_param=$2
echo "first parameter = $first_param"             # one
echo "second parameter = $second_param"           # two

# ======================================================

set --
# Unsets all positional parameters if no variable is specified.

first_param=$1
second_param=$2
echo "first parameter = $first_param"             # (null value)
echo "second parameter = $second_param"           # (null value)

exit 0
%%%&&&set-pos.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@soundex.sh@@@!!!************************************************************************************
#!/bin/bash
# soundex.sh: Calculate "soundex" code for names

# =======================================================
#        Soundex script
#              by
#         Mendel Cooper
#     thegrendel@theriver.com
#       23 January, 2002
#
#   Placed in the Public Domain.
#
# A slightly different version of this script appeared in
#+ Ed Schaefer's July, 2002 "Shell Corner" column
#+ in "Unix Review" on-line,
#+ http://www.unixreview.com/documents/uni1026336632258/
# =======================================================


ARGCOUNT=1                     # Need name as argument.
E_WRONGARGS=70

if [ $# -ne "$ARGCOUNT" ]
then
  echo "Usage: `basename $0` name"
  exit $E_WRONGARGS
fi 


assign_value ()                #  Assigns numerical value
{                              #+ to letters of name.

  val1=bfpv                    # 'b,f,p,v' = 1
  val2=cgjkqsxz                # 'c,g,j,k,q,s,x,z' = 2
  val3=dt                      #  etc.
  val4=l
  val5=mn
  val6=r

# Exceptionally clever use of 'tr' follows.
# Try to figure out what is going on here.

value=$( echo "$1" /
| tr -d wh /
| tr $val1 1 | tr $val2 2 | tr $val3 3 /
| tr $val4 4 | tr $val5 5 | tr $val6 6 /
| tr -s 123456 /
| tr -d aeiouy )

# Assign letter values.
# Remove duplicate numbers, except when separated by vowels.
# Ignore vowels, except as separators, so delete them last.
# Ignore 'w' and 'h', even as separators, so delete them first.
#
# The above command substitution lays more pipe than a plumber <g>.

}  # End of assign_value () function.

input_name="$1"
echo
echo "Name = $input_name"


# Change all characters of name input to lowercase.
# ------------------------------------------------
name=$( echo $input_name | tr A-Z a-z )
# ------------------------------------------------
# Just in case argument to script is mixed case.


# Prefix of soundex code: first letter of name.
# --------------------------------------------


char_pos=0                     # Initialize character position.
prefix0=${name:$char_pos:1}
prefix=`echo $prefix0 | tr a-z A-Z`
                               # Uppercase 1st letter of soundex.

let "char_pos += 1"            # Bump character position to 2nd letter of name.
name1=${name:$char_pos}


# ++++++++++++++++++++++++++ Exception Patch +++++++++++++++++++++++++++++++++
#  Now, we run both the input name and the name shifted one char to the right
#+ through the value-assigning function.
#  If we get the same value out, that means that the first two characters
#+ of the name have the same value assigned, and that one should cancel.
#  However, we also need to test whether the first letter of the name
#+ is a vowel or 'w' or 'h', because otherwise this would bollix things up.

char1=`echo $prefix | tr A-Z a-z`    # First letter of name, lowercased.

assign_value $name
s1=$value
assign_value $name1
s2=$value
assign_value $char1
s3=$value
s3=9$s3                              #  If first letter of name is a vowel
                                     #+ or 'w' or 'h',
                                     #+ then its "value" will be null (unset).
         #+ Therefore, set it to 9, an otherwise
         #+ unused value, which can be tested for.


if [[ "$s1" -ne "$s2" || "$s3" -eq 9 ]]
then
  suffix=$s2
else 
  suffix=${s2:$char_pos}
fi 
# ++++++++++++++++++++++ end Exception Patch +++++++++++++++++++++++++++++++++


padding=000                    # Use at most 3 zeroes to pad.


soun=$prefix$suffix$padding    # Pad with zeroes.

MAXLEN=4                       # Truncate to maximum of 4 chars.
soundex=${soun:0:$MAXLEN}

echo "Soundex = $soundex"

echo

#  The soundex code is a method of indexing and classifying names
#+ by grouping together the ones that sound alike.
#  The soundex code for a given name is the first letter of the name,
#+ followed by a calculated three-number code.
#  Similar sounding names should have almost the same soundex codes.

#   Examples:
#   Smith and Smythe both have a "S-530" soundex.
#   Harrison = H-625
#   Hargison = H-622
#   Harriman = H-655

#  This works out fairly well in practice, but there are numerous anomalies.
#
#
#  The U.S. Census and certain other governmental agencies use soundex,
#  as do genealogical researchers.
#
#  For more information,
#+ see the "National Archives and Records Administration home page",
#+ http://www.nara.gov/genealogy/soundex/soundex.html
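
#  A sample run (illustrative; output follows this script's echo statements):
#      $ sh soundex.sh Smith
#      Name = Smith
#      Soundex = S530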

 

# Exercise:
# --------
# Simplify the "Exception Patch" section of this script.

exit 0
%%%&&&soundex.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@spam-lookup.sh@@@!!!************************************************************************************
#!/bin/bash
# spam-lookup.sh: Look up an abuse contact to report a spammer.
# Thanks, Michael Zick.

# Check for command-line arg.
ARGCOUNT=1
E_WRONGARGS=65
if [ $# -ne "$ARGCOUNT" ]
then
  echo "Usage: `basename $0` domain-name"
  exit $E_WRONGARGS
fi


dig +short $1.contacts.abuse.net -c in -t txt
# Also try:
#     dig +nssearch $1
#     Tries to find "authoritative name servers" and display SOA records.
                                                              
# The following also works:
#     whois -h whois.abuse.net $1
#           ^^ ^^^^^^^^^^^^^^^  Specify host.
#     Can even look up multiple spammers with this, i.e.:
#     whois -h whois.abuse.net $spamdomain1 $spamdomain2 . . .
                                                              
                                                              
#  Exercise:
#  --------
#  Expand the functionality of this script
#+ so that it automatically e-mails a notification
#+ to the contact address of the responsible ISP.
#  Hint: use the "mail" command.

exit $?

# spam-lookup.sh chinatietong.com
#                A known spam domain.

# "crnet_mgr@chinatietong.com"
# "crnet_tec@chinatietong.com"
# "postmaster@chinatietong.com"


#  For a more elaborate version of this script,
#+ see the SpamViz home page, http://www.spamviz.net/index.html.
%%%&&&spam-lookup.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@spawn.sh@@@!!!************************************************************************************
#!/bin/bash
# spawn.sh


PIDS=$(pidof sh $0)  # Process IDs of the various instances of this script.
P_array=( $PIDS )    # Put them in an array (why?).
echo $PIDS           # Show process IDs of parent and child processes.
let "instances = ${#P_array[*]} - 1"  # Count elements, less 1.
                                      # Why subtract 1?
echo "$instances instance(s) of this script running."
echo "[Hit Ctl-C to exit.]"; echo


sleep 1              # Wait.
sh $0                # Play it again, Sam.

exit 0               # Not necessary; the script will never get to here.
                     # Why not?

#  After exiting with a Ctl-C,
#+ do all the spawned instances of the script die?
#  If so, why?

# Note:
# ----
# Be careful not to run this script too long.
# It will eventually eat up most of your system resources.

#  Is having a script spawn multiple instances of itself
#+ an advisable scripting technique?
#  Why or why not?
%%%&&&spawn.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@stack.sh@@@!!!************************************************************************************
#!/bin/bash
# stack.sh: push-down stack simulation

#  Similar to the CPU stack, a push-down stack stores data items
#+ sequentially, but releases them in reverse order, last-in first-out.

BP=100            #  Base Pointer of the stack array.
                  #  Begin at element 100.

SP=$BP            #  Stack Pointer.
                  #  Initialize it to the "base" (bottom) of the stack.

Data=             #  Contents of the current stack location.
                  #  Must be a global variable,
                  #+ because of the limited range of function return values.

declare -a stack


push()            # Push item on stack.
{
if [ -z "$1" ]    # Nothing to push?
then
  return
fi

let "SP -= 1"     # Bump stack pointer.
stack[$SP]=$1

return
}

pop()                    # Pop item off stack.
{
Data=                    # Empty out the data-holding variable.

if [ "$SP" -eq "$BP" ]   # Stack empty?
then
  return
fi                       #  This also keeps SP from getting past 100,
                         #+ i.e., prevents a runaway stack.

Data=${stack[$SP]}
let "SP += 1"            # Bump stack pointer.
return
}

status_report()          # Print the current status.
{
echo "-------------------------------------"
echo "REPORT"
echo "Stack Pointer = $SP"
echo "Just popped /""$Data"/" off the stack."
echo "-------------------------------------"
echo
}


# =======================================================
# Now, for some fun.

echo

# See if you can pop anything off an empty stack.
pop
status_report

echo

push garbage
pop
status_report     # Garbage in, garbage out.

value1=23; push $value1
value2=skidoo; push $value2
value3=FINAL; push $value3

pop              # FINAL
status_report
pop              # skidoo
status_report
pop              # 23
status_report    # Last-in, first-out!

#  Notice how the stack pointer decrements with each push,
#+ and increments with each pop.

echo

exit 0

# =======================================================


# Exercises:
# ---------

# 1)  Modify the "push()" function to permit pushing
#   + multiple items onto the stack with a single function call.

# 2)  Modify the "pop()" function to permit popping
#   + multiple items off the stack with a single function call.

# 3)  Add error checking to the critical functions.
#     That is, have the functions return an error code
#   + depending on whether the operation completed successfully,
#   + and take appropriate action if it did not.

# 4)  Using this script as a starting point,
#   + write a stack-based four-function calculator.
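
#  A possible starting point for exercise 1 (a sketch, not part of the
#+ original script): push any number of items with one call by looping
#+ over the arguments and reusing the existing push() function.
#
#  push_many ()      # Push all arguments, first argument pushed first.
#  {
#    local item
#    for item in "$@"
#    do
#      push "$item"
#    done
#  }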
%%%&&&stack.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@string.sh@@@!!!************************************************************************************
#!/bin/bash

# string.bash --- bash emulation of string(3) library routines
# Author: Noah Friedman <friedman@prep.ai.mit.edu>
# ==>     Used with his kind permission in this document.
# Created: 1992-07-01
# Last modified: 1993-09-29
# Public domain

# Conversion to bash v2 syntax done by Chet Ramey

# Commentary:
# Code:

#:docstring strcat:
# Usage: strcat s1 s2
#
# Strcat appends the value of variable s2 to variable s1.
#
# Example:
#    a="foo"
#    b="bar"
#    strcat a b
#    echo $a
#    => foobar
#
#:end docstring:

###;;;autoload   ==> Autoloading of function commented out.
function strcat ()
{
    local s1_val s2_val

    s1_val=${!1}                        # indirect variable expansion
    s2_val=${!2}
    eval "$1"=/'"${s1_val}${s2_val}"/'
    # ==> eval $1='${s1_val}${s2_val}' avoids problems,
    # ==> if one of the variables contains a single quote.
}

#:docstring strncat:
# Usage: strncat s1 s2 $n
#
# Like strcat, but strncat appends a maximum of n characters from the value
# of variable s2.  It copies fewer if the value of variable s2 is shorter
# than n characters.  Echoes result on stdout.
#
# Example:
#    a=foo
#    b=barbaz
#    strncat a b 3
#    echo $a
#    => foobar
#
#:end docstring:

###;;;autoload
function strncat ()
{
    local s1="$1"
    local s2="$2"
    local -i n="$3"
    local s1_val s2_val

    s1_val=${!s1}                       # ==> indirect variable expansion
    s2_val=${!s2}

    if [ ${#s2_val} -gt ${n} ]; then
       s2_val=${s2_val:0:$n}            # ==> substring extraction
    fi

    eval "$s1"=/'"${s1_val}${s2_val}"/'
    # ==> eval $1='${s1_val}${s2_val}' avoids problems,
    # ==> if one of the variables contains a single quote.
}

#:docstring strcmp:
# Usage: strcmp $s1 $s2
#
# Strcmp compares its arguments and returns an integer less than, equal to,
# or greater than zero, depending on whether string s1 is lexicographically
# less than, equal to, or greater than string s2.
#:end docstring:

###;;;autoload
function strcmp ()
{
    [ "$1" = "$2" ] && return 0

    [ "${1}" '<' "${2}" ] > /dev/null && return -1

    return 1
}

#:docstring strncmp:
# Usage: strncmp $s1 $s2 $n
#
# Like strcmp, but makes the comparison by examining a maximum of n
# characters (n less than or equal to zero yields equality).
#:end docstring:

###;;;autoload
function strncmp ()
{
    if [ -z "${3}" -o "${3}" -le "0" ]; then
       return 0
    fi
  
    if [ ${3} -ge ${#1} -a ${3} -ge ${#2} ]; then
       strcmp "$1" "$2"
       return $?
    else
       s1=${1:0:$3}
       s2=${2:0:$3}
       strcmp $s1 $s2
       return $?
    fi
}

#:docstring strlen:
# Usage: strlen s
#
# Strlen returns the number of characters in string literal s.
#:end docstring:

###;;;autoload
function strlen ()
{
    eval echo "\${#${1}}"
    # ==> Returns the length of the value of the variable
    # ==> whose name is passed as an argument.
}

#:docstring strspn:
# Usage: strspn $s1 $s2
#
# Strspn returns the length of the maximum initial segment of string s1,
# which consists entirely of characters from string s2.
#:end docstring:

###;;;autoload
function strspn ()
{
    # Unsetting IFS allows whitespace to be handled as normal chars.
    local IFS=
    local result="${1%%[!${2}]*}"
 
    echo ${#result}
}

#:docstring strcspn:
# Usage: strcspn $s1 $s2
#
# Strcspn returns the length of the maximum initial segment of string s1,
# which consists entirely of characters not from string s2.
#:end docstring:

###;;;autoload
function strcspn ()
{
    # Unsetting IFS allows whitespace to be handled as normal chars.
    local IFS=
    local result="${1%%[${2}]*}"
 
    echo ${#result}
}

#:docstring strstr:
# Usage: strstr s1 s2
#
# Strstr echoes a substring starting at the first occurrence of string s2 in
# string s1, or nothing if s2 does not occur in the string.  If s2 points to
# a string of zero length, strstr echoes s1.
#:end docstring:

###;;;autoload
function strstr ()
{
    # if s2 points to a string of zero length, strstr echoes s1
    [ ${#2} -eq 0 ] && { echo "$1" ; return 0; }

    # strstr echoes nothing if s2 does not occur in s1
    case "$1" in
    *$2*) ;;
    *) return 1;;
    esac

    # use the pattern matching code to strip off the match and everything
    # following it
    first=${1/$2*/}

    # then strip off the first unmatched portion of the string
    echo "${1##$first}"
}

#:docstring strtok:
# Usage: strtok s1 s2
#
# Strtok considers the string s1 to consist of a sequence of zero or more
# text tokens separated by spans of one or more characters from the
# separator string s2.  The first call (with a non-empty string s1
# specified) echoes a string consisting of the first token on stdout. The
# function keeps track of its position in the string s1 between separate
# calls, so that subsequent calls made with the first argument an empty
# string will work through the string immediately following that token.  In
# this way subsequent calls will work through the string s1 until no tokens
# remain.  The separator string s2 may be different from call to call.
# When no token remains in s1, an empty value is echoed on stdout.
#:end docstring:

###;;;autoload
function strtok ()
{
 :
}

#:docstring strtrunc:
# Usage: strtrunc $n $s1 {$s2} {$...}
#
# Used by many functions like strncmp to truncate arguments for comparison.
# Echoes the first n characters of each string s1 s2 ... on stdout.
#:end docstring:

###;;;autoload
function strtrunc ()
{
    n=$1 ; shift
    for z; do
        echo "${z:0:$n}"
    done
}

# provide string

# string.bash ends here


# ========================================================================== #
# ==> Everything below here added by the document author.

# ==> Suggested use of this script is to delete everything below here,
# ==> and "source" this file into your own scripts.

# strcat
string0=one
string1=two
echo
echo "Testing /"strcat/" function:"
echo "Original /"string0/" = $string0"
echo "/"string1/" = $string1"
strcat string0 string1
echo "New /"string0/" = $string0"
echo

# strlen
echo
echo "Testing /"strlen/" function:"
str=123456789
echo "/"str/" = $str"
echo -n "Length of /"str/" = "
strlen str
echo

 

# Exercise:
# --------
# Add code to test all the other string functions above.


exit 0
%%%&&&string.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@strip-comments.sh@@@!!!************************************************************************************
#!/bin/bash
# strip-comment.sh: Strips out the comments (/* COMMENT */) in a C program.

E_NOARGS=0
E_ARGERROR=66
E_WRONG_FILE_TYPE=67

if [ $# -eq "$E_NOARGS" ]
then
  echo "Usage: `basename $0` C-program-file" >&2 # 将错误消息发到stderr.
  exit $E_ARGERROR
fi 

# Test for correct file type.
type=`file $1 | awk '{ print $2, $3, $4, $5 }'`
# "file $1" echoes the file type . . .
# then awk removes the first field, the filename . . .
# and the result is fed into the variable "type".
correct_type="ASCII C program text"

if [ "$type" != "$correct_type" ]
then
  echo
  echo "This script works on C program files only."
  echo
  exit $E_WRONG_FILE_TYPE
fi 


# Rather cryptic sed script:
#--------
sed '
/^\/\*/d
/.*\*\//d
' $1
#--------
# Easy to understand, once you spend a few hours learning sed fundamentals.
                                                                    
                                                                    
#  The above script fails if a comment appears on the same line as code,
#+ so some code needs to be added to handle that case.
#  That is an important exercise.

#  Of course, the above also deletes non-comment lines containing "*/" --
#+ not an entirely desirable result.

exit 0


# ----------------------------------------------------------------
# The code below will not execute, because of the 'exit 0' above.
                                               
# Stephane Chazelas suggests the following alternative:

usage() {
  echo "Usage: `basename $0` C-program-file" >&2
  exit 1
}

WEIRD=`echo -n -e '\377'`   # or WEIRD=$'\377'
[[ $# -eq 1 ]] || usage
case `file "$1"` in
  *"C program text"*) sed -e "s%//*%${WEIRD}%g;s%/*/%${WEIRD}%g" "$1" /
     | tr '/377/n' '/n/377' /
     | sed -ne 'p;n' /
     | tr -d '/n' | tr '/377' '/n';;
  *) usage;;
esac

#  This is still fooled by cases such as:
#  printf("/*");
#  or
#  /*  /* buggy embedded comment */
#
#  To handle all those special cases (comments in strings, strings
#+ containing \" or \\" ...), the only real option is to write a
#+ full C parser (perhaps with lex or yacc?).

exit 0
%%%&&&strip-comments.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@str-test.sh@@@!!!************************************************************************************
#!/bin/bash
#  str-test.sh: Testing null strings and unquoted strings,
#+ but not strings and sealing wax, not to mention cabbages and kings . . .

# Using   if [ ... ]


# If a string has not been initialized, it has no defined value.
# This state is called "null" (not the same as zero!).

if [ -n $string1 ]    # string1 has not been declared or initialized.
then
  echo "String \"string1\" is not null."
else 
  echo "String \"string1\" is null."
fi 
# Wrong result.
# Shows $string1 as not null, even though it was not initialized.


echo


# Let's try it again.

if [ -n "$string1" ]  # This time, $string1 is quoted.
then
  echo "String \"string1\" is not null."
else 
  echo "String \"string1\" is null."
fi                    # Quote strings within test brackets!


echo


if [ $string1 ]       # This time, $string1 stands unquoted and alone.
then
  echo "String \"string1\" is not null."
else 
  echo "String \"string1\" is null."
fi 
# This works fine.
# The [ ] test operator alone detects whether the string is null.
# However, it is good practice to quote it ("$string1").
#
# As Stephane Chazelas points out,
#    if [ $string1 ]    has one argument, "]"
#    if [ "$string1" ]  has two arguments, the empty "$string1" and "]"

 

echo

 

string1=initialized

if [ $string1 ]       # Again, $string1 stands unquoted and alone.
then
  echo "String \"string1\" is not null."
else 
  echo "String \"string1\" is null."
fi 
# Once again, this gives the correct result.
# Still, it is better to quote it ("$string1"), for the reasons given above.


string1="a = b"

if [ $string1 ]       # 再来, 还是只有$string1, 什么都不加.
then
  echo "String /"string1/" is not null."
else 
  echo "String /"string1/" is null."
fi 
# 未引用的"$string1", 这回给出了错误的结果!

exit 0
# Thanks also to Florian Wisser for this instructive example.
%%%&&&str-test.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@stupid-script-tricks.sh@@@!!!************************************************************************************
#!/bin/bash
# stupid-script-tricks.sh: Don't try this at home, folks.
# From "Stupid Script Tricks," Volume I.


dangerous_variable=`cat /boot/vmlinuz`   # The compressed Linux kernel itself.

echo "string-length of /$dangerous_variable = ${#dangerous_variable}"
# 这个字符串变量的长度是$dangerous_variable = 794151
# (不要使用as 'wc -c /boot/vmlinuz'来计算长度.)

# echo "$dangerous_variable"
# 千万别尝试这么做! 这样将挂起这个脚本.


#  The script author is aware that assigning
#+ a binary file to a variable serves no useful purpose.

exit 0
%%%&&&stupid-script-tricks.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@subshell-pitfalls.sh@@@!!!************************************************************************************
#!/bin/bash
# Pitfalls of variables in a subshell.

outer_variable=outer
echo
echo "outer_variable = $outer_variable"
echo

(
# Beginning of subshell

echo "outer_variable inside subshell = $outer_variable"
inner_variable=inner  # Set
echo "inner_variable inside subshell = $inner_variable"
outer_variable=inner  # Will this modify the global variable?
echo "outer_variable inside subshell = $outer_variable"

# Will 'exporting' the variables make a difference?
#    export inner_variable
#    export outer_variable
# Try it and see.

# End of subshell
)

echo
echo "inner_variable outside subshell = $inner_variable"  # 未设置.
echo "outer_variable outside subshell = $outer_variable"  # 未修改.
echo

exit 0

# What happens if you uncomment the 'export' lines inside the subshell above?
# Does it make a difference?
%%%&&&subshell-pitfalls.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@subshell.sh@@@!!!************************************************************************************
#!/bin/bash
# subshell.sh

echo

echo "Subshell level OUTSIDE subshell = $BASH_SUBSHELL"
# Bash, version 3, adds the new          $BASH_SUBSHELL variable.
echo

outer_variable=Outer

(
echo "Subshell level INSIDE subshell = $BASH_SUBSHELL"
inner_variable=Inner

echo "From subshell, /"inner_variable/" = $inner_variable"
echo "From subshell, /"outer/" = $outer_variable"
)

echo
echo "Subshell level OUTSIDE subshell = $BASH_SUBSHELL"
echo

if [ -z "$inner_variable" ]
then
  echo "inner_variable undefined in main body of shell"
else
  echo "inner_variable defined in main body of shell"
fi

echo "From main body of shell, /"inner_variable/" = $inner_variable"
#  $inner_variable将被作为未初始化的变量, 被显示出来,
#+ 这是因为变量是在子shell里定义的"局部变量".
#  还有补救的办法么?
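#  (One possible workaround, sketched here and not part of the original
#+  script: have the subshell echo the value and capture it in the parent
#+  with command substitution, e.g.  inner_variable=$( ... subshell ... ),
#+  or have the subshell write the value to a temporary file the parent reads.)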

echo

exit 0
%%%&&&subshell.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@substring-extraction.sh@@@!!!************************************************************************************
#!/bin/bash
# substring-extraction.sh

String=23skidoo1
#      012345678    Bash
#      123456789    awk
# Note the different string indexing systems:
# Bash numbers the first character of a string as '0'.
# Awk numbers the first character of a string as '1'.

echo ${String:2:4} # position 3 (0-1-2), 4 characters long
                                         # skid

# The awk equivalent of ${string:pos:length} is substr(string,pos,length).
echo | awk '
{ print substr("'"${String}"'",3,4)      # skid
}
'
#  使用一个空的"echo"通过管道传递给awk一个假的输入,
#+ 这样就不必提供一个文件名给awk.

exit 0
%%%&&&substring-extraction.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@sum-product.sh@@@!!!************************************************************************************
#!/bin/bash
# sum-product.sh
# 可以"返回"超过一个值的函数.

sum_and_product ()   # 计算所有传递进来的参数的总和, 与总乘积.
{
  echo $(( $1 + $2 )) $(( $1 * $2 ))
# 将每个计算出来的结果输出到stdout, 并以空格分隔.
}

echo
echo "Enter first number "
read first

echo
echo "Enter second number "
read second
echo

retval=`sum_and_product $first $second`      # Assigns output of function to a variable.
sum=`echo "$retval" | awk '{print $1}'`      # Assigns first field.
product=`echo "$retval" | awk '{print $2}'`  # Assigns second field.
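#  An alternative way of splitting the two fields, assuming a Bash version
#+ with here-string support (a sketch, not part of the original script):
#      read sum product <<< "$retval"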

echo "$first + $second = $sum"
echo "$first * $second = $product"
echo

exit 0
%%%&&&sum-product.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@symlinks2.sh@@@!!!************************************************************************************
#!/bin/bash
# symlinks.sh: Lists symbolic links in a directory.

OUTFILE=symlinks.list                         # Save-file of symbolic links.

directory=${1-`pwd`}
#  Defaults to the current working directory,
#+ if not otherwise specified.


echo "symbolic links in directory /"$directory/"" > "$OUTFILE"
echo "---------------------------" >> "$OUTFILE"

for file in "$( find $directory -type l )"    # -type l = 符号链接
do
  echo "$file"
done | sort >> "$OUTFILE"                     # 循环的stdout
#           ^^^^^^^^^^^^^                       重定向到一个文件中.

exit 0
%%%&&&symlinks2.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@symlinks.sh@@@!!!************************************************************************************
#!/bin/bash
# symlinks.sh: Lists symbolic links in a directory.


directory=${1-`pwd`}
#  Defaults to the current working directory,
#+ if not otherwise specified.
#  The code block below is equivalent.
# ----------------------------------------------------------
# ARGS=1                 # Expect one command-line argument.
#
# if [ $# -ne "$ARGS" ]  # If not exactly one arg...
# then
#   directory=`pwd`      # current working directory
# else
#   directory=$1
# fi
# ----------------------------------------------------------

echo "symbolic links in directory /"$directory/""

for file in "$( find $directory -type l )"   # -type l = 符号链接
do
  echo "$file"
done | sort                                  # Otherwise the file list is unsorted.
#  Strictly speaking, a loop isn't really necessary here,
#+ since the output of the "find" command is expanded into a single word.
#  However, this way is easy to understand and illustrative.

#  As Dominik 'Aeneas' Schnitzer points out,
#+ failing to quote  $( find $directory -type l )
#+ would split a filename containing whitespace into separate pieces
#+ (filenames are permitted to contain whitespace),
#  and even then only the first field of each argument would get picked up.

exit 0


# Jean Helou proposes the following alternative:

echo "symbolic links in directory \"$directory\""
# Backup of the current IFS value. Handle it with care.
OLDIFS=$IFS
IFS=:

for file in $(find $directory -type l -printf "%p$IFS")
do     #                              ^^^^^^^^^^^^^^^^
       echo "$file"
       done|sort
%%%&&&symlinks.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@tempfile-name.sh@@@!!!************************************************************************************
#!/bin/bash
# tempfile-name.sh:  temp filename generator

BASE_STR=`mcookie`   # 32-character magic cookie.
POS=11               # Arbitrary position in the magic cookie string.
LEN=5                # Get $LEN consecutive characters.

prefix=temp          #  This is, after all, a "temp" file.
                     #  For more "uniqueness," the prefix could also
                     #+ be generated by the same method as the suffix, below.

suffix=${BASE_STR:POS:LEN}
                     # Extract a 5-character string, starting at position 11.

temp_filename=$prefix.$suffix
                     # Construct the filename.

echo "Temp filename = "$temp_filename""

# sh tempfile-name.sh
# Temp filename = temp.e19ea

#  Compare this method of generating "unique" filenames
#+ with the 'date' method used in ex51.sh.

exit 0
%%%&&&tempfile-name.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@test-cgi.sh@@@!!!************************************************************************************
#!/bin/bash
# May have to change the location for your site.
# (On the ISP's servers, Bash may not be in the standard /bin location.)
# Other possible locations: /usr/bin or /usr/local/bin
# Might even try it without any path in the sha-bang line.

# test-cgi.sh
# by Michael Zick
# Used with permission


# Disable filename globbing.
set -f

# Header tells the browser what to expect.
echo Content-type: text/plain
echo

echo CGI/1.0 test script report:
echo

echo environment settings:
set
echo

echo whereis bash?
whereis bash
echo


echo who are we?
echo ${BASH_VERSINFO[*]}
echo

echo argc is $#. argv is "$*".
echo

# CGI/1.0 expected environment variables.

echo SERVER_SOFTWARE = $SERVER_SOFTWARE
echo SERVER_NAME = $SERVER_NAME
echo GATEWAY_INTERFACE = $GATEWAY_INTERFACE
echo SERVER_PROTOCOL = $SERVER_PROTOCOL
echo SERVER_PORT = $SERVER_PORT
echo REQUEST_METHOD = $REQUEST_METHOD
echo HTTP_ACCEPT = "$HTTP_ACCEPT"
echo PATH_INFO = "$PATH_INFO"
echo PATH_TRANSLATED = "$PATH_TRANSLATED"
echo SCRIPT_NAME = "$SCRIPT_NAME"
echo QUERY_STRING = "$QUERY_STRING"
echo REMOTE_HOST = $REMOTE_HOST
echo REMOTE_ADDR = $REMOTE_ADDR
echo REMOTE_USER = $REMOTE_USER
echo AUTH_TYPE = $AUTH_TYPE
echo CONTENT_TYPE = $CONTENT_TYPE
echo CONTENT_LENGTH = $CONTENT_LENGTH

exit 0

# A here document gives brief instructions for use.
:<<-'_test_CGI_'

1) Drop this in your http://domain.name/cgi-bin directory.
2) Then, open http://domain.name/cgi-bin/test-cgi.sh.

_test_CGI_
%%%&&&test-cgi.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@timed-input.sh@@@!!!************************************************************************************
#!/bin/bash
# timed-input.sh

# TMOUT=3    also works in newer versions of Bash.


TIMELIMIT=3  # Three seconds in this instance; may be set to a different value.

PrintAnswer()
{
  if [ "$answer" = TIMEOUT ]
  then
    echo $answer
  else       # Don't want to mix up the two cases.
    echo "Your favorite veggie is $answer"
    kill $!  # The TimerOn function, running in background, is no longer needed; kill it.
             # The $! variable holds the PID of the last job run in background.
  fi
}

TimerOn()
{
  sleep $TIMELIMIT && kill -s 14 $$ &
  # Waits 3 seconds, then sends sigalarm (14) to the script.
}

Int14Vector()
{
  answer="TIMEOUT"
  PrintAnswer
  exit 14
}

trap Int14Vector 14   # Timer interrupt (14) is subverted for our purposes.

echo "What is your favorite vegetable "
TimerOn
read answer
PrintAnswer


#  Admittedly, this is a kludgy implementation of timed input.
#  However, the "-t" option to "read" simplifies this task.
#  See the "t-out.sh" script below.

#  If you need something really elegant . . .
#+ consider writing the application in C or C++,
#+ using appropriate library functions, such as 'alarm' and 'setitimer'.

exit 0
%%%&&&timed-input.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@timeout.sh@@@!!!************************************************************************************
#!/bin/bash
# timeout.sh

#  Written by Stephane Chazelas,
#+ and modified by the document author.

INTERVAL=5                # timeout interval

timedout_read() {
  timeout=$1
  varname=$2
  old_tty_settings=`stty -g`
  stty -icanon min 0 time ${timeout}0
  eval read $varname      # or just:  read $varname
  stty "$old_tty_settings"
  # See the man page for "stty".
}

echo; echo -n "What's your name? Quick! "
timedout_read $INTERVAL your_name

#  This may not work on every terminal type.
#  The maximum timeout depends on the terminal
#+ (it is often 25.5 seconds).

echo

if [ ! -z "$your_name" ]  # 如果在超时之前名字被键入...
then
  echo "Your name is $your_name."
else
  echo "Timed out."
fi

echo

# The behavior of this script differs somewhat from "timed-input.sh".
# Here, each keystroke resets the timer.

exit 0
%%%&&&timeout.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@t-out.sh@@@!!!************************************************************************************
#!/bin/bash
# t-out.sh
# 从"syngin seven"的建议中得到的灵感 (感谢).


TIMELIMIT=4         # 4 seconds

read -t $TIMELIMIT variable <&1
#                           ^^^
#  In this instance, "<&1" is needed for Bash 1.x and 2.x,
#  but is unnecessary for Bash 3.x.

echo

if [ -z "$variable" ]  # 值为null?
then
  echo "Timed out, variable still unset."
else 
  echo "variable = $variable"
fi 

exit 0
%%%&&&t-out.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@tree.sh@@@!!!************************************************************************************
#!/bin/bash
# tree.sh

#  Written by Rick Boivie.
#  Used with permission.
#  This is a revised and simplified version of a script
#+ by Jordi Sanfeliu (and patched by Ian Kjos).
#  This script replaces the earlier version used in
#+ previous releases of the Advanced Bash Scripting Guide.

# ==> Comments added by the author of this document.


search () {
for dir in `echo *`
#  ==> `echo *` lists all the files in current working directory,
#+ ==> without line breaks.
#  ==> Similar effect to for dir in *
#  ==> but "dir in `echo *`" will not handle filenames with blanks.
do
  if [ -d "$dir" ] ; then # ==> If it is a directory (-d)...
  zz=0                    # ==> Temp variable, keeping track of directory level.
  while [ $zz != $1 ]     # Keep track of inner nested loop.
    do
      echo -n "| "        # ==> Display vertical connector symbol,
                          # ==> with 2 spaces & no line feed in order to indent.
      zz=`expr $zz + 1`   # ==> Increment zz.
    done

    if [ -L "$dir" ] ; then # ==> If directory is a symbolic link...
      echo "+---$dir" `ls -l $dir | sed 's/^.*'$dir' //'`
      # ==> Display horiz. connector and list directory name, but...
      # ==> delete date/time part of long listing.
    else
      echo "+---$dir"       # ==> Display horizontal connector symbol...
      # ==> and print directory name.
      numdirs=`expr $numdirs + 1` # ==> Increment directory count.
      if cd "$dir" ; then         # ==> If can move to subdirectory...
        search `expr $1 + 1`      # with recursion ;-)
        # ==> Function calls itself.
        cd ..
      fi
    fi
  fi
done
}

if [ $# != 0 ] ; then
  cd $1 # move to indicated directory.
  #else # stay in current directory
fi

echo "Initial directory = `pwd`"
numdirs=0

search 0
echo "Total directories = $numdirs"

exit 0
%%%&&&tree.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@twodim.sh@@@!!!************************************************************************************
#!/bin/bash
# twodim.sh: Simulating a two-dimensional array.

# A one-dimensional array consists of a single row.
# A two-dimensional array stores rows sequentially.

Rows=5
Columns=5
# 5 X 5 array.

declare -a alpha     # char alpha [Rows] [Columns];
                     # Unnecessary declaration. Why?

load_alpha ()
{
local rc=0
local index

for i in A B C D E F G H I J K L M N O P Q R S T U V W X Y
do     # Use different symbols if you like.
  local row=`expr $rc / $Columns`
  local column=`expr $rc % $Rows`
  let "index = $row * $Rows + $column"
  alpha[$index]=$i
# alpha[$row][$column]
  let "rc += 1"
done 

#  Simpler would be
#+   declare -a alpha=( A B C D E F G H I J K L M N O P Q R S T U V W X Y )
#+ but this somehow lacks the "flavor" of a two-dimensional array.
}

print_alpha ()
{
local row=0
local index

echo

while [ "$row" -lt "$Rows" ]   #  以"行序为主"进行打印:
do                             #+ 行号不变(外层循环),
                               #+ 列号进行增长. (译者注: 就是按行打印)
  local column=0

  echo -n "       "            #  按照行方向打印"正方形"数组.

  while [ "$column" -lt "$Columns" ]
  do
    let "index = $row * $Rows + $column"
    echo -n "${alpha[index]} "  # alpha[$row][$column]
    let "column += 1"
  done

  let "row += 1"
  echo

done 

# A simpler equivalent is:
#     echo ${alpha[*]} | xargs -n $Columns

echo
}

filter ()     # Filter out negative array indices.
{

echo -n "  "  # Provides the tilt.
              # Explain how.

if [[ "$1" -ge 0 &&  "$1" -lt "$Rows" && "$2" -ge 0 && "$2" -lt "$Columns" ]]
then
    let "index = $1 * $Rows + $2"
    # Now, print it rotated.
    echo -n " ${alpha[index]}"
    #           alpha[$row][$column]
fi   

}
 

 

rotate ()  #  Rotate the array 45 degrees --
{          #+ "balance" it on its lower lefthand corner.
local row
local column

for (( row = Rows; row > -Rows; row-- ))
  do       # Step through the array backwards. Why?

  for (( column = 0; column < Columns; column++ ))
  do

    if [ "$row" -ge 0 ]
    then
      let "t1 = $column - $row"
      let "t2 = $column"
    else
      let "t1 = $column"
      let "t2 = $column + $row"
    fi 

    filter $t1 $t2   # Filter out negative array indices.
                     # What happens if you don't do this?
  done

  echo; echo

done

#  The array rotation was inspired by the examples (pp. 143-146) in
#+ "Advanced C Programming on the IBM PC," by Herbert Mayer
#+ (see bibliography).
#  This just goes to show that much of what can be done in C
#+ can also be done in shell scripting.

}


#--------------- Now, let the show begin. ------------#
load_alpha     # Load the array.
print_alpha    # Print it out.
rotate         # Rotate it 45 degrees counterclockwise.
#-----------------------------------------------------#

exit 0

# This is a rather contrived, not to mention inelegant, simulation.

# Exercises:
# ---------
# 1)  Rewrite the array loading and printing functions
#     in a more intuitive and readable fashion.
#
# 2)  Explain in detail how the array rotation function works.
#     Hint: think about the implications of stepping through the array backwards.
#
# 3)  Rewrite this script to handle non-square arrays as well,
#     such as a 6 X 4 array.
#     Try to minimize "distortion" when the array is rotated.
%%%&&&twodim.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@unalias.sh@@@!!!************************************************************************************
#!/bin/bash
# unalias.sh

shopt -s expand_aliases  # Enables alias expansion.

alias llm='ls -al | more'
llm

echo

unalias llm              # Unset the alias.
llm
# Error message results, since 'llm' is no longer recognized.

exit 0
%%%&&&unalias.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@unit-conversion.sh@@@!!!************************************************************************************
#!/bin/bash
# unit-conversion.sh


convert_units ()  # Takes as arguments the units to convert.
{
  cf=$(units "$1" "$2" | sed --silent -e '1p' | awk '{print $2}')
  # Strip off everything except the actual conversion factor.
  echo "$cf"
}

Unit1=miles
Unit2=meters
cfactor=`convert_units $Unit1 $Unit2`
quantity=3.73

result=$(echo $quantity*$cfactor | bc)

echo "There are $result $Unit2 in $quantity $Unit1."

#  What happens if you pass incompatible units,
#+ such as "acres" and "miles", to the function?

exit 0
%%%&&&unit-conversion.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@unprotect_literal.sh@@@!!!************************************************************************************
#! /bin/bash
# unprotect_literal.sh

# set -vx

:<<-'_UnProtect_Literal_String_Doc'

    Copyright (c) Michael S. Zick, 2003; All Rights Reserved
    License: Unrestricted reuse in any form, for any purpose.
    Warranty: None
    Revision: $ID$

    Documentation redirected to the Bash no-operation. Bash will
    '/dev/null' this block when the script is first read.
    (Uncomment the above set command to see this action.)

    Remove the first (Sha-Bang) line when sourcing this as a library
    procedure.  Also comment out the example use code in the two
    places where shown.


    Usage:
        Complement of the "$(_pls 'Literal String')" function.
        (See the protect_literal.sh example.)

        StringVar=$(_upls ProtectedStringVariable)

    Does:
        When used on the right-hand-side of an assignment statement;
        makes the substitutions embedded in the protected string.

    Notes:
        The strange names (_*) are used to avoid trampling on
        the user's chosen names when this is sourced as a
        library.


_UnProtect_Literal_String_Doc

_upls() {
    local IFS=$'\x1B'               # \ESC character (not required)
    eval echo $@                    # Substitution on the glob.
}

# :<<-'_UnProtect_Literal_String_Test'
# # # Remove the above "# " to disable this code. # # #


_pls() {
    local IFS=$'\x1B'               # \ESC character (not required)
    echo $'\x27'$@$'\x27'           # Hard quoted parameter glob
}

# Declare an array for test values.
declare -a arrayZ

# Assign elements with various types of quotes and escapes.
arrayZ=( zero "$(_pls 'Hello ${Me}')" 'Hello ${You}' "\'Pass: ${pw}\'" )

# Now make an assignment with that result.
declare -a array2=( ${arrayZ[@]} )

# Which yielded:
# - - Test Three - -
# Element 0: zero is: 4 long            # Our marker element.
# Element 1: Hello ${Me} is: 11 long    # Intended result.
# Element 2: Hello is: 5 long           # ${You} expanded to nothing.
# Element 3: 'Pass: is: 6 long          # Split on the whitespace.
# Element 4: ' is: 1 long               # The end quote is here now.

# set -vx

#  Initialize 'Me' to something for the embedded ${Me} substitution.
#  This needs to be done ONLY just prior to evaluating the
#+ protected string.
#  (This is why it was protected to begin with.)

Me="to the array guy."

# Set a string variable destination to the result.
newVar=$(_upls ${array2[1]})

# Show what the contents are.
echo $newVar

# Do we really need a function to do this?
newerVar=$(eval echo ${array2[1]})
echo $newerVar

#  I guess not, but the _upls function gives us a place to hang
#+ the documentation on.
#  This helps when we forget what a construction like:
#+ $(eval echo ... ) means.

# What if Me isn't set when the protected string is evaluated?
unset Me
newestVar=$(_upls ${array2[1]})
echo $newestVar

# Just gone, no hints, no runs, no errors.

#  Why in the world?
#  Setting the contents of a string variable containing character
#+ sequences that have a meaning in Bash is a general problem in
#+ script programming.
#
#  This problem is now solved in eight lines of code
#+ (and four pages of description).

#  Where is all this going?
#  Dynamic content Web pages as an array of Bash strings.
#  Content set per request by a Bash 'eval' command
#+ on the stored page template.
#  Not intended to replace PHP, just an interesting thing to do.
###
#  Don't have a webserver application?
#  No problem, check the example directory of the Bash source;
#+ there is a Bash script for that also.

# _UnProtect_Literal_String_Test
# # # Remove the above "# " to disable this code. # # #

exit 0
%%%&&&unprotect_literal.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@unset.sh@@@!!!************************************************************************************
#!/bin/bash
# unset.sh: Unsetting a variable.

variable=hello                       # Initialized.
echo "variable = $variable"

unset variable                       # Unset.
                                     # Same effect as:  variable=
echo "(unset) variable = $variable"  # $variable is null.

exit 0
%%%&&&unset.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@upperconv.sh@@@!!!************************************************************************************
#!/bin/bash
# upperconv.sh
# Converts a specified input file to uppercase.

E_FILE_ACCESS=70
E_WRONG_ARGS=71

if [ ! -r "$1" ]     # 判断指定的输入文件是否可读?
then
  echo "Can't read from input file!"
  echo "Usage: $0 input-file output-file"
  exit $E_FILE_ACCESS
fi                   #  Will exit with the same error
                     #+ even if the input file ($1) is not specified (why?).

if [ -z "$2" ]
then
  echo "Need to specify output file."
  echo "Usage: $0 input-file output-file"
  exit $E_WRONG_ARGS
fi


exec 4<&0
exec < $1            # Will read from the input file.

exec 7>&1
exec > $2            # Will write to the output file.
                     # Assumes the output file is writable (add a check?).

# -----------------------------------------------
    cat - | tr a-z A-Z   # Uppercase conversion.
#   ^^^^^                # Reads from stdin.
#           ^^^^^^^^^^   # Writes to stdout.
# However, both stdin and stdout were redirected.
# -----------------------------------------------

exec 1>&7 7>&-       # Restore stdout.
exec 0<&4 4<&-       # Restore stdin.

# After restoration, the following line prints to stdout as expected.
echo "File \"$1\" written to \"$2\" as uppercase conversion."

exit 0
%%%&&&upperconv.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@usage-message.sh@@@!!!************************************************************************************
#!/bin/bash
# usage-message.sh

: ${1?"Usage: $0 ARGUMENT"}
#  If no command-line argument is present,
#+ the script terminates right here, with the following error message.
#    usage-message.sh: 1: Usage: usage-message.sh ARGUMENT

echo "These two lines echo only if command-line parameter given."
echo "command line parameter = /"$1/""

exit 0  # The script exits here only if a command-line parameter was provided.

# Check the exit status of the script, both with and without a command-line parameter.
# If a command-line parameter is provided, "$?" is 0.
# If not, "$?" is 1.
%%%&&&usage-message.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@usb.sh@@@!!!************************************************************************************
#!/bin/bash
# ==> usb.sh
# ==> Script for mounting and installing pen/keychain USB storage devices.
# ==> Runs as root at system startup (see below).
# ==>
# ==> Newer Linux distros (2004 or later) autodetect
# ==> and install USB pen drives, and therefore don't need this script.
# ==> But, it's still instructive.
 
#  This code is free software covered by GNU GPL license version 2 or above.
#  Please refer to http://www.gnu.org/ for the full license text.
#
#  Some code lifted from usb-mount by Michael Hamilton's usb-mount (LGPL)
#+ see http://users.actrix.co.nz/michael/usbmount.html
#
#  INSTALL
#  -------
#  Put this in /etc/hotplug/usb/diskonkey.
#  Then look in /etc/hotplug/usb.distmap, and copy all usb-storage entries
#+ into /etc/hotplug/usb.usermap, substituting "usb-storage" for "diskonkey".
#  Otherwise this code is only run during the kernel module invocation/removal
#+ (at least in my tests), which defeats the purpose.
#
#  TODO
#  ----
#  Handle more than one diskonkey device at one time (e.g. /dev/diskonkey1
#+ and /mnt/diskonkey1), etc. The biggest problem here is the handling in
#+ devlabel, which I haven't yet tried.
#
#  AUTHOR and SUPPORT
#  ------------------
#  Konstantin Riabitsev, <icon linux duke edu>.
#  Send any problem reports to my email address at the moment.
#
# ==> Comments added by ABS Guide author.

 

SYMLINKDEV=/dev/diskonkey
MOUNTPOINT=/mnt/diskonkey
DEVLABEL=/sbin/devlabel
DEVLABELCONFIG=/etc/sysconfig/devlabel
IAM=$0

##
# Functions lifted near-verbatim from usb-mount code.
#
function allAttachedScsiUsb {
    find /proc/scsi/ -path '/proc/scsi/usb-storage*' -type f | xargs grep -l 'Attached: Yes'
}
function scsiDevFromScsiUsb {
    echo $1 | awk -F"[-/]" '{ n=$(NF-1);  print "/dev/sd" substr("abcdefghijklmnopqrstuvwxyz", n+1, 1) }'
}

if [ "${ACTION}" = "add" ] && [ -f "${DEVICE}" ]; then
    ##
    # lifted from usbcam code.
    #
    if [ -f /var/run/console.lock ]; then
        CONSOLEOWNER=`cat /var/run/console.lock`
    elif [ -f /var/lock/console.lock ]; then
        CONSOLEOWNER=`cat /var/lock/console.lock`
    else
        CONSOLEOWNER=
    fi
    for procEntry in $(allAttachedScsiUsb); do
        scsiDev=$(scsiDevFromScsiUsb $procEntry)
        #  Some bug with usb-storage?
        #  Partitions are not in /proc/partitions until they are accessed
        #+ somehow.
        /sbin/fdisk -l $scsiDev >/dev/null
        ##
        #  Most devices have partitioning info, so the data would be on
        #+ /dev/sd?1. However, some stupider ones don't have any partitioning
        #+ and use the entire device for data storage. This tries to
        #+ guess semi-intelligently if we have a /dev/sd?1 and if not, then
        #+ it uses the entire device and hopes for the better.
        #
        if grep -q `basename $scsiDev`1 /proc/partitions; then
            part="$scsiDev""1"
        else
            part=$scsiDev
        fi
        ##
        #  Change ownership of the partition to the console user so they can
        #+ mount it.
        #
        if [ ! -z "$CONSOLEOWNER" ]; then
            chown $CONSOLEOWNER:disk $part
        fi
        ##
        # This checks if we already have this UUID defined with devlabel.
        # If not, it then adds the device to the list.
        #
        prodid=`$DEVLABEL printid -d $part`
        if ! grep -q $prodid $DEVLABELCONFIG; then
            # cross our fingers and hope it works
            $DEVLABEL add -d $part -s $SYMLINKDEV 2>/dev/null
        fi
        ##
        # Check if the mount point exists and create if it doesn't.
        #
        if [ ! -e $MOUNTPOINT ]; then
            mkdir -p $MOUNTPOINT
        fi
        ##
        # Take care of /etc/fstab so mounting is easy.
        #
        if ! grep -q "^$SYMLINKDEV" /etc/fstab; then
            # Add an fstab entry
            echo -e \
                "$SYMLINKDEV\t\t$MOUNTPOINT\t\tauto\tnoauto,owner,kudzu 0 0" \
                >> /etc/fstab
        fi
    done
    if [ ! -z "$REMOVER" ]; then
        ##
        # Make sure this script is triggered on device removal.
        #
        mkdir -p `dirname $REMOVER`
        ln -s $IAM $REMOVER
    fi
elif [ "${ACTION}" = "remove" ]; then
    ##
    # If the device is mounted, unmount it cleanly.
    #
    if grep -q "$MOUNTPOINT" /etc/mtab; then
        # unmount cleanly
        umount -l $MOUNTPOINT
    fi
    ##
    # Remove it from /etc/fstab if it's there.
    #
    if grep -q "^$SYMLINKDEV" /etc/fstab; then
        grep -v "^$SYMLINKDEV" /etc/fstab > /etc/.fstab.new
        mv -f /etc/.fstab.new /etc/fstab
    fi
fi

exit 0
%%%&&&usb.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@userlist.sh@@@!!!************************************************************************************
#!/bin/bash
# userlist.sh

PASSWORD_FILE=/etc/passwd
n=1           # User number

for name in $(awk 'BEGIN{FS=":"}{print $1}' < "$PASSWORD_FILE" )
# Field separator = :    ^^^^^^
# Print first field              ^^^^^^^^
# Get input from password file               ^^^^^^^^^^^^^^^^^
do
  echo "USER #$n = $name"
  let "n += 1"
done 


# USER #1 = root
# USER #2 = bin
# USER #3 = daemon
# ...
# USER #30 = bozo

exit 0

#  Exercise:
#  --------
#  How is it that an ordinary user (or a script run by that user)
#+ can read /etc/passwd?
#  Isn't this a security hole? Why or why not?
%%%&&&userlist.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@usrmnt.sh@@@!!!************************************************************************************
#!/bin/bash
# usrmnt.sh, written by Anthony Richardson,
# and used here with the author's permission.

# usage:       usrmnt.sh
# description: Mounts a device; the user invoking this script must belong to
#              the MNTUSERS group in the /etc/sudoers file.

# ----------------------------------------------------------
#  This is a user-mount script that reruns itself via sudo.
#  A user with the proper permissions only has to type

#   usermount /dev/fd0 /mnt/floppy

#  instead of

#   sudo usermount /dev/fd0 /mnt/floppy

#  I use this same technique for all of my sudo scripts,
#+ because I find it convenient.
# ----------------------------------------------------------

#  If the SUDO_COMMAND variable is not set, we are not being run through sudo
#+ (i.e., this is the first, non-recursive invocation), so rerun the script
#+ via sudo, passing along the user's real user and group IDs . . .

if [ -z "$SUDO_COMMAND" ]
then
   mntusr=$(id -u) grpusr=$(id -g) sudo $0 $*
   exit 0
fi

# We only get here if we are being run via sudo (i.e., on the recursive call).
/bin/mount $* -o uid=$mntusr,gid=$grpusr

exit 0

# Additional notes (added by the script author):
# -------------------------------------------------

# 1) Linux allows the "users" option in the /etc/fstab file,
#    so that any user may mount removable media.
#    But, on a server, I prefer that only a small set of users
#    have access to removable media.
#    I find that using sudo gives me more control.

# 2) I also find that using groups makes
#    this task easier to accomplish.

# 3) This method grants root access to the mount command
#    to any user with the proper permissions,
#    so be careful about whom you grant that access to.
#    You can develop similar mntfloppy, mntcdrom,
#    and mntsamba scripts to separate the types of access,
#    and, using the technique described above,
#    gain finer control over the mount command.
%%%&&&usrmnt.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@var-match.sh@@@!!!************************************************************************************
#!/bin/bash
# var-match.sh:
# Demo of pattern replacement at prefix / suffix of string.

v0=abc1234zip1234abc    # Original variable.
echo "v0 = $v0"         # abc1234zip1234abc
echo

# Match at prefix (beginning) of string.
v1=${v0/#abc/ABCDEF}    # abc1234zip1234abc
                        # |-|
echo "v1 = $v1"         # ABCDEF1234zip1234abc
                        # |----|

# Match at suffix (end) of string.
v2=${v0/%abc/ABCDEF}    # abc1234zip1234abc
                        #               |-|
echo "v2 = $v2"         # abc1234zip1234ABCDEF
                        #               |----|

echo

#  ----------------------------------------------------
#  Must match at beginning / end of string,
#+ otherwise no replacement results.
#  ----------------------------------------------------
v3=${v0/#123/000}       # Matches, but not at beginning.
echo "v3 = $v3"         # abc1234zip1234abc
                        # NO REPLACEMENT.
v4=${v0/%123/000}       # Matches, but not at end.
echo "v4 = $v4"         # abc1234zip1234abc
                        # NO REPLACEMENT.

exit 0   
%%%&&&var-match.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@vartrace.sh@@@!!!************************************************************************************
#!/bin/bash

trap 'echo "VARIABLE-TRACE> \$variable = \"$variable\""' DEBUG
# Echoes the value of $variable after every command.

variable=29

echo "Just initialized /"/$variable/" to $variable."

let "variable *= 3"
echo "Just multiplied /"/$variable/" by 3."

exit $?

#  "trap 'command1 . . . command2 . . .' DEBUG"结构更适合于
#+ 使用在复杂脚本的上下文中,
#+ 如果在这种情况下大量使用"echo $variable"语句的话,
#+ 将会非常笨拙, 而且很耗时.

# 感谢, Stephane Chazelas指出这点.
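
#  A small added sketch (assuming tracing is wanted only for one region
#+ of a longer script): the DEBUG trap can be switched off again
#+ once the interesting section has run.
#
#   trap 'echo "TRACE> \$variable = \"$variable\""' DEBUG   # Tracing on.
#   let "variable += 1"
#   trap - DEBUG                                            # Tracing off.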


Output of script:

VARIABLE-TRACE> $variable = ""
VARIABLE-TRACE> $variable = "29"
Just initialized "$variable" to 29.
VARIABLE-TRACE> $variable = "29"
VARIABLE-TRACE> $variable = "87"
Just multiplied "$variable" by 3.
VARIABLE-TRACE> $variable = "87"
%%%&&&vartrace.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@viewdata.sh@@@!!!************************************************************************************
#!/bin/bash
# viewdata.sh
# A conversion of the VIEWDATA.BAT batch file into a shell script.

DATAFILE=/home/bozo/datafiles/book-collection.data
ARGNO=1

# @ECHO OFF                 Command unnecessary here.

if [ $# -lt "$ARGNO" ]    # IF !%1==! GOTO VIEWDATA
then
  less $DATAFILE          # TYPE C:\MYDIR\BOOKLIST.TXT | MORE
else
  grep "$1" $DATAFILE     # FIND "%1" C:/MYDIR/BOOKLIST.TXT
fi 

exit 0                    # :EXIT0

#  GOTOs, labels, and other such kludges are unnecessary in a shell script.
#  Arguably, the converted script is an improvement over the original batch
#+ file: it is shorter, cleaner, and more elegant.
%%%&&&viewdata.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@weirdvars.sh@@@!!!************************************************************************************
#!/bin/bash
# weirdvars.sh: Echoing weird variables.

var="'(]//{}/$/""
echo $var        # '(]/{}$"
echo "$var"      # '(]/{}$"     和上一句没什么区别.Doesn't make a difference.

echo

IFS='\'
echo $var        # '(] {}$"     \ converted to whitespace. Why?
echo "$var"      # '(]\{}$"

# Examples above supplied by Stephane Chazelas.

exit 0
%%%&&&weirdvars.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@wf2.sh@@@!!!************************************************************************************
#!/bin/bash
# wf2.sh: Crude word frequency analysis of a text file.

# Uses 'xargs' to decompose lines of text into single words.
# Compare with the "wf.sh" script that follows.


# Check for input file on the command line.
ARGS=1
E_BADARGS=65
E_NOFILE=66

if [ $# -ne "$ARGS" ]
# Correct number of arguments passed to script?
then
  echo "Usage: `basename $0` filename"
  exit $E_BADARGS
fi

if [ ! -f "$1" ]       # Check if file exists.
then
  echo "File /"$1/" does not exist."
  exit $E_NOFILE
fi

 

#####################################################################
cat "$1" | xargs -n1 | /
#  列出文件, 每行一个单词.
tr A-Z a-z | /
#  将字符转换为小写.
sed -e 's//.//g'  -e 's//,//g' -e 's/ //
/g' | /
#  过滤掉句号和逗号,
#+ 并且将单词间的空格修改为换行,
sort | uniq -c | sort -nr
#  最后统计出现次数, 把数字显示在第一列, 然后显示单词, 并按数字排序.
#####################################################################

#  This example does the same job as "wf.sh",
#+ but more ponderously, and it runs more slowly (why?).
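
#  An added, hedged observation: every extra stage above is another process,
#+ and 'xargs -n1' re-runs 'echo' once per word of input.
#  A rough equivalent that leans on 'tr' instead might look like:
#
#   tr -s '[:space:]' '\n' < "$1" | tr A-Z a-z |
#   sed -e 's/\.//g' -e 's/,//g' | sort | uniq -c | sort -nr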

exit 0
%%%&&&wf2.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@wf.sh@@@!!!************************************************************************************
#!/bin/bash
# wf.sh: Crude word frequency analysis of a text file.
# This is a more efficient version of the "wf2.sh" script.


# Check for input file on the command line.
ARGS=1
E_BADARGS=65
E_NOFILE=66

if [ $# -ne "$ARGS" ]  # Correct number of arguments passed to script?
then
  echo "Usage: `basename $0` filename"
  exit $E_BADARGS
fi

if [ ! -f "$1" ]       # Check if file exists.
then
  echo "File /"$1/" does not exist."
  exit $E_NOFILE
fi

 

########################################################
# main ()
sed -e 's/\.//g'  -e 's/\,//g' -e 's/ /\
/g' "$1" | tr 'A-Z' 'a-z' | sort | uniq -c | sort -nr
#                           =========================
#                              Frequency of occurrence

#  Filter out periods and commas,
#+ and change the space between words to a linefeed,
#+ then shift characters to lowercase,
#+ and finally prefix the occurrence count and sort numerically.

#  Arun Giridhar suggests modifying the above to:
#  . . . | sort | uniq -c | sort +1 [-f] | sort +0 -nr
#  This adds a secondary sort key, so instances of equal occurrence
#+ are sorted alphabetically.
#  As he explains it:
#  "This is effectively a radix sort, first on the
#+ least significant column
#+ (word or string, optionally case-insensitive),
#+ and last on the most significant column (frequency)."
#
#  As Frank Wang explains, the above is equivalent to
#+       . . . | sort | uniq -c | sort +0 -nr
#+ and the following also works:
#+       . . . | sort | uniq -c | sort -k1nr -k
########################################################
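
#  Added side note (hedged): the '+1' / '+0' field positions above use the
#+ obsolete 'sort' syntax, which recent GNU coreutils releases may reject.
#  An approximately equivalent '-k' form would be:
#+       . . . | sort | uniq -c | sort -k 2 -f | sort -k 1,1nr -s
#  ('-s' keeps the alphabetical ordering stable among equal counts.)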

exit 0

# Exercises:
# ----------
# 1) Add 'sed' commands to filter out other punctuation,
#+   such as semicolons.
# 2) Modify the script to also filter out multiple spaces and
#    other whitespace.
%%%&&&wf.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@wgetter2.bash@@@!!!************************************************************************************
#!/bin/bash
# wgetter2.bash

# Author: Little Monster [monster@monstruum.co.uk]
# ==> Used in ABS Guide with permission of script author.
# ==> This script still needs debugging and fixups (exercise for reader).
# ==> It could also use some additional editing in the comments.


#  This is wgetter2 --
#+ a Bash script to make wget a bit more friendly, and save typing.

#  Carefully crafted by Little Monster.
#  More or less complete on 02/02/2005.
#  If you think this script can be improved,
#+ email me at: monster@monstruum.co.uk
# ==> and cc: to the author of the ABS Guide, please.
#  This script is licenced under the GPL.
#  You are free to copy, alter and re-use it,
#+ but please don't try to claim you wrote it.
#  Log your changes here instead.

# =======================================================================
# changelog:

# 07/02/2005.  Fixups by Little Monster.
# 02/02/2005.  Minor additions by Little Monster.
#              (See after # +++++++++++ )
# 29/01/2005.  Minor stylistic edits and cleanups by author of ABS Guide.
#              Added exit error codes.
# 22/11/2004.  Finished initial version of second version of wgetter:
#              wgetter2 is born.
# 01/12/2004.  Changed 'runn' function so it can be run 2 ways --
#              either ask for a file name or have one input on the CL.
# 01/12/2004.  Made sensible handling of no URL's given.
# 01/12/2004.  Made loop of main options, so you don't
#              have to keep calling wgetter 2 all the time.
#              Runs as a session instead.
# 01/12/2004.  Added looping to 'runn' function.
#              Simplified and improved.
# 01/12/2004.  Added state to recursion setting.
#              Enables re-use of previous value.
# 05/12/2004.  Modified the file detection routine in the 'runn' function
#              so it's not fooled by empty values, and is cleaner.
# 01/02/2005.  Added cookie finding routine from later version (which
#              isn't ready yet), so as not to have hard-coded paths.
# =======================================================================

# Error codes for abnormal exit.
E_USAGE=67        # Usage message, then quit.
E_NO_OPTS=68      # No command-line args entered.
E_NO_URLS=69      # No URLs passed to script.
E_NO_SAVEFILE=70  # No save filename passed to script.
E_USER_EXIT=71    # User decides to quit.


#  Basic default wget command we want to use.
#  This is the place to change it, if required.
#  NB: if using a proxy, set http_proxy = yourproxy in .wgetrc.
#  Otherwise delete --proxy=on, below.
# ====================================================================
CommandA="wget -nc -c -t 5 --progress=bar --random-wait --proxy=on -r"
# ====================================================================

 

# --------------------------------------------------------------------
# Set some other variables and explain them.

pattern=" -A .jpg,.JPG,.jpeg,.JPEG,.gif,.GIF,.htm,.html,.shtml,.php"
                    # wget's option to only get certain types of file.
                    # comment out if not using
today=`date +%F`    # Used for a filename.
home=$HOME          # Set HOME to an internal variable.
                    # In case some other path is used, change it here.
depthDefault=3      # Set a sensible default recursion.
Depth=$depthDefault # Otherwise user feedback doesn't tie in properly.
RefA=""             # Set blank referring page.
Flag=""             #  Default to not saving anything,
                    #+ or whatever else might be wanted in future.
lister=""           # Used for passing a list of urls directly to wget.
Woptions=""         # Used for passing wget some options for itself.
inFile=""           # Used for the run function.
newFile=""          # Used for the run function.
savePath="$home/w-save"
Config="$home/.wgetter2rc"
                    #  This is where some variables can be stored,
                    #+ if permanently changed from within the script.
Cookie_List="$home/.cookielist"
                    # So we know where the cookies are kept . . .
cFlag=""            # Part of the cookie file selection routine.

# Define the options available. Easy to change letters here if needed.
# These are the optional options; you don't just wait to be asked.

save=s   # Save command instead of executing it.
cook=c   # Change cookie file for this session.
help=h   # Usage guide.
list=l   # Pass wget the -i option and URL list.
runn=r   # Run saved commands as an argument to the option.
inpu=i   # Run saved commands interactively.
wopt=w   # Allow to enter options to pass directly to wget.
# --------------------------------------------------------------------


if [ -z "$1" ]; then   # Make sure we get something for wget to eat.
   echo "You must at least enter a URL or option!"
   echo "-$help for usage."
   exit $E_NO_OPTS
fi

 

# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# added added added added added added added added added added added added

if [ ! -e "$Config" ]; then   # See if configuration file exists.
   echo "Creating configuration file, $Config"
   echo "# This is the configuration file for wgetter2" > "$Config"
   echo "# Your customised settings will be saved in this file" >> "$Config"
else
   source $Config             # Import variables we set outside the script.
fi

if [ ! -e "$Cookie_List" ]; then
   # Set up a list of cookie files, if there isn't one.
   echo "Hunting for cookies . . ."
   find -name cookies.txt >> $Cookie_List   # Create the list of cookie files.
fi #  Isolate this in its own 'if' statement,
   #+ in case we got interrupted while searching.

if [ -z "$cFlag" ]; then # If we haven't already done this . . .
   echo                  # Make a nice space after the command prompt.
   echo "Looks like you haven't set up your source of cookies yet."
   n=0                   # Make sure the counter doesn't contain random values.
   while read; do
      Cookies[$n]=$REPLY # Put the cookie files we found into an array.
      echo "$n) ${Cookies[$n]}"  # Create a menu.
      n=$(( n + 1 ))     # Increment the counter.
   done < $Cookie_List   # Feed the read statement.
   echo "Enter the number of the cookie file you want to use."
   echo "If you won't be using cookies, just press RETURN."
   echo
   echo "I won't be asking this again. Edit $Config"
   echo "If you decide to change at a later date"
   echo "or use the -${cook} option for per session changes."
   read
   if [ ! -z $REPLY ]; then   # User didn't just press return.
      Cookie=" --load-cookies ${Cookies[$REPLY]}"
      # Set the variable here as well as in the config file.

      echo "Cookie=/" --load-cookies ${Cookies[$REPLY]}/"" >> $Config
   fi
   echo "cFlag=1" >> $Config  # So we know not to ask again.
fi

# end added section end added section end added section end added section end
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

 

# Another variable.
# This one may or may not be subject to variation.
# A bit like the small print.
CookiesON=$Cookie
# echo "cookie file is $CookiesON" # For debugging.
# echo "home is ${home}"           # For debugging. Got caught with this one!


wopts()
{
echo "Enter options to pass to wget."
echo "It is assumed you know what you're doing."
echo
echo "You can pass their arguments here too."
# That is to say, everything passed here is passed to wget.

read Wopts
# Read in the options to be passed to wget.

Woptions=" $Wopts"
# Assign to another variable.
# Just for fun, or something . . .

echo "passing options ${Wopts} to wget"
# Mainly for debugging.
# Is cute.

return
}


save_func()
{
echo "Settings will be saved."
if [ ! -d $savePath ]; then  #  See if directory exists.
   mkdir $savePath           #  Create the directory to save things in
                             #+ if it isn't already there.
fi

Flag=S
# Tell the final bit of code what to do.
# Set a flag since stuff is done in main.

return
}


usage() # Tell them how it works.
{
    echo "Welcome to wgetter.  This is a front end to wget."
    echo "It will always run wget with these options:"
    echo "$CommandA"
    echo "and the pattern to match: $pattern (which you can change at the top of this script)."
    echo "It will also ask you for recursion depth, and if you want to use a referring page."
    echo "Wgetter accepts the following options:"
    echo ""
    echo "-$help : Display this help."
    echo "-$save : Save the command to a file $savePath/wget-($today) instead of running it."
    echo "-$runn : Run saved wget commands instead of starting a new one --"
    echo "Enter filename as argument to this option."
    echo "-$inpu : Run saved wget commands interactively --"
    echo "The script will ask you for the filename."
    echo "-$cook : Change the cookies file for this session."
    echo "-$list : Tell wget to use URL's from a list instead of from the command line."
    echo "-$wopt : Pass any other options direct to wget."
    echo ""
    echo "See the wget man page for additional options you can pass to wget."
    echo ""

    exit $E_USAGE  # End here. Don't process anything else.
}

 

list_func() #  Gives the user the option to use the -i option to wget,
            #+ and a list of URLs.
{
while [ 1 ]; do
   echo "Enter the name of the file containing URL's (press q to change your
mind)."
   read urlfile
   if [ ! -e "$urlfile" ] && [ "$urlfile" != q ]; then
       # Look for a file, or the quit option.
       echo "That file does not exist!"
   elif [ "$urlfile" = q ]; then # Check quit option.
       echo "Not using a url list."
       return
   else
      echo "using $urlfile."
      echo "If you gave me url's on the command line, I'll use those first."
                            # Report wget standard behaviour to the user.
      lister=" -i $urlfile" # This is what we want to pass to wget.
      return
   fi
done
}


cookie_func() # Give the user the option to use a different cookie file.
{
while [ 1 ]; do
   echo "Change the cookies file. Press return if you don't want to change
it."
   read Cookies
   # NB: this is not the same as Cookie, earlier.
   # There is an 's' on the end.
   # Bit like chocolate chips.
   if [ -z "$Cookies" ]; then                 # Escape clause for wusses.
      return
   elif [ ! -e "$Cookies" ]; then
      echo "File does not exist.  Try again." # Keep em going . . .
   else
       CookiesON=" --load-cookies $Cookies"   # File is good -- let's use it!
       return
   fi
done
}

 

run_func()
{
if [ -z "$OPTARG" ]; then
# Test to see if we used the in-line option or the query one.
   if [ ! -d "$savePath" ]; then      # In case directory doesn't exist . . .
      echo "$savePath does not appear to exist."
      echo "Please supply path and filename of saved wget commands:"
      read newFile
         until [ -f "$newFile" ]; do  # Keep going till we get something.
            echo "Sorry, that file does not exist.  Please try again."
            # Try really hard to get something.
            read newFile
         done


# -------------------------------------------------------------------------
#         if [ -z ( grep wget ${newfile} ) ]; then
          # Assume they haven't got the right file and bail out.
#         echo "Sorry, that file does not contain wget commands.  Aborting."
#         exit
#         fi
#
# This is bogus code.
# It doesn't actually work.
# If anyone wants to fix it, feel free!
# -------------------------------------------------------------------------


      filePath="${newFile}"
   else
   echo "Save path is $savePath"
      echo "Please enter name of the file which you want to use."
      echo "You have a choice of:"
      ls $savePath                                    # Give them a choice.
      read inFile
         until [ -f "$savePath/$inFile" ]; do         # Keep going till we get something.
            if [ ! -f "${savePath}/${inFile}" ]; then # If file doesn't exist.
               echo "Sorry, that file does not exist.  Please choose from:"
               ls $savePath                           # If a mistake is made.
               read inFile
            fi
         done
      filePath="${savePath}/${inFile}"  # Make one variable . . .
   fi
else filePath="${savePath}/${OPTARG}"   # Which can be many things . . .
fi

if [ ! -f "$filePath" ]; then           # If a bogus file got through.
   echo "You did not specify a suitable file."
   echo "Run this script with the -${save} option first."
   echo "Aborting."
   exit $E_NO_SAVEFILE
fi
echo "Using: $filePath"
while read; do
    eval $REPLY
    echo "Completed: $REPLY"
done < $filePath  # Feed the actual file we are using into a 'while' loop.

exit
}

 

# Fish out any options we are using for the script.
# This is based on the demo in "Learning The Bash Shell" (O'Reilly).
while getopts ":$save$cook$help$list$runn:$inpu$wopt" opt
do
  case $opt in
     $save) save_func;;   #  Save some wgetter sessions for later.
     $cook) cookie_func;; #  Change cookie file.
     $help) usage;;       #  Get help.
     $list) list_func;;   #  Allow wget to use a list of URLs.
     $runn) run_func;;    #  Useful if you are calling wgetter from, for example,
                          #+ a cron script.
     $inpu) run_func;;    #  When you don't know what your files are named.
     $wopt) wopts;;       #  Pass options directly to wget.
        \?) echo "Not a valid option."
            echo "Use -${wopt} if you want to pass options directly to wget,"
            echo "or -${help} for help";;      # Catch anything else.
  esac
done
shift $((OPTIND - 1))     # Do funky magic stuff with $#.
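#  (Added note, assuming the default option letters defined above:
#+  the getopts spec expands to ":schlr:iw".  The leading ':' selects
#+  silent error reporting, and the ':' after 'r' means -r takes an
#+  argument, which is what run_func later reads from $OPTARG.)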


if [ -z "$1" ] && [ -z "$lister" ]; then
                          #  We should be left with at least one URL
                          #+ on the command line, unless a list is
     #+ being used -- catch empty CL's.
   echo "No URL's given!  You must enter them on the same line as wgetter2."
   echo "E.g.,  wgetter2 http://somesite http://anothersite."
   echo "Use $help option for more information."
   exit $E_NO_URLS        # Bail out, with appropriate error code.
fi

URLS=" $@"
# Use this so that URL list can be changed if we stay in the option loop.

while [ 1 ]; do
   # This is where we ask for the most used options.
   # (Mostly unchanged from version 1 of wgetter)
   if [ -z $curDepth ]; then
      Current=""
   else Current=" Current value is $curDepth"
   fi
       echo "How deep should I go? (integer: Default is $depthDefault.$Current)"
       read Depth   # Recursion -- how far should we go?
       inputB=""    # Reset this to blank on each pass of the loop.
       echo "Enter the name of the referring page (default is none)."
       read inputB  # Need this for some sites.

       echo "Do you want to have the output logged to the terminal"
       echo "(y/n, default is yes)?"
       read noHide  # Otherwise wget will just log it to a file.

       case $noHide in    # Now you see me, now you don't.
          y|Y ) hide="";;
          n|N ) hide=" -b";;
            * ) hide="";;
       esac

       if [ -z ${Depth} ]; then       #  User accepted either default or current depth,
                                      #+ in which case Depth is now empty.
          if [ -z ${curDepth} ]; then #  See if a depth was set on a previous iteration.
             Depth="$depthDefault"    #  Set the default recursion depth if nothing
                                      #+ else to use.
          else Depth="$curDepth"      #  Otherwise, set the one we used before.
          fi
       fi
   Recurse=" -l $Depth"               # Set how deep we want to go.
   curDepth=$Depth                    # Remember setting for next time.

       if [ ! -z $inputB ]; then
          RefA=" --referer=$inputB"   # Option to use referring page.
       fi

   WGETTER="${CommandA}${pattern}${hide}${RefA}${Recurse}${CookiesON}${lister}${Woptions}${URLS}"
   #  Just string the whole lot together . . .
   #  NB: no embedded spaces.
   #  They are in the individual elements so that if any are empty,
   #+ we don't get an extra space.

   if [ -z "${CookiesON}" ] && [ "$cFlag" = "1" ] ; then
       echo "Warning -- can't find cookie file"
       # This should be changed, in case the user has opted to not use cookies.
   fi

   if [ "$Flag" = "S" ]; then
      echo "$WGETTER" >> $savePath/wget-${today}
      #  Create a unique filename for today, or append to it if it exists.
      echo "$inputB" >> $savePath/site-list-${today}
      #  Make a list, so it's easy to refer back to,
      #+ since the whole command is a bit confusing to look at.
      echo "Command saved to the file $savePath/wget-${today}"
           # Tell the user.
      echo "Referring page URL saved to the file $savePath/site-list-${today}"
           # Tell the user.
      Saver=" with save option"
      # Stick this somewhere, so it appears in the loop if set.
   else
       echo "*****************"
       echo "*****Getting*****"
       echo "*****************"
       echo ""
       echo "$WGETTER"
       echo ""
       echo "*****************"
       eval "$WGETTER"
   fi

       echo ""
       echo "Starting over$Saver."
       echo "If you want to stop, press q."
       echo "Otherwise, enter some URL's:"
       # Let them go again. Tell about save option being set.

       read
       case $REPLY in                # Need to change this to a 'trap' clause.
          q|Q ) exit $E_USER_EXIT;;  # Exercise for the reader?
            * ) URLS=" $REPLY";;
       esac

       echo ""
done


exit 0
%%%&&&wgetter2.bash&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@what.sh@@@!!!************************************************************************************
#!/bin/bash

# What are all those mysterious binaries in /usr/X11R6/bin?

DIRECTORY="/usr/X11R6/bin"
# Also try "/bin", "/usr/bin", "/usr/local/bin", etc.

for file in $DIRECTORY/*
do
  whatis `basename $file`   # Echoes info about the binary.
done

exit 0

# You may wish to redirect the output of this script, like so:
# ./what.sh >>whatis.db
# or view it a page at a time on stdout,
# ./what.sh | less
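
#  An added variation (hedged): 'whatis' only knows binaries that have
#+ man page entries; the 'file' command reports on everything, e.g.:
#
#   for f in $DIRECTORY/*
#   do
#     file "$f"    # Prints the file type (ELF executable, script, ...).
#   done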
%%%&&&what.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@wh-loopc.sh@@@!!!************************************************************************************
#!/bin/bash
# wh-loopc.sh: Count to 10 in a "while" loop.

LIMIT=10
a=1

while [ "$a" -le $LIMIT ]
do
  echo -n "$a "
  let "a+=1"
done           # No surprises so far.

echo; echo

# +=================================================================+

# Now, repeat with C-style syntax.

((a = 1))      # a=1
# Double parentheses permit spaces around the assignment, as in C.

while (( a <= LIMIT ))   # Double parentheses, and no "$" before variables.
do
  echo -n "$a "
  ((a += 1))   # let "a+=1"
  # Yes, indeed.
  # Double parentheses permit incrementing a variable with C-style syntax.
done

echo

# Now, C programmers can feel right at home in Bash.
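
#  An added companion sketch: Bash also has a full C-style 'for' loop,
#+ which folds the initialization, test, and increment into one header.
#
#   for ((a = 1; a <= LIMIT; a++))
#   do
#     echo -n "$a "
#   done
#   echo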

exit 0
%%%&&&wh-loopc.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@whx.sh@@@!!!************************************************************************************
#!/bin/bash
# whx.sh: "whois" spammer lookup
# Author: Walter Dnes
# Slight revisions (first section) by ABS Guide author.
# Used in ABS Guide with permission.

# Needs version 3.x or greater of Bash to run (because of =~ operator).
# Commented by script author and ABS Guide author.

 

E_BADARGS=65        # Missing command-line arg.
E_NOHOST=66         # Host not found.
E_TIMEOUT=67        # Host lookup timed out.
E_UNDEF=68          # Some other (undefined) error.
HOSTWAIT=10         # Specify up to 10 seconds for host query reply.
                    # The actual wait may be a bit longer.
OUTFILE=whois.txt   # Output file.
PORT=4321


if [ -z "$1" ]      # Check for (required) command-line arg.
then
  echo "Usage: $0 domain name or IP address"
  exit $E_BADARGS
fi


if [[ "$1" =~ "[a-zA-Z][a-zA-Z]$" ]]  # Ends in two alpha chars?
then                                  # It's a domain name && must do host lookup.
  IPADDR=$(host -W $HOSTWAIT $1 | awk '{print $4}')
                                      # Doing host lookup to get IP address.
          # Extract final field.
else
  IPADDR="$1"                         # Command-line arg was IP address.
fi

echo; echo "IP Address is: "$IPADDR""; echo

if [ -e "$OUTFILE" ]
then
  rm -f "$OUTFILE"
  echo "Stale output file /"$OUTFILE/" removed."; echo
fi


#  Sanity checks.
#  (This section needs more work.)
#  ===============================
if [ -z "$IPADDR" ]
# No response.
then
  echo "Host not found!"
  exit $E_NOHOST    # Bail out.
fi

if [[ "$IPADDR" =~ "^[;;]" ]]
#  ;; connection timed out; no servers could be reached
then
  echo "Host lookup timed out!"
  exit $E_TIMEOUT   # Bail out.
fi

if [[ "$IPADDR" =~ "[(NXDOMAIN)]$" ]]
#  Host xxxxxxxxx.xxx not found: 3(NXDOMAIN)
then
  echo "Host not found!"
  exit $E_NOHOST    # Bail out.
fi

if [[ "$IPADDR" =~ "[(SERVFAIL)]$" ]]
#  Host xxxxxxxxx.xxx not found: 2(SERVFAIL)
then
  echo "Host not found!"
  exit $E_NOHOST    # Bail out.
fi

 


# ======================== Main body of script ========================

AFRINICquery() {
#  Define the function that queries AFRINIC. Echo a notification to the
#+ screen, and then run the actual query, redirecting output to $OUTFILE.

  echo "Searching for $IPADDR in whois.afrinic.net"
  whois -h whois.afrinic.net "$IPADDR" > $OUTFILE

#  Check for presence of reference to an rwhois.
#  Warn about non-functional rwhois.infosat.net server
#+ and attempt rwhois query.
  if grep -e "^remarks: .*rwhois/.[^ ]/+" "$OUTFILE"
  then
    echo " " >> $OUTFILE
    echo "***" >> $OUTFILE
    echo "***" >> $OUTFILE
    echo "Warning: rwhois.infosat.net was not working as of 2005/02/02" >> $OUTFILE
    echo "         when this script was written." >> $OUTFILE
    echo "***" >> $OUTFILE
    echo "***" >> $OUTFILE
    echo " " >> $OUTFILE
    RWHOIS=`grep "^remarks: .*rwhois/.[^ ]/+" "$OUTFILE" | tail -n 1 |/
    sed "s//(^.*/)/(rwhois/..*/)/(:4.*/)//2/"`
    whois -h ${RWHOIS}:${PORT} "$IPADDR" >> $OUTFILE
  fi
}

APNICquery() {
  echo "Searching for $IPADDR in whois.apnic.net"
  whois -h whois.apnic.net "$IPADDR" > $OUTFILE

#  Just  about  every  country has its own internet registrar.
#  I don't normally bother consulting them, because the regional registry
#+ usually supplies sufficient information.
#  There are a few exceptions, where the regional registry simply
#+ refers to the national registry for direct data.
#  These are Japan and South Korea in APNIC, and Brasil in LACNIC.
#  The following if statement checks $OUTFILE (whois.txt) for the presence
#+ of "KR" (South Korea) or "JP" (Japan) in the country field.
#  If either is found, the query is re-run against the appropriate
#+ national registry.

  if grep -E "^country:[ ]+KR$" "$OUTFILE"
  then
    echo "Searching for $IPADDR in whois.krnic.net"
    whois -h whois.krnic.net "$IPADDR" >> $OUTFILE
  elif grep -E "^country:[ ]+JP$" "$OUTFILE"
  then
    echo "Searching for $IPADDR in whois.nic.ad.jp"
    whois -h whois.nic.ad.jp "$IPADDR"/e >> $OUTFILE
  fi
}

ARINquery() {
  echo "Searching for $IPADDR in whois.arin.net"
  whois -h whois.arin.net "$IPADDR" > $OUTFILE

#  Several large internet providers listed by ARIN have their own
#+ internal whois service, referred to as "rwhois".
#  A large block of IP addresses is listed with the provider
#+ under the ARIN registry.
#  To get the IP addresses of 2nd-level ISPs or other large customers,
#+ one has to refer to the rwhois server on port 4321.
#  I originally started with a bunch of "if" statements checking for
#+ the larger providers.
#  This approach is unwieldy, and there's always another rwhois server
#+ that I didn't know about.
#  A more elegant approach is to check $OUTFILE for a reference
#+ to a whois server, parse that server name out of the comment section,
#+ and re-run the query against the appropriate rwhois server.
#  The parsing looks a bit ugly, with a long continued line inside
#+ backticks.
#  But it only has to be done once, and will work as new servers are added.
#@   ABS Guide author comment: it isn't all that ugly, and is, in fact,
#@+  an instructive use of Regular Expressions.

  if grep -E "^Comment: .*rwhois.[^ ]+" "$OUTFILE"
  then
    RWHOIS=`grep -e "^Comment:.*rwhois/.[^ ]/+" "$OUTFILE" | tail -n 1 |/
    sed "s/^/(.*/)/(rwhois/.[^ ]/+/)/(.*$/)//2/"`
    echo "Searching for $IPADDR in ${RWHOIS}"
    whois -h ${RWHOIS}:${PORT} "$IPADDR" >> $OUTFILE
  fi
}

LACNICquery() {
  echo "Searching for $IPADDR in whois.lacnic.net"
  whois -h whois.lacnic.net "$IPADDR" > $OUTFILE

#  The  following if statement checks $OUTFILE (whois.txt) for the presence of
#+ "BR" (Brasil) in the country field.
#  If it is found, the query is re-run against whois.registro.br.

  if grep -E "^country:[ ]+BR$" "$OUTFILE"
  then
    echo "Searching for $IPADDR in whois.registro.br"
    whois -h whois.registro.br "$IPADDR" >> $OUTFILE
  fi
}

RIPEquery() {
  echo "Searching for $IPADDR in whois.ripe.net"
  whois -h whois.ripe.net "$IPADDR" > $OUTFILE
}

#  Initialize a few variables.
#  * slash8 is the most significant octet
#  * slash16 consists of the two most significant octets
#  * octet2 is the second most significant octet

 


slash8=`echo $IPADDR | cut -d. -f 1`
  if [ -z "$slash8" ]  # Yet another sanity check.
  then
    echo "Undefined error!"
    exit $E_UNDEF
  fi
slash16=`echo $IPADDR | cut -d. -f 1-2`
#                             ^ Period specified as 'cut' delimiter.
  if [ -z "$slash16" ]
  then
    echo "Undefined error!"
    exit $E_UNDEF
  fi
octet2=`echo $slash16 | cut -d. -f 2`
  if [ -z "$octet2" ]
  then
    echo "Undefined error!"
    exit $E_UNDEF
  fi
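
#  (Added illustration with a made-up documentation address: for
#+  IPADDR=203.0.113.5 the assignments above yield slash8=203,
#+  slash16=203.0, and octet2=0.)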


#  Check for various odds and ends of reserved space.
#  There is no point in querying for those addresses.

if [ $slash8 == 0 ]; then
  echo $IPADDR is '"This Network"' space\; Not querying
elif [ $slash8 == 10 ]; then
  echo $IPADDR is RFC1918 space\; Not querying
elif [ $slash8 == 14 ]; then
  echo $IPADDR is '"Public Data Network"' space\; Not querying
elif [ $slash8 == 127 ]; then
  echo $IPADDR is loopback space\; Not querying
elif [ $slash16 == 169.254 ]; then
  echo $IPADDR is link-local space\; Not querying
elif [ $slash8 == 172 ] && [ $octet2 -ge 16 ] && [ $octet2 -le 31 ];then
  echo $IPADDR is RFC1918 space\; Not querying
elif [ $slash16 == 192.168 ]; then
  echo $IPADDR is RFC1918 space\; Not querying
elif [ $slash8 -ge 224 ]; then
  echo $IPADDR is either Multicast or reserved space\; Not querying
elif [ $slash8 -ge 200 ] && [ $slash8 -le 201 ]; then LACNICquery "$IPADDR"
elif [ $slash8 -ge 202 ] && [ $slash8 -le 203 ]; then APNICquery "$IPADDR"
elif [ $slash8 -ge 210 ] && [ $slash8 -le 211 ]; then APNICquery "$IPADDR"
elif [ $slash8 -ge 218 ] && [ $slash8 -le 223 ]; then APNICquery "$IPADDR"

#  If we got this far without making a decision, query ARIN.
#  If a reference is found in $OUTFILE to APNIC, AFRINIC, LACNIC, or RIPE,
#+ query the appropriate whois server.

else
  ARINquery "$IPADDR"
  if grep "whois.afrinic.net" "$OUTFILE"; then
    AFRINICquery "$IPADDR"
  elif grep -E "^OrgID:[ ]+RIPE$" "$OUTFILE"; then
    RIPEquery "$IPADDR"
  elif grep -E "^OrgID:[ ]+APNIC$" "$OUTFILE"; then
    APNICquery "$IPADDR"
  elif grep -E "^OrgID:[ ]+LACNIC$" "$OUTFILE"; then
    LACNICquery "$IPADDR"
  fi
fi

#@  ---------------------------------------------------------------
#   Try also:
#   wget http://logi.cc/nw/whois.php3?ACTION=doQuery&DOMAIN=$IPADDR
#@  ---------------------------------------------------------------

#  We've  now  finished  the querying.
#  Echo a copy of the final result to the screen.

cat $OUTFILE
# Or "less $OUTFILE" . . .


exit 0

#@  ABS Guide author comments:
#@  Nothing fancy here, but still a very useful tool for hunting spammers.
#@  Sure, the script can be cleaned up some, and it's still a bit buggy,
#@+ (exercise for reader), but all the same, it's a nice piece of coding
#@+ by Walter Dnes.
#@  Thank you!
%%%&&&whx.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@wipedir.sh@@@!!!************************************************************************************
#!/bin/bash

E_WRONG_DIRECTORY=73

clear # Clear the screen.

TargetDirectory=/home/bozo/projects/GreatAmericanNovel

cd $TargetDirectory
echo "Deleting stale files in $TargetDirectory."

if [ "$PWD" != "$TargetDirectory" ]
then    # Keep from wiping out the wrong directory by accident.
  echo "Wrong directory!"
  echo "In $PWD, rather than $TargetDirectory!"
  echo "Bailing out!"
  exit $E_WRONG_DIRECTORY
fi 

rm -rf *
rm .[A-Za-z0-9]*    # Delete dotfiles (hidden files).
# rm -f .[^.]* ..?*   to remove filenames beginning with multiple dots.
# (shopt -s dotglob; rm -f *)   will also work.
# Thanks, S.C. for pointing this out.

# A filename may contain any character in the ASCII 0 - 255 range except "/".
# Deleting files beginning with weird characters is left as an exercise.
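#  One added, hedged sketch for that exercise (try it on a scratch
#+ directory first):
#
#   find . -maxdepth 1 ! -name . -exec rm -rf {} +
#
#  'find' hands every entry -- whatever its name -- straight to 'rm',
#+ so no globbing pattern has to anticipate the weird characters.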

# Various other operations here, as necessary.

echo
echo "Done."
echo "Old files deleted in $TargetDirectory."
echo


exit 0
%%%&&&wipedir.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


***@@@wstrings.sh@@@!!!************************************************************************************
#!/bin/bash
# wstrings.sh: "word-strings" (an enhanced "strings" command)
#
#  This script filters the output of the "strings" command
#+ by checking it against a standard word list file.
#  This effectively weeds out the gibberish
#+ and outputs only recognized words.

# ===========================================================
#           Standard check for script argument(s)
ARGS=1
E_BADARGS=65
E_NOFILE=66

if [ $# -ne $ARGS ]
then
  echo "Usage: `basename $0` filename"
  exit $E_BADARGS
fi

if [ ! -f "$1" ]                      # Check if file exists.
then
    echo "File /"$1/" does not exist."
    exit $E_NOFILE
fi
# ===========================================================


MINSTRLEN=3                           #  Minimum string length.
WORDFILE=/usr/share/dict/linux.words  #  Dictionary file.
                                      #  A different word list file may be
                                      #+ specified, but it must be saved
                                      #+ with one word per line.


wlist=`strings "$1" | tr A-Z a-z | tr '[:space:]' Z | \
tr -cs '[:alpha:]' Z | tr -s '\173-\377' Z | tr Z ' '`

#  Pipe the output of 'strings' through several 'tr' filters.
#  "tr A-Z a-z" converts everything to lowercase.
#  "tr '[:space:]' Z" converts whitespace characters to Z's.
#  "tr -cs '[:alpha:]' Z" converts non-alphabetic characters to Z's
#+ and squeezes runs of consecutive Z's.
#  "tr -s '\173-\377' Z" converts all characters past 'z' to Z's
#+ and squeezes the repeats.
#  (Note: 173 -- "{", ASCII 123 -- and 377 -- 255, the last ASCII
#+  character -- are octal values.)
#  After this, all the troublesome characters we had to deal with
#+ have been reduced to the single character Z.
#  Finally, "tr Z ' '" converts the Z's to spaces,
#+ so the contents of $wlist used in the loop below are space-separated.

#  ****************************************************************
#  Note the technique of piping the output of one 'tr' invocation
#+ into the next, each time with a different argument.
#  ****************************************************************
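
#  A tiny added illustration on a made-up input line:
#   echo 'Hello, 42 world!' | tr A-Z a-z | tr '[:space:]' Z | \
#   tr -cs '[:alpha:]' Z | tr -s '\173-\377' Z | tr Z ' '
#  prints "hello world " -- the digits and punctuation collapse into the
#+ Z separators, which then become spaces.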


for word in $wlist                    # Important:
                                      # $wlist must not be quoted here.
                                      # "$wlist" doesn't work.
                                      # Why not?
do                                                                
                                                                  
  strlen=${#word}                     # String length.
  if [ "$strlen" -lt "$MINSTRLEN" ]   # Skip over short strings.
  then                                                            
    continue                                                      
  fi                                                              
                                                                  
  grep -Fw $word "$WORDFILE"          #  Match whole words only.
#      ^^^                            #  "Fixed strings" and
                                      #+ "whole words" options.

done
exit $?

%%%&&&wstrings.sh&&&%%%>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>