# 1: data preprocessing
# Binarize the WMT'16 En-De BPE-32k corpus for fairseq.
# A joint source/target vocabulary is used (--joined-dictionary), seeded from a
# pre-built dict file; --nwordssrc/--nwordstgt cap the vocabulary at 32768 entries.
# NOTE(review): --srcdict points at an absolute path "/wmt16.en-de..." — looks like
# it should be relative to the working directory; confirm before running.
TEXT=wmt16_en_de_bpe32k
fairseq-preprocess \
    --source-lang en --target-lang de \
    --trainpref "$TEXT/train.tok.clean.bpe.32000" \
    --validpref "$TEXT/newstest2013.tok.bpe.32000" \
    --testpref "$TEXT/newstest2014.tok.bpe.32000" \
    --destdir "data-bin/$TEXT" \
    --nwordssrc 32768 --nwordstgt 32768 \
    --joined-dictionary \
    --srcdict /wmt16.en-de.joined-dict.transformer/dict.en.txt
# 2: evaluate BLEU
# Decode the binarized test set on 4 GPUs (beam 4, length penalty 0.6),
# strip BPE markers, and keep the full decoding log in generate.out.
CUDA_VISIBLE_DEVICES=0,1,2,3 fairseq-generate data-bin/wmt16_en_de_bpe32k/ \
    --path ./model/model.pt \
    --beam 4 --lenpen 0.6 \
    --max-len-a 1 --max-len-b 50 \
    --batch-size 64 \
    --remove-bpe | tee generate.out

# Extract reference (T-*) and hypothesis (H-*) lines, then re-split hyphenated
# compounds with the ##AT## marker (standard fairseq WMT compound-split scoring).
# T lines carry text from field 2 on; H lines have the model score in field 2,
# so the text starts at field 3.
grep ^T generate.out | cut -f2- | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > generate.ref
grep ^H generate.out | cut -f3- | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > generate.sys

# Corpus-level BLEU of hypotheses against references.
fairseq-score --sys generate.sys --ref generate.ref
# 3: generate attention