I. Overview
My first blog was on oschina, which I found pleasantly minimal: it supported quite a few editor styles and was easier to write in than CSDN. I later moved to CSDN for its larger audience, even though its editor is genuinely painful, and after that I set up a self-hosted WordPress blog, which never felt comfortable either. A few months ago CSDN added Markdown editing, which is a real boon for GitHub Pages users, because Markdown posts can be uploaded straight to your own GitHub blog.
For generating static pages on GitHub Pages, the popular options are Jekyll, Octopress, and Hexo (I haven't looked into the others). Jekyll is the one GitHub recommends, with an official framework and themes, and it lets you write Markdown posts directly on the web. The other two have to be compiled locally before uploading, so what ends up on GitHub are the generated HTML files; in exchange, their themes look better and they are more comfortable to use. I use Hexo: its commands are concise (hexo new, hexo generate, hexo deploy), but be sure to back up the whole project, or the consequences are unthinkable.
II. Migrating old CSDN posts to GitHub Pages
1. Background
Although CSDN now supports Markdown, all my old posts were written with its xeditor rich-text editor and cannot be exported, so I had to research migration options.
Broadly speaking there are three approaches, though none of them seems widely used, presumably because the resulting formatting can be rough:
1. Crawl the pages and export them as HTML, then drop the HTML files as-is into Hexo's /source/_posts/ directory and set layout: false in each file's front matter; Hexo then skips compiling the HTML, and readers are taken to the post via a direct hyperlink to the HTML file.
2. Run the HTML files through a program that converts them to Markdown.
3. Crawl the pages with your own code and generate Markdown files directly.
Approach 1 can run into HTML constructs that are not supported, and I never found a fix for that.
For approach 2, the online converters I tried gave poor results, so I went looking for a code solution. There is a PHP program on GitHub whose author says it works, but I never got the environment set up: I don't know PHP, and it also needs curl, which I remember being hard to configure even back when I was doing Android NDK development. So I fell back on approach 3 and wrote a Python crawler myself, starting from a program I found on GitHub. That program had some bugs and no longer matched CSDN's current page layout, so I reworked it heavily. The HTML-to-Markdown conversion, which is the crux of the whole thing since it directly determines how the Markdown renders, keeps roughly the same structure as the original, though my version is still not great either.
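If you do want to give approach 2 another try, an off-the-shelf converter such as the html2text Python package may be easier than the PHP route. Below is a minimal sketch, assuming html2text has been installed via pip; 'page.html' and 'page.md' are placeholder file names, not part of my setup:

# approach 2 sketch: convert a saved html page to markdown with the html2text package
# (pip install html2text); 'page.html' and 'page.md' are placeholder names
import codecs
import html2text

html = codecs.open('page.html', 'r', 'utf-8').read()
markdown = html2text.html2text(html)    # returns the page body as markdown text
codecs.open('page.md', 'w', 'utf-8').write(markdown)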
2. The Program
Required library:
BeautifulSoup
Depending on the version installed you may need to tweak the corresponding code, though usually nothing has to change.
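One concrete version difference worth knowing: newer bs4 releases emit a warning when you construct BeautifulSoup(data) without naming a parser, so you may want to change the two such calls in the script below along these lines (a small sketch; the html string is a stand-in for a downloaded page):

# on newer bs4 versions, pass the parser name explicitly to silence the warning
from bs4 import BeautifulSoup
html = '<h1><a href="/article/1">demo</a></h1>'   # stand-in for a downloaded page
soup = BeautifulSoup(html, 'html.parser')
print soup.find_all('a')

The full script: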
#! /usr/bin/env python
#coding=utf-8
import urllib2
from bs4 import BeautifulSoup
import logging
import re
import threading
import traceback
import time
import datetime
import sys
reload(sys)
sys.setdefaultencoding('gb18030')   # python 2 hack: default codec for implicit str/unicode conversion

# global variables
blog = "http://blog.csdn.net"
url = "http://blog.csdn.net/gugugujiawei?viewmode=contents"
outputDir = 'F:\\linux\\Share\\github\\article\\'
gRetryCount = 4
def decodeHtmlSpecialCharacter(htmlStr):
    # map common html entities back to plain characters
    # (the whitespace entities are simply dropped)
    specChars = {"&nbsp;" : "", \
                 "&ensp;" : "", \
                 "&emsp;" : "", \
                 "&lt;" : "<", \
                 "&gt;" : ">", \
                 "&amp;" : "&", \
                 "&quot;" : "\"", \
                 "&copy;" : "©", \
                 "&reg;" : "®", \
                 "&times;" : "×", \
                 "&divide;" : "÷", \
                 }
    for key in specChars.keys():
        htmlStr = htmlStr.replace(key, specChars[key])
    return htmlStr
def replaceInvalidCharInFilename(filename):
    # strip or rewrite characters that are illegal in windows filenames
    specChars = {"\\" : "", \
                 "/" : "", \
                 ":" : "", \
                 "*" : "", \
                 "?" : "", \
                 "\"" : "", \
                 "<" : "less than", \
                 ">" : "greater than", \
                 "|" : " and ", \
                 "&" : " or ", \
                 }
    for key in specChars.keys():
        filename = filename.replace(key, specChars[key])
    return filename
def getPageUrlList(url):
    # fetch the contents page and collect every article's url and title
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
    header = { 'User-Agent' : user_agent }
    request = urllib2.Request(url, None, header)
    response = urllib2.urlopen(request)
    data = response.read()

    soup = BeautifulSoup(data)
    pageListDocs = soup.find_all(id="article_list")
    # articles as {url : title}
    articleUrlTitle = {}
    for pageList in pageListDocs:
        h1List = pageList.find_all('h1')
        for articleList in h1List:
            hrefDocs = articleList.find_all("a")
            if len(hrefDocs) > 0:
                articleHrefDoc = hrefDocs[0]
                articleUrl = blog + articleHrefDoc["href"]
                articleTitle = articleHrefDoc.text
                articleUrlTitle[articleUrl] = articleTitle
    print 'the count of articles is', len(articleUrlTitle)
    return articleUrlTitle
def download(url, title):
    # download one article and save it as a markdown file
    logging.info(" >> download: " + url)
    print 'downloading the article', title
    data = None
    title = '"' + title + '"'
    categories = ""
    content = ""
    postDate = datetime.datetime.now()   # fallback in case the page carries no post date

    # retry a few times; csdn stops responding if you hit it too fast
    count = 0
    while count < gRetryCount:
        count += 1
        try:
            time.sleep(2.0)
            user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
            header = { 'User-Agent' : user_agent }
            request = urllib2.Request(url, None, header)
            response = urllib2.urlopen(request)
            data = response.read()
            break
        except Exception:
            exstr = traceback.format_exc()
            logging.info(" >> failed to download " + url + ", retry: " + str(count) + ", error:" + exstr)

    if data is None:
        logging.info(" >> failed to download " + url)
        return

    soup = BeautifulSoup(data)
    # pull the category and the post date out of the article header
    manageDocs = soup.find_all("div", "article_manage")
    for managerDoc in manageDocs:
        categoryDoc = managerDoc.find_all("span", "link_categories")
        if len(categoryDoc) > 0:
            categories = categoryDoc[0].a.get_text().encode('UTF-8').strip()
            categories = categories.decode('utf-8').encode('gb2312')
        postDateDoc = managerDoc.find_all("span", "link_postdate")
        if len(postDateDoc) > 0:
            postDateStr = postDateDoc[0].string.encode('UTF-8').strip()
            postDate = datetime.datetime.strptime(postDateStr, '%Y-%m-%d %H:%M')
            print 'date', postDate

    # convert the article body to markdown and write it out
    contentDocs = soup.find_all(id="article_content")
    for contentDoc in contentDocs:
        htmlContent = contentDoc.prettify().encode('UTF-8')
        content = htmlContent2String(htmlContent)

    exportToMarkdown(outputDir, postDate, categories, title, content)
# htmlContent2String is the heart of the program: it turns the html into markdown
def htmlContent2String(contentStr):
    # the html can have newlines scattered anywhere, so [\s\S] is used to match any
    # character including newlines; the trailing ? keeps the match non-greedy
    # image links, e.g.
    # <img src="https://img-blog.csdn.net/20150118194525562" align="middle" width="400" height="300" alt="">
    patternImg = re.compile(r'(<img[\s\S]+?src=")([\s\S]+?)("[\s\S]+?>)')
    # text links, e.g.
    # <a target="_blank" href="http://blog.csdn.net/gugugujiawei/article/details/42558411">post</a>
    patternHref = re.compile(r'(<a[\s\S]+?href=")([\s\S]*?)("[\s\S]*?>)([\s\S]+?)(</a>)')
    # strip every remaining html tag; this ? instead means the / occurs 0 or 1 times
    patternRemoveHtml = re.compile(r'</?[^>]+>')

    resultContent = patternImg.sub(r'![image_mark](\2)', contentStr)
    resultContent = patternHref.sub(r'[\4](\2)', resultContent)
    resultContent = re.sub(patternRemoveHtml, r'', resultContent)
    resultContent = decodeHtmlSpecialCharacter(resultContent)
    return resultContent
def exportToMarkdown(exportDir, postdate, categories, title, content):
    contentDate = postdate.strftime('%Y-%m-%d %H:%M:%S %z')
    filename = replaceInvalidCharInFilename(title)
    filepath = exportDir + filename + '.txt'
    newFile = open(filepath, 'a+')
    # hexo front matter: uncomment or drop lines to taste;
    # categories and tags share the same value here
    # newFile.write('---' + '\n')
    # newFile.write('layout: post' + '\n')
    newFile.write('title: ' + title + '\n')
    newFile.write('date: ' + contentDate + '\n')
    # newFile.write('comments: true' + '\n')
    newFile.write('categories: [' + categories + ']' + '\n')
    newFile.write('tags: [' + categories + ']' + '\n')
    # newFile.write('description: ' + title + '\n')
    # newFile.write('keywords: ' + categories + '\n')
    newFile.write('---' + '\n\n')
    content = content.decode('utf-8').encode('gb18030')
    newFile.write(content)
    newFile.write('\n')
    newFile.close()
if __name__ == "__main__":
    articleUrlTitle = getPageUrlList(url)
    # download the articles in parallel, one thread per article
    threads = []
    for url in articleUrlTitle:
        # the raw titles come wrapped in '\r\n' and padding spaces; strip that off
        patternTitle = re.compile('\r\n *(.+) *\r\n')
        title = patternTitle.sub(r'\1', articleUrlTitle[url])
        t = threading.Thread(target=download, args=(url, title))
        t.start()
        threads.append(t)
    for i in threads:
        i.join()
    print "success"
One final note: I ran this directly on Windows, so the generated .txt files are DOS files (CRLF line endings); editing them on Linux then triggers all kinds of odd formatting problems, so convert the line endings and encoding yourself first.
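If you do need to edit the files on Linux, a one-off conversion along these lines works (a sketch only; 'post.txt' is a placeholder path, and the decode/encode pair matches the gb18030 encoding the script writes):

# one-off cleanup: turn a generated file from CRLF/gb18030 into LF/utf-8
# 'post.txt' is a placeholder for one of the exported files
data = open('post.txt', 'rb').read()
data = data.decode('gb18030').encode('utf-8')   # re-encode to utf-8
data = data.replace('\r\n', '\n')               # drop the DOS carriage returns
open('post.txt', 'wb').write(data)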