1 #-*- coding:utf-8 -*-
2 from multiprocessing.managers importBaseManager3 from pyquery importPyQuery4 importos, sys, urllib5 importre, random, logging, time6 importQueue, threading, multiprocessing, threadpool7
# Crawl configuration.  USER_NAME may be overridden from argv in __main__,
# which then rebuilds BASE_URL and ARTICLE_REGEXP to match.
USER_NAME = 'kirai'

# Filled in at runtime by __main__ from the blog's pager widget.
TOTAL_PAGE_NUMBER = 0

# First run of digits in a string (used for page counts and article ids).
INT_REGEXP = re.compile('([\d]+)')

# Listing page for the user's posts; a page number is appended per request.
BASE_URL = 'http://www.cnblogs.com/'+USER_NAME+'/p/?page='

# Extracts absolute article URLs from a listing page's HTML.
ARTICLE_REGEXP = re.compile('href=\"(http://www.cnblogs.com/'+USER_NAME+'/p/[\d]+.html)\"')

# Worker-pool width: twice the core count (the work is network-bound).
THREAD_NUMBER = multiprocessing.cpu_count() * 2

# Results collected by the page-scraping callbacks; the lock guards appends
# made from multiple pool threads.
ARTICLE_URLS_MUTEX = threading.Lock()
ARTICLE_URLS = []
17 classListWithLinkExtend(list):18 defextend(self, value):19 super(ListWithLinkExtend, self).extend(value)20 returnself21
22 defget_total_page_number():23 doc = PyQuery(url=BASE_URL)24 returnint(INT_REGEXP.findall(25 doc.find('.pager .Pager').text())[0].encode('ascii'))26
27 defget_page_url():28 globalTOTAL_PAGE_NUMBER29 return map(lambda page: BASE_URL+str(page),30 [i for i in range(1, TOTAL_PAGE_NUMBER+1)])31
32 defget_article_url(idx):33 url =PAGE_URLS[idx]34 doc = PyQuery(url=url)35 article_urls = ARTICLE_REGEXP.findall(str(doc.find('.PostList .postTitl2')))36 returnarticle_urls37
38 defhandle_result(request, result):39 globalARTICLE_URLS_MUTEX, ARTICLE_URLS40 try:41 ARTICLE_URLS_MUTEX.acquire()42 ARTICLE_URLS.append(result)43 finally:44 ARTICLE_URLS_MUTEX.release()45
def cluster_process():
    """Distributed-mode skeleton: expose task/result queues over the network.

    Binds a manager on port 6969 so remote workers could pull page URLs and
    push back saved-file paths.  Bug fixed: the original referenced an
    undefined ``KiraiManager`` name, raising NameError on first call; the
    BaseManager subclass is now defined locally.  NOTE(review): the manager
    is shut down immediately after starting — this function appears to be
    an unfinished stub.
    """
    global ARTICLE_URLS
    # list : urls
    task_queue = Queue.Queue()
    # str : path
    result_queue = Queue.Queue()

    class KiraiManager(BaseManager):
        # Dedicated subclass so queue accessors are registered without
        # mutating the shared BaseManager class.
        pass

    KiraiManager.register('get_task_queue', callable=lambda: task_queue)
    KiraiManager.register('get_result_queue', callable=lambda: result_queue)
    manager = KiraiManager(address=('', 6969), authkey='whosyourdaddy')
    manager.start()
    manager.shutdown()
58
# A simpler single-machine approach: fetch and save articles with a local
# thread pool instead of the distributed manager above.
60 defget_article(url):61 html =urllib.urlopen(url).read()62 returnhtml, INT_REGEXP.findall(url)[0]63
def save_article(request, result):
    """threadpool callback: write one article to ./<USER_NAME>/<id>.html.

    ``result`` is the (html, article_id) pair returned by get_article;
    ``request`` is supplied by threadpool and unused.

    Bug fixed: in the original, a failure while opening the file left
    ``fp`` unbound, so the ``finally`` block raised NameError and masked
    the real error.  ``with open(...)`` closes the file safely in every
    case.
    """
    content, file_name = result
    path = './' + USER_NAME + '/' + file_name + '.html'
    with open(path, 'w') as fp:
        fp.writelines(content)
74 defthread_process():75 globalARTICLE_URLS76 os.mkdir(USER_NAME)77 thread_pool =threadpool.ThreadPool(THREAD_NUMBER)78 requests =threadpool.makeRequests(get_article, ARTICLE_URLS, save_article)79 [thread_pool.putRequest(req) for req inrequests]80 thread_pool.wait()81
def __main__(argv):
    """Entry point: crawl every article for USER_NAME and save it locally.

    Usage: python crawler.py [user_name] — with one argument the target
    blog (and the URL/regexp constants derived from it) is switched to
    that user.

    Fixes: TOTAL_PAGE_NUMBER was listed twice in the ``global`` statement;
    the reduce()-based flatten raised TypeError when no pages were found
    (empty ARTICLE_URLS); side-effect list comprehension replaced by a
    plain loop.
    """
    global ARTICLE_URLS, TOTAL_PAGE_NUMBER, USER_NAME, BASE_URL, \
        ARTICLE_REGEXP, PAGE_URLS
    if len(argv) == 2:
        # Rebuild the user-dependent constants for the requested blog.
        USER_NAME = argv[1]
        BASE_URL = 'http://www.cnblogs.com/'+USER_NAME+'/p/?page='
        ARTICLE_REGEXP = re.compile('href=\"(http://www.cnblogs.com/'+USER_NAME+'/p/[\d]+.html)\"')
    TOTAL_PAGE_NUMBER = get_total_page_number()
    PAGE_URLS = get_page_url()
    thread_pool = threadpool.ThreadPool(THREAD_NUMBER)
    requests = threadpool.makeRequests(
        get_article_url,
        range(TOTAL_PAGE_NUMBER),
        handle_result)
    for req in requests:
        thread_pool.putRequest(req)
    thread_pool.wait()
    # Flatten the per-page URL lists; unlike reduce(), this also handles
    # the zero-page case by yielding an empty list.
    ARTICLE_URLS = [url for page_urls in ARTICLE_URLS for url in page_urls]
    thread_process()
101 if __name__ == '__main__':102 __main__(sys.argv)