A simple crawler in Python

Below is a very simple little crawler script written in Python.

# Python 2 only: urllib2 and urlparse were merged into urllib in Python 3
import os, re
import urlparse, urllib2

class Request(object):
    def __init__(self):
        self.url= ''
        self.error_url = []
        self.undownload_url = []
        self.downloaded_url = []
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) '
                          'Gecko/20091201 Firefox/3.5.6',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
        }

    def read_url_from_file(self):
        urls = []
        try:
            r = open('urls.txt','r')
            urls = r.readlines()
            r.close()
        except IOError:
            url = raw_input("no file urls.txt,please input a url >>> ")
            if not url.lower().startswith('http://'):
                url = 'http://'+url
            urls.append(url)
        return urls


    def is_download_url(self):
        return self.url in self.downloaded_url

    def get(self):
        req = urllib2.Request(self.url, None, self.headers)
        # try up to three times; only keep responses that look like text/html
        for i in range(3):
            try:
                temp = urllib2.urlopen(req)
            except (urllib2.URLError, IOError):
                continue
            if temp.getcode() == 200:
                content_type = temp.headers.get('content-type', '')
                # a missing Content-Type header is treated the same as text
                if 'text' in content_type or not content_type:
                    return temp.read()
                return ''
        return ''
 
    def find_links(self):
        # extract href targets and resolve them against the current page URL
        com = re.compile(r"""<a[^>]*?href\s*=\s*['"]?([^'"\s>]{1,500})['">\s]""", re.I | re.M | re.S)
        links = list(set(com.findall(self.content)))
        full_links = [urlparse.urljoin(self.url, x) for x in links]
        for i in full_links:
            if i not in self.undownload_url and i not in self.downloaded_url:
                self.undownload_url.append(i)

    def save_page(self,page_source,filename):
        dirname = filename.split('/')[0]
        if not os.path.isdir(dirname):
            os.mkdir(dirname)
        w = open(filename,'w')
        w.write(page_source)
        w.close()
              
    def save_result(self,urls,filename):
        w = open(filename,'w')
        w.write('\n'.join(urls))
        w.close()

    def run(self):
        self.page = 0
        self.ldir = 0
        self.num_per_dir = 100
        self.undownload_url = self.read_url_from_file()
        self.url_to_path = {}
        order = 0
        try:
            while self.undownload_url:
                self.url = self.undownload_url.pop(0).strip()
                order = self.page + self.ldir * self.num_per_dir
                print (" %4d download... %s " % (order, self.url))
                self.content = self.get()
                self.downloaded_url.append(self.url)
                if self.content:
                    self.find_links()
                    # pages are stored as <dir>/<page>, num_per_dir pages per directory
                    filename = str(self.ldir) + '/' + str(self.page)
                    self.save_page(self.content, filename)
                    self.url_to_path[str(order)] = self.url
                    self.page += 1
                    if self.page >= self.num_per_dir:
                        self.page -= self.num_per_dir
                        self.ldir += 1
                else:
                    self.error_url.append(self.url)
            print ("finished !")
        finally:
            # always dump progress so an interrupted crawl can be resumed later
            print ("save downloaded urls into downloaded file")
            self.save_result(self.downloaded_url, 'downloaded')
            print ("save error urls into error file")
            self.save_result(self.error_url, 'error')
            print ("save undownloaded urls into undownload file")
            self.save_result(self.undownload_url, 'undownload')
            print ("save url to file dictionary into url_to_file")
            self.save_result([x + '\t' + self.url_to_path[x] for x in self.url_to_path.keys()], 'url_to_file')
            print ("total try to download %d urls" % order)
 
if __name__ == '__main__':
    aa = Request()
    aa.run()
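The script above targets Python 2 (`urllib2`, `urlparse`, and `raw_input` no longer exist under those names in Python 3). As a rough sketch of how the fetch and link-extraction steps could look on Python 3's standard library, the names `fetch` and `extract_links` below are made up here for illustration:

```python
# A minimal Python 3 sketch of the fetch + link-extraction steps above.
# Standard library only; fetch() and extract_links() are illustrative
# names, not part of the original script.
import re
import urllib.request
from urllib.parse import urljoin

HEADERS = {'User-Agent': 'Mozilla/5.0', 'Accept': 'text/html'}
LINK_RE = re.compile(r"""<a[^>]*?href\s*=\s*['"]?([^'"\s>]{1,500})['">\s]""", re.I)

def fetch(url):
    """Return the decoded page body, or '' on errors / non-text responses."""
    req = urllib.request.Request(url, headers=HEADERS)
    try:
        with urllib.request.urlopen(req, timeout=10) as resp:
            # treat a missing Content-Type the same as text, like the original get()
            if 'text' in resp.headers.get('Content-Type', 'text'):
                return resp.read().decode('utf-8', errors='replace')
    except OSError:
        pass
    return ''

def extract_links(base_url, html):
    """Apply the same href regex and resolve matches against the page URL."""
    return [urljoin(base_url, href) for href in set(LINK_RE.findall(html))]
```

The rest of the crawl loop (the URL queue and the per-directory page saving) would carry over essentially unchanged.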
 
Here is a complete Python code example for an image crawler (this one uses the third-party `requests` and `BeautifulSoup` libraries):

```python
import os
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

def download_images(url):
    # Send a GET request to the website
    response = requests.get(url)
    # Check if the request was successful
    if response.status_code == 200:
        # Use BeautifulSoup to parse the HTML
        soup = BeautifulSoup(response.content, 'html.parser')
        # Find all image tags on the page
        img_tags = soup.find_all('img')
        # Create a directory to store the images
        os.makedirs('images', exist_ok=True)
        # Loop through the list of image tags
        for img in img_tags:
            # Check if the image tag has a 'src' attribute
            if 'src' in img.attrs:
                # Resolve the (possibly relative) image URL against the page URL
                img_url = urljoin(url, img.attrs['src'])
                # Create a file name from the image URL
                filename = img_url.split('/')[-1]
                # Create the file path to store the image
                filepath = os.path.join('images', filename)
                # Download and store the image
                with open(filepath, 'wb') as file:
                    # Send a GET request to download the image
                    img_response = requests.get(img_url)
                    # Write the data into the file
                    file.write(img_response.content)
                print(f"Downloaded: {filename}")

# Example usage
url = 'https://example.com'
download_images(url)
```

In this code, replace `'https://example.com'` with the URL of the website you want to crawl images from. The script creates an `images` directory (if it does not already exist) and downloads all the images found on the webpage into that directory. Please note that crawling and downloading images from websites should be done responsibly and in compliance with the website's terms of service.
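One concrete way to stay within a site's rules is to check its robots.txt before fetching. Below is a minimal sketch using the standard-library `urllib.robotparser`; the helper name `allowed_to_fetch` and the choice to treat an unreadable robots.txt as "no restrictions" are assumptions made here for illustration:

```python
# Minimal robots.txt check before crawling a URL.
# allowed_to_fetch() is an illustrative helper, not part of the scripts above.
from urllib.parse import urlparse
from urllib.robotparser import RobotFileParser

def allowed_to_fetch(url, user_agent='*'):
    """Return True if the site's robots.txt permits fetching this URL."""
    parts = urlparse(url)
    rp = RobotFileParser()
    rp.set_url('%s://%s/robots.txt' % (parts.scheme, parts.netloc))
    try:
        rp.read()             # download and parse robots.txt
    except OSError:
        return True           # assumption: unreadable robots.txt means no restrictions
    return rp.can_fetch(user_agent, url)

# Example usage:
# if allowed_to_fetch('https://example.com/some/page'):
#     download_images('https://example.com/some/page')
```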