VeryCD那个铜光盘才能进候补区的规定很烦人,必须得灌水才能升级,可是又懒得灌,于是写了一个灌水脚本,有人问我要,就干脆发出来共享一下吧。
`
使用方法:
1.把username和passwd改成相应的东东
2.用python解释器执行脚本,自动在首页的所有资源后面跟帖支持楼主
`
想要了解灌水机原理的话:
1.学会python基本原理
2.学会正则表达式,网络脚本必备(也可以学beautifulsoup,写的代码更直观易懂,不过正则通用性强,总要学的)
3.学会html基本格式,理解获取网页的GET/POST机制
4.然后再看代码吧,这个脚本因为很简单,所以注释了一下
`
要实现每日灌水的话,linux下建个cron job,windows下计划任务,两周以后就升铜可以去那啥候补区闲逛了。
#!/usr/bin/env python
#coding:utf-8
#author:observer
#http://obmem.info/old
import urllib,urllib2,cookielib
import re,time,random
username = 'username' # change this to your own VeryCD username
passwd = 'password' # change this to your own password
# Canned reply texts; edit freely -- one entry is picked at random per post.
msg = [ '多谢楼主',
'Mark',
'正在找这个,谢谢',
'谢谢分享',
]
def login():
'''这个就是登录脚本'''
print 'try to login...'
#登录需要准备cookie
cookie=cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie), urllib2.HTTPHandler)
urllib2.install_opener(opener)
#先获取verycd的fk串,用于填表
print '...getting login form...'
loginform = urllib2.urlopen('http://secure.verycd.com/signin/*/http://www.verycd.com/').read()
fk = re.compile(r'id="fk" value="(.*)"').findall(loginform)[0]
#好的,现在填表
postdata=urllib.urlencode({'username':username,
'password':passwd,
'continueURI':'http://www.verycd.com/',
'fk':fk,
'login_submit':'登录',
})
req = urllib2.Request(
url = 'http://secure.verycd.com/signin/*/http://www.verycd.com/',
data = postdata
)
#填header,伪装成正常浏览访问,这是一种写法,也可以用我之前那篇文章的写法,更有条理一点
req.add_header('User-Agent','Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6')
req.add_header('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
req.add_header('Accept-Encoding','gzip,deflate')
#交登录表,然后就登录成功了
print '...login form submitted'
result = urllib2.urlopen(req).read()
print '...login succeed!'
def farm():
'''这个就是灌水函数了'''
#读取主页,获得主页的所有资源id
res = urllib.urlopen('http://www.verycd.com').read()
topics = re.compile(r'/topics/(\d+)').findall(res)
topics = set(topics)
print topics
#对每一个获取的资源,回一贴支持楼主:)
#语法和刚才交登录表差不多
for topic in topics:
url = 'http://www.verycd.com/topics/'+str(topic)+'/reply#fast-replay'
print url
postData = {
'contents':random.choice(msg),
'use_bbcode':'1',
'tid':str(topic),
'Action':'FolderCommentOperate:doReplyFolder'
}
postData = urllib.urlencode(postData)
req = urllib2.Request(url = url, data = postData )
kk = urllib2.urlopen(req).read()
time.sleep(random.randint(1,10)) #随机等待一会再发贴
if __name__ == '__main__':
    # Log in first, then post a reply under every front-page topic.
    login()
    farm()
-------------------------------------------------------------------
#! /usr/bin/python
# -*- coding: cp936 -*-
import urllib, urllib2, cookielib
import re, time, random
#####################################
#
# Globals
#
#####################################
site = 'http://www.*******.com.cn/bbs'  # forum base URL (redacted by the author)
catagory = '2'  # forum category id (query-string value, kept as a string)
# Seconds to wait between replies. BUG FIX: this was the string '900',
# which makes time.sleep() raise TypeError -- it must be a number.
sleep_interval = 900
# Canned reply texts; one is picked at random for each post.
content_choices = ['关注一下', '帮顶', '顶', '路过', '这个话题……', '大家好!', '看看',
                   '结果如何', '走过路过', '瞧一瞧,看一看']
content_max_index = len(content_choices) - 1  # upper bound for randint below
####################################
#
# Store the cookie
#
####################################
# The CookieJar keeps the ASP session cookie so later requests stay logged in.
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
########################
#
# setup connection
#
########################
# Hit the index page once so the server assigns a session cookie before login.
conn = opener.open(site + '/index.asp')
####################################
#
# Find the passcode and prompt user
#
####################################
conn = opener.open(site + '/code.asp')
conn.read(54) # the header of bitmap file
debug = ['', '', '', '', '', '', '', '', '', '']
s = ['', '', '', '']
bg = '\xee\xee\xee'
for x in range(0, 10):
for y in range(0, 4):
for z in range(0, 10):
data = conn.read(3)
if data == bg:
s[y] += '1'
debug[x] += ' '
else:
s[y] += '0'
debug[x] += '#'
#debug[x] += '|'
for x in range (9, -1, -1):
print debug[x]
passcode = raw_input('\nPlease input passcode as above: ')
params = {'username':'******', 'UserPassword':'******', 'passcode':passcode}
data = urllib.urlencode(params)
req = urllib2.Request(site + '/chkuserj.asp', data)
conn = opener.open(req);
print conn.read(200)
##############################
#
# find titles
#
##############################
conn = opener.open(site + '/jj.asp?fenlei=' + catagory)
reo = re.compile(r'titleid=(\d+)');
f = conn.readlines()
post_count = 0
for line in f:
if line.find('titleid=') != -1:
m = reo.search(line);
tid = m.group(1)
# Get random data for watering the topic, hehe
index = random.randint(0, content_max_index)
userface = random.randint(1,24)
params = {'title':'Re:', 'body':content_choices[index], 'userface':str(userface)}
data = urllib.urlencode(params)
# post my replies
print 'Re: ' + tid + ' with data: ' + content_choices[index]
#conn = opener.open(site + '/saveh.asp?fenlei=' + catagory + '&titleid=' + tid, data)
# This is a bug of the website. There is no catagory 33 at all. The post is lost,
# but the post score of the user has been increased. hehe.
conn = opener.open(site + '/saveh.asp?fenlei=' + '33' + '&titleid=' + tid, data)
#print conn.read()
conn.close()
post_count = post_count + 1
# Don't make a flood
time.sleep(sleep_interval)
print '\n\nTotal posted: ' + post_count