前段时间,学校发通知要进行课程评估,女票评估完所有课程花了5分钟,而且评估主要是机械化操作,真心觉得烦。为了节省暨南大学同学宝贵的5分钟,本人研究了一下课程评估系统的运作过程,写出了以下自动化评估脚本(为了使用方便,特意写了两个版本,一个直接使用账号密码登录,但是需要验证码识别的python环境,另一个使用cookie):
#! /usr/bin/env python
#-- coding: utf-8 --
import urllib
import urllib2
from PIL import Image,ImageEnhance
from pytesser import *
import cookielib
import re
from bs4 import BeautifulSoup
#设置登录账号和密码
username = "" #学号
password = "" #密码
#识别验证码
while 1:
cookie = cookielib.CookieJar()
handler=urllib2.HTTPCookieProcessor(cookie)
opener = urllib2.build_opener(handler)
response = opener.open('http://undergraduate.jnu.edu.cn/ces/')
keepcookie = ''
for item in cookie:
keepcookie = 'JSESSIONID=%s' %(item.value)
print keepcookie
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.70 Safari/537.36',
'Connection': 'keep-alive',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Cookie': keepcookie}
url = 'http://undergraduate.jnu.edu.cn/ces/servlet/ValidateCodeServlet'
req = urllib2.Request(url,headers=header)
im = urllib2.urlopen(req).read()
with open('download.jpg','wb') as f:
f.write(im)
im = Image.open('download.jpg')
box = (4,5,48,21) #裁剪图像,提高识别的正确率
region = im.crop(box)
region.save("image_code.jpg")
imgry = region.convert('L')
sharpness =ImageEnhance.Contrast(imgry) #增强对比度,提高识别的正确率
sharp_img = sharpness.enhance(2.0)
sharp_img.save("image_L.jpg")
text = image_to_string(sharp_img)
if text.strip().isdigit():
print "validcode:"
print text.strip()
break
# --- Log in with the recognised captcha ---
# BUG FIX: the digit check above was performed on text.strip(), but the raw
# OCR output (which may carry a trailing newline or spaces) was posted.
# Post the stripped value so the server receives exactly the digits.
post_data = {
    "username": username,
    "password": password,
    "validateCode": text.strip()
}
post_data_urlencode = urllib.urlencode(post_data)
requrl = 'http://undergraduate.jnu.edu.cn/ces/sys/Login/login.do?method=login'
req = urllib2.Request(url=requrl, headers=header, data=post_data_urlencode)
# Use the cookie-aware opener built above so the session is preserved.
result = opener.open(req)
# --- Collect (classNo, courseNo) pairs for every course awaiting evaluation ---
url = 'http://undergraduate.jnu.edu.cn/ces/zk/EvaStu/evaList.do'
req = urllib2.Request(url, headers=header)
wbdata = urllib2.urlopen(req).read()
soup = BeautifulSoup(wbdata, "html.parser")
# BUG FIX: the original passed attrs={"class","btn_operate"} — a *set*
# literal, not a dict — so the intended class filter was never expressed.
nodeset = soup.findAll(attrs={"class": "btn_operate"})
pattern = re.compile(r'/ces/.+ids=.{5}')
# BUG FIX: extract the parameters by name instead of fixed-width slicing.
# The original sliced 9 chars for classNo but (off by one) only 8 for
# courseNo (link[begi+9:begi+8+9]); regex capture takes the full value
# regardless of its length or position in the query string.
classno_re = re.compile(r'classNo=([^&\'"]+)')
courseno_re = re.compile(r'courseNo=([^&\'"]+)')
coursenums = []
for node in nodeset:
    for link in re.findall(pattern, str(node)):
        cls_m = classno_re.search(link)
        crs_m = courseno_re.search(link)
        if cls_m and crs_m:
            coursenums.append([cls_m.group(1), crs_m.group(1)])
#构造post数据,逐个评估完所有课程
for coursenum in coursenums:
post_data = { "eduBean.classNo":[coursenum[0]],
"eduBean.courseNo":[coursenum[1]],
"eduBean.courseName":["课程名无关"],
"eduBean.subType":["EDU_EVA"],
"eduBean.type":["EVA_STU"],
"eduBean.typeId":["55"],
"eduBean.checkItem":["229:0.20:1106:9.5","230:0.20:1106:9.5","231:0.20:1106:9.5","232:0.20:1106:9.5","233:0.20:1106:9.5"],
"eduBean.msg":[""],
"selfBean.classNo":[coursenum[0]],
"selfBean.courseNo":[coursenum[1]],
"selfBean.courseName":["课程名无关"],
"selfBean.subType":["SELF_EVA"],
"selfBean.type":["EVA_STU"],
"selfBean.typeId":["56"],
"selfBean.checkItem":["234:0.30:1127:9.5","235:0.30:1127:9.5","236:0.40:1127:9.5"],
"selfBean.msg":[""]
}
post_data_urlencode = urllib.urlencode([(k, v) for k, vs in post_data.items() for v in vs])
requrl = 'http://undergraduate.jnu.edu.cn/ces/zk/EvaStu/addEva.do'
req = urllib2.Request(url = requrl,headers = header ,data =post_data_urlencode)
print req
res_data = urllib2.urlopen(req)
res = res_data.read()
print res.decode('utf-8')
term = coursenums[0][0][:5]
#完成最后的提交
submitallurl = 'http://undergraduate.jnu.edu.cn//ces/zk/EvaStu/submitAll.do?xn=%s' % term
print submitallurl
req = urllib2.Request(url = submitallurl,headers = header)
res_data = urllib2.urlopen(req)
res = res_data.read()
#print res.decode('utf-8')
print "over!"
需要安装pytesser库
#! /usr/bin/env python
#-- coding: utf-8 --
import urllib
import urllib2
import re
from bs4 import BeautifulSoup
# Paste the JSESSIONID value of an already-authenticated browser session here.
JSESSIONID = ""
cookie = 'JSESSIONID=' + JSESSIONID
# Request headers reused by every call below; the manually supplied cookie
# stands in for a real login.
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.70 Safari/537.36',
    'Connection': 'keep-alive',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Cookie': cookie}
# --- Collect (classNo, courseNo) pairs for every course awaiting evaluation ---
url = 'http://undergraduate.jnu.edu.cn/ces/zk/EvaStu/evaList.do'
req = urllib2.Request(url, headers=header)
wbdata = urllib2.urlopen(req).read()
soup = BeautifulSoup(wbdata, "html.parser")
# BUG FIX: the original passed attrs={"class","btn_operate"} — a *set*
# literal, not a dict — so the intended class filter was never expressed.
nodeset = soup.findAll(attrs={"class": "btn_operate"})
pattern = re.compile(r'/ces/.+ids=.{5}')
# BUG FIX: extract the parameters by name instead of fixed-width slicing.
# The original sliced 9 chars for classNo but (off by one) only 8 for
# courseNo (link[begi+9:begi+8+9]); regex capture takes the full value
# regardless of its length or position in the query string.
classno_re = re.compile(r'classNo=([^&\'"]+)')
courseno_re = re.compile(r'courseNo=([^&\'"]+)')
coursenums = []
for node in nodeset:
    for link in re.findall(pattern, str(node)):
        cls_m = classno_re.search(link)
        crs_m = courseno_re.search(link)
        if cls_m and crs_m:
            coursenums.append([cls_m.group(1), crs_m.group(1)])
#构造post数据,逐个评估完所有课程
for coursenum in coursenums:
post_data = { "eduBean.classNo":[coursenum[0]],
"eduBean.courseNo":[coursenum[1]],
"eduBean.courseName":["课程名无关"],
"eduBean.subType":["EDU_EVA"],
"eduBean.type":["EVA_STU"],
"eduBean.typeId":["55"],
"eduBean.checkItem":["229:0.20:1106:9.5","230:0.20:1106:9.5","231:0.20:1106:9.5","232:0.20:1106:9.5","233:0.20:1106:9.5"],
"eduBean.msg":[""],
"selfBean.classNo":[coursenum[0]],
"selfBean.courseNo":[coursenum[1]],
"selfBean.courseName":["课程名无关"],
"selfBean.subType":["SELF_EVA"],
"selfBean.type":["EVA_STU"],
"selfBean.typeId":["56"],
"selfBean.checkItem":["234:0.30:1127:9.5","235:0.30:1127:9.5","236:0.40:1127:9.5"],
"selfBean.msg":[""]
}
post_data_urlencode = urllib.urlencode([(k, v) for k, vs in post_data.items() for v in vs])
requrl = 'http://undergraduate.jnu.edu.cn/ces/zk/EvaStu/addEva.do'
req = urllib2.Request(url = requrl,headers = header ,data =post_data_urlencode)
print req
res_data = urllib2.urlopen(req)
res = res_data.read()
print res.decode('utf-8')
term = coursenums[0][0][:5]
#完成最后的提交
submitallurl = 'http://undergraduate.jnu.edu.cn//ces/zk/EvaStu/submitAll.do?xn=%s' % term
print submitallurl
req = urllib2.Request(url = submitallurl,headers = header)
res_data = urllib2.urlopen(req)
res = res_data.read()
#print res.decode('utf-8')
print "over!"
使用cookie版本
运行后的效果如下:
欢迎广大同学使用和参与改进!