Simulating a LinkedIn Login with Jsoup in Java (Java Web Crawler)
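The flow is two requests: a GET to LinkedIn's login page to pick up the hidden loginCsrfParam field and the initial cookies, then a POST of the credentials to the login-submit endpoint with those cookies attached. The method below assumes the following imports (RegexUtil is a small helper class, sketched after the method):

 import java.net.URL;
 import java.util.HashMap;
 import java.util.Map;

 import org.jsoup.Connection;
 import org.jsoup.Connection.Method;
 import org.jsoup.Jsoup;
 import org.jsoup.nodes.Document;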

 public String login() {
        String pwd = "XXXXXXXXXXXXXXX";          // password
        String account = "XXXXXXXXXXXXXXXXXXXX"; // account (login email)
        String nameId = "";                      // public profile identifier, filled in after login
        String csrf_token = "";
        Map<String, String> cookie = new HashMap<String,String>();  // cookies from the login page
        Map<String, String> cookies = new HashMap<String,String>(); // final authenticated session cookies
        try {
            String url="https://www.linkedin.com/login?trk=guest_homepage-basic_nav-header-signin";
            Connection connect = Jsoup.connect(url);
            connect.header("authority","www.linkedin.com");
            connect.header("cache-control","max-age=0,no-cache");
            connect.header("user-agent","Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36");
            connect.header("accept","text/html,appli cation/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8");
            connect.header("referer","https://www.baidu.com/s?ie=·-8&wd=%E9%A2%86%E8%8B%B1");
            connect.header("accept-encoding","gzip, deflate, br");
            connect.header("accept-language","en-US;q=0.8,en;q=0.7");


            Connection.Response response = connect.ignoreContentType(true)
                    .execute();
            Document parse = response.parse();
            // grab the hidden CSRF field that LinkedIn embeds in the login form
            String loginCsrfParam = parse.select("input[name=loginCsrfParam]").get(0).attr("value");
            cookie = response.cookies();
            System.out.println(cookie);
            // the JSESSIONID cookie value doubles as the CSRF token for subsequent requests
            for (String s : cookie.keySet()) {
                if(s.equalsIgnoreCase("JSESSIONID")){
                    csrf_token=cookie.get(s);
                    System.out.println(csrf_token);
                    break;
                }
            }


            // form fields expected by the login-submit endpoint
            Map<String, String> datas = new HashMap<String,String>();
            datas.put("session_key", account);
            datas.put("session_password",pwd );
            datas.put("isJsEnabled", "false");
            datas.put("loginCsrfParam",loginCsrfParam);
            datas.put("fp_data", "default");
            datas.put("undefined", "");
            url="https://www.linkedin.com/uas/login-submit?loginSubmitSource=GUEST_HOME";
            Connection connection2 = Jsoup.connect(url);
            connection2.header("authority","www.linkedin.com");
            connection2.header("cache-control","max-age=0,no-cache");
            connection2.header("origin","https://www.linkedin.com");
            connection2.header("upgrade-insecure-requests","1");
            connection2.header("content-type","application/x-www-form-urlencoded");
            connection2.header("User-Agent", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36");
            connection2.header("accept","text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8");
            connection2.header("referer","https://www.linkedin.com/");
            connection2.header("accept-encoding","gzip, deflate, br");
            connection2.header("accept-language","en-US;q=0.8,en;q=0.7");
            Connection.Response execute = connection2
                    .data(datas)
                    .cookies(cookie)   // send back the cookies picked up from the login page
                    .method(Method.POST)
                    .execute();
            URL referer = execute.url(); // final URL after any redirects
            Map<String, String> executeCookie = execute.cookies();
            Document parse1 = execute.parse();
            String result = parse1.toString();
            // If LinkedIn challenges the login with a verification step, keep the
            // pre-login cookies and merge the new ones in; otherwise the response
            // cookies alone carry the authenticated session.
            if (result.contains("Sign-In Verification") || result.contains("verification code")) {
                cookie.putAll(executeCookie);
                cookies = cookie;
            } else {
                cookies = executeCookie;
            }
            // RegexUtil is a small project helper (sketched below) that returns a
            // capture group from the first regex match in the page source.
            nameId = RegexUtil.getValue("\"publicIdentifier\":\"(.*?)\"", result, 1).trim();
            System.out.println("Login successful");
            return "success";
        } catch (Exception e) {
            System.out.println("Login failed: " + e.getMessage());
            return "error";
        }

    }
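RegexUtil.getValue is not part of Jsoup; it is a small helper for pulling a regex capture group out of a page's source. Its implementation is not shown in the original, so the following is a minimal sketch that matches the call signature used above:

 import java.util.regex.Matcher;
 import java.util.regex.Pattern;

 public class RegexUtil {
     /**
      * Returns the given capture group from the first match of the pattern
      * in the input, or an empty string if nothing matches.
      */
     public static String getValue(String regex, String input, int group) {
         Matcher m = Pattern.compile(regex).matcher(input);
         if (m.find()) {
             return m.group(group);
         }
         return "";
     }
 }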

 

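Once the login succeeds, the merged cookies map carries the authenticated session. In the method above cookies is a local variable, so to crawl pages as the logged-in user you would keep it somewhere (a field, or have login() return the map instead of a status string) and attach it to later requests. A sketch, where sessionCookies is a hypothetical field holding that map:

 // Fetch a page as the logged-in user, reusing the session cookies from login().
 // "sessionCookies" is a hypothetical field holding the cookies map built above.
 Document feed = Jsoup.connect("https://www.linkedin.com/feed/")
         .header("user-agent", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36")
         .cookies(sessionCookies)
         .get();
 System.out.println(feed.title());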