当当网商品详情API接口(当当商品详情接口,当当商品评论接口,当当商品问答接口,当当抢购价接口,当当商品列表接口)代码对接教程

当当网商品详情API接口(当当商品详情接口,当当商品评论接口,当当商品问答接口,当当抢购价接口,当当商品列表接口,关键词搜索当当网商品接口)代码对接教程如下

1.公共参数

名称类型必须描述(接口代码教程wx19970108018)
keyString调用key(必须以GET方式拼接在URL中,点击获取请求key和secret)
secretString调用密钥
api_nameStringAPI接口名称(包括在请求地址中)[item_search,item_get,item_search_shop等]
cacheString[yes,no]默认yes,将调用缓存的数据,速度比较快
result_typeString[json,jsonu,xml,serialize,var_export]返回数据格式,默认为json,jsonu输出的内容中文可以直接阅读
langString[cn,en,ru]翻译语言,默认cn简体中文
versionStringAPI版本

2.请求参数

请求参数:num_iid=25122027

参数说明:num_iid:当当商品ID

备注:可以支持高并发调用接口 

3.请求示例(CURL、PHP 、PHPsdk 、Java 、C# 、Python…)

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.Reader;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;

import org.json.JSONException;
import org.json.JSONObject;

/**
 * Minimal client for the Dangdang item-detail HTTP API: issues GET/POST
 * requests and parses the JSON body of the response.
 */
public class Example {
	/**
	 * Reads the remaining contents of {@code rd} into a String.
	 *
	 * @param rd the reader to drain; not closed by this method
	 * @return everything read from the reader
	 * @throws IOException if reading fails
	 */
	private static String readAll(Reader rd) throws IOException {
		StringBuilder sb = new StringBuilder();
		// Buffered chunk reads instead of one char per call — far fewer
		// method invocations for large responses.
		char[] buf = new char[4096];
		int n;
		while ((n = rd.read(buf)) != -1) {
			sb.append(buf, 0, n);
		}
		return sb.toString();
	}

	/**
	 * Sends {@code body} as a POST request to {@code url} and parses the
	 * response as JSON.
	 *
	 * @param url  the endpoint to post to
	 * @param body the raw request body, written as UTF-8
	 * @return the parsed JSON response
	 * @throws IOException   on connection or read/write failure
	 * @throws JSONException if the response is not valid JSON
	 */
	public static JSONObject postRequestFromUrl(String url, String body) throws IOException, JSONException {
		URLConnection conn = new URL(url).openConnection();
		conn.setDoOutput(true);
		conn.setDoInput(true);
		// Encode the body explicitly as UTF-8: a bare PrintWriter(OutputStream)
		// would use the platform default charset and corrupt non-ASCII data.
		try (PrintWriter out = new PrintWriter(new OutputStreamWriter(conn.getOutputStream(), StandardCharsets.UTF_8))) {
			out.print(body);
			out.flush();
		}
		// try-with-resources guarantees the stream is closed even when
		// readAll or the JSON parse throws.
		try (BufferedReader rd = new BufferedReader(new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
			return new JSONObject(readAll(rd));
		}
	}

	/**
	 * Performs a GET request to {@code url} and parses the response as JSON.
	 *
	 * @param url the endpoint to fetch (query parameters already encoded)
	 * @return the parsed JSON response
	 * @throws IOException   on connection or read failure
	 * @throws JSONException if the response is not valid JSON
	 */
	public static JSONObject getRequestFromUrl(String url) throws IOException, JSONException {
		URLConnection conn = new URL(url).openConnection();
		try (BufferedReader rd = new BufferedReader(new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
			return new JSONObject(readAll(rd));
		}
	}

	public static void main(String[] args) throws IOException, JSONException {
		// 请求示例 url 默认请求参数已经URL编码处理
		// Replace <您自己的apiKey>/<您自己的apiSecret> with your credentials.
		String url = "https://wx19970108018/dangdang/item_get/?key=<您自己的apiKey>&secret=<您自己的apiSecret>&num_iid=25122027";
		JSONObject json = getRequestFromUrl(url);
		System.out.println(json.toString());
	}

}

 4.响应示例

{
	"item": {
		"num_iid": "25122027",
		"title": "朗读者(1-3辑)(荣获2017年度大众喜爱的50种图书)",
		"desc_short": "",
		"price": "156.00",
		"total_price": 0,
		"suggestive_price": 0,
		"orginal_price": "156.00",
		"nick": "",
		"num": "",
		"min_num": 0,
		"detail_url": "http://product.dangdang.com/25122027.html",
		"pic_url": "//img3m7.ddimg.cn/84/26/25122027-1_u_8.jpg",
		"brand": "",
		"brandId": "",
		"rootCatId": "",
		"cid": "",
		"crumbs": [],
		"created_time": "",
		"modified_time": "",
		"delist_time": "",
		"desc": "",
		"desc_img": [],
		"item_imgs": [
			{
				"url": "//img3m7.ddimg.cn/84/26/25122027-1_u_8.jpg"
			},
			{
				"url": "//img3m7.ddimg.cn/84/26/25122027-2_u_3.jpg"
			},
			{
				"url": "//img3m7.ddimg.cn/84/26/25122027-3_u_1.jpg"
			},
			{
				"url": "//img3m7.ddimg.cn/84/26/25122027-4_u_1.jpg"
			},
			{
				"url": "//img3m7.ddimg.cn/84/26/25122027-5_u_1.jpg"
			},
			{
				"url": "//img3m7.ddimg.cn/84/26/25122027-6_u_1.jpg"
			}
		],
		"item_weight": "",
		"item_size": "",
		"location": "",
		"post_fee": "",
		"express_fee": "",
		"ems_fee": "",
		"shipping_to": "",
		"has_discount": "",
		"video": [],
		"is_virtual": "",
		"sample_id": "",
		"is_promotion": "",
		"props_name": "",
		"prop_imgs": {
			"prop_img": []
		},
		"property_alias": "",
		"props": [
			{
				"name": "开 本",
				"value": "32开"
			},
			{
				"name": "纸 张",
				"value": "胶版纸"
			},
			{
				"name": "包 装",
				"value": "平装-胶订"
			},
			{
				"name": "是否套装",
				"value": "是"
			},
			{
				"name": "国际标准书号ISBN",
				"value": "25122027"
			}
		],
		"total_sold": "",
		"skus": {
			"sku": []
		},
		"seller_id": "",
		"sales": 0,
		"shop_id": "",
		"props_list": [],
		"seller_info": {
			"nick": "",
			"shop_name": "",
			"city": "",
			"level": "",
			"rate": "",
			"totle_pf": "",
			"desc_pf": "",
			"server_pf": "",
			"send_pf": "",
			"shop_type": "",
			"user_num_id": "",
			"shopid": "",
			"shopname": "",
			"zhuy": "http://shop.dangdang.com/",
			"tel": "",
			"menu": []
		},
		"tmall": "",
		"error": "",
		"warning": "",
		"url_log": "",
		"sold": "88930",
		"stuff_status": "",
		"props_img": [],
		"shopinfo": {
			"shop_name": ""
		},
		"promo_type": "",
		"shop_item": [],
		"relate_items": [],
		"format_check": "ok"
	},
	"error": "",
	"secache": "665a65ebbf05689bd424affec75b2af6",
	"secache_time": 1661739308,
	"secache_date": "2022-08-29 10:15:08",
	"translate_status": "",
	"translate_time": 0,
	"language": {
		"default_lang": "cn",
		"current_lang": "cn"
	},
	"reason": "",
	"error_code": "0000",
	"cache": 0,
	"api_info": "today:21 max:10100 all[77=21+29+27];expires:2030-12-31",
	"execution_time": "0.325",
	"server_time": "Beijing/2022-08-29 10:15:08",
	"client_ip": "106.6.36.86",
	"call_args": {
		"num_iid": "25122027"
	},
	"api_type": "dangdang",
	"translate_language": "zh-CN",
	"translate_engine": "baidu",
	"server_memory": "0.76MB",
	"request_id": "gw-4.630c212c5c5cf",
	"last_id": "1181945063"
}

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
使用Python爬虫爬取当当网的数据可以分为以下几个步骤: 1. 确定目标网站和爬虫策略:确定需要爬取哪些页面和数据,选择合适的爬虫策略,比如使用requests库发送HTTP请求,使用beautifulsoup库解析HTML等。 2. 发送HTTP请求获取页面内容:使用requests库发送HTTP请求获取目标网站的页面内容。 3. 解析HTML获取所需数据:使用beautifulsoup库对页面内容进行解析,提取出所需要的数据。 4. 存储数据:将获取到的数据存储到本地文件或者数据库中。 下面是一个爬取当当网图书信息的示例代码: ```python import requests from bs4 import BeautifulSoup # 发送HTTP请求获取页面内容 url = 'http://search.dangdang.com/?key=python&act=input' response = requests.get(url) html = response.text # 解析HTML获取所需数据 soup = BeautifulSoup(html, 'html.parser') book_list = soup.find_all('li', class_='bigimg') for book in book_list: title = book.find('a', class_='pic')['title'] author = book.find('div', class_='pub').text.strip().split('/')[0] price = book.find('span', class_='search_now_price').text.strip() print(title, author, price) # 存储数据 # 将数据存储到本地文件或者数据库中 ``` 这段代码首先发送HTTP请求获取当当网以“python”为关键词搜索结果页面的内容,然后使用beautifulsoup库解析HTML,提取出每本图书的标题、作者和价格,并打印输出。最后可以将获取到的数据存储到本地文件或者数据库中。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值