我已经编写了一个Python3脚本来从Pipedrive检索所有deal(交易)信息并将其存入Excel csv文件中。
我不明白为什么我只能检索前1000个交易,而数据库中有超过2500个交易。如有任何建议,将不胜感激。
脚本是:
import requests
import json
import csv

API_TOKEN = ""
FILTER = False # Apply Filtering.
FILTER_READABLE = True # Filter uses human-readable field names, not just ids/hashes.
FILTER_FIELDS = [ # Fields to Include, separated by commas, example below. Make sure spelling is exact. Run without filter enabled to see exact names.
]
# Example of what FILTER_FIELDS can look like (not used by the script itself).
EXAMPLE_FILTER = [
"ID",
"Expected close date",
"Email messages count",
]
# Pipedrive REST API v1 endpoints.
DEALS_BASE_URL = "https://api.pipedrive.com/v1/deals/"
DEALFIELDS_BASE_URL = "https://api.pipedrive.com/v1/dealFields/"
# field key (hash for custom fields) -> human-readable field name; filled below.
DEALFIELDS = {}
# field key -> (field_type, {option id -> option label}) for "enum"/"set" fields.
DEALFIELDS_OPTIONS = {}
# Pagination cursor: offset ("start") of the next page to request.
end_last = 0
# Fetch every deal-field definition, paging until the API reports no more.
# BUG FIX 1: Pipedrive caps `limit` at 500 per request — asking for 5000
# silently returns at most 500, and advancing `start` by 5000 skips records.
# BUG FIX 2: pagination metadata lives under "additional_data" (not
# "additional_info"), so the old loop never saw more_items_in_collection and
# stopped after the first page. Advance with the server-supplied next_start.
while True:
    req = requests.get(DEALFIELDS_BASE_URL, params={"api_token": API_TOKEN, "start": end_last, "limit": 500})
    response = json.loads(req.text)
    if not response.get("data", None):
        print("Invalid Request. Check API Token!")
        quit(1)
    for field in response.get("data", []):
        DEALFIELDS[field["key"]] = field["name"]
        # enum/set fields store option ids in deals; remember id -> label so
        # the CSV output is human-readable.
        if field["field_type"] in ("enum", "set"):
            DEALFIELDS_OPTIONS[field["key"]] = (field["field_type"], {str(item["id"]): item["label"] for item in field["options"]})
    pagination = response.get("additional_data", {}).get("pagination", {})
    if not pagination.get("more_items_in_collection", False):
        break
    end_last = pagination.get("next_start", end_last + 500)
DEALS = []
end_last = 0
# Fetch all deals and normalize each one into a flat {column name: value} dict.
# Same pagination fix as above: Pipedrive caps `limit` at 500, and the
# pagination block is "additional_data" (not "additional_info") — the wrong
# key made the loop break after one page, which is why only the first ~1000
# of 2500+ deals were ever retrieved.
while True:
    req = requests.get(DEALS_BASE_URL, params={"api_token": API_TOKEN, "start": end_last, "limit": 500})
    response = json.loads(req.text)
    if not response.get("data", None):
        # No data at all (bad token or zero deals): exit without writing CSV.
        quit()
    for deal in response.get("data", []):
        new_deal = {}
        for key, item in deal.items():
            # Linked objects (person, org, user, ...) arrive as dicts; keep the name.
            if isinstance(item, dict):
                item = item["name"]
            if key in DEALFIELDS:
                # Known field: key may be a custom-field hash; DEALFIELDS maps
                # it to the readable label used as the CSV column name.
                if FILTER and FILTER_READABLE and DEALFIELDS[key] not in FILTER_FIELDS:
                    continue
                if FILTER and not FILTER_READABLE and key not in FILTER_FIELDS:
                    continue
                if key in DEALFIELDS_OPTIONS:
                    kind, options = DEALFIELDS_OPTIONS[key]
                    if item is None:
                        continue  # unset enum/set value: omit the column
                    if kind == "enum":
                        new_deal[DEALFIELDS[key]] = options[str(item)]
                    else:  # "set": value is a comma-separated list of option ids
                        new_deal[DEALFIELDS[key]] = ",".join(options[i] for i in item.split(","))
                    continue
                new_deal[DEALFIELDS[key]] = item
            else:
                if FILTER and key not in FILTER_FIELDS:
                    continue
                new_deal[key] = item
        DEALS.append(new_deal)
    pagination = response.get("additional_data", {}).get("pagination", {})
    if not pagination.get("more_items_in_collection", False):
        break
    end_last = pagination.get("next_start", end_last + 500)
# Build the union of all column names (deals can expose different key sets
# after filtering), preserving first-seen order. Keys are stripped here with
# the same str().strip() used at write time — previously the fieldnames were
# collected unstripped, so any label with surrounding whitespace would make
# DictWriter raise ValueError on an unknown key.
fieldnames = []
for deal in DEALS:
    for key in deal:
        name = str(key).strip()
        if name not in fieldnames:
            fieldnames.append(name)
# newline="" is required by the csv module (otherwise extra blank rows appear
# on Windows); the context manager guarantees the file is flushed and closed.
with open("out.csv", "w", encoding="utf-8", newline="") as output_csv:
    out = csv.DictWriter(output_csv, fieldnames=fieldnames)
    out.writeheader()
    for deal in DEALS:
        out.writerow({str(key).strip(): str(item).strip() for key, item in deal.items()})