Hello all,
I need to download all cards from a Pipe, and I have over 1,000 of them. My current Python process uses pagination. The first 50 cards are queried/downloaded quickly (i.e., the JSON objects are saved practically immediately), but every subsequent card takes nearly 1 second, so the full download takes a long time. Is there a Python approach that downloads many cards more quickly?
Here’s my current process:
# packages
import json
import requests
# token
pipefy_token = "token goes here"
# graphql
url = "https://api.pipefy.com/graphql"
# headers
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": "Bearer %s" % pipefy_token
}
# pipe ID
pipe_id = "#########"
# first 50 cards query
json_query = {"query": "{ allCards(pipeId: %(pipeId)s, first: 50) { edges { node { id title assignees { id } comments { text } comments_count current_phase { name } done due_date fields { name value } labels { name } phases_history { phase { name } firstTimeIn lastTimeOut } url } } pageInfo { endCursor startCursor hasNextPage } } }" % {"pipeId": pipe_id}}
# response
response = requests.request("POST", url = url, json = json_query, headers = headers)
# json response
json_response_all_cards = json.loads(response.text)
# page info
page_info = json_response_all_cards["data"]["allCards"]["pageInfo"]
# unique-id counters used to detect when the last page has been queried
n_unique_card_ids_before = len(set(node["node"]["id"] for node in json_response_all_cards["data"]["allCards"]["edges"]))
n_unique_card_ids_now = 0
# keep querying pages until a request adds no new unique card ids (i.e., the last page has been reached)
while n_unique_card_ids_before != n_unique_card_ids_now:
    # count unique card ids collected so far
    unique_card_ids_before = set(node["node"]["id"] for node in json_response_all_cards["data"]["allCards"]["edges"])
    n_unique_card_ids_before = len(unique_card_ids_before)
    # query the next page of results
    json_query = {"query": "{ allCards(pipeId: %(pipeId)s, first: 50, after: \"%(startCursor)s\") { edges { node { id title assignees { id } comments { text } comments_count current_phase { name } done due_date fields { name value } labels { name } phases_history { phase { name } firstTimeIn lastTimeOut } url } } pageInfo { endCursor startCursor hasNextPage } } }" % {"pipeId": pipe_id, "startCursor": page_info["startCursor"]}}
    # json
    json_response_some_cards = json.loads(requests.request("POST", url = url, json = json_query, headers = headers).text)
    # append only the cards not already collected
    json_response_all_cards["data"]["allCards"]["edges"] += [node_i for node_i in json_response_some_cards["data"]["allCards"]["edges"] if node_i["node"]["id"] not in unique_card_ids_before]
    # new page info
    page_info = json_response_some_cards["data"]["allCards"]["pageInfo"]
    # count unique card ids again
    n_unique_card_ids_now = len(set(node["node"]["id"] for node in json_response_all_cards["data"]["allCards"]["edges"]))
    # print progress
    print("Currently %i unique card ids" % n_unique_card_ids_now)
# see it
json_response_all_cards
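In case it helps, here's a rough sketch of how I'm checking the per-request timing (it just wraps one paginated request with time.perf_counter() and reuses url, headers, json_query, and unique_card_ids_before from the loop above):

# rough timing check for a single paginated request (uses variables from the script above)
import time
start = time.perf_counter()
json_response_some_cards = json.loads(requests.request("POST", url = url, json = json_query, headers = headers).text)
elapsed = time.perf_counter() - start
# how many genuinely new card ids did this 50-card request return?
new_ids = set(node["node"]["id"] for node in json_response_some_cards["data"]["allCards"]["edges"]) - unique_card_ids_before
print("Request took %.2f seconds and returned %i new card ids" % (elapsed, len(new_ids)))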
Thank you for any help in advance!
Nick