Hi team,
I’m trying to create a Python script with a rather heavy request… I need to get the full history for the cards sitting in 2 specific phases and export all that info to an Excel file. I’m not exactly proficient at coding, but by using ChatGPT and some trial and error I managed to produce the following code:

import requests
import pandas as pd
from datetime import datetime
pipe_id = "MyPipeId"
API_TOKEN = "MyAPIToken"
HEADERS = {
    "Authorization": f"Bearer {API_TOKEN}",
    "Content-Type": "application/json"
}

# Define the phase IDs for the phases of interest
phase_ids = ["This_is_ID_1", "This_is_ID_2"]

def fetch_cards_for_phase(phase_id):
    all_cards = []
    has_next_page = True
    after_cursor = None

    while has_next_page:
        # Construct the query with pagination support
        query = f"""
        {{
          phase(id: "{phase_id}") {{
            cards(first: 30, after: "{after_cursor}") {{
              edges {{
                node {{
                  id
                  title
                  assignees {{
                    id
                    name
                    username
                  }}
                  comments {{
                    text
                    author_name
                    created_at
                  }}
                  comments_count
                  current_phase {{
                    name
                  }}
                  done
                  due_date
                  fields {{
                    name
                    value
                  }}
                  labels {{
                    name
                  }}
                  phases_history {{
                    phase {{
                      name
                    }}
                    firstTimeIn
                    lastTimeOut
                  }}
                  url
                }}
              }}
              pageInfo {{
                hasNextPage
                endCursor
              }}
            }}
          }}
        }}
        """

        response = requests.post(
            "https://api.pipefy.com/graphql",
            headers=HEADERS,
            json={"query": query}
        )

        if response.status_code != 200:
            raise Exception(f"Query failed with status code {response.status_code}: {response.text}")

        data = response.json()
        if data.get('data') and data['data'].get('phase'):
            edges = data['data']['phase']['cards']['edges']
            all_cards.extend([edge['node'] for edge in edges])  # Add the card nodes to the list

            # Check if there are more pages
            page_info = data['data']['phase']['cards']['pageInfo']
            has_next_page = page_info['hasNextPage']
            after_cursor = page_info['endCursor']
        else:
            print(f"No cards found for phase ID '{phase_id}': {data}")
            break  # Exit the loop if no cards are found

    return all_cards

def get_card_details_query(card_id):
    return f"""
    {{
      card(id: "{card_id}") {{
        id
        title
        assignees {{
          id
          name
          username
        }}
        comments {{
          text
          author_name
          created_at
        }}
        comments_count
        current_phase {{
          name
        }}
        done
        due_date
        fields {{
          name
          value
        }}
        labels {{
          name
        }}
        phases_history {{
          phase {{
            name
          }}
          firstTimeIn
          lastTimeOut
        }}
        url
      }}
    }}
    """

def fetch_all_card_details(card_ids):
    all_details = []
    for card in card_ids:
        card_id = card['id']  # Fetching the card ID from the card dictionary
        query = get_card_details_query(card_id)
        response = requests.post(
            "https://api.pipefy.com/graphql",
            headers=HEADERS,
            json={"query": query}
        )

        if response.status_code != 200:
            raise Exception(f"Query failed with status code {response.status_code}: {response.text}")

        data = response.json()
        if data.get('data') and data['data'].get('card'):
            all_details.append(data['data']['card'])  # Appending the detailed card info
        else:
            print(f"No details found for card ID '{card_id}': {data}")

    return all_details

def parse_data(all_details):
    parsed_data = []
    for card in all_details:
        parsed_data.append({
            "Card ID": card['id'],
            "Title": card['title'],
            "Assignees": ", ".join([assignee['name'] for assignee in card['assignees']]),
            "Comments": "\n".join([f"{c['author_name']}: {c['text']}" for c in card['comments']]),
            "Comments Count": card['comments_count'],
            "Current Phase": card['current_phase']['name'] if card['current_phase'] else None,
            "Done": card['done'],
            "Due Date": card['due_date'],
            "URL": card['url'],
            "Fields": ", ".join([f"{field['name']}: {field['value']}" for field in card['fields']])
        })
    return parsed_data

def export_to_excel(data, output_file):
    df = pd.DataFrame(data)
    df.to_excel(output_file, index=False, engine='openpyxl')
    print(f"Data exported to {output_file}")

if __name__ == "__main__":
    all_cards = []
    for phase_id in phase_ids:
        all_cards.extend(fetch_cards_for_phase(phase_id))

    # Step 2: Fetch detailed info for all cards
    all_card_details = fetch_all_card_details(all_cards)

    # Step 3: Parse the details for export
    parsed_data = parse_data(all_card_details)

    # Step 4: Generate timestamped filename
    timestamp = datetime.now().strftime("%H.%M")
    output_filename = f"pipefy_cards_report_{timestamp}.xlsx"

    # Step 5: Export to Excel
    export_to_excel(parsed_data, output_filename)

However, I’m getting the error in the attachment (it’s an HTML file showing mostly a Cloudflare error page), with a stack trace in between that goes like this:

Exception Traceback (most recent call last)
Cell In[20], line 189
    186 all_cards = []
    188 for phase_id in phase_ids:
--> 189     all_cards.extend(fetch_cards_for_phase(phase_id))
    191 # Step 2: Fetch detailed info for all cards
    192 all_card_details = fetch_all_card_details(all_cards)

Cell In[20], line 80, in fetch_cards_for_phase(phase_id)
     73 response = requests.post(
     74     "https://api.pipefy.com/graphql",
     75     headers=HEADERS,
     76     json={"query": query}
     77 )
     79 if response.status_code != 200:
---> 80     raise Exception(f"Query failed with status code {response.status_code}: {response.text}")
     82 data = response.json()
     83 if data.get('data') and data['data'].get('phase'):
Exception: Query failed with status code 500:
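
In case it helps, this is the trimmed-down version of the pagination call I was planning to test next. It passes the cursor as a GraphQL variable instead of pasting it into the query text (my current code sends after: "None" on the very first page, which I suspect the API might not like, but I honestly don't know), and it prints the response body when the status isn't 200 so I can at least read the Cloudflare page. The variable types ($phaseId: ID!, $after: String) and the function name are just my guesses, so please treat this as a sketch rather than something I know works:

import requests

API_TOKEN = "MyAPIToken"
HEADERS = {
    "Authorization": f"Bearer {API_TOKEN}",
    "Content-Type": "application/json"
}

# Only id/title here to keep the test small; the cursor travels as a GraphQL
# variable, so the first page sends "after": null instead of the string "None".
# (I'm assuming $phaseId: ID! / $after: String are the right types.)
PAGINATED_QUERY = """
query ($phaseId: ID!, $after: String) {
  phase(id: $phaseId) {
    cards(first: 30, after: $after) {
      edges {
        node {
          id
          title
        }
      }
      pageInfo {
        hasNextPage
        endCursor
      }
    }
  }
}
"""

def fetch_card_ids_for_phase(phase_id):
    cards = []
    after_cursor = None  # no cursor at all on the first page
    while True:
        response = requests.post(
            "https://api.pipefy.com/graphql",
            headers=HEADERS,
            json={
                "query": PAGINATED_QUERY,
                "variables": {"phaseId": phase_id, "after": after_cursor},
            },
        )
        if response.status_code != 200:
            # Print the body so the Cloudflare/HTML error page is readable
            print(f"HTTP {response.status_code} for phase {phase_id}:")
            print(response.text[:2000])
            break
        payload = response.json()
        phase_data = (payload.get("data") or {}).get("phase")
        if payload.get("errors") or not phase_data:
            print(f"GraphQL problem for phase {phase_id}: {payload}")
            break
        cards_page = phase_data["cards"]
        cards.extend(edge["node"] for edge in cards_page["edges"])
        if not cards_page["pageInfo"]["hasNextPage"]:
            break
        after_cursor = cards_page["pageInfo"]["endCursor"]
    return cards

The idea is just to let the first request go out without any cursor and, if the 500 happens again, to see the raw body instead of the whole HTML blob inside an exception message.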