179 lines
6.2 KiB
Python
179 lines
6.2 KiB
Python
import os
|
|
import sys
|
|
import json
|
|
import requests
|
|
from datetime import datetime, timedelta
|
|
from todoist_api_python.api import TodoistAPI
|
|
from jinja2 import Environment, FileSystemLoader, select_autoescape
|
|
|
|
ATTACHMENTS_DIR = "attachments"
|
|
|
|
|
|
def usage():
    """Print the command-line help text for this script."""
    help_text = """
Todoist Export Script
---------------------
Exports all active and completed tasks from the Todoist API to a JSON file, including attachments and comments, and generates a human-readable HTML backup using Jinja2.

Usage:
python export_todoist.py export
- Exports all data and generates JSON and HTML files.
python export_todoist.py [any other argument or none]
- Shows this help message.
"""
    print(help_text)
|
|
|
|
|
|
def get_api_key():
    """Return the Todoist API token from the TODOIST_KEY env var.

    Exits the process with status 1 when the variable is unset or empty.
    """
    token = os.environ.get("TODOIST_KEY")
    if token:
        return token
    print("Error: TODOIST_KEY environment variable not set.")
    sys.exit(1)
|
|
|
|
|
|
def ensure_attachments_dir():
    """Create the attachments directory if it does not already exist.

    Uses makedirs(exist_ok=True) instead of an exists() check followed by
    makedirs(): the check-then-create pair can race with another process
    creating the directory in between (TOCTOU) and then raise.
    """
    os.makedirs(ATTACHMENTS_DIR, exist_ok=True)
|
|
|
|
|
|
def download_attachment(url, filename):
    """Download *url* into ATTACHMENTS_DIR/filename and return the local path.

    Returns the cached path without re-downloading when the file already
    exists on disk, and None when the server answers with a non-200 status.
    Network-level errors (DNS failure, connection reset, timeout) still
    propagate to the caller, as in the original implementation.
    """
    local_path = os.path.join(ATTACHMENTS_DIR, filename)
    if os.path.exists(local_path):
        return local_path
    print(f"Downloading attachment {url}")
    # timeout guards against a hung connection stalling the whole export;
    # the with-block closes the response so the connection is released.
    with requests.get(url, stream=True, timeout=60) as r:
        if r.status_code != 200:
            return None
        with open(local_path, 'wb') as f:
            for chunk in r.iter_content(1024):
                f.write(chunk)
    return local_path
|
|
|
|
|
|
def fetch_all_projects(api):
    """Return every project visible to *api* as a flat list.

    api.get_projects() yields pages (lists) of project objects; this
    flattens them into one list while logging each project found.
    """
    projects = []
    for page in api.get_projects():
        for project in page:
            # getattr with a default keeps the log line safe even if the
            # SDK object lacks either attribute.
            name = getattr(project, 'name', None)
            project_id = getattr(project, 'id', None)  # renamed: 'id' shadowed the builtin
            print(f"Found project {name} with ID {project_id}")
            projects.append(project)
    return projects
|
|
|
|
def fetch_all_completed_tasks(api, project_id):
    """Return completed tasks for *project_id* from the last 90 days.

    The API limits get_completed_tasks_by_completion_date to roughly a
    3-month window per call, so a single 90-day window is fetched and
    filtered client-side by project.  Errors are logged and swallowed so
    one failing project does not abort the whole export.
    """
    all_completed = []
    # Snapshot "now" once so since/until are derived from the same instant
    # (previously two separate datetime.now() calls produced a skewed window).
    now = datetime.now()
    since = (now - timedelta(days=90)).replace(hour=0, minute=0, second=0, microsecond=0)
    until = now
    try:
        for page in api.get_completed_tasks_by_completion_date(since=since, until=until):
            for task in page:
                # Compare as strings: project_id may be int or str depending
                # on the SDK version.
                if hasattr(task, 'project_id') and str(task.project_id) == str(project_id):
                    all_completed.append(task)
    except Exception as e:
        print(f"Error fetching completed tasks for {since} to {until}: {e}")
    print(f"Found {len(all_completed)} completed tasks for project {project_id}")
    return all_completed
|
|
|
|
def fetch_all_tasks(api, project_id, completed=False):
    """Return tasks for *project_id*; completed ones when completed=True.

    Active tasks are paged via api.get_tasks(); completed tasks are
    delegated to fetch_all_completed_tasks().  Errors while fetching
    active tasks are logged and whatever was collected is returned.
    """
    if completed:
        return fetch_all_completed_tasks(api, project_id)
    tasks = []
    try:
        for page in api.get_tasks(project_id=project_id):
            tasks.extend(page)
    except Exception as e:
        print(f"Error fetching active tasks for project {project_id}: {e}")
    # BUG FIX: this log line previously said "completed tasks" on the
    # active-task path.
    print(f"Found {len(tasks)} active tasks for project {project_id}")
    return tasks
|
|
|
|
|
|
def fetch_comments(api, task_id):
    """Return all comments on *task_id*, or [] when the fetch fails.

    Comment fetching is best-effort: a failure is now logged (it was
    previously swallowed silently) and an empty list is returned so the
    export continues, matching the error-handling style of the other
    fetch_* helpers.
    """
    comments = []
    try:
        for page in api.get_comments(task_id=task_id):
            comments.extend(page)
    except Exception as e:
        print(f"Error fetching comments for task {task_id}: {e}")
        return []
    return comments
|
|
|
|
|
|
def process_task(api, task, completed=False):
    """Serialize a task object into a plain dict, downloading any file
    attachments and fetching the task's comments.

    The *completed* flag is accepted for interface symmetry with
    fetch_all_tasks but does not change the processing.
    """
    result = dict(task.__dict__)

    # Attachments: mirror each attachment dict, and record a local copy
    # of any downloadable file under 'local_file'.
    processed = []
    for attachment in (getattr(task, 'attachments', None) or []):
        entry = dict(attachment.__dict__)
        file_url = entry.get('file_url')
        if file_url:
            target_name = entry.get('file_name') or os.path.basename(file_url)
            saved_path = download_attachment(file_url, target_name)
            if saved_path:
                entry['local_file'] = os.path.relpath(saved_path)
        processed.append(entry)
    if processed:
        result['attachments'] = processed

    # Comments (best-effort; empty list means "omit the key").
    task_comments = fetch_comments(api, task.id)
    if task_comments:
        result['comments'] = [c.__dict__ for c in task_comments]
    return result
|
|
|
|
|
|
def main():
    """Entry point: export all Todoist data to dated JSON and HTML backups."""
    if len(sys.argv) != 2 or sys.argv[1] != "export":
        usage()
        return

    ensure_attachments_dir()
    api = TodoistAPI(get_api_key())

    # Build one dict per project, each carrying its active and completed tasks.
    data = []
    for project in fetch_all_projects(api):
        record = project.__dict__.copy()
        pid = project.id
        active = fetch_all_tasks(api, pid, completed=False)
        done = fetch_all_tasks(api, pid, completed=True)
        record['tasks'] = [process_task(api, t, completed=False) for t in active]
        record['completed_tasks'] = [process_task(api, t, completed=True) for t in done]
        data.append(record)

    # JSON backup.
    today = datetime.now().strftime("%Y-%m-%d")
    json_filename = f"Todoist-Actual-Backup-{today}.json"

    def json_serial(obj):
        # datetimes become ISO-8601; everything else falls back to str().
        return obj.isoformat() if isinstance(obj, datetime) else str(obj)

    with open(json_filename, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=2, default=json_serial)
    print(f"Exported data to {json_filename}")

    # HTML backup rendered via Jinja2; the template lives next to this script.
    env = Environment(
        loader=FileSystemLoader(os.path.dirname(__file__)),
        autoescape=select_autoescape(['html', 'xml'])
    )
    # The markdown filter is optional: fall back to passing text through
    # unchanged when the package is not installed.
    try:
        import markdown
        env.filters['markdown'] = lambda text: markdown.markdown(text or "")
    except ImportError:
        env.filters['markdown'] = lambda text: text or ""

    template = env.get_template("todoist_backup_template.html")
    html_filename = f"Todoist-Actual-Backup-{today}.html"
    with open(html_filename, "w", encoding="utf-8") as f:
        f.write(template.render(projects=data, date=today))
    print(f"Generated HTML backup at {html_filename}")
|
|
|
|
# Script entry point: run the exporter only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
|