From bf3fd878ac3e7aacd646d152ba867702d0019bf5 Mon Sep 17 00:00:00 2001
From: Romain Quinet
Date: Sat, 7 Oct 2023 08:32:07 +0200
Subject: [PATCH] Use DnD API

---
 download_transcripts.py | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/download_transcripts.py b/download_transcripts.py
index aac4e17..1f38e82 100644
--- a/download_transcripts.py
+++ b/download_transcripts.py
@@ -1,6 +1,7 @@
 import requests
 import os
 from bs4 import BeautifulSoup
+import json
 
 folder_path = "transcripts"
 
@@ -8,21 +9,22 @@ if not os.path.exists(folder_path):
     os.makedirs(folder_path)
 
 for i in range(1, 139):
-    url = f"https://darknetdiaries.com/transcript/{i}"
-    r = requests.get(url)
-    soup = BeautifulSoup(r.text, 'html.parser')
+    try:
+        url = f"https://darknetdiaries.com/transcript/{i}"
+        r = requests.get(url)
+        soup = BeautifulSoup(r.text, 'html.parser')
 
-    transcript = soup.find('pre').get_text()
-    title_section = soup.find('h1').get_text()
+        transcript = soup.find('pre').get_text()
 
-    url = f"https://darknetdiaries.com/episode/{i}"
-    r = requests.get(url)
-    soup = BeautifulSoup(r.text, 'html.parser')
-    downloads = soup.find(id='downloads').get_text()
+        url = f"https://api.darknetdiaries.com/{i}.json"
+        r = requests.get(url)
+        parsed_json = json.loads(r.text)
+        title = parsed_json["episode_name"]
+        number = parsed_json["episode_number"]
+        downloads = parsed_json["total_downloads"]
 
-    ep, title = title_section.split(":", 1)
-    ep = ep.strip()
-    title = title.strip()
-    with open(f"{folder_path}/episode_{i}.txt", "w") as f:
-        f.write(f"{title}\n{downloads}\n{transcript}")
-    print(f"{ep} {title}")
+        with open(f"{folder_path}/episode_{number}.txt", "w") as f:
+            f.write(f"{title}\n{downloads}\n{transcript}")
+        print(f"{number} {title}")
+    except Exception:
+        print(f"Failed scraping episode {i}")