# forked from phito/darknet_diaries_llm
# (35 lines, 1.1 KiB, Python)
import json
import os

import requests
from bs4 import BeautifulSoup
# Destination directory for the downloaded episode transcripts.
folder_path = "transcripts"
def _fetch_transcript(session, episode):
    """Return the transcript text for *episode*.

    Raises requests.HTTPError on a non-2xx response and ValueError when the
    page has no <pre> transcript element (previously an opaque AttributeError).
    """
    url = f"https://darknetdiaries.com/transcript/{episode}"
    r = session.get(url, timeout=30)  # timeout: never hang the whole scrape on one episode
    r.raise_for_status()             # don't parse (and save) an HTML error page
    soup = BeautifulSoup(r.text, 'html.parser')
    pre = soup.find('pre')
    if pre is None:
        raise ValueError("no <pre> transcript element on page")
    return pre.get_text()


def _fetch_metadata(session, episode):
    """Return (title, number, downloads) from the episode metadata API."""
    url = f"https://api.darknetdiaries.com/{episode}.json"
    r = session.get(url, timeout=30)
    r.raise_for_status()
    meta = r.json()  # requests decodes JSON directly; no json.loads(r.text) round-trip
    return meta["episode_name"], meta["episode_number"], meta["total_downloads"]


if __name__ == '__main__':
    # exist_ok avoids the exists()/makedirs() check-then-act race.
    os.makedirs(folder_path, exist_ok=True)

    # One session reuses the TCP connection across all ~276 requests.
    session = requests.Session()
    for i in range(1, 139):
        try:
            transcript = _fetch_transcript(session, i)
            title, number, downloads = _fetch_metadata(session, i)

            # Write transcript: title and download count on the first two lines.
            with open(f"{folder_path}/episode_{number}.txt", "w", encoding='utf-8') as f:
                f.write(f"{title}\n{downloads}\n{transcript}")
            print(f"{number} {title}")
        except Exception as err:
            # Best-effort scrape: report the failure and continue with the next episode.
            print(f"Failed scraping episode {i} : [{err}]")