Compare commits


No commits in common. "1a46ea481666fce52e2b28d0ff103016a1b09e57" and "c47ff3d9ed66e91050755d493d9edeb3c3ea056d" have entirely different histories.

2 changed files with 21 additions and 24 deletions


@@ -1,7 +1,6 @@
 import requests
 import os
 from bs4 import BeautifulSoup
-import json

 folder_path = "transcripts"
@@ -9,22 +8,21 @@ if not os.path.exists(folder_path):
     os.makedirs(folder_path)

 for i in range(1, 139):
-    url = f"https://darknetdiaries.com/transcript/{i}"
-    r = requests.get(url)
-    soup = BeautifulSoup(r.text, 'html.parser')
-    transcript = soup.find('pre').get_text()
-
-    url = f"https://api.darknetdiaries.com/{i}.json"
-    r = requests.get(url)
-    parsed_json = json.loads(r.text)
-    title = parsed_json["episode_name"]
-    number = parsed_json["episode_number"]
-    downloads = parsed_json["total_downloads"]
-
-    with open(f"{folder_path}/episode_{number}.txt", "w") as f:
-        f.write(f"{title}\n{downloads}\n{transcript}")
-    print(f"{number} {title}")
+    try:
+        url = f"https://darknetdiaries.com/transcript/{i}"
+        r = requests.get(url)
+        soup = BeautifulSoup(r.text, 'html.parser')
+        transcript = soup.find('pre').get_text()
+        title_section = soup.find('h1').get_text()
+
+        url = f"https://darknetdiaries.com/episode/{i}"
+        r = requests.get(url)
+        soup = BeautifulSoup(r.text, 'html.parser')
+        downloads = soup.find(id='downloads').get_text()
+
+        ep, title = title_section.split(":", 1)
+        ep = ep.strip()
+        title = title.strip()
+
+        with open(f"{folder_path}/episode_{i}.txt", "w") as f:
+            f.write(f"{title}\n{downloads}\n{transcript}")
+        print(f"{ep} {title}")
+    except Exception:
+        print(f"Failed scraping episode {i}")

main.py

@@ -4,13 +4,9 @@ from llama_index.node_parser import SimpleNodeParser
 from llama_index import VectorStoreIndex
 from llama_index.llms import OpenAI, ChatMessage, MessageRole
 from llama_index.prompts import ChatPromptTemplate
-from llama_index import set_global_handler
-from llama_index.chat_engine.types import ChatMode
 import os
 import re

-#set_global_handler("simple")
 llm = OpenAI(model="gpt-4", temperature=0, max_tokens=256)
 service_context = ServiceContext.from_defaults(llm=llm)
 set_global_service_context(service_context)
@@ -94,13 +90,16 @@ refine_template = ChatPromptTemplate(chat_refine_msgs)
 chat_engine = index.as_chat_engine(
     text_qa_template=text_qa_template,
-    refine_template=refine_template,
-    chat_mode=ChatMode.OPENAI
+    refine_template=refine_template
 )

 while True:
     try:
-        chat_engine.chat_repl()
+        user_prompt = input("Prompt: ")
+        streaming_response = chat_engine.stream_chat(user_prompt)
+        for token in streaming_response.response_gen:
+            print(token, end="")
+        print("\n")
     except KeyboardInterrupt:
         break
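In main.py, the new side drops the built-in chat_engine.chat_repl() loop (and the ChatMode.OPENAI setting and set_global_handler import that went with it) in favor of a hand-rolled prompt loop: stream_chat() returns a streaming response whose response_gen yields tokens as they arrive, so the answer prints incrementally instead of after the full completion, and Ctrl+C breaks out of the loop.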