From d3d977a3d053c11ac5e964b7e88ae9c702b5257b Mon Sep 17 00:00:00 2001
From: Romain Quinet
Date: Fri, 6 Oct 2023 21:35:53 +0200
Subject: [PATCH] initial commit

---
 .gitignore              |  3 +++
 download_transcripts.py | 13 +++++++++++
 main.py                 | 51 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 67 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 download_transcripts.py
 create mode 100644 main.py

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b66076d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+/data
+/index
+/.idea
\ No newline at end of file
diff --git a/download_transcripts.py b/download_transcripts.py
new file mode 100644
index 0000000..5b9f382
--- /dev/null
+++ b/download_transcripts.py
@@ -0,0 +1,13 @@
+import requests
+from bs4 import BeautifulSoup
+
+for i in range(1, 139):  # episodes 1-138
+    url = f"https://darknetdiaries.com/transcript/{i}"
+    r = requests.get(url)
+    soup = BeautifulSoup(r.text, 'html.parser')
+    pre_section = soup.find('pre')  # the transcript text sits in the first <pre> element
+
+    if pre_section:
+        text = pre_section.get_text()
+        with open(f"data/episode_{i}.txt", "w") as f:
+            f.write(text)
\ No newline at end of file
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..3afda1a
--- /dev/null
+++ b/main.py
@@ -0,0 +1,51 @@
+from llama_index import (ServiceContext, StorageContext, PromptTemplate,
+                         load_index_from_storage, Document, set_global_service_context)
+from llama_index.node_parser import SimpleNodeParser
+from llama_index import VectorStoreIndex
+from llama_index.llms import OpenAI
+import os
+import re
+
+llm = OpenAI(model="gpt-3.5-turbo", temperature=0, max_tokens=256)
+service_context = ServiceContext.from_defaults(llm=llm)
+set_global_service_context(service_context)
+
+if not os.path.exists("./index/lock"):  # build the index once, then reuse the persisted copy
+    documents = []
+    for filename in os.listdir("./data"):
+        episode_number = re.search(r'\d+', filename).group()
+        with open("./data/" + filename, 'r') as f:
+            content = f.read()
+        document = Document(
+            text=content,
+            metadata={
+                "episode_number": episode_number
+            }
+        )
+
+        documents.append(document)  # collect each transcript together with its episode metadata
+    parser = SimpleNodeParser.from_defaults()
+    nodes = parser.get_nodes_from_documents(documents)
+
+    index = VectorStoreIndex(nodes, show_progress=True)
+    index.storage_context.persist(persist_dir="./index")
+    open("./index/lock", 'a').close()  # marker file so the index is not rebuilt on the next run
+else:
+    storage_context = StorageContext.from_defaults(persist_dir="./index")
+    index = load_index_from_storage(storage_context)
+
+template = (
+    "You are now an expert on the Darknet Diaries podcast. \n"
+    "Please answer this question by referring to the podcast: {query_str}\n"
+)
+qa_template = PromptTemplate(template)
+query_engine = index.as_query_engine(text_qa_template=qa_template)
+
+while True:
+    try:
+        user_prompt = input("Prompt: ")
+        response = query_engine.query(user_prompt)
+        print(response)
+    except KeyboardInterrupt:
+        break
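
download_transcripts.py writes into data/ but never creates that directory, and main.py expects llama_index's OpenAI wrapper to pick up an API key from the environment. A minimal bootstrap sketch of running the two scripts in order; the file name bootstrap.py, the environment check, and the subprocess calls are illustrative assumptions, not part of the commit:

# bootstrap.py -- hypothetical helper, not part of the commit above
import os
import subprocess

os.makedirs("data", exist_ok=True)  # download_transcripts.py writes here but does not create it
if not os.environ.get("OPENAI_API_KEY"):
    raise SystemExit("set OPENAI_API_KEY before building or querying the index")

subprocess.run(["python", "download_transcripts.py"], check=True)  # scrape transcripts 1-138 into data/
subprocess.run(["python", "main.py"], check=True)                  # build or load ./index, then interactive prompt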
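
Because each Document carries an episode_number, the episodes a response was drawn from can be inspected after a query. A sketch of that, assuming the index persisted by main.py and the same pre-0.10 llama_index release used in the commit, where query responses expose the retrieved chunks via source_nodes; the script name and the example question are illustrative:

# inspect_sources.py -- hypothetical example built on the calls used in main.py
from llama_index import StorageContext, load_index_from_storage

storage_context = StorageContext.from_defaults(persist_dir="./index")
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine()

response = query_engine.query("Which episode covers Stuxnet?")
print(response)
for source in response.source_nodes:  # chunks the answer was grounded on
    print(source.node.metadata.get("episode_number"), source.score)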