forked from phito/darknet_diaries_llm
initial commit
commit d3d977a3d0

.gitignore (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
+/data
+/index
+/.idea

download_transcripts.py (new file, 13 lines)
@@ -0,0 +1,13 @@
+import requests
+from bs4 import BeautifulSoup
+
+for i in range(1, 139):
+    url = f"https://darknetdiaries.com/transcript/{i}"
+    r = requests.get(url)
+    soup = BeautifulSoup(r.text, 'html.parser')
+    pre_section = soup.find('pre')
+
+    if pre_section:
+        text = pre_section.get_text()
+        with open(f"data/episode_{i}.txt", "w") as f:
+            f.write(text)
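
A side note on this script: it writes into data/ but never creates it, and the commit's .gitignore excludes /data, so a fresh clone has no such directory; it also parses whatever the server returns, even for failed requests. A minimal defensive variant, sketched under the assumption that requests and beautifulsoup4 are the only dependencies:

import os
import requests
from bs4 import BeautifulSoup

os.makedirs("data", exist_ok=True)  # the directory is gitignored, so create it on first run

for i in range(1, 139):
    url = f"https://darknetdiaries.com/transcript/{i}"
    r = requests.get(url)
    if not r.ok:
        # skip missing or failed episodes rather than parsing an error page
        print(f"episode {i}: HTTP {r.status_code}, skipped")
        continue
    soup = BeautifulSoup(r.text, 'html.parser')
    pre_section = soup.find('pre')
    if pre_section:
        with open(f"data/episode_{i}.txt", "w") as f:
            f.write(pre_section.get_text())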

main.py (new file, 51 lines)
@@ -0,0 +1,51 @@
+from llama_index import (SimpleDirectoryReader, ServiceContext, StorageContext, PromptTemplate,
+                         load_index_from_storage, Document, set_global_service_context)
+from llama_index.node_parser import SimpleNodeParser
+from llama_index import VectorStoreIndex
+from llama_index.llms import OpenAI
+import os
+import re
+
+llm = OpenAI(model="gpt-3.5-turbo", temperature=0, max_tokens=256)
+service_context = ServiceContext.from_defaults(llm=llm)
+set_global_service_context(service_context)
+
+if not os.path.exists("./index/lock"):
+    documents = []
+    for filename in os.listdir("./data"):
+        episode_number = re.search(r'\d+', filename).group()
+        with open("./data/" + filename, 'r') as f:
+            content = f.read()
+        document = Document(
+            text=content,
+            metadata={
+                "episode_number": episode_number
+            }
+        )
+
+    documents = SimpleDirectoryReader('./data').load_data()
+    parser = SimpleNodeParser.from_defaults()
+    nodes = parser.get_nodes_from_documents(documents)
+
+    index = VectorStoreIndex(nodes, show_progress=True)
+    index.storage_context.persist(persist_dir="./index")
+    open("./index/lock", 'a').close()
+else:
+    storage_context = StorageContext.from_defaults(persist_dir="./index")
+    index = load_index_from_storage(storage_context)
+
+template = (
+    "You are now an expert on the Darknet Diaries podcast. \n"
+    "Please answer this question by referring to the podcast: {query_str}\n"
+)
+qa_template = PromptTemplate(template)
+query_engine = index.as_query_engine(text_qa_template=qa_template)
+
+while True:
+    try:
+        user_prompt = input("Prompt: ")
+        response = query_engine.query(user_prompt)
+        print(response)
+    except KeyboardInterrupt:
+        break
+
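
Two review notes on main.py. First, the hand-built documents list is dead code: document is never appended to it, and documents is then reassigned from SimpleDirectoryReader('./data').load_data(), so the episode_number metadata never reaches the index. Assuming the same llama_index version the imports suggest, the intent could be recovered with the reader's file_metadata callback; a sketch:

import re
from llama_index import SimpleDirectoryReader

def episode_metadata(path: str) -> dict:
    # e.g. "./data/episode_42.txt" -> {"episode_number": "42"}
    match = re.search(r'\d+', path)
    return {"episode_number": match.group()} if match else {}

documents = SimpleDirectoryReader('./data', file_metadata=episode_metadata).load_data()

Second, a text_qa_template is formatted with both the retrieved {context_str} and the user's {query_str}, but this template only references {query_str}; the transcript chunks the index retrieves are most likely never interpolated into the prompt, so answers would come from the model alone rather than from the podcast data. A sketch of the template with the context wired in:

template = (
    "You are now an expert on the Darknet Diaries podcast.\n"
    "Here is some context from the podcast transcripts:\n"
    "{context_str}\n"
    "Please answer this question by referring to the podcast: {query_str}\n"
)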