Compare commits

..

3 Commits

Author SHA1 Message Date
NIHART, Jeremi
7774fc2698 fix: pr comments 2023-10-09 10:06:56 +02:00
6d7600e47e
feat: ups deps, add dotenv 2023-10-07 16:55:18 +02:00
0730db5727
feat: package manager 2023-10-07 15:14:18 +02:00
6 changed files with 157 additions and 108 deletions

14
.editorconfig Normal file
View File

@ -0,0 +1,14 @@
root = true
[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
max_line_length = 120
[*.md]
trim_trailing_whitespace = false
max_line_length = 0

2
.gitignore vendored
View File

@ -1,3 +1,5 @@
.env
/transcripts
/index
/.idea
/venv

View File

@ -9,9 +9,15 @@ Well, let's ask our LLM:
## How to run
### Install dependencies
I have no idea what the correct way to install dependencies with python is. Somehow install these libraries and their dependencies:
- llama_index
- beautifulsoup4
It is recommended to use a Python version greater than or equal to ``3.10.0``.
It is also recommended to create a venv, or to use an IDE that supports venv creation, so that all dependencies are installed locally to the project rather than globally. Otherwise, you can use https://virtualenv.pypa.io/en/latest/ to create isolated environments.
Install the dependencies required to run the project by running the following command at the project root:
```shell
pip install -r requirements.txt
```
### Execution
Download transcripts:
```shell
@ -31,6 +37,7 @@ python3 main.py
On the first run, it will generate the index. This can take a while, but it will be cached on disk for the next runs.
You can then ask it any questions about Darknet Diaries!
## Examples
> What is the intro of the podcast?

View File

@ -5,17 +5,20 @@ import json
folder_path = "transcripts"
if not os.path.exists(folder_path):
if __name__ == '__main__':
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for i in range(1, 139):
for i in range(1, 139):
try:
# fetch transcript
url = f"https://darknetdiaries.com/transcript/{i}"
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
transcript = soup.find('pre').get_text()
# fetch transcript metadata
url = f"https://api.darknetdiaries.com/{i}.json"
r = requests.get(url)
parsed_json = json.loads(r.text)
@ -23,8 +26,9 @@ for i in range(1, 139):
number = parsed_json["episode_number"]
downloads = parsed_json["total_downloads"]
with open(f"{folder_path}/episode_{number}.txt", "w") as f:
# write transcript
with open(f"{folder_path}/episode_{number}.txt", "w", encoding='utf-8') as f:
f.write(f"{title}\n{downloads}\n{transcript}")
print(f"{number} {title}")
except Exception:
print(f"Failed scraping episode {i}")
except Exception as err:
print(f"Failed scraping episode {i} : [{err}]")

34
main.py
View File

@ -4,18 +4,25 @@ from llama_index.node_parser import SimpleNodeParser
from llama_index import VectorStoreIndex
from llama_index.llms import OpenAI, ChatMessage, MessageRole
from llama_index.prompts import ChatPromptTemplate
from llama_index import set_global_handler
# from llama_index import set_global_handler
from llama_index.chat_engine.types import ChatMode
from dotenv import load_dotenv
import os
import re
# set_global_handler("simple")
llm = OpenAI(model="gpt-4", temperature=0, max_tokens=256)
# load .env
load_dotenv()
OPEN_API_KEY = os.getenv('OPEN_API_KEY')
# config llm context
llm = OpenAI(model="gpt-4", temperature=0, max_tokens=256, api_key=OPEN_API_KEY)
service_context = ServiceContext.from_defaults(llm=llm)
set_global_service_context(service_context)
if not os.path.exists("./index/lock"):
if __name__ == '__main__':
if not os.path.exists("./index/lock"):
documents = []
for filename in os.listdir("./transcripts"):
episode_number = re.search(r'\d+', filename).group()
@ -41,12 +48,12 @@ if not os.path.exists("./index/lock"):
index = VectorStoreIndex(nodes, show_progress=True)
index.storage_context.persist(persist_dir="./index")
open("./index/lock", 'a').close()
else:
else:
print("Loading index...")
storage_context = StorageContext.from_defaults(persist_dir="./index")
index = load_index_from_storage(storage_context)
chat_text_qa_msgs = [
chat_text_qa_msgs = [
ChatMessage(
role=MessageRole.SYSTEM,
content=(
@ -67,10 +74,10 @@ chat_text_qa_msgs = [
"answer the question: {query_str}\n"
)
)
]
text_qa_template = ChatPromptTemplate(chat_text_qa_msgs)
]
text_qa_template = ChatPromptTemplate(chat_text_qa_msgs)
chat_refine_msgs = [
chat_refine_msgs = [
ChatMessage(
role=MessageRole.SYSTEM,
content="Always answer the question, even if the context isn't helpful.",
@ -89,18 +96,17 @@ chat_refine_msgs = [
"Original Answer: {existing_answer}"
),
),
]
refine_template = ChatPromptTemplate(chat_refine_msgs)
]
refine_template = ChatPromptTemplate(chat_refine_msgs)
chat_engine = index.as_chat_engine(
chat_engine = index.as_chat_engine(
text_qa_template=text_qa_template,
refine_template=refine_template,
chat_mode=ChatMode.OPENAI
)
)
while True:
while True:
try:
chat_engine.chat_repl()
except KeyboardInterrupt:
break

16
requirements.txt Normal file
View File

@ -0,0 +1,16 @@
# =====================
# Required dependencies
# =====================
# general deps
requests~=2.31.0
llama-index~=0.8.40
beautifulsoup4~=4.12.2
python-dotenv~=1.0.0
# llama sub deps
transformers~=4.34.0
torch~=2.1.0
# =====================
# Development dependencies
# =====================