@@ -1,5 +1,5 @@
 import asyncio
-from pgml import Collection, Model, Splitter, Pipeline
+from pgml import Collection, Model, Splitter, Pipeline, migrate, init_logger
 import logging
 from rich.logging import RichHandler
 from rich.progress import track
@@ -121,6 +121,7 @@ async def generate_response(
     messages, openai_api_key, temperature=0.7, max_tokens=256, top_p=0.9
 ):
     openai.api_key = openai_api_key
+    log.debug("Generating response from OpenAI API: " + str(messages))
     response = openai.ChatCompletion.create(
         model="gpt-3.5-turbo",
         messages=messages,
@@ -170,7 +171,7 @@ async def chat_cli():
         response = await generate_response(
             messages, openai_api_key, max_tokens=512, temperature=0.0
         )
-        print("PgBot: " + response)
+        log.info("PgBot: " + response)
 
         user_input = input("User (Ctrl-C to exit): ")
     except KeyboardInterrupt:
@@ -241,8 +242,8 @@ async def run():
     The `main` function connects to a database, ingests documents from a specified folder, generates
     chunks, and logs the total number of documents and chunks.
     """
-    print("Starting pgml-chat.... ")
-
+    log.info("Starting pgml-chat.... ")
+    # await migrate()
     if stage == "ingest":
         root_dir = args.root_dir
         await ingest_documents(root_dir)
@@ -255,6 +256,7 @@ async def run():
 
 
 def main():
+    init_logger()
     if (
         stage == "chat"
         and chat_interface == "discord"
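
The diff above replaces print() calls with log.debug()/log.info() and calls init_logger() from pgml, but the hunks do not show how the module-level `log` object itself is created. Below is a minimal sketch, assuming a standard logging + RichHandler setup (consistent with the file's existing imports); the logger name "pgml-chat" and the basicConfig parameters are assumptions, not taken from the actual source.

# Minimal sketch (assumption): module-level logger wired to RichHandler so the
# new log.debug()/log.info() calls in the diff render in the terminal.
import logging
from rich.logging import RichHandler

logging.basicConfig(
    level=logging.INFO,        # raise to DEBUG to see the new log.debug() message
    format="%(message)s",
    handlers=[RichHandler()],  # rich adds timestamps, levels, and color
)
log = logging.getLogger("pgml-chat")  # hypothetical logger name

With a setup like this, log.info("PgBot: " + response) prints the reply through rich instead of a bare print(), and the extra OpenAI request details only appear when the level is lowered to DEBUG.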