diff --git a/example/llm/RAG-OGA/custom_llm/custom_llm.py b/example/llm/RAG-OGA/custom_llm/custom_llm.py
index 51d51310..9d85899c 100644
--- a/example/llm/RAG-OGA/custom_llm/custom_llm.py
+++ b/example/llm/RAG-OGA/custom_llm/custom_llm.py
@@ -1,3 +1,12 @@
+# Windows changed DLL path reference policy since Python 3.8
+# Add the directory containing the DLLs to the DLL search path
+import os
+
+# check current python version
+import sys
+if sys.version_info >= (3, 8):
+    os.add_dll_directory(r"C:\Program Files\RyzenAI\1.5.1\deployment") # add path to your model dependent runtime DLLs
+
 import warnings
 import time
 from typing import Any, Dict, List, Optional
diff --git a/example/llm/RAG-OGA/rag.py b/example/llm/RAG-OGA/rag.py
index 4f938bdb..8edf416f 100644
--- a/example/llm/RAG-OGA/rag.py
+++ b/example/llm/RAG-OGA/rag.py
@@ -26,7 +26,7 @@
 if args.gradio:
-    gradio_launch_app(dataset_path=r"./Dataset", model_path= r"path/to/llm") #update this path
+    gradio_launch_app(dataset_path=r"./Dataset", model_path= r"C:\huggingface") #update this path
     exit()
 
 # --- Paths ---
@@ -36,7 +36,7 @@
 # --- Embedding Model ---
 print("Using ONNX embedding model on VitisAI NPU...")
-embedding_model = custom_embeddings(model_path="bge-large-en-v1.5.onnx", tokenizer_name="BAAI/bge-large-en-v1.5")
+embedding_model = custom_embeddings(model_path="./custom_embedding/bge-large-en-v1.5.onnx", tokenizer_name="BAAI/bge-large-en-v1.5")
 
 # --- Load or Build FAISS Index ---
 if os.path.exists(os.path.join(faiss_index_path, "index.faiss")) and os.path.exists(os.path.join(faiss_index_path, "index.pkl")):
@@ -70,7 +70,7 @@
 retriever = vectorstore.as_retriever(search_type='similarity',search_kwargs={"k": 3})
 print("Number of vectors:", vectorstore.index.ntotal)
 
-llm = custom_llm(model_path=r"path/to/llm") #update this path
+llm = custom_llm(model_path=r"C:\huggingface") #update this path
 # llm = custom_llm(model_path=r"C:\Users\akumar23\RAG-repo-xilinx\model") # Example
 
 template = PromptTemplate.from_template("""<|system|>