from langchain_localai import LocalAIEmbeddings

# Point the embeddings client at a locally hosted LocalAI server.
# `model` must match an embedding model configured on that server.
embeddings = LocalAIEmbeddings(
    openai_api_base="http://localhost:8080",
    model="embedding-model-name",
)

text = "This is a test document."

# Embed a single query string, then embed a batch of documents.
query_result = embeddings.embed_query(text)
doc_result = embeddings.embed_documents([text])
import os

from langchain_community.embeddings import LocalAIEmbeddings

# Behind an explicit proxy, the OPENAI_PROXY environment variable is
# honored by the client and passed through to the underlying HTTP layer.
os.environ["OPENAI_PROXY"] = "http://proxy.yourcompany.com:8080"

# Point the embeddings client at a locally hosted LocalAI server.
embeddings = LocalAIEmbeddings(
    openai_api_base="http://localhost:8080",
    model="embedding-model-name",
)

text = "This is a test document."

# Embed a single query string, then embed a batch of documents.
query_result = embeddings.embed_query(text)
doc_result = embeddings.embed_documents([text])