Secure credential management for LangChain agents and chains
pip install langchain-avp
from langchain_avp import AVPCredentialProvider
from langchain_openai import ChatOpenAI
# Create credential provider with file backend
provider = AVPCredentialProvider(
vault_path="~/.avp/vault.enc",
password="your-password"
)
# Store your API key once
provider.set_credential("OPENAI_API_KEY", "sk-...")
# Create LLM with credentials from AVP
llm = ChatOpenAI(
api_key=provider.get_credential("OPENAI_API_KEY")
)
# Use the LLM
response = llm.invoke("Hello, world!")
print(response.content)
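Hardcoding the vault password is fine for a quick local test; in real use you would typically read it from the environment or prompt for it instead. A minimal sketch using only the standard library (the AVP_VAULT_PASSWORD variable name here is just an example, not something the library defines):
import os
from getpass import getpass

# Prefer an environment variable; fall back to an interactive prompt
vault_password = os.environ.get("AVP_VAULT_PASSWORD") or getpass("Vault password: ")
provider = AVPCredentialProvider(
    vault_path="~/.avp/vault.enc",
    password=vault_password
)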
API keys are encrypted at rest using AES-256 rather than sitting in plaintext config files (see the sketch below).
Separate credentials by project, environment, or agent to prevent cross-contamination.
Track every credential access with built-in LangChain callbacks.
Rotate API keys without redeploying your agents.
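For context on the encryption point, password-based encryption at rest generally works like the sketch below: derive a 256-bit key from the vault password, then seal the serialized secrets with AES-256-GCM. This is only an illustration built on the cryptography package, not langchain-avp's actual implementation.
# Conceptual sketch of AES-256 encryption at rest with a password-derived key.
# NOT langchain-avp's actual implementation; shown only to illustrate the scheme.
import json
import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.kdf.scrypt import Scrypt

def encrypt_vault(secrets: dict, password: str) -> bytes:
    salt = os.urandom(16)
    key = Scrypt(salt=salt, length=32, n=2**14, r=8, p=1).derive(password.encode())
    nonce = os.urandom(12)
    ciphertext = AESGCM(key).encrypt(nonce, json.dumps(secrets).encode(), None)
    return salt + nonce + ciphertext  # only this blob ever touches disk

def decrypt_vault(blob: bytes, password: str) -> dict:
    salt, nonce, ciphertext = blob[:16], blob[16:28], blob[28:]
    key = Scrypt(salt=salt, length=32, n=2**14, r=8, p=1).derive(password.encode())
    return json.loads(AESGCM(key).decrypt(nonce, ciphertext, None))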
Drop-in credential provider for LangChain.
from langchain_avp import AVPCredentialProvider, Backend
# File backend (default)
provider = AVPCredentialProvider(
vault_path="~/.avp/vault.enc",
password="your-password",
workspace="langchain-agent"
)
# Memory backend (for testing)
provider = AVPCredentialProvider(backend=Backend.MEMORY)
# Keychain backend (uses the OS keychain; most secure option without dedicated hardware)
provider = AVPCredentialProvider(backend=Backend.KEYCHAIN)
# Store credentials
provider.set_credential("OPENAI_API_KEY", "sk-...")
provider.set_credential("ANTHROPIC_API_KEY", "sk-ant-...")
# Retrieve credentials
api_key = provider.get_credential("OPENAI_API_KEY")
# Check if credential exists
if provider.has_credential("OPENAI_API_KEY"):
print("Key is configured")
# List all credentials
keys = provider.list_credentials()
# Delete credential
provider.delete_credential("OLD_API_KEY")
# Rotate credential
provider.rotate_credential("OPENAI_API_KEY", "sk-new-...")
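Workspaces are what keep projects and environments from bleeding into each other. The sketch below assumes that a credential stored in one workspace is invisible to another, which is how the feature is described above:
dev = AVPCredentialProvider(
    vault_path="~/.avp/vault.enc",
    password="your-password",
    workspace="dev"
)
prod = AVPCredentialProvider(
    vault_path="~/.avp/vault.enc",
    password="your-password",
    workspace="prod"
)

dev.set_credential("OPENAI_API_KEY", "sk-dev-...")

# Assumed isolation behaviour: the prod workspace does not see the dev key
print(prod.has_credential("OPENAI_API_KEY"))  # expected: False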
Track credential access in your LangChain runs.
from langchain_avp import AVPCredentialCallback
from langchain.callbacks.manager import CallbackManager
# Create callback
avp_callback = AVPCredentialCallback(provider)
# Add to your chain/agent
callback_manager = CallbackManager([avp_callback])
# Use with LLM
llm = ChatOpenAI(
api_key=provider.get_credential("OPENAI_API_KEY"),
callbacks=callback_manager
)
# The callback logs every credential access:
# [AVP Audit] Retrieved: OPENAI_API_KEY (workspace: langchain-agent)
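For context, AVPCredentialCallback plugs into LangChain's standard callback interface. The sketch below is not its actual source, just an illustration of how an audit hook can be built on BaseCallbackHandler:
from langchain_core.callbacks import BaseCallbackHandler

class AuditSketchCallback(BaseCallbackHandler):
    # Illustrative only; the real AVPCredentialCallback may log differently
    def __init__(self, workspace: str):
        self.workspace = workspace

    def on_llm_start(self, serialized, prompts, **kwargs):
        # A real audit hook would also record which credential backed the call
        print(f"[AVP Audit] LLM call started (workspace: {self.workspace})")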
Quick setup for common LLM providers.
from langchain_avp import get_llm_with_avp
# OpenAI
llm = get_llm_with_avp(
provider,
provider_type="openai",
model="gpt-4"
)
# Anthropic
llm = get_llm_with_avp(
provider,
provider_type="anthropic",
model="claude-3-opus-20240229"
)
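Conceptually, the helper just resolves the right credential and builds the matching chat model. A rough stand-in to make that concrete (not the library's actual code; argument handling in the real helper may differ):
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI

def get_llm_sketch(provider, provider_type, model):
    # Hypothetical equivalent of get_llm_with_avp, shown for illustration only
    if provider_type == "openai":
        return ChatOpenAI(api_key=provider.get_credential("OPENAI_API_KEY"), model=model)
    if provider_type == "anthropic":
        return ChatAnthropic(api_key=provider.get_credential("ANTHROPIC_API_KEY"), model=model)
    raise ValueError(f"Unsupported provider_type: {provider_type}")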
Load multiple credentials at once.
from langchain_avp import load_credentials
# Load into environment
load_credentials(provider, ["OPENAI_API_KEY", "SERPAPI_API_KEY"])
# Now accessible via os.environ
import os
print(os.environ["OPENAI_API_KEY"])
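The helper is presumably roughly equivalent to copying each value into os.environ yourself; a hand-rolled version for comparison (the real helper may behave differently):
import os

# Copy each credential from the vault into the process environment
for name in ["OPENAI_API_KEY", "SERPAPI_API_KEY"]:
    os.environ[name] = provider.get_credential(name)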
"""LangChain agent with AVP credential management."""
from langchain_avp import AVPCredentialProvider, AVPCredentialCallback
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import DuckDuckGoSearchRun
from langchain import hub
# Setup credential provider
provider = AVPCredentialProvider(
vault_path="~/.avp/vault.enc",
password="your-password",
workspace="search-agent"
)
# Store API key (do this once)
# provider.set_credential("OPENAI_API_KEY", "sk-...")
# Create LLM with AVP credentials
llm = ChatOpenAI(
api_key=provider.get_credential("OPENAI_API_KEY"),
model="gpt-4",
callbacks=[AVPCredentialCallback(provider)]
)
# Setup agent
tools = [DuckDuckGoSearchRun()]
prompt = hub.pull("hwchase17/react")
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
# Run agent
result = agent_executor.invoke({
"input": "What's the latest news about AI safety?"
})
print(result["output"])
# Clean up
provider.close()
Migrating an existing .env file to AVP? Use the CLI:
avp import .env --format dotenv --workspace langchain
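After the import, the keys from the .env file can be read from Python under the "langchain" workspace. This assumes the CLI wrote to the same vault file used in the examples above and that OPENAI_API_KEY was one of the imported keys:
provider = AVPCredentialProvider(
    vault_path="~/.avp/vault.enc",
    password="your-password",
    workspace="langchain"
)
api_key = provider.get_credential("OPENAI_API_KEY")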