Lightweight, LLM-agnostic RAG pipeline with pluggable corpora. Works with Claude, OpenAI, Gemini, or any LLM.
- No LLM SDK at install time. All provider deps are optional extras.
- Pluggable corpus. Use attune-help (the default), any markdown directory, or your own CorpusProtocol.
- Returns a prompt string by default — send it to whatever LLM you like. Optional provider adapters ship convenience wrappers.
pip install attune-rag # core only
pip install 'attune-rag[attune-help]' # + bundled help corpus
pip install 'attune-rag[claude]' # + Claude adapter
pip install 'attune-rag[openai]' # + OpenAI adapter
pip install 'attune-rag[gemini]' # + Gemini adapter
pip install 'attune-rag[all]' # everything

Quickstart (Claude):

pip install 'attune-rag[attune-help,claude]'

import asyncio
from attune_rag import RagPipeline
async def main():
pipeline = RagPipeline() # defaults to AttuneHelpCorpus
response, result = await pipeline.run_and_generate(
"How do I run a security audit with attune?",
provider="claude",
)
print(response)
print("\nSources:", [h.entry.path for h in result.citation.hits])
asyncio.run(main())

OpenAI:

pip install 'attune-rag[attune-help,openai]'

response, result = await pipeline.run_and_generate(
"...", provider="openai", model="gpt-4o",
)

Gemini:

pip install 'attune-rag[attune-help,gemini]'

response, result = await pipeline.run_and_generate(
"...", provider="gemini", model="gemini-1.5-pro",
)

Custom corpus (bring your own docs):

from pathlib import Path
from attune_rag import RagPipeline, DirectoryCorpus
pipeline = RagPipeline(corpus=DirectoryCorpus(Path("./my-docs")))
result = pipeline.run("How do I...?")
# Send result.augmented_prompt to whatever LLM you use.
# The pipeline itself does NOT call an LLM unless you use
# run_and_generate or call a provider adapter yourself.

v0.1.0 — initial release. Part of the attune ecosystem (attune-ai, attune-help, attune-author).
Apache 2.0. See LICENSE.