Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
83 changes: 83 additions & 0 deletions logicshell-llm/examples/phase8.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
// Phase 8 demo: LLM context + prompt composer
//
// Usage:
// cargo run --example phase8 --package logicshell-llm
//
// No network calls; demonstrates SystemContextProvider, PromptComposer,
// and the LlmClient trait without a live Ollama daemon.

use logicshell_core::config::LlmConfig;
use logicshell_llm::{
LlmClient, LlmError, LlmRequest, LlmResponse, PromptComposer, SystemContextProvider,
};

/// Stub LlmClient that returns a hard-coded suggestion.
struct StubClient;

impl LlmClient for StubClient {
async fn complete(&self, req: LlmRequest) -> Result<LlmResponse, LlmError> {
Ok(LlmResponse {
text: "ls -lhS".into(),
model: req.model,
})
}
}

#[tokio::main]
async fn main() {
    // ── System context snapshot ──────────────────────────────────────────
    println!("[Phase 8: SystemContextProvider]");
    let provider = SystemContextProvider::new();
    let snap = provider.snapshot();
    assert!(!snap.os_family.is_empty(), "os_family must be set");
    assert!(!snap.arch.is_empty(), "arch must be set");
    assert!(!snap.cwd.is_empty(), "cwd must be set");
    println!(" os_family = {:?}", snap.os_family);
    println!(" arch = {:?}", snap.arch);
    println!(" cwd = {:?}", snap.cwd);
    println!(" PATH[0..] = {:?}", snap.path_dirs);

    // ── Natural language → command prompt composition ────────────────────
    println!("[Phase 8: PromptComposer — NL to command]");
    let cfg = LlmConfig {
        enabled: true,
        model: Some("llama3".into()),
        ..LlmConfig::default()
    };
    let composer = PromptComposer::from_config(&cfg).expect("composer from config");
    let req = composer
        .compose_nl_to_command("list files sorted by size", &snap)
        .expect("compose_nl_to_command");

    assert_eq!(req.model, "llama3");
    assert!(req.prompt.contains("list files sorted by size"));
    println!(" model = {:?}", req.model);
    // Truncate by *characters*, not bytes: `&req.prompt[..80]` would panic if
    // byte index 80 fell inside a multi-byte UTF-8 sequence (the prompt embeds
    // cwd and PATH entries, which may contain non-ASCII on real systems).
    let preview: String = req.prompt.chars().take(80).collect();
    println!(" prompt (first 80 chars) = {preview:?}");

    // ── Assist prompt for a failed command (exit 127) ────────────────────
    println!("[Phase 8: PromptComposer — assist on exit 127]");
    let req2 = composer
        .compose_assist_on_127(&["gti", "status"], &snap)
        .expect("compose_assist_on_127");
    assert!(req2.prompt.contains("gti status"));
    println!(" failed_cmd = \"gti status\" embedded in prompt: OK");

    // ── LlmClient trait round-trip through the stub ──────────────────────
    println!("[Phase 8: LlmClient trait — stub round-trip]");
    let client = StubClient;
    let resp = client.complete(req).await.expect("complete");
    assert_eq!(resp.model, "llama3");
    assert!(!resp.text.is_empty());
    println!(" response = {:?}", resp.text);

    // ── Budget-exceeded error path ───────────────────────────────────────
    println!("[Phase 8: ContextTooLarge error]");
    let tight_composer = PromptComposer::new("m", 10);
    // No clone needed: compose_nl_to_command borrows the snapshot.
    match tight_composer.compose_nl_to_command("ls", &snap) {
        Err(logicshell_llm::LlmError::ContextTooLarge { size, max }) => {
            println!(" ContextTooLarge: size={size} > max={max} ✓");
        }
        other => panic!("expected ContextTooLarge, got: {other:?}"),
    }

    println!("\n✓ Phase 8 features verified OK");
}
179 changes: 179 additions & 0 deletions logicshell-llm/src/client.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,179 @@
// LLM client trait + request/response types — LLM Module PRD §5.1
//
// `LlmClient` is async because inference is I/O-bound (NFR-05).
// Implementations must not read `std::env` or discover cwd — all context is
// supplied via `SystemContextSnapshot` assembled upstream (FR-10, FR-11).

use crate::error::LlmError;

/// Input to an LLM inference call.
///
/// Both fields are plain owned strings, so the type is cheap to construct,
/// clone, and compare in tests. `Eq` and `Hash` are derived alongside
/// `PartialEq` because all fields support them (clippy:
/// `derive_partial_eq_without_eq`), allowing requests to be used as map/set
/// keys and compared for full equality.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct LlmRequest {
    /// Model identifier (e.g. `"llama3"`).
    pub model: String,
    /// Fully-composed prompt text.
    pub prompt: String,
}

/// Output from an LLM inference call.
///
/// Like [`LlmRequest`], this is a plain pair of owned strings; `Eq` and
/// `Hash` are derived alongside `PartialEq` because all fields support them
/// (clippy: `derive_partial_eq_without_eq`).
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct LlmResponse {
    /// Generated text returned by the model.
    pub text: String,
    /// Model that produced the response.
    pub model: String,
}

/// Async inference provider boundary — FR-21, LLM Module PRD §5.1.
///
/// Implementations supply the HTTP transport (e.g. `OllamaLlmClient` in Phase 9).
/// Unit tests use concrete mock structs; `#[automock]` generates `MockLlmClient`
/// in test builds.
///
/// The `async_fn_in_trait` lint is suppressed here intentionally: this trait is
/// used only within this crate for now and `Send` bounds are added in Phase 10
/// when object-safe boxing is introduced.
///
/// NOTE(review): `Self: Send + Sync` does not make the *future* returned by
/// `complete` `Send`; per the comment above, that is deferred to Phase 10 —
/// confirm callers do not spawn these futures across threads before then.
#[allow(async_fn_in_trait)]
#[cfg_attr(test, mockall::automock)]
pub trait LlmClient: Send + Sync {
    /// Submit a prompt and return the model's response.
    ///
    /// # Errors
    ///
    /// Returns an [`LlmError`] when the backend cannot produce a completion
    /// (e.g. `LlmError::Http` for transport failures in HTTP-backed
    /// implementations).
    async fn complete(&self, request: LlmRequest) -> Result<LlmResponse, LlmError>;
}

#[cfg(test)]
mod tests {
    use super::*;

    // ── LlmRequest ─────────────────────────────────────────────────────────────

    #[test]
    fn request_fields_accessible() {
        let req = LlmRequest {
            model: "llama3".into(),
            prompt: "list files".into(),
        };
        assert_eq!(req.model, "llama3");
        assert_eq!(req.prompt, "list files");
    }

    #[test]
    fn request_clone_eq() {
        let req = LlmRequest {
            model: "m".into(),
            prompt: "p".into(),
        };
        assert_eq!(req.clone(), req);
    }

    #[test]
    fn request_debug() {
        let req = LlmRequest {
            model: "m".into(),
            prompt: "p".into(),
        };
        let rendered = format!("{req:?}");
        // The derived Debug output names the type and shows both field values.
        // (The previous assertion — `!contains("llama3")` against a request
        // whose model was "m" — was vacuously true and verified nothing.)
        assert!(rendered.contains("LlmRequest"));
        assert!(rendered.contains("\"m\""));
        assert!(rendered.contains("\"p\""));
    }

    #[test]
    fn request_partial_eq() {
        let a = LlmRequest {
            model: "m".into(),
            prompt: "p".into(),
        };
        let b = LlmRequest {
            model: "m".into(),
            prompt: "p".into(),
        };
        let c = LlmRequest {
            model: "other".into(),
            prompt: "p".into(),
        };
        assert_eq!(a, b);
        assert_ne!(a, c);
    }

    // ── LlmResponse ───────────────────────────────────────────────────────────

    #[test]
    fn response_fields_accessible() {
        let resp = LlmResponse {
            text: "ls -la".into(),
            model: "llama3".into(),
        };
        assert_eq!(resp.text, "ls -la");
        assert_eq!(resp.model, "llama3");
    }

    #[test]
    fn response_clone_eq() {
        let resp = LlmResponse {
            text: "t".into(),
            model: "m".into(),
        };
        assert_eq!(resp.clone(), resp);
    }

    #[test]
    fn response_debug() {
        let resp = LlmResponse {
            text: "t".into(),
            model: "m".into(),
        };
        assert!(format!("{resp:?}").contains("LlmResponse"));
    }

    #[test]
    fn response_partial_eq() {
        let a = LlmResponse {
            text: "t".into(),
            model: "m".into(),
        };
        let b = LlmResponse {
            text: "t".into(),
            model: "m".into(),
        };
        let c = LlmResponse {
            text: "x".into(),
            model: "m".into(),
        };
        assert_eq!(a, b);
        assert_ne!(a, c);
    }

    // ── MockLlmClient (generated by mockall) ──────────────────────────────────

    #[tokio::test]
    async fn mock_client_returns_configured_response() {
        let mut mock = MockLlmClient::new();
        // Echo the last prompt line back so the mock's output depends on input.
        mock.expect_complete().returning(|req| {
            Ok(LlmResponse {
                text: format!("echo {}", req.prompt.lines().last().unwrap_or("")),
                model: req.model,
            })
        });

        let req = LlmRequest {
            model: "llama3".into(),
            prompt: "list files".into(),
        };
        let resp = mock.complete(req).await.unwrap();
        assert_eq!(resp.model, "llama3");
        assert!(!resp.text.is_empty());
    }

    #[tokio::test]
    async fn mock_client_can_return_error() {
        let mut mock = MockLlmClient::new();
        mock.expect_complete()
            .returning(|_| Err(LlmError::Http("connection refused".into())));

        let req = LlmRequest {
            model: "m".into(),
            prompt: "p".into(),
        };
        let result = mock.complete(req).await;
        assert!(result.is_err());
        assert!(matches!(result.unwrap_err(), LlmError::Http(_)));
    }
}
Loading
Loading