# vitest-agentic-control
Vitest plugin with fixtures and utilities for agentic-control E2E testing.
## Installation
```bash
pnpm add -D vitest-agentic-control
```

## Features
- **MCP Mocking**: Mock MCP servers, tools, and resources without real implementations
- **Provider Mocking**: Mock AI providers (Anthropic, OpenAI, Google, Mistral, Azure, Ollama)
- **Sandbox Mocking**: Mock Docker container execution and sandbox operations
- **Test Fixtures**: Pre-configured fixtures for tokens, fleet, triage, and sandbox
- **Environment Helpers**: Easy setup and cleanup of test environment variables
## Quick Start
```ts
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import {
  createAgenticMocker,
  withTestEnv,
  DEFAULT_TEST_ENV,
} from 'vitest-agentic-control';

describe('My Agentic Tests', () => {
  let cleanup: () => void;
  let mocker: ReturnType<typeof createAgenticMocker>;

  beforeEach(() => {
    // Set up test environment
    cleanup = withTestEnv(DEFAULT_TEST_ENV);
    mocker = createAgenticMocker();
  });

  afterEach(() => {
    // Clean up
    mocker.restoreAll();
    cleanup();
  });

  it('should mock MCP server', async () => {
    const server = mocker.mcp.mockServer('test-server', {
      tools: [
        {
          name: 'get_data',
          handler: () => ({ data: 'mocked' }),
        },
      ],
    });

    await server.connect();
    const result = await server.callTool('get_data', {});
    expect(result).toEqual({ data: 'mocked' });
  });

  it('should mock AI provider', async () => {
    mocker.providers.mockAnthropic({
      response: 'Hello from mocked Claude!',
    });

    // Your test code that uses Anthropic
  });

  it('should mock sandbox execution', async () => {
    mocker.sandbox.mockExecution({
      success: true,
      stdout: 'Task completed successfully',
      exitCode: 0,
    });

    const container = mocker.sandbox.createMockContainer({
      image: 'node:22',
    });

    await container.start();
    const result = await container.exec(['npm', 'test']);
    expect(result.success).toBe(true);
  });
});
```
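If you would rather wire the environment up once instead of in every suite, vitest's standard `setupFiles` option can host the same hooks. A minimal sketch, assuming a `test/setup.ts` path of your own choosing (the file location and global hook placement are illustrative, not part of this package's API):

```ts
// test/setup.ts (hypothetical path), referenced from vitest.config.ts via
//   test: { setupFiles: ['./test/setup.ts'] }
import { beforeEach, afterEach } from 'vitest';
import { withTestEnv, DEFAULT_TEST_ENV } from 'vitest-agentic-control';

let cleanup: () => void;

// Apply the default test environment before each test
beforeEach(() => {
  cleanup = withTestEnv(DEFAULT_TEST_ENV);
});

// Restore the original environment afterwards
afterEach(() => {
  cleanup();
});
```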
## MCP Mocking

```ts
import { createMcpMocker } from 'vitest-agentic-control';

const mcpMocker = createMcpMocker();

// Create a mock MCP server with tools and resources
const server = mcpMocker.mockServer('my-server', {
  tools: [
    {
      name: 'search',
      description: 'Search for documents',
      inputSchema: {
        type: 'object',
        properties: {
          query: { type: 'string' },
        },
      },
      handler: (args) => ({
        results: ['doc1', 'doc2'],
      }),
    },
  ],
  resources: [
    {
      uri: 'file:///config.json',
      content: '{"key": "value"}',
      mimeType: 'application/json',
    },
  ],
});

// Use the mock server
await server.connect();
const tools = await server.listTools();
const result = await server.callTool('search', { query: 'test' });
```
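Tools and resources can also be built outside a server definition with `createMockTool` and `createMockResource` (see the API reference below). A sketch under the assumption that the returned objects match the shapes the `tools` and `resources` arrays of `mockServer` accept, and that the option keys mirror the inline definitions above:

```ts
import { createMcpMocker } from 'vitest-agentic-control';

const mcpMocker = createMcpMocker();

// Build a tool and a resource up front so they can be reused across servers.
// Assumption: the options objects take the same `description`/`mimeType`
// fields shown in the inline mockServer example.
const echoTool = mcpMocker.createMockTool('echo', (args) => args, {
  description: 'Echo the arguments back',
});
const readme = mcpMocker.createMockResource('file:///README.md', '# Hello', {
  mimeType: 'text/markdown',
});

const server = mcpMocker.mockServer('reusable-server', {
  tools: [echoTool],
  resources: [readme],
});
```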
## Provider Mocking

```ts
import { createProviderMocker } from 'vitest-agentic-control';

const providerMocker = createProviderMocker();

// Mock Anthropic
providerMocker.mockAnthropic({
  response: 'Hello! I am Claude.',
  usage: {
    promptTokens: 10,
    completionTokens: 20,
    totalTokens: 30,
  },
});

// Mock OpenAI with streaming
providerMocker.mockOpenAI({
  response: 'Hello! I am GPT.',
  stream: true,
});

// Mock with simulated latency
providerMocker.mockGoogle({
  response: 'Hello! I am Gemini.',
  latency: 500, // 500ms delay
});

// Mock with error
providerMocker.mockMistral({
  error: new Error('Rate limit exceeded'),
});
```
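Because every provider mock accepts the same `response` shape, one assertion can be run against several providers with vitest's `it.each`. A sketch where the code under test is left as a placeholder (any function of yours that routes a prompt to a provider; `summarize()` in the comment is hypothetical, not part of this package):

```ts
import { describe, it } from 'vitest';
import { createProviderMocker } from 'vitest-agentic-control';

describe('mocked providers are interchangeable', () => {
  const providerMocker = createProviderMocker();

  // Each entry: provider name + a function that installs the mock.
  const cases: Array<[string, () => void]> = [
    ['anthropic', () => providerMocker.mockAnthropic({ response: 'ok' })],
    ['openai', () => providerMocker.mockOpenAI({ response: 'ok' })],
    ['google', () => providerMocker.mockGoogle({ response: 'ok' })],
  ];

  it.each(cases)('returns the mocked response for %s', async (_name, installMock) => {
    installMock();
    // Call your own provider-facing code here and assert on the result, e.g.:
    // expect(await summarize('some text')).toBe('ok'); // summarize() is hypothetical
  });
});
```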
## Sandbox Mocking

```ts
import { createSandboxMocker } from 'vitest-agentic-control';

const sandboxMocker = createSandboxMocker();

// Set default execution result
sandboxMocker.mockExecution({
  success: true,
  stdout: 'Build completed',
  exitCode: 0,
});

// Queue multiple results for sequential executions
sandboxMocker.queueResults([
  { success: true, stdout: 'Step 1 done', exitCode: 0 },
  { success: true, stdout: 'Step 2 done', exitCode: 0 },
  { success: false, stderr: 'Step 3 failed', exitCode: 1 },
]);

// Create and use mock container
const container = sandboxMocker.createMockContainer({
  image: 'python:3.11',
  workdir: '/app',
  memory: 512,
});

await container.start();
const result1 = await container.exec(['pip', 'install', '-r', 'requirements.txt']);
const result2 = await container.exec(['python', 'main.py']);
await container.stop();
```
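Queued results are handy for asserting that a pipeline stops at the first failing step. A sketch using only the calls shown above (the three-step loop itself is illustrative):

```ts
import { it, expect } from 'vitest';
import { createSandboxMocker } from 'vitest-agentic-control';

it('stops at the first failing step', async () => {
  const sandboxMocker = createSandboxMocker();
  sandboxMocker.queueResults([
    { success: true, stdout: 'lint ok', exitCode: 0 },
    { success: false, stderr: 'tests failed', exitCode: 1 },
    { success: true, stdout: 'never reached', exitCode: 0 },
  ]);

  const container = sandboxMocker.createMockContainer({ image: 'node:22' });
  await container.start();

  const steps = [['npm', 'run', 'lint'], ['npm', 'test'], ['npm', 'run', 'build']];
  const results = [];
  for (const step of steps) {
    const result = await container.exec(step);
    results.push(result);
    if (!result.success) break; // bail out like a real CI pipeline would
  }
  await container.stop();

  // Only the first two queued results should have been consumed
  expect(results).toHaveLength(2);
  expect(results[1].exitCode).toBe(1);
});
```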
## Test Fixtures

```ts
import {
  createTestConfig,
  createTokenConfig,
  createFleetConfig,
  createTriageConfig,
  createSandboxConfig,
  createMockGitHubIssue,
  createMockGitHubPR,
} from 'vitest-agentic-control';

// Create full test configuration
const config = createTestConfig({
  logLevel: 'debug',
  tokens: true,
  fleet: true,
  triage: true,
  sandbox: true,
});

// Create individual configs with overrides
const tokens = createTokenConfig({
  organizations: {
    'my-org': { name: 'my-org', tokenEnvVar: 'MY_ORG_TOKEN' },
  },
});

const triage = createTriageConfig({
  provider: 'openai',
  model: 'gpt-4-turbo',
});

// Create mock GitHub objects
const issue = createMockGitHubIssue({
  number: 42,
  title: 'Fix the bug',
  labels: ['bug', 'priority:high'],
});

const pr = createMockGitHubPR({
  number: 123,
  title: 'Add new feature',
  state: 'open',
  labels: ['enhancement'],
});
```
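Since the fixture factories take partial overrides, a quick sanity test is to check that your overrides land on top of the defaults. A sketch, assuming fields you do not override keep their pre-configured default values:

```ts
import { it, expect } from 'vitest';
import { createMockGitHubIssue, createTriageConfig } from 'vitest-agentic-control';

it('applies overrides on top of fixture defaults', () => {
  const issue = createMockGitHubIssue({ number: 42, labels: ['bug'] });
  expect(issue.number).toBe(42);
  expect(issue.labels).toContain('bug');
  // Assumption: unspecified fields (e.g. title) still receive defaults.
  expect(issue.title).toBeDefined();

  const triage = createTriageConfig({ provider: 'openai' });
  expect(triage.provider).toBe('openai');
});
```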
## Environment Setup

```ts
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { withTestEnv, DEFAULT_TEST_ENV } from 'vitest-agentic-control';

describe('Tests with environment', () => {
  let cleanup: () => void;

  beforeEach(() => {
    // Set up test environment. You can use the defaults:
    // cleanup = withTestEnv(DEFAULT_TEST_ENV);
    // Or provide custom values:
    cleanup = withTestEnv({
      GITHUB_TOKEN: 'custom-token',
      ANTHROPIC_API_KEY: 'custom-api-key',
      MY_CUSTOM_VAR: 'custom-value',
    });
  });

  afterEach(() => {
    cleanup();
  });

  it('should have test tokens available', () => {
    expect(process.env.GITHUB_TOKEN).toBeDefined();
  });
});
```
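The function returned by `withTestEnv` undoes the changes it made, which you can verify directly. A sketch, assuming cleanup restores each variable to its previous value (or removes it if it was unset before):

```ts
import { it, expect } from 'vitest';
import { withTestEnv } from 'vitest-agentic-control';

it('restores the environment after cleanup', () => {
  const before = process.env.MY_CUSTOM_VAR; // likely undefined

  const cleanup = withTestEnv({ MY_CUSTOM_VAR: 'temporary' });
  expect(process.env.MY_CUSTOM_VAR).toBe('temporary');

  cleanup();
  // Assumption: cleanup restores the pre-withTestEnv value.
  expect(process.env.MY_CUSTOM_VAR).toBe(before);
});
```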
## API Reference

### Main Mocker

- `createAgenticMocker(options?)` - Create the main mocker instance
- `AgenticMocker.mcp` - MCP mocking utilities
- `AgenticMocker.providers` - Provider mocking utilities
- `AgenticMocker.sandbox` - Sandbox mocking utilities
- `AgenticMocker.mockEnv(env)` - Mock environment variables
- `AgenticMocker.mockGitHubClient(options?)` - Mock GitHub client
- `AgenticMocker.restoreAll()` - Restore all mocks

### MCP Mocking

- `createMcpMocker(options?)` - Create MCP mocker
- `McpMocker.mockServer(name, config)` - Create mock MCP server
- `McpMocker.mockClient()` - Mock MCP client module
- `McpMocker.createMockTool(name, handler, options?)` - Create mock tool
- `McpMocker.createMockResource(uri, content, options?)` - Create mock resource

### Provider Mocking

- `createProviderMocker(options?)` - Create provider mocker
- `ProviderMocker.mockAnthropic(config?)` - Mock Anthropic
- `ProviderMocker.mockOpenAI(config?)` - Mock OpenAI
- `ProviderMocker.mockGoogle(config?)` - Mock Google
- `ProviderMocker.mockMistral(config?)` - Mock Mistral
- `ProviderMocker.mockAzure(config?)` - Mock Azure
- `ProviderMocker.mockOllama(config?)` - Mock Ollama
- `ProviderMocker.createMockModel(provider, modelId, config?)` - Create mock model

### Sandbox Mocking

- `createSandboxMocker(options?)` - Create sandbox mocker
- `SandboxMocker.mockExecution(result)` - Set default execution result
- `SandboxMocker.queueResult(result)` - Queue execution result
- `SandboxMocker.createMockContainer(config?)` - Create mock container
- `SandboxMocker.mockDockerCommands()` - Mock Docker CLI commands
- `SandboxMocker.mockContainerManager()` - Mock ContainerManager class

### Fixtures

- `createTestConfig(options?)` - Create full test configuration
- `createTokenConfig(overrides?)` - Create token configuration
- `createFleetConfig(overrides?)` - Create fleet configuration
- `createTriageConfig(overrides?)` - Create triage configuration
- `createSandboxConfig(overrides?)` - Create sandbox configuration
- `withTestEnv(env?)` - Set up test environment
- `createMockGitHubIssue(overrides?)` - Create mock GitHub issue
- `createMockGitHubPR(overrides?)` - Create mock GitHub PR
## License
MIT
