CLI and config file support

Nicolas Barraud
2025-03-30 15:57:29 -04:00
parent a63de622f8
commit 5b22143c85
10 changed files with 453 additions and 249 deletions

.gitignore vendored

@@ -1,8 +1,11 @@
.DS_Store
node_modules/
*-workspace/
server/build
client/dist
client/tsconfig.app.tsbuildinfo
client/tsconfig.node.tsbuildinfo
.vscode
bin/build
cli/build
test-output

README.md

@@ -36,15 +36,31 @@ The inspector runs both a client UI (default port 5173) and an MCP proxy server
CLIENT_PORT=8080 SERVER_PORT=9000 npx @modelcontextprotocol/inspector node build/index.js
```
For more details on ways to use the inspector, see the [Inspector section of the MCP docs site](https://modelcontextprotocol.io/docs/tools/inspector). For help with debugging, see the [Debugging guide](https://modelcontextprotocol.io/docs/tools/debugging).
### Authentication
The inspector supports bearer token authentication for SSE connections. Enter your token in the UI when connecting to an MCP server, and it will be sent in the Authorization header.
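For reference, the token is sent using the standard bearer scheme:

```
Authorization: Bearer <your-token>
```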
### Security Considerations
The MCP Inspector includes a proxy server that can run and communicate with local MCP processes. The proxy server should not be exposed to untrusted networks as it has permissions to spawn local processes and can connect to any specified MCP server.
### Configuration
The MCP Inspector supports the following configuration settings. To change them, click the `Configuration` button in the MCP Inspector UI:
| Name | Purpose | Default Value |
| -------------------------- | --------------------------------------------------------------------------- | ------------- |
| MCP_SERVER_REQUEST_TIMEOUT | Maximum time in milliseconds to wait for a response from the MCP server before timing out | 10000 |
The inspector also supports configuration files to store settings for different MCP servers. This is useful when working with multiple servers or complex configurations:
```bash
npx @modelcontextprotocol/inspector --config path/to/config.json --server everything
```
Example server configuration file:
```json
{
@@ -68,6 +84,30 @@ Example configuration file:
}
```
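The body of the example above is elided by this diff. As a rough sketch, assuming the conventional `mcpServers` layout that the CLI tests also rely on (a server name mapped to a `command`, `args`, and optional `env`; the `env` entry here is purely illustrative), such a file might look like:

```json
{
  "mcpServers": {
    "everything": {
      "command": "npx",
      "args": ["@modelcontextprotocol/server-everything"],
      "env": {
        "HELLO": "Hello MCP!"
      }
    }
  }
}
```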
### From this repository
If you're working on the inspector itself:
Development mode:
```bash
npm run dev
```
> **Note for Windows users:**
> On Windows, use the following command instead:
>
> ```bash
> npm run dev:windows
> ```
Production mode:
```bash
npm run build
npm start
```
### CLI Mode
CLI mode enables programmatic interaction with MCP servers from the command line, ideal for scripting, automation, and integration with coding assistants. This creates an efficient feedback loop for MCP server development.
@@ -105,4 +145,20 @@ npx @modelcontextprotocol/inspector --cli https://my-mcp-server.example.com --me
# List resources from a remote server
npx @modelcontextprotocol/inspector --cli https://my-mcp-server.example.com --method resources/list
```
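Tool calls follow the same pattern; a sketch using the flags exercised by the repository's CLI tests (`--tool-name` plus `key=value` pairs via `--tool-arg`):

```bash
# Call the everything server's echo tool with a single argument
npx @modelcontextprotocol/inspector --cli npx @modelcontextprotocol/server-everything --method tools/call --tool-name echo --tool-arg message=Hello
```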
### UI Mode vs CLI Mode: When to Use Each
| Use Case | UI Mode | CLI Mode |
|----------|---------|----------|
| **Server Development** | Visual interface for interactive testing and debugging during development | Scriptable commands for quick testing and continuous integration; creates feedback loops with AI coding assistants like Cursor for rapid development |
| **Resource Exploration** | Interactive browser with hierarchical navigation and JSON visualization | Programmatic listing and reading for automation and scripting |
| **Tool Testing** | Form-based parameter input with real-time response visualization | Command-line tool execution with JSON output for scripting |
| **Prompt Engineering** | Interactive sampling with streaming responses and visual comparison | Batch processing of prompts with machine-readable output |
| **Debugging** | Request history, visualized errors, and real-time notifications | Direct JSON output for log analysis and integration with other tools |
| **Automation** | N/A | Ideal for CI/CD pipelines, batch processing, and integration with coding assistants |
| **Learning MCP** | Rich visual interface helps new users understand server capabilities | Simplified commands for focused learning of specific endpoints |
## License
This project is licensed under the MIT License—see the [LICENSE](LICENSE) file for details.


@@ -57,7 +57,7 @@ async function runWebClient(args) {
// Make sure our server/client didn't immediately fail
await Promise.any([server, client, delay(2 * 1000)]);
const portParam = SERVER_PORT === "3000" ? "" : `?proxyPort=${SERVER_PORT}`;
console.log(`\n🔍 MCP Inspector is up and running at http://127.0.0.1:${CLIENT_PORT}${portParam} 🚀`);
try {
await Promise.any([server, client]);
}

bin/package.json

@@ -1,6 +1,6 @@
{
"name": "@modelcontextprotocol/inspector-bin",
"version": "0.5.1",
"version": "0.7.0",
"description": "Model Context Protocol inspector",
"license": "MIT",
"author": "Anthropic, PBC (https://anthropic.com)",
@@ -15,8 +15,8 @@
],
"scripts": {
"build": "tsc",
"postbuild": "chmod +x build/index.js && cp build/index.js cli.js",
"test": "./tests/cli-tests.sh"
"postbuild": "node scripts/make-executable.js && node scripts/copy-cli.js",
"test": "node scripts/cli-tests.js"
},
"dependencies": {},
"devDependencies": {}

bin/scripts/cli-tests.js Executable file

@@ -0,0 +1,326 @@
#!/usr/bin/env node
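// Usage: run from bin/ via `npm test` (which invokes `node scripts/cli-tests.js`),
// or from the repository root via `npm run test:cli`.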
// Colors for output
const colors = {
GREEN: "\x1b[32m",
YELLOW: "\x1b[33m",
RED: "\x1b[31m",
BLUE: "\x1b[34m",
ORANGE: "\x1b[33m",
NC: "\x1b[0m" // No Color
};
import fs from "fs";
import path from "path";
import { execSync, spawn } from "child_process";
import os from "os";
import { fileURLToPath } from "url";
// Get directory paths with ESM compatibility
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Track test results
let PASSED_TESTS = 0;
let FAILED_TESTS = 0;
let SKIPPED_TESTS = 0;
let TOTAL_TESTS = 0;
console.log(`${colors.YELLOW}=== MCP Inspector CLI Test Script ===${colors.NC}`);
console.log(`${colors.BLUE}This script tests the MCP Inspector CLI's ability to handle various command line options:${colors.NC}`);
console.log(`${colors.BLUE}- Basic CLI mode${colors.NC}`);
console.log(`${colors.BLUE}- Environment variables (-e)${colors.NC}`);
console.log(`${colors.BLUE}- Config file (--config)${colors.NC}`);
console.log(`${colors.BLUE}- Server selection (--server)${colors.NC}`);
console.log(`${colors.BLUE}- Method selection (--method)${colors.NC}`);
console.log(`${colors.BLUE}- Tool-related options (--tool-name, --tool-arg)${colors.NC}`);
console.log(`${colors.BLUE}- Resource-related options (--uri)${colors.NC}`);
console.log(`${colors.BLUE}- Prompt-related options (--prompt-name, --prompt-args)${colors.NC}`);
console.log(`${colors.BLUE}- Logging options (--log-level)${colors.NC}`);
console.log("");
// Get directory paths
const SCRIPTS_DIR = __dirname;
const BIN_DIR = path.resolve(SCRIPTS_DIR, "..");
const PROJECT_ROOT = path.resolve(BIN_DIR, "..");
// Compile bin and cli projects
console.log(`${colors.YELLOW}Compiling MCP Inspector bin and cli...${colors.NC}`);
try {
process.chdir(BIN_DIR);
execSync("npm run build", { stdio: "inherit" });
process.chdir(path.join(PROJECT_ROOT, "cli"));
execSync("npm run build", { stdio: "inherit" });
process.chdir(BIN_DIR);
} catch (error) {
console.error(`${colors.RED}Error during compilation: ${error.message}${colors.NC}`);
process.exit(1);
}
// Define the test server command using npx
const TEST_CMD = "npx";
const TEST_ARGS = ["@modelcontextprotocol/server-everything"];
// Create output directory for test results
const OUTPUT_DIR = path.join(SCRIPTS_DIR, "test-output");
if (!fs.existsSync(OUTPUT_DIR)) {
fs.mkdirSync(OUTPUT_DIR, { recursive: true });
}
// Create a temporary directory for test files
const TEMP_DIR = fs.mkdtempSync(path.join(os.tmpdir(), "mcp-inspector-tests-")); // mkdtempSync appends a unique suffix and returns the created path
process.on("exit", () => {
try {
fs.rmSync(TEMP_DIR, { recursive: true, force: true });
} catch (err) {
console.error(`${colors.RED}Failed to remove temp directory: ${err.message}${colors.NC}`);
}
});
// Use the existing sample config file
console.log(`${colors.BLUE}Using existing sample config file: ${PROJECT_ROOT}/sample-config.json${colors.NC}`);
try {
const sampleConfig = fs.readFileSync(path.join(PROJECT_ROOT, "sample-config.json"), "utf8");
console.log(sampleConfig);
} catch (error) {
console.error(`${colors.RED}Error reading sample config: ${error.message}${colors.NC}`);
}
// Create an invalid config file for testing
const invalidConfigPath = path.join(TEMP_DIR, "invalid-config.json");
fs.writeFileSync(invalidConfigPath, '{\n "mcpServers": {\n "invalid": {');
// Function to run a basic test
async function runBasicTest(testName, ...args) {
const outputFile = path.join(OUTPUT_DIR, `${testName.replace(/\//g, "_")}.log`);
console.log(`\n${colors.YELLOW}Testing: ${testName}${colors.NC}`);
TOTAL_TESTS++;
// Run the command and capture output
console.log(`${colors.BLUE}Command: node ${BIN_DIR}/cli.js ${args.join(" ")}${colors.NC}`);
try {
// Create a write stream for the output file
const outputStream = fs.createWriteStream(outputFile);
// Spawn the process
return new Promise((resolve) => {
const child = spawn("node", [path.join(BIN_DIR, "cli.js"), ...args], {
stdio: ["ignore", "pipe", "pipe"]
});
// Pipe stdout and stderr to the output file
child.stdout.pipe(outputStream);
child.stderr.pipe(outputStream);
// Also capture output for display
let output = "";
child.stdout.on("data", (data) => {
output += data.toString();
});
child.stderr.on("data", (data) => {
output += data.toString();
});
child.on("close", (code) => {
outputStream.end();
if (code === 0) {
console.log(`${colors.GREEN}✓ Test passed: ${testName}${colors.NC}`);
console.log(`${colors.BLUE}First few lines of output:${colors.NC}`);
const firstFewLines = output.split("\n").slice(0, 5).map(line => ` ${line}`).join("\n");
console.log(firstFewLines);
PASSED_TESTS++;
resolve(true);
} else {
console.log(`${colors.RED}✗ Test failed: ${testName}${colors.NC}`);
console.log(`${colors.RED}Error output:${colors.NC}`);
console.log(output.split("\n").map(line => ` ${line}`).join("\n"));
FAILED_TESTS++;
// Stop after any error is encountered
console.log(`${colors.YELLOW}Stopping tests due to error. Please validate and fix before continuing.${colors.NC}`);
process.exit(1);
}
});
});
} catch (error) {
console.error(`${colors.RED}Error running test: ${error.message}${colors.NC}`);
FAILED_TESTS++;
process.exit(1);
}
}
// Function to run an error test (expected to fail)
async function runErrorTest(testName, ...args) {
const outputFile = path.join(OUTPUT_DIR, `${testName.replace(/\//g, "_")}.log`);
console.log(`\n${colors.YELLOW}Testing error case: ${testName}${colors.NC}`);
TOTAL_TESTS++;
// Run the command and capture output
console.log(`${colors.BLUE}Command: node ${BIN_DIR}/cli.js ${args.join(" ")}${colors.NC}`);
try {
// Create a write stream for the output file
const outputStream = fs.createWriteStream(outputFile);
// Spawn the process
return new Promise((resolve) => {
const child = spawn("node", [path.join(BIN_DIR, "cli.js"), ...args], {
stdio: ["ignore", "pipe", "pipe"]
});
// Pipe stdout and stderr to the output file
child.stdout.pipe(outputStream);
child.stderr.pipe(outputStream);
// Also capture output for display
let output = "";
child.stdout.on("data", (data) => {
output += data.toString();
});
child.stderr.on("data", (data) => {
output += data.toString();
});
child.on("close", (code) => {
outputStream.end();
// For error tests, we expect a non-zero exit code
if (code !== 0) {
console.log(`${colors.GREEN}✓ Error test passed: ${testName}${colors.NC}`);
console.log(`${colors.BLUE}Error output (expected):${colors.NC}`);
const firstFewLines = output.split("\n").slice(0, 5).map(line => ` ${line}`).join("\n");
console.log(firstFewLines);
PASSED_TESTS++;
resolve(true);
} else {
console.log(`${colors.RED}✗ Error test failed: ${testName} (expected error but got success)${colors.NC}`);
console.log(`${colors.RED}Output:${colors.NC}`);
console.log(output.split("\n").map(line => ` ${line}`).join("\n"));
FAILED_TESTS++;
// Stop after any error is encountered
console.log(`${colors.YELLOW}Stopping tests due to error. Please validate and fix before continuing.${colors.NC}`);
process.exit(1);
}
});
});
} catch (error) {
console.error(`${colors.RED}Error running test: ${error.message}${colors.NC}`);
FAILED_TESTS++;
process.exit(1);
}
}
// Run all tests
async function runTests() {
console.log(`\n${colors.YELLOW}=== Running Basic CLI Mode Tests ===${colors.NC}`);
// Test 1: Basic CLI mode with method
await runBasicTest("basic_cli_mode", TEST_CMD, ...TEST_ARGS, "--cli", "--method", "tools/list");
// Test 2: CLI mode with non-existent method (should fail)
await runErrorTest("nonexistent_method", TEST_CMD, ...TEST_ARGS, "--cli", "--method", "nonexistent/method");
// Test 3: CLI mode without method (should fail)
await runErrorTest("missing_method", TEST_CMD, ...TEST_ARGS, "--cli");
console.log(`\n${colors.YELLOW}=== Running Environment Variable Tests ===${colors.NC}`);
// Test 4: CLI mode with environment variables
await runBasicTest("env_variables", TEST_CMD, ...TEST_ARGS, "-e", "KEY1=value1", "-e", "KEY2=value2", "--cli", "--method", "tools/list");
// Test 5: CLI mode with invalid environment variable format (should fail)
await runErrorTest("invalid_env_format", TEST_CMD, ...TEST_ARGS, "-e", "INVALID_FORMAT", "--cli", "--method", "tools/list");
console.log(`\n${colors.YELLOW}=== Running Config File Tests ===${colors.NC}`);
// Test 6: Using config file with CLI mode
await runBasicTest("config_file", "--config", path.join(PROJECT_ROOT, "sample-config.json"), "--server", "everything", "--cli", "--method", "tools/list");
// Test 7: Using config file without server name (should fail)
await runErrorTest("config_without_server", "--config", path.join(PROJECT_ROOT, "sample-config.json"), "--cli", "--method", "tools/list");
// Test 8: Using server name without config file (should fail)
await runErrorTest("server_without_config", "--server", "everything", "--cli", "--method", "tools/list");
// Test 9: Using non-existent config file (should fail)
await runErrorTest("nonexistent_config", "--config", "./nonexistent-config.json", "--server", "everything", "--cli", "--method", "tools/list");
// Test 10: Using invalid config file format (should fail)
await runErrorTest("invalid_config", "--config", invalidConfigPath, "--server", "everything", "--cli", "--method", "tools/list");
// Test 11: Using config file with non-existent server (should fail)
await runErrorTest("nonexistent_server", "--config", path.join(PROJECT_ROOT, "sample-config.json"), "--server", "nonexistent", "--cli", "--method", "tools/list");
console.log(`\n${colors.YELLOW}=== Running Tool-Related Tests ===${colors.NC}`);
// Test 12: CLI mode with tool call
await runBasicTest("tool_call", TEST_CMD, ...TEST_ARGS, "--cli", "--method", "tools/call", "--tool-name", "echo", "--tool-arg", "message=Hello");
// Test 13: CLI mode with tool call but missing tool name (should fail)
await runErrorTest("missing_tool_name", TEST_CMD, ...TEST_ARGS, "--cli", "--method", "tools/call", "--tool-arg", "message=Hello");
// Test 14: CLI mode with tool call but invalid tool args format (should fail)
await runErrorTest("invalid_tool_args", TEST_CMD, ...TEST_ARGS, "--cli", "--method", "tools/call", "--tool-name", "echo", "--tool-arg", "invalid_format");
// Test 15: CLI mode with multiple tool args
await runBasicTest("multiple_tool_args", TEST_CMD, ...TEST_ARGS, "--cli", "--method", "tools/call", "--tool-name", "add", "--tool-arg", "a=1", "b=2");
console.log(`\n${colors.YELLOW}=== Running Resource-Related Tests ===${colors.NC}`);
// Test 16: CLI mode with resource read
await runBasicTest("resource_read", TEST_CMD, ...TEST_ARGS, "--cli", "--method", "resources/read", "--uri", "test://static/resource/1");
// Test 17: CLI mode with resource read but missing URI (should fail)
await runErrorTest("missing_uri", TEST_CMD, ...TEST_ARGS, "--cli", "--method", "resources/read");
console.log(`\n${colors.YELLOW}=== Running Prompt-Related Tests ===${colors.NC}`);
// Test 18: CLI mode with prompt get
await runBasicTest("prompt_get", TEST_CMD, ...TEST_ARGS, "--cli", "--method", "prompts/get", "--prompt-name", "simple_prompt");
// Test 19: CLI mode with prompt get and args
await runBasicTest("prompt_get_with_args", TEST_CMD, ...TEST_ARGS, "--cli", "--method", "prompts/get", "--prompt-name", "complex_prompt", "--prompt-args", "temperature=0.7", "style=concise");
// Test 20: CLI mode with prompt get but missing prompt name (should fail)
await runErrorTest("missing_prompt_name", TEST_CMD, ...TEST_ARGS, "--cli", "--method", "prompts/get");
console.log(`\n${colors.YELLOW}=== Running Logging Tests ===${colors.NC}`);
// Test 21: CLI mode with log level
await runBasicTest("log_level", TEST_CMD, ...TEST_ARGS, "--cli", "--method", "logging/setLevel", "--log-level", "debug");
// Test 22: CLI mode with invalid log level (should fail)
await runErrorTest("invalid_log_level", TEST_CMD, ...TEST_ARGS, "--cli", "--method", "logging/setLevel", "--log-level", "invalid");
console.log(`\n${colors.YELLOW}=== Running Combined Option Tests ===${colors.NC}`);
// Note about the combined options issue
console.log(`${colors.BLUE}Testing combined options with environment variables and config file.${colors.NC}`);
// Test 23: CLI mode with config file, environment variables, and tool call
await runBasicTest("combined_options", "--config", path.join(PROJECT_ROOT, "sample-config.json"), "--server", "everything", "-e", "CLI_ENV_VAR=cli_value", "--cli", "--method", "tools/list");
// Test 24: CLI mode with all possible options (that make sense together)
await runBasicTest("all_options", "--config", path.join(PROJECT_ROOT, "sample-config.json"), "--server", "everything", "-e", "CLI_ENV_VAR=cli_value", "--cli", "--method", "tools/call", "--tool-name", "echo", "--tool-arg", "message=Hello", "--log-level", "debug");
// Print test summary
console.log(`\n${colors.YELLOW}=== Test Summary ===${colors.NC}`);
console.log(`${colors.GREEN}Passed: ${PASSED_TESTS}${colors.NC}`);
console.log(`${colors.RED}Failed: ${FAILED_TESTS}${colors.NC}`);
console.log(`${colors.ORANGE}Skipped: ${SKIPPED_TESTS}${colors.NC}`);
console.log(`Total: ${TOTAL_TESTS}`);
console.log(`${colors.BLUE}Detailed logs saved to: ${OUTPUT_DIR}${colors.NC}`);
console.log(`\n${colors.GREEN}All tests completed!${colors.NC}`);
}
// Run all tests
runTests().catch(error => {
console.error(`${colors.RED}Tests failed with error: ${error.message}${colors.NC}`);
process.exit(1);
});

bin/scripts/copy-cli.js Executable file

@@ -0,0 +1,20 @@
/**
* Cross-platform script to copy the built file to cli.js
*/
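// Replaces the Unix-only `cp` used in the previous postbuild step so the
// build works on Windows as well.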
import { promises as fs } from "fs";
import path from "path";
const SOURCE_FILE = path.resolve("build/index.js");
const TARGET_FILE = path.resolve("cli.js");
async function copyFile() {
try {
await fs.copyFile(SOURCE_FILE, TARGET_FILE);
console.log(`Successfully copied ${SOURCE_FILE} to ${TARGET_FILE}`);
} catch (error) {
console.error("Error copying file:", error);
process.exit(1);
}
}
copyFile();

bin/scripts/make-executable.js Executable file

@@ -0,0 +1,29 @@
/**
* Cross-platform script to make a file executable
*/
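// Replaces the bare `chmod +x` from the previous postbuild step; chmod is
// unavailable on Windows, where the executable bit is not needed.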
import { promises as fs } from "fs";
import { platform } from "os";
import { execSync } from "child_process";
import path from "path";
const TARGET_FILE = path.resolve("build/index.js");
async function makeExecutable() {
try {
// On Unix-like systems (Linux, macOS), use chmod
if (platform() !== "win32") {
execSync(`chmod +x "${TARGET_FILE}"`);
console.log("Made file executable with chmod");
} else {
// On Windows, no need to make files "executable" in the Unix sense
// Just ensure the file exists
await fs.access(TARGET_FILE);
console.log("File exists and is accessible on Windows");
}
} catch (error) {
console.error("Error making file executable:", error);
process.exit(1);
}
}
makeExecutable();

bin/tests/cli-tests.sh (deleted)

@@ -1,231 +0,0 @@
#!/bin/bash
# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
ORANGE='\033[0;33m'
NC='\033[0m' # No Color
# Track test results
PASSED_TESTS=0
FAILED_TESTS=0
SKIPPED_TESTS=0
TOTAL_TESTS=0
echo -e "${YELLOW}=== MCP Inspector CLI Test Script ===${NC}"
echo -e "${BLUE}This script tests the MCP Inspector CLI's ability to handle various command line options:${NC}"
echo -e "${BLUE}- Basic CLI mode${NC}"
echo -e "${BLUE}- Environment variables (-e)${NC}"
echo -e "${BLUE}- Config file (--config)${NC}"
echo -e "${BLUE}- Server selection (--server)${NC}"
echo -e "${BLUE}- Method selection (--method)${NC}"
echo -e "${BLUE}- Tool-related options (--tool-name, --tool-arg)${NC}"
echo -e "${BLUE}- Resource-related options (--uri)${NC}"
echo -e "${BLUE}- Prompt-related options (--prompt-name, --prompt-args)${NC}"
echo -e "${BLUE}- Logging options (--log-level)${NC}"
echo ""
# Change to the bin directory
cd "$(dirname "$0")/.."
BIN_DIR="$(pwd)"
PROJECT_ROOT="$(dirname "$BIN_DIR")"
# Compile bin and cli projects
echo -e "${YELLOW}Compiling MCP Inspector bin and cli...${NC}"
cd "$BIN_DIR"
npm run build
cd "$PROJECT_ROOT/cli"
npm run build
cd "$BIN_DIR"
# Create a symbolic link to handle path resolution
echo -e "${YELLOW}Setting up environment for tests...${NC}"
PARENT_DIR="$(dirname "$PROJECT_ROOT")"
# Define the test server command using npx
TEST_CMD="npx"
TEST_ARGS=("@modelcontextprotocol/server-everything")
# Create output directory for test results
OUTPUT_DIR="$BIN_DIR/tests/output"
mkdir -p "$OUTPUT_DIR"
# Create a temporary directory for test files
TEMP_DIR=$(mktemp -d)
trap 'rm -rf "$TEMP_DIR"' EXIT INT TERM
# Use the existing sample config file
echo -e "${BLUE}Using existing sample config file: $PROJECT_ROOT/sample-config.json${NC}"
cat "$PROJECT_ROOT/sample-config.json"
# Create an invalid config file for testing
echo '{
"mcpServers": {
"invalid": {' > "$TEMP_DIR/invalid-config.json"
# Function to run a basic test
run_basic_test() {
local test_name=$1
local output_file="$OUTPUT_DIR/${test_name//\//_}.log"
shift
echo -e "\n${YELLOW}Testing: ${test_name}${NC}"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
# Run the command and capture output
echo -e "${BLUE}Command: node ${BIN_DIR}/cli.js $*${NC}"
node "$BIN_DIR/cli.js" "$@" > "$output_file" 2>&1
local exit_code=$?
# Check if the test passed or failed
if [ $exit_code -eq 0 ]; then
echo -e "${GREEN}✓ Test passed: ${test_name}${NC}"
echo -e "${BLUE}First few lines of output:${NC}"
head -n 5 "$output_file" | sed 's/^/ /'
PASSED_TESTS=$((PASSED_TESTS + 1))
else
echo -e "${RED}✗ Test failed: ${test_name}${NC}"
echo -e "${RED}Error output:${NC}"
cat "$output_file" | sed 's/^/ /'
FAILED_TESTS=$((FAILED_TESTS + 1))
# Stop after any error is encountered
echo -e "${YELLOW}Stopping tests due to error. Please validate and fix before continuing.${NC}"
exit 1
fi
}
# Function to run an error test (expected to fail)
run_error_test() {
local test_name=$1
local output_file="$OUTPUT_DIR/${test_name//\//_}.log"
shift
echo -e "\n${YELLOW}Testing error case: ${test_name}${NC}"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
# Run the command and capture output
echo -e "${BLUE}Command: node ${BIN_DIR}/cli.js $*${NC}"
node "$BIN_DIR/cli.js" "$@" > "$output_file" 2>&1
local exit_code=$?
# For error tests, we expect a non-zero exit code
if [ $exit_code -ne 0 ]; then
echo -e "${GREEN}✓ Error test passed: ${test_name}${NC}"
echo -e "${BLUE}Error output (expected):${NC}"
head -n 5 "$output_file" | sed 's/^/ /'
PASSED_TESTS=$((PASSED_TESTS + 1))
else
echo -e "${RED}✗ Error test failed: ${test_name} (expected error but got success)${NC}"
echo -e "${RED}Output:${NC}"
cat "$output_file" | sed 's/^/ /'
FAILED_TESTS=$((FAILED_TESTS + 1))
# Stop after any error is encountered
echo -e "${YELLOW}Stopping tests due to error. Please validate and fix before continuing.${NC}"
exit 1
fi
}
echo -e "\n${YELLOW}=== Running Basic CLI Mode Tests ===${NC}"
# Test 1: Basic CLI mode with method
run_basic_test "basic_cli_mode" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli" "--method" "tools/list"
# Test 2: CLI mode with non-existent method (should fail)
run_error_test "nonexistent_method" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli" "--method" "nonexistent/method"
# Test 3: CLI mode without method (should fail)
run_error_test "missing_method" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli"
echo -e "\n${YELLOW}=== Running Environment Variable Tests ===${NC}"
# Test 4: CLI mode with environment variables
run_basic_test "env_variables" "${TEST_CMD}" "${TEST_ARGS[@]}" "-e" "KEY1=value1" "-e" "KEY2=value2" "--cli" "--method" "tools/list"
# Test 5: CLI mode with invalid environment variable format (should fail)
run_error_test "invalid_env_format" "${TEST_CMD}" "${TEST_ARGS[@]}" "-e" "INVALID_FORMAT" "--cli" "--method" "tools/list"
echo -e "\n${YELLOW}=== Running Config File Tests ===${NC}"
# Test 6: Using config file with CLI mode
run_basic_test "config_file" "--config" "$PROJECT_ROOT/sample-config.json" "--server" "everything" "--cli" "--method" "tools/list"
# Test 7: Using config file without server name (should fail)
run_error_test "config_without_server" "--config" "$PROJECT_ROOT/sample-config.json" "--cli" "--method" "tools/list"
# Test 8: Using server name without config file (should fail)
run_error_test "server_without_config" "--server" "everything" "--cli" "--method" "tools/list"
# Test 9: Using non-existent config file (should fail)
run_error_test "nonexistent_config" "--config" "./nonexistent-config.json" "--server" "everything" "--cli" "--method" "tools/list"
# Test 10: Using invalid config file format (should fail)
run_error_test "invalid_config" "--config" "$TEMP_DIR/invalid-config.json" "--server" "everything" "--cli" "--method" "tools/list"
# Test 11: Using config file with non-existent server (should fail)
run_error_test "nonexistent_server" "--config" "$PROJECT_ROOT/sample-config.json" "--server" "nonexistent" "--cli" "--method" "tools/list"
echo -e "\n${YELLOW}=== Running Tool-Related Tests ===${NC}"
# Test 12: CLI mode with tool call
run_basic_test "tool_call" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli" "--method" "tools/call" "--tool-name" "echo" "--tool-arg" "message=Hello"
# Test 13: CLI mode with tool call but missing tool name (should fail)
run_error_test "missing_tool_name" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli" "--method" "tools/call" "--tool-arg" "message=Hello"
# Test 14: CLI mode with tool call but invalid tool args format (should fail)
run_error_test "invalid_tool_args" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli" "--method" "tools/call" "--tool-name" "echo" "--tool-arg" "invalid_format"
# Test 15: CLI mode with multiple tool args
run_basic_test "multiple_tool_args" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli" "--method" "tools/call" "--tool-name" "add" "--tool-arg" "a=1" "b=2"
echo -e "\n${YELLOW}=== Running Resource-Related Tests ===${NC}"
# Test 16: CLI mode with resource read
run_basic_test "resource_read" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli" "--method" "resources/read" "--uri" "test://static/resource/1"
# Test 17: CLI mode with resource read but missing URI (should fail)
run_error_test "missing_uri" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli" "--method" "resources/read"
echo -e "\n${YELLOW}=== Running Prompt-Related Tests ===${NC}"
# Test 18: CLI mode with prompt get
run_basic_test "prompt_get" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli" "--method" "prompts/get" "--prompt-name" "simple_prompt"
# Test 19: CLI mode with prompt get and args
run_basic_test "prompt_get_with_args" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli" "--method" "prompts/get" "--prompt-name" "complex_prompt" "--prompt-args" "temperature=0.7" "style=concise"
# Test 20: CLI mode with prompt get but missing prompt name (should fail)
run_error_test "missing_prompt_name" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli" "--method" "prompts/get"
echo -e "\n${YELLOW}=== Running Logging Tests ===${NC}"
# Test 21: CLI mode with log level
run_basic_test "log_level" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli" "--method" "logging/setLevel" "--log-level" "debug"
# Test 22: CLI mode with invalid log level (should fail)
run_error_test "invalid_log_level" "${TEST_CMD}" "${TEST_ARGS[@]}" "--cli" "--method" "logging/setLevel" "--log-level" "invalid"
echo -e "\n${YELLOW}=== Running Combined Option Tests ===${NC}"
# Note about the combined options issue
echo -e "${BLUE}Testing combined options with environment variables and config file.${NC}"
# Test 23: CLI mode with config file, environment variables, and tool call
run_basic_test "combined_options" "--config" "$PROJECT_ROOT/sample-config.json" "--server" "everything" "-e" "CLI_ENV_VAR=cli_value" "--cli" "--method" "tools/list"
# Test 24: CLI mode with all possible options (that make sense together)
run_basic_test "all_options" "--config" "$PROJECT_ROOT/sample-config.json" "--server" "everything" "-e" "CLI_ENV_VAR=cli_value" "--cli" "--method" "tools/call" "--tool-name" "echo" "--tool-arg" "message=Hello" "--log-level" "debug"
# Print test summary
echo -e "\n${YELLOW}=== Test Summary ===${NC}"
echo -e "${GREEN}Passed: $PASSED_TESTS${NC}"
echo -e "${RED}Failed: $FAILED_TESTS${NC}"
echo -e "${ORANGE}Skipped: $SKIPPED_TESTS${NC}"
echo -e "Total: $TOTAL_TESTS"
echo -e "${BLUE}Detailed logs saved to: $OUTPUT_DIR${NC}"
echo -e "\n${GREEN}All tests completed!${NC}"

cli/package.json

@@ -1,6 +1,6 @@
{
"name": "@modelcontextprotocol/inspector-cli",
"version": "0.5.1",
"version": "0.7.0",
"description": "CLI for the Model Context Protocol inspector",
"license": "MIT",
"author": "Nicolas Barraud",

package.json

@@ -1,6 +1,6 @@
{
"name": "@modelcontextprotocol/inspector",
"version": "0.5.1",
"version": "0.7.0",
"description": "Model Context Protocol inspector",
"license": "MIT",
"author": "Anthropic, PBC (https://anthropic.com)",
@@ -27,7 +27,7 @@
],
"scripts": {
"dev": "concurrently \"cd client && npm run dev\" \"cd server && npm run dev\"",
"dev:windows": "concurrently \"cd client && npm run dev\" \"cd server && npm run dev:windows",
"dev:windows": "concurrently \"cd client && npm run dev\" \"cd server && npm run dev:windows\"",
"build-bin": "cd bin && npm run build",
"build-server": "cd server && npm run build",
"build-client": "cd client && npm run build",
@@ -38,13 +38,14 @@
"start": "node ./bin/cli.js",
"prepare": "npm run build",
"prettier-fix": "prettier --write .",
"publish-all": "npm publish --workspaces --access public && npm publish --access public"
"publish-all": "npm publish --workspaces --access public && npm publish --access public",
"test:cli": "cd bin && npm run test"
},
"dependencies": {
"@modelcontextprotocol/inspector-bin": "0.5.1",
"@modelcontextprotocol/inspector-cli": "0.5.1",
"@modelcontextprotocol/inspector-client": "0.5.1",
"@modelcontextprotocol/inspector-server": "0.5.1",
"@modelcontextprotocol/inspector-bin": "0.7.0",
"@modelcontextprotocol/inspector-cli": "0.7.0",
"@modelcontextprotocol/inspector-client": "0.7.0",
"@modelcontextprotocol/inspector-server": "0.7.0",
"@modelcontextprotocol/sdk": "^1.6.1",
"commander": "^13.1.0",
"concurrently": "^9.0.1",