forked from modelcontextprotocol/python-sdk
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathanalyze_results.py
More file actions
198 lines (151 loc) · 6.78 KB
/
analyze_results.py
File metadata and controls
198 lines (151 loc) · 6.78 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
"""
Analyze and visualize MCP server benchmark results.
Analyzer generated by Claude 4.5 Sonnet.
"""
import json
import sys
from pathlib import Path
from typing import Any
# Maps each load-scenario key (as it appears in the results JSON) to the
# (display title, subtitle) pair used when rendering the report sections.
LOAD_INFO = {
    "sequential_load": ("Sequential Load", "1 concurrent request"),
    "light_load": ("Light Load", "20 concurrent requests"),
    "medium_load": ("Medium Load", "100 concurrent requests"),
    "heavy_load": ("Heavy Load", "300 concurrent requests"),
}
def load_results(json_path: Path) -> dict[str, Any]:
    """Load benchmark results from a JSON file.

    Args:
        json_path: Path to the results JSON produced by a benchmark run.

    Returns:
        The parsed JSON document as a dict.

    Exits:
        With status 1 (message on stderr) when the file does not exist.
    """
    if not json_path.exists():
        # Fix: report errors on stderr so they are not mixed into report
        # output when stdout is piped to a file or pager.
        print(f"Error: File not found: {json_path}", file=sys.stderr)
        sys.exit(1)
    with open(json_path) as f:
        return json.load(f)
def calculate_improvement(minimcp_val: float, fastmcp_val: float, lower_is_better: bool = True) -> float:
    """Return MiniMCP's percentage improvement relative to FastMCP.

    For metrics where lower is better (e.g. latency, memory) a positive
    result means MiniMCP's value is lower; for metrics where higher is
    better (e.g. throughput) a positive result means MiniMCP's is higher.
    """
    delta = (fastmcp_val - minimcp_val) if lower_is_better else (minimcp_val - fastmcp_val)
    return delta / fastmcp_val * 100
def print_title(title: str) -> None:
    """Print *title* bold and underlined (ANSI), followed by a blank line."""
    bold, underline, reset = "\033[1m", "\033[4m", "\033[0m"
    print(f"{bold}{underline}{title}{reset}\n")
def organize_results(results: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]:
    """Group per-run metrics by server name and load name.

    Returns:
        A ``(minimcp, fastmcp)`` pair, each a mapping of load name to that
        run's metrics dict.
    """
    by_server: dict[str, dict[str, Any]] = {}
    for entry in results["results"]:
        loads = by_server.setdefault(entry["server_name"], {})
        loads[entry["load_name"]] = entry["metrics"]
    return by_server["minimcp"], by_server["fastmcp"]
def print_metadata(results: dict[str, Any]) -> None:
    """Print the benchmark timestamp and total wall-clock duration.

    Fix: the original bound the locals to ``min``/``sec``, shadowing the
    ``min`` builtin for the rest of the function body.
    """
    metadata = results["metadata"]
    minutes, seconds = divmod(metadata["duration_seconds"], 60)
    print(f"Date: {metadata['timestamp']}")
    print(f"Duration: {minutes:.0f}m {seconds:.0f}s\n")
def _improvement_range(
    minimcp: dict[str, Any],
    fastmcp: dict[str, Any],
    loads: list[str],
    metric: str,
    lower_is_better: bool,
) -> tuple[float, float]:
    """Return (min, max) percentage improvement of MiniMCP over FastMCP
    for the mean of *metric* across the given load scenarios."""
    improvements = [
        calculate_improvement(
            minimcp[load][metric]["mean"],
            fastmcp[load][metric]["mean"],
            lower_is_better=lower_is_better,
        )
        for load in loads
    ]
    return min(improvements), max(improvements)


def print_key_findings(results: dict[str, Any]) -> None:
    """Print the 'Key Findings' section: response-time, throughput, and
    memory comparisons between MiniMCP and FastMCP.

    The original repeated the same gather-and-range loop three times;
    that computation now lives in ``_improvement_range``.
    """
    print_title("Key Findings")
    minimcp, fastmcp = organize_results(results)

    # Sequential load is excluded from response time and throughput: a
    # single in-flight request says nothing about concurrent behavior.
    concurrent_loads = ["light_load", "medium_load", "heavy_load"]
    rt_min, rt_max = _improvement_range(minimcp, fastmcp, concurrent_loads, "response_time", lower_is_better=True)
    tp_min, tp_max = _improvement_range(minimcp, fastmcp, concurrent_loads, "throughput_rps", lower_is_better=False)
    mem_min, mem_max = _improvement_range(
        minimcp, fastmcp, ["medium_load", "heavy_load"], "max_memory_usage", lower_is_better=True
    )

    print(
        f"- MiniMCP outperforms FastMCP by ~{rt_min:.0f}-{rt_max:.0f}% in response time across "
        "all concurrent load scenarios"
    )
    print(f"- MiniMCP achieves ~{tp_min:.0f}-{tp_max:.0f}% higher throughput than FastMCP")
    # Memory can go either way, so pick the phrasing that matches the sign.
    if mem_min >= 0 and mem_max >= 0:
        print(f"- MiniMCP uses ~{mem_min:.0f}-{mem_max:.0f}% less memory under medium to heavy loads")
    elif mem_min < 0 and mem_max < 0:
        print(f"- MiniMCP uses ~{abs(mem_max):.0f}-{abs(mem_min):.0f}% more memory under medium to heavy loads")
    else:
        print(
            f"- MiniMCP memory usage varies from {mem_min:.0f}% to {mem_max:.0f}% compared to FastMCP under medium "
            "to heavy loads"
        )
    print()
def print_response_time_visualization(results: dict[str, Any]) -> None:
    """Render an ASCII bar chart of mean response time per load scenario."""
    print_title("Response Time Visualization (smaller is better)")
    minimcp, fastmcp = organize_results(results)
    for load_key, (title, subtitle) in LOAD_INFO.items():
        mini_ms = minimcp[load_key]["response_time"]["mean"] * 1000  # seconds -> ms
        fast_ms = fastmcp[load_key]["response_time"]["mean"] * 1000
        improvement = calculate_improvement(mini_ms, fast_ms, lower_is_better=True)

        # Scale both bars so the slower (larger) value spans 50 characters.
        longest = max(mini_ms, fast_ms)
        mini_bar = "▓" * int((mini_ms / longest) * 50)
        fast_bar = "▓" * int((fast_ms / longest) * 50)

        # Annotate the MiniMCP row with the direction of the difference.
        status = (
            f"✓ {improvement:.1f}% faster"
            if improvement > 0
            else f"✗ {abs(improvement):.1f}% slower"
        )

        print(f"{title} ({subtitle})")
        print(f"minimcp {mini_bar} {mini_ms:.2f}ms {status}")
        print(f"fastmcp {fast_bar} {fast_ms:.2f}ms")
        print()
    print()
def print_memory_visualization(results: dict[str, Any]) -> None:
    """Render an ASCII bar chart of peak memory usage per load scenario."""
    print_title("Maximum Memory Usage Visualization (smaller is better)")
    minimcp, fastmcp = organize_results(results)
    for load_key, (title, subtitle) in LOAD_INFO.items():
        mini_kb = minimcp[load_key]["max_memory_usage"]["mean"]
        fast_kb = fastmcp[load_key]["max_memory_usage"]["mean"]
        improvement = calculate_improvement(mini_kb, fast_kb, lower_is_better=True)

        # Scale both bars so the larger footprint spans 50 characters.
        largest = max(mini_kb, fast_kb)
        mini_bar = "▓" * int((mini_kb / largest) * 50)
        fast_bar = "▓" * int((fast_kb / largest) * 50)

        # Annotate the MiniMCP row with the direction of the difference.
        status = (
            f"✓ {improvement:.1f}% lower"
            if improvement > 0
            else f"✗ {abs(improvement):.1f}% higher"
        )

        print(f"{title} ({subtitle})")
        print(f"minimcp {mini_bar} {mini_kb:,.0f} KB {status}")
        print(f"fastmcp {fast_bar} {fast_kb:,.0f} KB")
        print()
    print()
def main() -> None:
    """CLI entry point: load one results file and print the full report."""
    args = sys.argv[1:]
    if len(args) != 1:
        print("Usage: python analyze_results.py <results.json>")
        sys.exit(1)

    results = load_results(Path(args[0]))

    print()
    print_title("Benchmark Analysis")
    print_metadata(results)
    print_key_findings(results)
    print_response_time_visualization(results)
    print_memory_visualization(results)


if __name__ == "__main__":
    main()