Skip to content

Commit 073041d

Browse files
committed
fix: mypy type errors in interactive eval mode
- Add Optional type hints for Path parameters
- Add Dict and Any imports for type annotations
- Add null checks for process.stdout before iteration
- Add type annotations for evaluation dictionaries
- Fix return type for _add_evaluations_interactive
1 parent 9af53d9 commit 073041d

1 file changed

Lines changed: 13 additions & 11 deletions

File tree

src/uipath/_cli/_eval_interactive.py

Lines changed: 13 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -4,7 +4,7 @@
44
import subprocess
55
import sys
66
from pathlib import Path
7-
from typing import List, Optional, Tuple
7+
from typing import Any, Dict, List, Optional, Tuple
88

99
import select
1010
import sys
@@ -29,7 +29,7 @@ def has_termios() -> bool:
2929
class InteractiveEvalCLI:
3030
"""Simple, fast, keyboard-driven evaluation CLI."""
3131

32-
def __init__(self, project_root: Path = None):
32+
def __init__(self, project_root: Optional[Path] = None):
3333
self.project_root = project_root or Path.cwd()
3434
self.eval_sets: List[Tuple[str, Path]] = []
3535
self.evaluators: List[Tuple[str, Path]] = []
@@ -457,8 +457,9 @@ def _execute_evaluation(self, eval_path: Path) -> None:
457457
)
458458

459459
# Stream output in real-time
460-
for line in process.stdout:
461-
print(line.rstrip())
460+
if process.stdout:
461+
for line in process.stdout:
462+
print(line.rstrip())
462463

463464
process.wait()
464465

@@ -504,8 +505,9 @@ def _execute_evaluation_no_clear(self, eval_path: Path) -> None:
504505
)
505506

506507
# Stream output in real-time
507-
for line in process.stdout:
508-
print(line.rstrip())
508+
if process.stdout:
509+
for line in process.stdout:
510+
print(line.rstrip())
509511

510512
process.wait()
511513

@@ -795,7 +797,7 @@ def _create_eval_set(self) -> None:
795797
# Ask if they want to add evaluations
796798
add_evals = self._get_input("Add evaluations now? (y/n): ").lower()
797799
if add_evals in ['y', 'yes']:
798-
eval_set["evaluations"] = self._add_evaluations_interactive(eval_set["id"])
800+
eval_set["evaluations"] = self._add_evaluations_interactive(str(eval_set["id"]))
799801

800802
# Ensure evaluationSets directory exists
801803
eval_sets_dir = self.project_root / "evaluationSets"
@@ -883,7 +885,7 @@ def _create_eval_set_interactive(self) -> None:
883885
console.warning("Invalid JSON, using empty expected output")
884886
expected_output = {}
885887

886-
evaluation = {
888+
evaluation: Dict[str, Any] = {
887889
"id": f"test-{test_count}",
888890
"name": test_name,
889891
"inputs": inputs,
@@ -941,7 +943,7 @@ def _create_eval_set_interactive(self) -> None:
941943

942944
input("\nPress Enter to continue...")
943945

944-
def _add_evaluations_interactive(self, eval_set_id: str) -> List[dict]:
946+
def _add_evaluations_interactive(self, eval_set_id: str) -> List[Dict[str, Any]]:
945947
"""Add evaluations interactively."""
946948
evaluations = []
947949
test_count = 1
@@ -974,7 +976,7 @@ def _add_evaluations_interactive(self, eval_set_id: str) -> List[dict]:
974976
console.warning("Invalid JSON, using empty expected output")
975977
expected_output = {}
976978

977-
evaluation = {
979+
evaluation: Dict[str, Any] = {
978980
"id": f"test-{test_count}",
979981
"name": test_name,
980982
"inputs": inputs,
@@ -1193,7 +1195,7 @@ def _get_evaluator_id(self, path: Path) -> str:
11931195
return path.stem
11941196

11951197

1196-
def launch_interactive_cli(project_root: Path = None) -> None:
1198+
def launch_interactive_cli(project_root: Optional[Path] = None) -> None:
11971199
"""Launch the interactive CLI."""
11981200
cli = InteractiveEvalCLI(project_root)
11991201
cli.run()

0 commit comments

Comments (0)