diff --git a/python/packages/core/agent_framework/_workflows/_workflow.py b/python/packages/core/agent_framework/_workflows/_workflow.py
index e8d7f6f155..5f93644035 100644
--- a/python/packages/core/agent_framework/_workflows/_workflow.py
+++ b/python/packages/core/agent_framework/_workflows/_workflow.py
@@ -152,7 +152,7 @@ class Workflow(DictConvertible):
     Checkpointing can be configured at build time or runtime:

     Build-time (via WorkflowBuilder):
-        workflow = WorkflowBuilder().with_checkpointing(storage).build()
+        workflow = WorkflowBuilder(start_executor=executor, checkpoint_storage=storage).build()

     Runtime (via run parameters):
         result = await workflow.run(message, checkpoint_storage=runtime_storage)
@@ -428,7 +428,7 @@ async def _execute_with_message_or_checkpoint(
             if not has_checkpointing and checkpoint_storage is None:
                 raise ValueError(
                     "Cannot restore from checkpoint: either provide checkpoint_storage parameter "
-                    "or build workflow with WorkflowBuilder.with_checkpointing(checkpoint_storage)."
+                    "or build the workflow with checkpoint_storage passed to the WorkflowBuilder constructor."
                 )

             await self._runner.restore_from_checkpoint(checkpoint_id, checkpoint_storage)
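For illustration, a minimal sketch of the two checkpointing paths described in the docstring hunk above, under the constructor-based API this patch introduces. `my_executor` is a placeholder for any Executor; FileCheckpointStorage and the import style are taken from examples elsewhere in this patch.

    from agent_framework import FileCheckpointStorage, WorkflowBuilder

    storage = FileCheckpointStorage("./checkpoints")

    # Build-time: checkpointing is part of the workflow definition.
    workflow = WorkflowBuilder(start_executor=my_executor, checkpoint_storage=storage).build()
    result = await workflow.run("input")

    # Runtime: a plain workflow, with storage supplied per run.
    plain = WorkflowBuilder(start_executor=my_executor).build()
    result = await plain.run("input", checkpoint_storage=storage)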
diff --git a/python/packages/core/agent_framework/_workflows/_workflow_builder.py b/python/packages/core/agent_framework/_workflows/_workflow_builder.py
index e47279b1a2..14fd512e17 100644
--- a/python/packages/core/agent_framework/_workflows/_workflow_builder.py
+++ b/python/packages/core/agent_framework/_workflows/_workflow_builder.py
@@ -138,11 +138,10 @@ async def process(self, text: str, ctx: WorkflowContext[Never, str]) -> None:

         # Build a workflow
         workflow = (
-            WorkflowBuilder()
+            WorkflowBuilder(start_executor="UpperCase")
             .register_executor(lambda: UpperCaseExecutor(id="upper"), name="UpperCase")
             .register_executor(lambda: ReverseExecutor(id="reverse"), name="Reverse")
             .add_edge("UpperCase", "Reverse")
-            .set_start_executor("UpperCase")
             .build()
         )
@@ -156,23 +155,32 @@ def __init__(
         max_iterations: int = DEFAULT_MAX_ITERATIONS,
         name: str | None = None,
         description: str | None = None,
+        *,
+        start_executor: Executor | SupportsAgentRun | str,
+        checkpoint_storage: CheckpointStorage | None = None,
+        output_executors: list[Executor | SupportsAgentRun | str] | None = None,
     ):
-        """Initialize the WorkflowBuilder with an empty list of edges and no starting executor.
+        """Initialize the WorkflowBuilder.

         Args:
             max_iterations: Maximum number of iterations for workflow convergence. Default is 100.
             name: Optional human-readable name for the workflow.
             description: Optional description of what the workflow does.
+            start_executor: The starting executor for the workflow. Can be an Executor instance,
+                a SupportsAgentRun instance, or the name of a registered executor factory.
+            checkpoint_storage: Optional checkpoint storage that enables workflow state persistence.
+            output_executors: Optional list of executors whose outputs should be collected.
+                If not provided, outputs from all executors are collected.
         """
         self._edge_groups: list[EdgeGroup] = []
         self._executors: dict[str, Executor] = {}
         self._start_executor: Executor | str | None = None
-        self._checkpoint_storage: CheckpointStorage | None = None
+        self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage
         self._max_iterations: int = max_iterations
         self._name: str | None = name
         self._description: str | None = description
         # Maps underlying SupportsAgentRun object id -> wrapped Executor so we reuse the same wrapper
-        # across set_start_executor / add_edge calls. This avoids multiple AgentExecutor instances
+        # across start_executor / add_edge calls. This avoids multiple AgentExecutor instances
         # being created for the same agent.
         self._agent_wrappers: dict[str, Executor] = {}
@@ -187,7 +195,10 @@ def __init__(
         self._executor_registry: dict[str, Callable[[], Executor]] = {}

         # Output executors filter; if set, only outputs from these executors are yielded
-        self._output_executors: list[Executor | SupportsAgentRun | str] = []
+        self._output_executors: list[Executor | SupportsAgentRun | str] = list(output_executors) if output_executors else []
+
+        # Resolve and register the start executor.
+        self._set_start_executor(start_executor)

         # Agents auto-wrapped by builder now always stream incremental updates.
@@ -279,10 +290,9 @@ async def process(self, text: str, ctx: WorkflowContext[Never, str]) -> None:

                 # Build a workflow
                 workflow = (
-                    WorkflowBuilder()
+                    WorkflowBuilder(start_executor="UpperCase")
                     .register_executor(lambda: UpperCaseExecutor(id="upper"), name="UpperCase")
                     .register_executor(lambda: ReverseExecutor(id="reverse"), name="Reverse")
-                    .set_start_executor("UpperCase")
                     .add_edge("UpperCase", "Reverse")
                     .build()
                 )
@@ -302,9 +312,8 @@ async def log(self, message: str, ctx: WorkflowContext) -> None:

                 # Register the same executor factory under multiple names
                 workflow = (
-                    WorkflowBuilder()
+                    WorkflowBuilder(start_executor="ExecutorA")
                     .register_executor(lambda: LoggerExecutor(id="logger"), name=["ExecutorA", "ExecutorB"])
-                    .set_start_executor("ExecutorA")
                     .add_edge("ExecutorA", "ExecutorB")
                     .build()
         """
@@ -347,7 +356,7 @@ def register_agent(

                 # Build a workflow
                 workflow = (
-                    WorkflowBuilder()
+                    WorkflowBuilder(start_executor="SomeOtherExecutor")
                     .register_executor(lambda: ..., name="SomeOtherExecutor")
                     .register_agent(
                         lambda: AnthropicAgent(name="writer", model="claude-3-5-sonnet-20241022"),
@@ -355,7 +364,6 @@
                         output_response=True,
                     )
                     .add_edge("SomeOtherExecutor", "WriterAgent")
-                    .set_start_executor("SomeOtherExecutor")
                     .build()
                 )
         """
@@ -420,20 +428,18 @@ async def process(self, count: int, ctx: WorkflowContext[Never, str]) -> None:

                 # Connect executors with an edge
                 workflow = (
-                    WorkflowBuilder()
+                    WorkflowBuilder(start_executor="ProcessorA")
                     .register_executor(lambda: ProcessorA(id="a"), name="ProcessorA")
                     .register_executor(lambda: ProcessorB(id="b"), name="ProcessorB")
                     .add_edge("ProcessorA", "ProcessorB")
-                    .set_start_executor("ProcessorA")
                     .build()
                 )

                 workflow = (
-                    WorkflowBuilder()
+                    WorkflowBuilder(start_executor="ProcessorA")
                     .register_executor(lambda: ProcessorA(id="a"), name="ProcessorA")
                     .register_executor(lambda: ProcessorB(id="b"), name="ProcessorB")
                     .add_edge("ProcessorA", "ProcessorB", condition=only_large_numbers)
-                    .set_start_executor("ProcessorA")
                     .build()
                 )
         """
@@ -507,12 +513,11 @@ async def validate(self, data: str, ctx: WorkflowContext) -> None:

                 # Broadcast to multiple validators
                 workflow = (
-                    WorkflowBuilder()
+                    WorkflowBuilder(start_executor="DataSource")
                     .register_executor(lambda: DataSource(id="source"), name="DataSource")
                     .register_executor(lambda: ValidatorA(id="val_a"), name="ValidatorA")
                     .register_executor(lambda: ValidatorB(id="val_b"), name="ValidatorB")
                     .add_fan_out_edges("DataSource", ["ValidatorA", "ValidatorB"])
-                    .set_start_executor("DataSource")
                     .build()
                 )
         """
@@ -600,7 +605,7 @@ async def handle(self, result: Result, ctx: WorkflowContext) -> None:

                 # Route based on score value
                 workflow = (
-                    WorkflowBuilder()
+                    WorkflowBuilder(start_executor="Evaluator")
                     .register_executor(lambda: Evaluator(id="eval"), name="Evaluator")
                     .register_executor(lambda: HighScoreHandler(id="high"), name="HighScoreHandler")
                     .register_executor(lambda: LowScoreHandler(id="low"), name="LowScoreHandler")
@@ -611,7 +616,6 @@ async def handle(self, result: Result, ctx: WorkflowContext) -> None:
                             Default(target="LowScoreHandler"),
                         ],
                     )
-                    .set_start_executor("Evaluator")
                     .build()
                 )
         """
@@ -714,7 +718,7 @@ def select_workers(task: Task, available: list[str]) -> list[str]:

                 workflow = (
-                    WorkflowBuilder()
+                    WorkflowBuilder(start_executor="TaskDispatcher")
                     .register_executor(lambda: TaskDispatcher(id="dispatcher"), name="TaskDispatcher")
                     .register_executor(lambda: WorkerA(id="worker_a"), name="WorkerA")
                     .register_executor(lambda: WorkerB(id="worker_b"), name="WorkerB")
@@ -723,7 +727,6 @@ def select_workers(task: Task, available: list[str]) -> list[str]:
                         ["WorkerA", "WorkerB"],
                         selection_func=select_workers,
                     )
-                    .set_start_executor("TaskDispatcher")
                     .build()
                 )
         """
@@ -803,12 +806,11 @@ async def aggregate(self, results: list[str], ctx: WorkflowContext[Never, str]) -> None:

                 # Collect results from multiple producers
                 workflow = (
-                    WorkflowBuilder()
+                    WorkflowBuilder(start_executor="Producer1")
                     .register_executor(lambda: Producer(id="prod_1"), name="Producer1")
                     .register_executor(lambda: Producer(id="prod_2"), name="Producer2")
                     .register_executor(lambda: Aggregator(id="agg"), name="Aggregator")
                     .add_fan_in_edges(["Producer1", "Producer2"], "Aggregator")
-                    .set_start_executor("Producer1")
                     .build()
                 )
         """
@@ -880,12 +882,11 @@ async def process(self, text: str, ctx: WorkflowContext[Never, str]) -> None:

                 # Chain executors in sequence
                 workflow = (
-                    WorkflowBuilder()
+                    WorkflowBuilder(start_executor="step1")
                     .register_executor(lambda: Step1(id="step1"), name="step1")
                     .register_executor(lambda: Step2(id="step2"), name="step2")
                     .register_executor(lambda: Step3(id="step3"), name="step3")
                     .add_chain(["step1", "step2", "step3"])
-                    .set_start_executor("step1")
                     .build()
                 )
         """
@@ -911,46 +912,12 @@ async def process(self, text: str, ctx: WorkflowContext[Never, str]) -> None:
                 self.add_edge(wrapped[i], wrapped[i + 1])
         return self

-    def set_start_executor(self, executor: Executor | SupportsAgentRun | str) -> Self:
-        """Set the starting executor for the workflow.
-
-        The start executor is the entry point for the workflow. When the workflow is executed,
-        the initial message will be sent to this executor.
+    def _set_start_executor(self, executor: Executor | SupportsAgentRun | str) -> None:
+        """Set the starting executor for the workflow (internal; called from __init__).

         Args:
             executor: The starting executor, which can be an Executor instance,
                 SupportsAgentRun instance, or the name of a registered executor factory.
-
-        Returns:
-            Self: The WorkflowBuilder instance for method chaining.
-
-        Example:
-            .. code-block:: python
-
-                from typing_extensions import Never
-                from agent_framework import Executor, WorkflowBuilder, WorkflowContext, handler
-
-
-                class EntryPoint(Executor):
-                    @handler
-                    async def process(self, text: str, ctx: WorkflowContext[str]) -> None:
-                        await ctx.send_message(text.upper())
-
-
-                class Processor(Executor):
-                    @handler
-                    async def process(self, text: str, ctx: WorkflowContext[Never, str]) -> None:
-                        await ctx.yield_output(text)
-
-
-                workflow = (
-                    WorkflowBuilder()
-                    .register_executor(lambda: EntryPoint(id="entry"), name="EntryPoint")
-                    .register_executor(lambda: Processor(id="proc"), name="Processor")
-                    .add_edge("EntryPoint", "Processor")
-                    .set_start_executor("EntryPoint")
-                    .build()
-                )
         """
         if self._start_executor is not None:
             start_id = self._start_executor if isinstance(self._start_executor, str) else self._start_executor.id
@@ -966,123 +933,9 @@ async def process(self, text: str, ctx: WorkflowContext[Never, str]) -> None:
         existing = self._executors.get(wrapped.id)
         if existing is not wrapped:
             self._add_executor(wrapped)
-        return self
-
-    def set_max_iterations(self, max_iterations: int) -> Self:
-        """Set the maximum number of iterations for the workflow.
-
-        When a workflow contains cycles, this limit prevents infinite loops by capping
-        the total number of executor invocations. The default is 100 iterations.
-
-        Args:
-            max_iterations: The maximum number of iterations the workflow will run for convergence.
-
-        Returns:
-            Self: The WorkflowBuilder instance for method chaining.
-
-        Example:
-            .. code-block:: python
-
-                from agent_framework import Executor, WorkflowBuilder, WorkflowContext, handler
-
-
-                class StepA(Executor):
-                    @handler
-                    async def process(self, count: int, ctx: WorkflowContext[int]) -> None:
-                        if count < 10:
-                            await ctx.send_message(count + 1)
-
-
-                class StepB(Executor):
-                    @handler
-                    async def process(self, count: int, ctx: WorkflowContext[int]) -> None:
-                        await ctx.send_message(count)
-
-
-                # Set a custom iteration limit for workflow with cycles
-                workflow = (
-                    WorkflowBuilder()
-                    .set_max_iterations(500)
-                    .register_executor(lambda: StepA(id="step_a"), name="StepA")
-                    .register_executor(lambda: StepB(id="step_b"), name="StepB")
-                    .add_edge("StepA", "StepB")
-                    .add_edge("StepB", "StepA")  # Cycle
-                    .set_start_executor("StepA")
-                    .build()
-                )
-        """
-        self._max_iterations = max_iterations
-        return self

     # Removed explicit set_agent_streaming() API; agents always stream updates.

-    def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> Self:
-        """Enable checkpointing with the specified storage.
-
-        Checkpointing allows workflows to save their state periodically, enabling
-        pause/resume functionality and recovery from failures. The checkpoint storage
-        implementation determines where checkpoints are persisted.
-
-        Args:
-            checkpoint_storage: The checkpoint storage implementation to use.
-
-        Returns:
-            Self: The WorkflowBuilder instance for method chaining.
-
-        Example:
-            .. code-block:: python
-
-                from typing_extensions import Never
-                from agent_framework import Executor, WorkflowBuilder, WorkflowContext, handler
-                from agent_framework import FileCheckpointStorage
-
-
-                class ProcessorA(Executor):
-                    @handler
-                    async def process(self, text: str, ctx: WorkflowContext[str]) -> None:
-                        await ctx.send_message(text.upper())
-
-
-                class ProcessorB(Executor):
-                    @handler
-                    async def process(self, text: str, ctx: WorkflowContext[Never, str]) -> None:
-                        await ctx.yield_output(text)
-
-
-                # Enable checkpointing with file-based storage
-                storage = FileCheckpointStorage("./checkpoints")
-                workflow = (
-                    WorkflowBuilder()
-                    .register_executor(lambda: ProcessorA(id="proc_a"), name="ProcessorA")
-                    .register_executor(lambda: ProcessorB(id="proc_b"), name="ProcessorB")
-                    .add_edge("ProcessorA", "ProcessorB")
-                    .set_start_executor("ProcessorA")
-                    .with_checkpointing(storage)
-                    .build()
-                )
-
-                # Run with checkpoint saving
-                events = await workflow.run("input")
-        """
-        self._checkpoint_storage = checkpoint_storage
-        return self
-
-    def with_output_from(self, executors: list[Executor | SupportsAgentRun | str]) -> Self:
-        """Specify which executors' outputs should be collected as workflow outputs.
-
-        By default, outputs from all executors are collected. This method allows
-        filtering to only include outputs from specified executors.
-
-        Args:
-            executors: A list of executors or registered names of the executor factories
-                whose outputs should be collected.
-
-        Returns:
-            Self: The WorkflowBuilder instance for method chaining.
-        """
-        self._output_executors = list(executors)
-        return self
-
     def _resolve_edge_registry(self) -> tuple[Executor, dict[str, Executor], list[EdgeGroup]]:
         """Resolve deferred edge registrations into executors and edge groups.
@@ -1097,7 +950,9 @@ def _resolve_edge_registry(self) -> tuple[Executor, dict[str, Executor], list[EdgeGroup]]:
             as they are already part of the workflow builder's internal state.
         """
         if not self._start_executor:
-            raise ValueError("Starting executor must be set using set_start_executor before building the workflow.")
+            raise ValueError(
+                "Starting executor must be set via the start_executor constructor parameter before building."
+            )

         start_executor: Executor | None = None
         if isinstance(self._start_executor, Executor):
@@ -1200,9 +1055,8 @@ async def process(self, text: str, ctx: WorkflowContext[Never, str]) -> None:

             # Build and execute a workflow
             workflow = (
-                WorkflowBuilder()
+                WorkflowBuilder(start_executor="MyExecutor")
                 .register_executor(lambda: MyExecutor(id="executor"), name="MyExecutor")
-                .set_start_executor("MyExecutor")
                 .build()
             )
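To summarize the builder migration above in one place, a before/after sketch; the executor names are placeholders, and the parameter values are only examples.

    # Before this patch: fluent setters
    workflow = (
        WorkflowBuilder()
        .set_start_executor(entry)
        .add_edge(entry, processor)
        .set_max_iterations(50)
        .with_checkpointing(storage)
        .with_output_from([processor])
        .build()
    )

    # After this patch: configuration moves to the constructor; only graph
    # construction (edges, chains, fan-out/fan-in) stays fluent.
    workflow = (
        WorkflowBuilder(
            max_iterations=50,
            start_executor=entry,
            checkpoint_storage=storage,
            output_executors=[processor],
        )
        .add_edge(entry, processor)
        .build()
    )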
diff --git a/python/packages/core/tests/workflow/test_agent_executor.py b/python/packages/core/tests/workflow/test_agent_executor.py
index 3cbd369bf4..841ef84b85 100644
--- a/python/packages/core/tests/workflow/test_agent_executor.py
+++ b/python/packages/core/tests/workflow/test_agent_executor.py
@@ -70,7 +70,7 @@ async def test_agent_executor_checkpoint_stores_and_restores_state() -> None:
     executor = AgentExecutor(initial_agent, agent_thread=initial_thread)

     # Build workflow with checkpointing enabled
-    wf = SequentialBuilder().participants([executor]).with_checkpointing(storage).build()
+    wf = SequentialBuilder(participants=[executor], checkpoint_storage=storage).build()

     # Run the workflow with a user message
     first_run_output: AgentExecutorResponse | None = None
@@ -124,7 +124,7 @@ async def test_agent_executor_checkpoint_stores_and_restores_state() -> None:
     assert restored_agent.call_count == 0

     # Build new workflow with the restored executor
-    wf_resume = SequentialBuilder().participants([restored_executor]).with_checkpointing(storage).build()
+    wf_resume = SequentialBuilder(participants=[restored_executor], checkpoint_storage=storage).build()

     # Resume from checkpoint
     resumed_output: AgentExecutorResponse | None = None
diff --git a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py
index 4e7fb601e4..051a2109e5 100644
--- a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py
+++ b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py
@@ -96,7 +96,7 @@ async def test_agent_executor_emits_tool_calls_in_streaming_mode() -> None:
     agent = _ToolCallingAgent(id="tool_agent", name="ToolAgent")
     agent_exec = AgentExecutor(agent, id="tool_exec")

-    workflow = WorkflowBuilder().set_start_executor(agent_exec).build()
+    workflow = WorkflowBuilder(start_executor=agent_exec).build()

     # Act: run in streaming mode
     events: list[WorkflowEvent[AgentResponseUpdate]] = []
@@ -249,11 +249,7 @@ async def test_agent_executor_tool_call_with_approval() -> None:
     )

     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(agent)
-        .add_edge(agent, test_executor)
-        .with_output_from([test_executor])
-        .build()
+        WorkflowBuilder(start_executor=agent, output_executors=[test_executor]).add_edge(agent, test_executor).build()
     )

     # Act
@@ -286,7 +282,7 @@ async def test_agent_executor_tool_call_with_approval_streaming() -> None:
         tools=[mock_tool_requiring_approval],
     )

-    workflow = WorkflowBuilder().set_start_executor(agent).add_edge(agent, test_executor).build()
+    workflow = WorkflowBuilder(start_executor=agent).add_edge(agent, test_executor).build()

     # Act
     request_info_events: list[WorkflowEvent] = []
@@ -324,11 +320,7 @@ async def test_agent_executor_parallel_tool_call_with_approval() -> None:
     )

     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(agent)
-        .add_edge(agent, test_executor)
-        .with_output_from([test_executor])
-        .build()
+        WorkflowBuilder(start_executor=agent, output_executors=[test_executor]).add_edge(agent, test_executor).build()
     )

     # Act
@@ -363,7 +355,7 @@ async def test_agent_executor_parallel_tool_call_with_approval_streaming() -> None:
         tools=[mock_tool_requiring_approval],
     )

-    workflow = WorkflowBuilder().set_start_executor(agent).add_edge(agent, test_executor).build()
+    workflow = WorkflowBuilder(start_executor=agent).add_edge(agent, test_executor).build()

     # Act
     request_info_events: list[WorkflowEvent] = []
diff --git a/python/packages/core/tests/workflow/test_checkpoint_validation.py b/python/packages/core/tests/workflow/test_checkpoint_validation.py
index 3139fa302a..c028a94b40 100644
--- a/python/packages/core/tests/workflow/test_checkpoint_validation.py
+++ b/python/packages/core/tests/workflow/test_checkpoint_validation.py
@@ -30,8 +30,9 @@ def build_workflow(storage: InMemoryCheckpointStorage, finish_id: str = "finish"
     start = StartExecutor(id="start")
     finish = FinishExecutor(id=finish_id)

-    builder = WorkflowBuilder(max_iterations=3).set_start_executor(start).add_edge(start, finish)
-    builder = builder.with_checkpointing(checkpoint_storage=storage)
+    builder = WorkflowBuilder(max_iterations=3, start_executor=start, checkpoint_storage=storage).add_edge(
+        start, finish
+    )

     return builder.build()
diff --git a/python/packages/core/tests/workflow/test_executor.py b/python/packages/core/tests/workflow/test_executor.py
index b08bd2be81..507b798e96 100644
--- a/python/packages/core/tests/workflow/test_executor.py
+++ b/python/packages/core/tests/workflow/test_executor.py
@@ -153,7 +153,7 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None:
     upper = UpperCaseExecutor(id="upper")
     collector = CollectorExecutor(id="collector")

-    workflow = WorkflowBuilder().add_edge(upper, collector).set_start_executor(upper).build()
+    workflow = WorkflowBuilder(start_executor=upper).add_edge(upper, collector).build()

     events = await workflow.run("hello world")
     invoked_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_invoked"]
@@ -190,7 +190,7 @@ async def handle(self, text: str, ctx: WorkflowContext) -> None:
     sender = MultiSenderExecutor(id="sender")
     collector = CollectorExecutor(id="collector")

-    workflow = WorkflowBuilder().add_edge(sender, collector).set_start_executor(sender).build()
+    workflow = WorkflowBuilder(start_executor=sender).add_edge(sender, collector).build()

     events = await workflow.run("hello")
     completed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed"]
@@ -217,7 +217,7 @@ async def handle(self, text: str, ctx: WorkflowContext[Never, str]) -> None:
         await ctx.yield_output(text.upper())

     executor = YieldOnlyExecutor(id="yielder")
-    workflow = WorkflowBuilder().set_start_executor(executor).build()
+    workflow = WorkflowBuilder(start_executor=executor).build()

     events = await workflow.run("test")
     completed_events = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "executor_completed"]
@@ -260,7 +260,7 @@ async def handle(self, response: Response, ctx: WorkflowContext) -> None:
     processor = ProcessorExecutor(id="processor")
     collector = CollectorExecutor(id="collector")

-    workflow = WorkflowBuilder().add_edge(processor, collector).set_start_executor(processor).build()
+    workflow = WorkflowBuilder(start_executor=processor).add_edge(processor, collector).build()

     input_request = Request(query="hello", limit=3)
     events = await workflow.run(input_request)
@@ -539,7 +539,7 @@ async def mutator(messages: list[ChatMessage], ctx: WorkflowContext[list[ChatMessage]]) -> None:
         # Verify mutation happened
         assert len(messages) == original_len + 1

-    workflow = WorkflowBuilder().set_start_executor(mutator).build()
+    workflow = WorkflowBuilder(start_executor=mutator).build()

     # Run with a single user message
     input_messages = [ChatMessage(role="user", text="hello")]
diff --git a/python/packages/core/tests/workflow/test_full_conversation.py b/python/packages/core/tests/workflow/test_full_conversation.py
index 7ebb9b03d6..c29dd61fe5 100644
--- a/python/packages/core/tests/workflow/test_full_conversation.py
+++ b/python/packages/core/tests/workflow/test_full_conversation.py
@@ -76,13 +76,7 @@ async def test_agent_executor_populates_full_conversation_non_streaming() -> None:
     agent_exec = AgentExecutor(agent, id="agent1-exec")
     capturer = _CaptureFullConversation(id="capture")

-    wf = (
-        WorkflowBuilder()
-        .set_start_executor(agent_exec)
-        .add_edge(agent_exec, capturer)
-        .with_output_from([capturer])
-        .build()
-    )
+    wf = WorkflowBuilder(start_executor=agent_exec, output_executors=[capturer]).add_edge(agent_exec, capturer).build()

     # Act: use run() to test non-streaming mode
     result = await wf.run("hello world")
@@ -144,7 +138,7 @@ async def test_sequential_adapter_uses_full_conversation() -> None:
     a1 = _CaptureAgent(id="agent1", name="A1", reply_text="A1 reply")
     a2 = _CaptureAgent(id="agent2", name="A2", reply_text="A2 reply")

-    wf = SequentialBuilder().participants([a1, a2]).build()
+    wf = SequentialBuilder(participants=[a1, a2]).build()

     # Act
     async for ev in wf.run("hello seq", stream=True):
diff --git a/python/packages/core/tests/workflow/test_function_executor.py b/python/packages/core/tests/workflow/test_function_executor.py
index a06f1445e1..3d274f8cd7 100644
--- a/python/packages/core/tests/workflow/test_function_executor.py
+++ b/python/packages/core/tests/workflow/test_function_executor.py
@@ -236,7 +236,7 @@ async def reverse_text(text: str, ctx: WorkflowContext[Any, str]) -> None:
     assert reverse_spec["output_types"] == [Any]  # First parameter is Any
     assert reverse_spec["workflow_output_types"] == [str]  # Second parameter is str

-    workflow = WorkflowBuilder().add_edge(to_upper, reverse_text).set_start_executor(to_upper).build()
+    workflow = WorkflowBuilder(start_executor=to_upper).add_edge(to_upper, reverse_text).build()

     # Run workflow
     events = await workflow.run("hello world")
@@ -345,7 +345,7 @@ async def double_value(value: int):

     # Since single-parameter functions can't send messages,
     # they're typically used as terminal nodes or for side effects
-    WorkflowBuilder().set_start_executor(double_value).build()
+    WorkflowBuilder(start_executor=double_value).build()

     # For testing purposes, we can check that the handler is registered correctly
     assert double_value.can_handle(Message(data=5, source_id="mock"))
diff --git a/python/packages/core/tests/workflow/test_request_info_and_response.py b/python/packages/core/tests/workflow/test_request_info_and_response.py
index 488bc2633f..b62bfafb7c 100644
--- a/python/packages/core/tests/workflow/test_request_info_and_response.py
+++ b/python/packages/core/tests/workflow/test_request_info_and_response.py
@@ -178,7 +178,7 @@ class TestRequestInfoAndResponse:
     async def test_approval_workflow(self):
         """Test end-to-end workflow with approval request."""
         executor = ApprovalRequiredExecutor(id="approval_executor")
-        workflow = WorkflowBuilder().set_start_executor(executor).build()
+        workflow = WorkflowBuilder(start_executor=executor).build()

         # First run the workflow until it emits a request
         request_info_event: WorkflowEvent | None = None
@@ -203,7 +203,7 @@ async def test_calculation_workflow(self):
         """Test end-to-end workflow with calculation request."""
         executor = CalculationExecutor(id="calc_executor")
-        workflow = WorkflowBuilder().set_start_executor(executor).build()
+        workflow = WorkflowBuilder(start_executor=executor).build()

         # First run the workflow until it emits a calculation request
         request_info_event: WorkflowEvent | None = None
@@ -230,7 +230,7 @@ async def test_multiple_requests_workflow(self):
         """Test workflow with multiple concurrent requests."""
         executor = MultiRequestExecutor(id="multi_executor")
-        workflow = WorkflowBuilder().set_start_executor(executor).build()
+        workflow = WorkflowBuilder(start_executor=executor).build()

         # Collect all request events by running the full stream
         request_events: list[WorkflowEvent] = []
@@ -264,7 +264,7 @@ async def test_denied_approval_workflow(self):
         """Test workflow when approval is denied."""
         executor = ApprovalRequiredExecutor(id="approval_executor")
-        workflow = WorkflowBuilder().set_start_executor(executor).build()
+        workflow = WorkflowBuilder(start_executor=executor).build()

         # First run the workflow until it emits a request
         request_info_event: WorkflowEvent | None = None
@@ -287,7 +287,7 @@ async def test_workflow_state_with_pending_requests(self):
         """Test workflow state when waiting for responses."""
         executor = ApprovalRequiredExecutor(id="approval_executor")
-        workflow = WorkflowBuilder().set_start_executor(executor).build()
+        workflow = WorkflowBuilder(start_executor=executor).build()

         # Run workflow until idle with pending requests
         request_info_event: WorkflowEvent | None = None
@@ -312,7 +312,7 @@ async def test_invalid_calculation_input(self):
         """Test workflow handling of invalid calculation input."""
         executor = CalculationExecutor(id="calc_executor")
-        workflow = WorkflowBuilder().set_start_executor(executor).build()
+        workflow = WorkflowBuilder(start_executor=executor).build()

         # Send invalid input (no numbers)
         completed = False
@@ -334,7 +334,7 @@ async def test_checkpoint_with_pending_request_info_events(self):

         # Create workflow with checkpointing enabled
         executor = ApprovalRequiredExecutor(id="approval_executor")
-        workflow = WorkflowBuilder().set_start_executor(executor).with_checkpointing(storage).build()
+        workflow = WorkflowBuilder(start_executor=executor, checkpoint_storage=storage).build()

         # Step 1: Run workflow to completion to ensure checkpoints are created
         request_info_event: WorkflowEvent | None = None
@@ -372,7 +372,7 @@ async def test_checkpoint_with_pending_request_info_events(self):

         # Step 4: Create a fresh workflow and restore from checkpoint
         new_executor = ApprovalRequiredExecutor(id="approval_executor")
-        restored_workflow = WorkflowBuilder().set_start_executor(new_executor).with_checkpointing(storage).build()
+        restored_workflow = WorkflowBuilder(start_executor=new_executor, checkpoint_storage=storage).build()

         # Step 5: Resume from checkpoint and verify the request can be continued
         completed = False
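The request-info tests above also exercise checkpoint resume. A rough sketch of that flow under the new API; the resume signature is an assumption inferred from _execute_with_message_or_checkpoint in _workflow.py (the tests elide the exact call), and ApprovalRequiredExecutor is the test helper used above.

    storage = InMemoryCheckpointStorage()
    executor = ApprovalRequiredExecutor(id="approval_executor")
    workflow = WorkflowBuilder(start_executor=executor, checkpoint_storage=storage).build()
    await workflow.run("please approve")  # runs until a request is pending; checkpoints are saved

    # Later: a fresh workflow instance restores from a saved checkpoint.
    fresh = WorkflowBuilder(
        start_executor=ApprovalRequiredExecutor(id="approval_executor"),
        checkpoint_storage=storage,
    ).build()
    await fresh.run(checkpoint_id=some_checkpoint_id)  # assumed parameter name; placeholder id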
""" # Create innermost workflow inner_executor = SampleExecutor(id="inner-exec") - inner_workflow = WorkflowBuilder().set_start_executor(inner_executor).set_max_iterations(10).build() + inner_workflow = WorkflowBuilder(max_iterations=10, start_executor=inner_executor).build() # Create middle workflow with WorkflowExecutor inner_workflow_executor = WorkflowExecutor(workflow=inner_workflow, id="inner-workflow-exec") middle_executor = SampleExecutor(id="middle-exec") middle_workflow = ( - WorkflowBuilder() - .set_start_executor(middle_executor) + WorkflowBuilder(max_iterations=20, start_executor=middle_executor) .add_edge(middle_executor, inner_workflow_executor) - .set_max_iterations(20) .build() ) @@ -430,10 +428,8 @@ def test_nested_workflow_executor_serialization(self) -> None: middle_workflow_executor = WorkflowExecutor(workflow=middle_workflow, id="middle-workflow-exec") outer_executor = SampleExecutor(id="outer-exec") outer_workflow = ( - WorkflowBuilder() - .set_start_executor(outer_executor) + WorkflowBuilder(max_iterations=30, start_executor=outer_executor) .add_edge(outer_executor, middle_workflow_executor) - .set_max_iterations(30) .build() ) @@ -543,7 +539,7 @@ def test_workflow_serialization(self) -> None: executor1 = SampleExecutor(id="executor1") executor2 = SampleExecutor(id="executor2") - workflow = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build() + workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build() # Test model_dump data = workflow.to_dict() @@ -616,7 +612,7 @@ def test_workflow_serialization_excludes_non_serializable_fields(self) -> None: executor1 = SampleExecutor(id="executor1") executor2 = SampleExecutor(id="executor2") - workflow = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build() + workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build() # Test model_dump - should not include private runtime objects data = workflow.to_dict() @@ -629,11 +625,11 @@ def test_workflow_serialization_excludes_non_serializable_fields(self) -> None: def test_workflow_name_description_serialization(self) -> None: """Test that workflow name and description are serialized correctly.""" # Test 1: With name and description - workflow1 = ( - WorkflowBuilder(name="Test Pipeline", description="Test workflow description") - .set_start_executor(SampleExecutor(id="e1")) - .build() - ) + workflow1 = WorkflowBuilder( + name="Test Pipeline", + description="Test workflow description", + start_executor=SampleExecutor(id="e1"), + ).build() assert workflow1.name == "Test Pipeline" assert workflow1.description == "Test workflow description" @@ -649,7 +645,7 @@ def test_workflow_name_description_serialization(self) -> None: assert parsed1["description"] == "Test workflow description" # Test 2: Without name and description (defaults) - workflow2 = WorkflowBuilder().set_start_executor(SampleExecutor(id="e2")).build() + workflow2 = WorkflowBuilder(start_executor=SampleExecutor(id="e2")).build() assert workflow2.name is None assert workflow2.description is None @@ -659,7 +655,7 @@ def test_workflow_name_description_serialization(self) -> None: assert "description" not in data2 # Test 3: With only name (no description) - workflow3 = WorkflowBuilder(name="Named Only").set_start_executor(SampleExecutor(id="e3")).build() + workflow3 = WorkflowBuilder(name="Named Only", start_executor=SampleExecutor(id="e3")).build() assert workflow3.name == "Named Only" assert 
workflow3.description is None @@ -706,8 +702,7 @@ def test_comprehensive_edge_groups_workflow_serialization() -> None: # Build workflow with all three edge group types workflow = ( - WorkflowBuilder() - .set_start_executor(router) + WorkflowBuilder(start_executor=router) # 1. SwitchCaseEdgeGroup: Conditional routing .add_switch_case_edge_group( router, diff --git a/python/packages/core/tests/workflow/test_sub_workflow.py b/python/packages/core/tests/workflow/test_sub_workflow.py index cb387add5f..55afad880f 100644 --- a/python/packages/core/tests/workflow/test_sub_workflow.py +++ b/python/packages/core/tests/workflow/test_sub_workflow.py @@ -167,8 +167,7 @@ def create_email_validation_workflow() -> Workflow: email_domain_validator = EmailDomainValidator() return ( - WorkflowBuilder() - .set_start_executor(email_format_validator) + WorkflowBuilder(start_executor=email_format_validator) .add_edge(email_format_validator, email_domain_validator) .build() ) @@ -184,8 +183,7 @@ async def test_basic_sub_workflow() -> None: workflow_executor = WorkflowExecutor(validation_workflow, "email_validation_workflow") main_workflow = ( - WorkflowBuilder() - .set_start_executor(parent) + WorkflowBuilder(start_executor=parent) .add_edge(parent, workflow_executor) .add_edge(workflow_executor, parent) .build() @@ -223,8 +221,7 @@ async def test_sub_workflow_with_interception(): workflow_executor = WorkflowExecutor(validation_workflow, "email_workflow") main_workflow = ( - WorkflowBuilder() - .set_start_executor(parent) + WorkflowBuilder(start_executor=parent) .add_edge(parent, workflow_executor) .add_edge(workflow_executor, parent) .build() @@ -340,8 +337,7 @@ async def collect(self, result: ValidationResult, ctx: WorkflowContext) -> None: executor_b = WorkflowExecutor(workflow_b, "workflow_b") main_workflow = ( - WorkflowBuilder() - .set_start_executor(parent) + WorkflowBuilder(start_executor=parent) .add_edge(parent, executor_a) .add_edge(parent, executor_b) .add_edge(executor_a, parent) @@ -422,8 +418,7 @@ async def collect_result(self, result: ValidationResult, ctx: WorkflowContext) - workflow_executor = WorkflowExecutor(validation_workflow, "email_workflow") main_workflow = ( - WorkflowBuilder() - .set_start_executor(processor) + WorkflowBuilder(start_executor=processor) .add_edge(processor, workflow_executor) .add_edge(workflow_executor, processor) .build() @@ -564,16 +559,14 @@ async def on_checkpoint_restore(self, state: dict[str, Any]) -> None: def _build_checkpoint_test_workflow(storage: InMemoryCheckpointStorage) -> Workflow: """Build the main workflow with checkpointing for testing.""" two_step_executor = TwoStepSubWorkflowExecutor() - sub_workflow = WorkflowBuilder().set_start_executor(two_step_executor).build() + sub_workflow = WorkflowBuilder(start_executor=two_step_executor).build() sub_workflow_executor = WorkflowExecutor(sub_workflow, id="sub_workflow_executor") coordinator = CheckpointTestCoordinator() return ( - WorkflowBuilder() - .set_start_executor(coordinator) + WorkflowBuilder(start_executor=coordinator, checkpoint_storage=storage) .add_edge(coordinator, sub_workflow_executor) .add_edge(sub_workflow_executor, coordinator) - .with_checkpointing(storage) .build() ) diff --git a/python/packages/core/tests/workflow/test_validation.py b/python/packages/core/tests/workflow/test_validation.py index 3fbb1d6d59..ae694c8354 100644 --- a/python/packages/core/tests/workflow/test_validation.py +++ b/python/packages/core/tests/workflow/test_validation.py @@ -69,9 +69,8 @@ def 
test_valid_workflow_passes_validation(): # Create a valid workflow workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor=executor1) .add_edge(executor1, executor2) - .set_start_executor(executor1) .build() # This should not raise any exceptions ) @@ -83,7 +82,7 @@ def test_duplicate_executor_ids_fail_validation(): executor2 = IntExecutor(id="dup") with pytest.raises(ValueError) as exc_info: - (WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build()) + (WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build()) assert str(exc_info.value) == "Duplicate executor ID 'dup' detected in workflow." @@ -93,9 +92,7 @@ def test_edge_duplication_validation_fails(): executor2 = StringExecutor(id="executor2") with pytest.raises(EdgeDuplicationError) as exc_info: - WorkflowBuilder().add_edge(executor1, executor2).add_edge(executor1, executor2).set_start_executor( - executor1 - ).build() + WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).add_edge(executor1, executor2).build() assert "executor1->executor2" in str(exc_info.value) assert exc_info.value.validation_type == ValidationTypeEnum.EDGE_DUPLICATION @@ -106,7 +103,7 @@ def test_type_compatibility_validation_fails(): int_executor = IntExecutor(id="int_executor") with pytest.raises(TypeCompatibilityError) as exc_info: - WorkflowBuilder().add_edge(string_executor, int_executor).set_start_executor(string_executor).build() + WorkflowBuilder(start_executor=string_executor).add_edge(string_executor, int_executor).build() error = exc_info.value assert error.source_executor_id == "string_executor" @@ -119,7 +116,7 @@ def test_type_compatibility_with_any_type_passes(): any_executor = AnyExecutor(id="any_executor") # This should not raise an exception - workflow = WorkflowBuilder().add_edge(string_executor, any_executor).set_start_executor(string_executor).build() + workflow = WorkflowBuilder(start_executor=string_executor).add_edge(string_executor, any_executor).build() assert workflow is not None @@ -129,9 +126,7 @@ def test_type_compatibility_with_no_output_types(): string_executor = StringExecutor(id="string_executor") # This should pass validation since no output types are specified - workflow = ( - WorkflowBuilder().add_edge(no_output_executor, string_executor).set_start_executor(no_output_executor).build() - ) + workflow = WorkflowBuilder(start_executor=no_output_executor).add_edge(no_output_executor, string_executor).build() assert workflow is not None @@ -141,9 +136,7 @@ def test_multi_type_executor_compatibility(): multi_type_executor = MultiTypeExecutor(id="multi_type") # String executor outputs strings, multi-type can handle strings - workflow = ( - WorkflowBuilder().add_edge(string_executor, multi_type_executor).set_start_executor(string_executor).build() - ) + workflow = WorkflowBuilder(start_executor=string_executor).add_edge(string_executor, multi_type_executor).build() assert workflow is not None @@ -154,9 +147,7 @@ def test_graph_connectivity_unreachable_executors(): executor3 = StringExecutor(id="executor3") # This will be unreachable with pytest.raises(GraphConnectivityError) as exc_info: - WorkflowBuilder().add_edge(executor1, executor2).add_edge(executor3, executor2).set_start_executor( - executor1 - ).build() + WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).add_edge(executor3, executor2).build() assert "unreachable" in str(exc_info.value).lower() assert "executor3" in str(exc_info.value) @@ -189,19 +180,14 @@ def 
test_disconnected_start_executor_not_in_graph(): executor3 = StringExecutor(id="executor3") # Not in graph with pytest.raises(GraphConnectivityError) as exc_info: - WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor3).build() + WorkflowBuilder(start_executor=executor3).add_edge(executor1, executor2).build() assert "The following executors are unreachable from the start executor 'executor3'" in str(exc_info.value) def test_missing_start_executor(): - executor1 = StringExecutor(id="executor1") - executor2 = StringExecutor(id="executor2") - - with pytest.raises(ValueError) as exc_info: - WorkflowBuilder().add_edge(executor1, executor2).build() - - assert "Starting executor must be set" in str(exc_info.value) + with pytest.raises(TypeError): + WorkflowBuilder() # type: ignore[call-arg] def test_workflow_validation_error_base_class(): @@ -219,12 +205,11 @@ def test_complex_workflow_validation(): executor4 = AnyExecutor(id="executor4") workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor=executor1) .add_edge(executor1, executor2) # str -> MultiType (compatible) .add_edge(executor2, executor3) # MultiType -> str (compatible) .add_edge(executor2, executor4) # MultiType -> Any (compatible) .add_edge(executor3, executor4) # str -> Any (compatible) - .set_start_executor(executor1) .build() ) @@ -246,7 +231,7 @@ async def handle_derived(self, message: str, ctx: WorkflowContext[str]) -> None: derived_executor = DerivedExecutor(id="derived") # This should pass since both handle str - workflow = WorkflowBuilder().add_edge(base_executor, derived_executor).set_start_executor(base_executor).build() + workflow = WorkflowBuilder(start_executor=base_executor).add_edge(base_executor, derived_executor).build() assert workflow is not None @@ -271,7 +256,7 @@ def test_fan_out_validation(): target1 = StringExecutor(id="target1") target2 = AnyExecutor(id="target2") - workflow = WorkflowBuilder().add_fan_out_edges(source, [target1, target2]).set_start_executor(source).build() + workflow = WorkflowBuilder(start_executor=source).add_fan_out_edges(source, [target1, target2]).build() assert workflow is not None @@ -284,11 +269,10 @@ def test_fan_in_validation(): # Create a proper fan-in by having a start executor that connects to both sources workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor=start_executor) .add_edge(start_executor, source1) # Start connects to source1 .add_edge(start_executor, source2) # Start connects to source2 .add_fan_in_edges([source1, source2], target) # Both sources fan-in to target - .set_start_executor(start_executor) .build() ) @@ -300,7 +284,7 @@ def test_chain_validation(): executor2 = StringExecutor(id="executor2") executor3 = AnyExecutor(id="executor3") - workflow = WorkflowBuilder().add_chain([executor1, executor2, executor3]).set_start_executor(executor1).build() + workflow = WorkflowBuilder(start_executor=executor1).add_chain([executor1, executor2, executor3]).build() assert workflow is not None @@ -313,9 +297,7 @@ def test_logging_for_missing_output_types(caplog: Any) -> None: string_executor = StringExecutor(id="string_executor") # This should trigger a warning log - workflow = ( - WorkflowBuilder().add_edge(no_output_executor, string_executor).set_start_executor(no_output_executor).build() - ) + workflow = WorkflowBuilder(start_executor=no_output_executor).add_edge(no_output_executor, string_executor).build() assert workflow is not None assert "has no output type annotations" in caplog.text @@ -338,9 +320,7 @@ def 
_discover_handlers(self) -> None: no_input_executor = NoInputTypesExecutor(id="no_input") # This should pass since NoInputTypesExecutor has no proper input types - workflow = ( - WorkflowBuilder().add_edge(string_executor, no_input_executor).set_start_executor(string_executor).build() - ) + workflow = WorkflowBuilder(start_executor=string_executor).add_edge(string_executor, no_input_executor).build() assert workflow is not None @@ -351,7 +331,7 @@ def test_self_loop_detection_warning(caplog: Any) -> None: executor = StringExecutor(id="self_loop_executor") # Create a self-loop - workflow = WorkflowBuilder().add_edge(executor, executor).set_start_executor(executor).build() + workflow = WorkflowBuilder(start_executor=executor).add_edge(executor, executor).build() assert workflow is not None assert "Self-loop detected" in caplog.text @@ -365,7 +345,7 @@ def test_handler_validation_basic(caplog: Any) -> None: start_executor = StringExecutor(id="start") target_executor = StringExecutor(id="target") - workflow = WorkflowBuilder().add_edge(start_executor, target_executor).set_start_executor(start_executor).build() + workflow = WorkflowBuilder(start_executor=start_executor).add_edge(start_executor, target_executor).build() assert workflow is not None # Just ensure the validation runs without errors @@ -377,7 +357,7 @@ def test_dead_end_detection(caplog: Any) -> None: executor1 = StringExecutor(id="executor1") executor2 = StringExecutor(id="executor2") # This will be a dead end - workflow = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build() + workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build() assert workflow is not None assert "Dead-end executors detected" in caplog.text @@ -391,7 +371,7 @@ def test_successful_type_compatibility_logging(caplog: Any) -> None: executor1 = StringExecutor(id="executor1") executor2 = StringExecutor(id="executor2") - workflow = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build() + workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build() assert workflow is not None assert "Type compatibility validated for edge" in caplog.text @@ -406,11 +386,7 @@ def test_multiple_dead_ends_detection(caplog: Any) -> None: executor3 = StringExecutor(id="executor3") # Dead end workflow = ( - WorkflowBuilder() - .add_edge(executor1, executor2) - .add_edge(executor1, executor3) - .set_start_executor(executor1) - .build() + WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).add_edge(executor1, executor3).build() ) assert workflow is not None @@ -426,7 +402,7 @@ def test_single_executor_workflow(caplog: Any) -> None: executor2 = StringExecutor(id="executor2") # Create a simple two-executor workflow to avoid graph validation issues - workflow = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build() + workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build() assert workflow is not None # Should detect executor2 as dead end @@ -438,7 +414,7 @@ def test_enhanced_type_compatibility_error_details(): int_executor = IntExecutor(id="int_executor") with pytest.raises(TypeCompatibilityError) as exc_info: - WorkflowBuilder().add_edge(string_executor, int_executor).set_start_executor(string_executor).build() + WorkflowBuilder(start_executor=string_executor).add_edge(string_executor, int_executor).build() error = exc_info.value # Verify enhanced error contains detailed type 
information @@ -463,7 +439,7 @@ async def handle_message(self, message: str, ctx: WorkflowContext[str]) -> None: union_input = UnionInputExecutor(id="union_input") # This should pass validation due to type compatibility (str) - workflow = WorkflowBuilder().add_edge(union_output, union_input).set_start_executor(union_output).build() + workflow = WorkflowBuilder(start_executor=union_output).add_edge(union_output, union_input).build() assert workflow is not None @@ -483,7 +459,7 @@ async def handle_message(self, message: list[str], ctx: WorkflowContext[str]) -> list_input = ListInputExecutor(id="list_input") # This should pass validation for generic type compatibility - workflow = WorkflowBuilder().add_edge(list_output, list_input).set_start_executor(list_output).build() + workflow = WorkflowBuilder(start_executor=list_output).add_edge(list_output, list_input).build() assert workflow is not None @@ -539,7 +515,7 @@ async def handle(self, message: str, ctx: WorkflowContext) -> None: none_exec = NoneExecutor(id="n") # Should build successfully - wf = WorkflowBuilder().add_edge(start, none_exec).set_start_executor(start).build() + wf = WorkflowBuilder(start_executor=start).add_edge(start, none_exec).build() assert wf is not None @@ -555,7 +531,7 @@ async def handle(self, message: str, ctx: WorkflowContext[Any]) -> None: any_out = AnyOutExecutor(id="a") # Builds; later edges from this executor will skip type compatibility when outputs are unspecified - wf = WorkflowBuilder().add_edge(start, any_out).set_start_executor(start).build() + wf = WorkflowBuilder(start_executor=start).add_edge(start, any_out).build() assert wf is not None @@ -575,11 +551,7 @@ def test_output_validation_with_valid_output_executors(): # Build workflow with valid output executors workflow = ( - WorkflowBuilder() - .add_edge(executor1, executor2) - .set_start_executor(executor1) - .with_output_from([executor2]) - .build() + WorkflowBuilder(start_executor=executor1, output_executors=[executor2]).add_edge(executor1, executor2).build() ) assert workflow is not None @@ -593,11 +565,9 @@ def test_output_validation_with_multiple_valid_output_executors(): executor3 = OutputExecutor(id="executor3") workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor=executor1, output_executors=[executor1, executor3]) .add_edge(executor1, executor2) .add_edge(executor2, executor3) - .set_start_executor(executor1) - .with_output_from([executor1, executor3]) .build() ) @@ -628,10 +598,8 @@ def test_output_validation_fails_for_executor_without_output_types(): with pytest.raises(WorkflowValidationError) as exc_info: ( - WorkflowBuilder() + WorkflowBuilder(start_executor=executor1, output_executors=[no_output_executor]) .add_edge(executor1, no_output_executor) - .set_start_executor(executor1) - .with_output_from([no_output_executor]) .build() ) @@ -645,9 +613,7 @@ def test_output_validation_empty_list_passes(): executor1 = OutputExecutor(id="executor1") executor2 = OutputExecutor(id="executor2") - workflow = ( - WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).with_output_from([]).build() - ) + workflow = WorkflowBuilder(start_executor=executor1, output_executors=[]).add_edge(executor1, executor2).build() assert workflow is not None # All executors are outputs diff --git a/python/packages/core/tests/workflow/test_viz.py b/python/packages/core/tests/workflow/test_viz.py index 3856a3c5de..bf7bbffee1 100644 --- a/python/packages/core/tests/workflow/test_viz.py +++ b/python/packages/core/tests/workflow/test_viz.py 
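The output-filtering tests above pin down the semantics of the new output_executors parameter: naming executors restricts workflow outputs to their yields, an empty list (or omitting the parameter) collects outputs from every executor, and naming an executor with no declared output types raises WorkflowValidationError at build() time. A condensed sketch, using placeholder executors like the tests do:

    workflow = (
        WorkflowBuilder(start_executor=executor1, output_executors=[executor2])
        .add_edge(executor1, executor2)
        .build()
    )  # only executor2's outputs are yielded as workflow outputs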
diff --git a/python/packages/core/tests/workflow/test_viz.py b/python/packages/core/tests/workflow/test_viz.py
index 3856a3c5de..bf7bbffee1 100644
--- a/python/packages/core/tests/workflow/test_viz.py
+++ b/python/packages/core/tests/workflow/test_viz.py
@@ -31,7 +31,7 @@ def basic_sub_workflow():
     sub_exec1 = MockExecutor(id="sub_exec1")
     sub_exec2 = MockExecutor(id="sub_exec2")

-    sub_workflow = WorkflowBuilder().add_edge(sub_exec1, sub_exec2).set_start_executor(sub_exec1).build()
+    sub_workflow = WorkflowBuilder(start_executor=sub_exec1).add_edge(sub_exec1, sub_exec2).build()

     # Create a workflow executor that wraps the sub-workflow
     workflow_executor = WorkflowExecutor(sub_workflow, id="workflow_executor_1")
@@ -41,10 +41,9 @@ def basic_sub_workflow():
     final_exec = MockExecutor(id="final_executor")

     main_workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor=main_exec)
         .add_edge(main_exec, workflow_executor)
         .add_edge(workflow_executor, final_exec)
-        .set_start_executor(main_exec)
         .build()
     )
@@ -65,7 +64,7 @@ def test_workflow_viz_to_digraph():
     executor1 = MockExecutor(id="executor1")
     executor2 = MockExecutor(id="executor2")

-    workflow = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build()
+    workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build()

     viz = WorkflowViz(workflow)
     dot_content = viz.to_digraph()
@@ -84,7 +83,7 @@ def test_workflow_viz_export_dot():
     executor1 = MockExecutor(id="executor1")
     executor2 = MockExecutor(id="executor2")

-    workflow = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build()
+    workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build()

     viz = WorkflowViz(workflow)
@@ -104,7 +103,7 @@ def test_workflow_viz_export_dot_with_filename(tmp_path):
     executor1 = MockExecutor(id="executor1")
     executor2 = MockExecutor(id="executor2")

-    workflow = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build()
+    workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build()

     viz = WorkflowViz(workflow)
@@ -128,12 +127,11 @@ def test_workflow_viz_complex_workflow():
     executor4 = MockExecutor(id="end")

     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor=executor1)
         .add_edge(executor1, executor2)
         .add_edge(executor1, executor3)
         .add_edge(executor2, executor4)
         .add_edge(executor3, executor4)
-        .set_start_executor(executor1)
         .build()
     )
@@ -162,7 +160,7 @@ def test_workflow_viz_export_svg():
     executor1 = MockExecutor(id="executor1")
     executor2 = MockExecutor(id="executor2")

-    workflow = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build()
+    workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build()

     viz = WorkflowViz(workflow)
@@ -178,7 +176,7 @@ def test_workflow_viz_unsupported_format():
     executor1 = MockExecutor(id="executor1")
     executor2 = MockExecutor(id="executor2")

-    workflow = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build()
+    workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build()

     viz = WorkflowViz(workflow)
@@ -196,7 +194,7 @@ def test_workflow_viz_graphviz_binary_not_found():
     executor1 = MockExecutor(id="executor1")
     executor2 = MockExecutor(id="executor2")

-    workflow = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build()
+    workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build()

     viz = WorkflowViz(workflow)
     # Mock graphviz.Source.render to raise ExecutableNotFound
@@ -224,13 +222,7 @@ def test_workflow_viz_conditional_edge():
     def only_if_foo(msg: str) -> bool:  # pragma: no cover - simple predicate
         return msg == "foo"

-    wf = (
-        WorkflowBuilder()
-        .add_edge(start, mid, condition=only_if_foo)
-        .add_edge(mid, end)
-        .set_start_executor(start)
-        .build()
-    )
+    wf = WorkflowBuilder(start_executor=start).add_edge(start, mid, condition=only_if_foo).add_edge(mid, end).build()

     dot = WorkflowViz(wf).to_digraph()
@@ -249,13 +241,7 @@ def test_workflow_viz_fan_in_edge_group():
     t = ListStrTargetExecutor(id="t")

     # Build a connected workflow: start fans out to s1 and s2, which then fan-in to t
-    wf = (
-        WorkflowBuilder()
-        .add_fan_out_edges(start, [s1, s2])
-        .add_fan_in_edges([s1, s2], t)
-        .set_start_executor(start)
-        .build()
-    )
+    wf = WorkflowBuilder(start_executor=start).add_fan_out_edges(start, [s1, s2]).add_fan_in_edges([s1, s2], t).build()

     dot = WorkflowViz(wf).to_digraph()
@@ -287,7 +273,7 @@ def test_workflow_viz_to_mermaid_basic():
     executor1 = MockExecutor(id="executor1")
     executor2 = MockExecutor(id="executor2")

-    workflow = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build()
+    workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build()

     mermaid = WorkflowViz(workflow).to_mermaid()
     # Start node and normal node
@@ -305,7 +291,7 @@ def test_workflow_viz_mermaid_conditional_edge():
     def only_if_foo(msg: str) -> bool:  # pragma: no cover - simple predicate
         return msg == "foo"

-    wf = WorkflowBuilder().add_edge(start, mid, condition=only_if_foo).set_start_executor(start).build()
+    wf = WorkflowBuilder(start_executor=start).add_edge(start, mid, condition=only_if_foo).build()

     mermaid = WorkflowViz(wf).to_mermaid()
     assert "start -. conditional .-> mid" in mermaid
@@ -318,13 +304,7 @@ def test_workflow_viz_mermaid_fan_in_edge_group():
     s2 = MockExecutor(id="s2")
     t = ListStrTargetExecutor(id="t")

-    wf = (
-        WorkflowBuilder()
-        .add_fan_out_edges(start, [s1, s2])
-        .add_fan_in_edges([s1, s2], t)
-        .set_start_executor(start)
-        .build()
-    )
+    wf = WorkflowBuilder(start_executor=start).add_fan_out_edges(start, [s1, s2]).add_fan_in_edges([s1, s2], t).build()

     mermaid = WorkflowViz(wf).to_mermaid()
     lines = [line.strip() for line in mermaid.splitlines()]
@@ -398,23 +378,19 @@ def test_workflow_viz_nested_sub_workflows():
     """Test visualization of deeply nested sub-workflows."""
     # Create innermost sub-workflow
     inner_exec = MockExecutor(id="inner_exec")
-    inner_workflow = WorkflowBuilder().set_start_executor(inner_exec).build()
+    inner_workflow = WorkflowBuilder(start_executor=inner_exec).build()

     # Create middle sub-workflow that contains the inner one
     inner_workflow_executor = WorkflowExecutor(inner_workflow, id="inner_wf_exec")
     middle_exec = MockExecutor(id="middle_exec")
-    middle_workflow = (
-        WorkflowBuilder().add_edge(middle_exec, inner_workflow_executor).set_start_executor(middle_exec).build()
-    )
+    middle_workflow = WorkflowBuilder(start_executor=middle_exec).add_edge(middle_exec, inner_workflow_executor).build()

     # Create outer workflow
     middle_workflow_executor = WorkflowExecutor(middle_workflow, id="middle_wf_exec")
     outer_exec = MockExecutor(id="outer_exec")
-    outer_workflow = (
-        WorkflowBuilder().add_edge(outer_exec, middle_workflow_executor).set_start_executor(outer_exec).build()
-    )
+    outer_workflow = WorkflowBuilder(start_executor=outer_exec).add_edge(outer_exec, middle_workflow_executor).build()

     viz = WorkflowViz(outer_workflow)
     dot_content = viz.to_digraph()
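For completeness, a small sketch of the WorkflowViz calls these tests rely on, against a workflow built with the new constructor (executor variables are placeholders):

    workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build()

    viz = WorkflowViz(workflow)
    print(viz.to_digraph())  # Graphviz DOT source
    print(viz.to_mermaid())  # Mermaid flowchart source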
diff --git a/python/packages/core/tests/workflow/test_workflow.py b/python/packages/core/tests/workflow/test_workflow.py
index 271099e07a..1e98ff08c5
--- a/python/packages/core/tests/workflow/test_workflow.py
+++ b/python/packages/core/tests/workflow/test_workflow.py
@@ -110,8 +110,7 @@ async def test_workflow_run_streaming() -> None:
     executor_b = IncrementExecutor(id="executor_b")

     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(executor_a)
+        WorkflowBuilder(start_executor=executor_a)
         .add_edge(executor_a, executor_b)
         .add_edge(executor_b, executor_a)
         .build()
@@ -132,11 +131,9 @@ async def test_workflow_run_stream_not_completed():
     executor_b = IncrementExecutor(id="executor_b")

     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(executor_a)
+        WorkflowBuilder(max_iterations=5, start_executor=executor_a)
         .add_edge(executor_a, executor_b)
         .add_edge(executor_b, executor_a)
-        .set_max_iterations(5)
         .build()
     )
@@ -151,8 +148,7 @@ async def test_workflow_run():
     executor_b = IncrementExecutor(id="executor_b")

     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(executor_a)
+        WorkflowBuilder(start_executor=executor_a)
         .add_edge(executor_a, executor_b)
         .add_edge(executor_b, executor_a)
         .build()
@@ -170,11 +166,9 @@ async def test_workflow_run_not_completed():
     executor_b = IncrementExecutor(id="executor_b")

     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(executor_a)
+        WorkflowBuilder(max_iterations=5, start_executor=executor_a)
         .add_edge(executor_a, executor_b)
         .add_edge(executor_b, executor_a)
-        .set_max_iterations(5)
         .build()
     )
@@ -189,7 +183,7 @@ async def test_fan_out():
     executor_c = IncrementExecutor(id="executor_c", limit=2)  # This executor will not complete the workflow

     workflow = (
-        WorkflowBuilder().set_start_executor(executor_a).add_fan_out_edges(executor_a, [executor_b, executor_c]).build()
+        WorkflowBuilder(start_executor=executor_a).add_fan_out_edges(executor_a, [executor_b, executor_c]).build()
     )

     events = await workflow.run(NumberMessage(data=0))
@@ -214,7 +208,7 @@ async def test_fan_out_multiple_completed_events():
     executor_c = IncrementExecutor(id="executor_c", limit=1)

     workflow = (
-        WorkflowBuilder().set_start_executor(executor_a).add_fan_out_edges(executor_a, [executor_b, executor_c]).build()
+        WorkflowBuilder(start_executor=executor_a).add_fan_out_edges(executor_a, [executor_b, executor_c]).build()
     )

     events = await workflow.run(NumberMessage(data=0))
@@ -239,8 +233,7 @@ async def test_fan_in():
     aggregator = AggregatorExecutor(id="aggregator")

     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(executor_a)
+        WorkflowBuilder(start_executor=executor_a)
         .add_fan_out_edges(executor_a, [executor_b, executor_c])
         .add_fan_in_edges([executor_b, executor_c], aggregator)
         .build()
@@ -276,10 +269,8 @@ async def test_workflow_with_checkpointing_enabled(simple_executor: Executor):
     # Build workflow with checkpointing - should not raise any errors
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor=simple_executor, checkpoint_storage=storage)
         .add_edge(simple_executor, simple_executor)  # Self-loop to satisfy graph requirements
-        .set_start_executor(simple_executor)
-        .with_checkpointing(storage)
         .build()
     )
@@ -295,9 +286,8 @@ async def test_workflow_checkpointing_not_enabled_for_external_restore(
     """Test that external checkpoint restoration fails when workflow doesn't support checkpointing."""
     # Build workflow WITHOUT checkpointing
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor=simple_executor)
         .add_edge(simple_executor, simple_executor)  # Self-loop to satisfy graph requirements
-        .set_start_executor(simple_executor)
         .build()
     )
@@ -315,9 +305,8 @@ async def test_workflow_run_stream_from_checkpoint_no_checkpointing_enabled(
 ):
     # Build workflow WITHOUT checkpointing
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor=simple_executor)
         .add_edge(simple_executor, simple_executor)  # Self-loop to satisfy graph requirements
-        .set_start_executor(simple_executor)
         .build()
     )
@@ -340,10 +329,8 @@ async def test_workflow_run_stream_from_checkpoint_invalid_checkpoint(
     # Build workflow with checkpointing
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor=simple_executor, checkpoint_storage=storage)
         .add_edge(simple_executor, simple_executor)  # Self-loop to satisfy graph requirements
-        .set_start_executor(simple_executor)
-        .with_checkpointing(storage)
         .build()
     )
@@ -376,7 +363,7 @@ async def test_workflow_run_stream_from_checkpoint_with_external_storage(
     # Create a workflow WITHOUT checkpointing
     workflow_without_checkpointing = (
-        WorkflowBuilder().add_edge(simple_executor, simple_executor).set_start_executor(simple_executor).build()
+        WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build()
     )

     # Resume from checkpoint using external storage parameter
@@ -411,10 +398,8 @@ async def test_workflow_run_from_checkpoint_non_streaming(simple_executor: Execu
     # Build workflow with checkpointing
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor=simple_executor, checkpoint_storage=storage)
         .add_edge(simple_executor, simple_executor)
-        .set_start_executor(simple_executor)
-        .with_checkpointing(storage)
         .build()
     )
@@ -452,10 +437,8 @@ async def test_workflow_run_stream_from_checkpoint_with_responses(
     # Build workflow with checkpointing
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor=simple_executor, checkpoint_storage=storage)
         .add_edge(simple_executor, simple_executor)
-        .set_start_executor(simple_executor)
-        .with_checkpointing(storage)
         .build()
     )
@@ -512,10 +495,8 @@ async def test_workflow_multiple_runs_no_state_collision():
     # Build workflow with checkpointing
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor=state_executor, checkpoint_storage=storage)
         .add_edge(state_executor, state_executor)  # Self-loop to satisfy graph requirements
-        .set_start_executor(state_executor)
-        .with_checkpointing(storage)
         .build()
     )
@@ -552,9 +533,7 @@ async def test_workflow_checkpoint_runtime_only_configuration(
     storage = FileCheckpointStorage(temp_dir)

     # Build workflow WITHOUT checkpointing at build time
-    workflow = (
-        WorkflowBuilder().add_edge(simple_executor, simple_executor).set_start_executor(simple_executor).build()
-    )
+    workflow = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build()

     # Run with runtime checkpoint storage - should create checkpoints
     test_message = Message(data="runtime checkpoint test", source_id="test", target_id=None)
@@ -575,7 +554,7 @@ async def test_workflow_checkpoint_runtime_only_configuration(
     # Create new workflow instance (still without build-time checkpointing)
     workflow_resume = (
-        WorkflowBuilder().add_edge(simple_executor, simple_executor).set_start_executor(simple_executor).build()
+        WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build()
     )

     # Resume from checkpoint using runtime checkpoint storage
@@ -602,10 +581,8 @@ async def test_workflow_checkpoint_runtime_overrides_buildtime(
     # Build workflow with build-time checkpointing
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor=simple_executor, checkpoint_storage=buildtime_storage)
         .add_edge(simple_executor, simple_executor)
-        .set_start_executor(simple_executor)
-        .with_checkpointing(buildtime_storage)
         .build()
     )
@@ -643,8 +620,7 @@ async def test_comprehensive_edge_groups_workflow():
     # 3. FanOut: fanout_hub -> [parallel_1, parallel_2]
     # 4. FanIn: [parallel_1, parallel_2] -> aggregator
     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(router)
+        WorkflowBuilder(start_executor=router)
         # Switch-case routing based on message data
         .add_switch_case_edge_group(
             router,
@@ -713,8 +689,7 @@ async def test_workflow_with_simple_cycle_and_exit_condition():
     # Simple cycle: A -> B -> A, A exits when limit reached
     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(executor_a)
+        WorkflowBuilder(start_executor=executor_a)
         .add_edge(executor_a, executor_b)  # A -> B
         .add_edge(executor_b, executor_a)  # B -> A (creates cycle)
         .build()
     )
@@ -746,7 +721,7 @@ async def test_workflow_concurrent_execution_prevention():
     """Test that concurrent workflow executions are prevented."""
     # Create a simple workflow that takes some time to execute
     executor = IncrementExecutor(id="slow_executor", limit=3, increment=1)
-    workflow = WorkflowBuilder().set_start_executor(executor).build()
+    workflow = WorkflowBuilder(start_executor=executor).build()

     # Create a task that will run the workflow
     async def run_workflow():
@@ -778,7 +753,7 @@ async def test_workflow_concurrent_execution_prevention_streaming():
     """Test that concurrent workflow streaming executions are prevented."""
     # Create a simple workflow
     executor = IncrementExecutor(id="slow_executor", limit=3, increment=1)
-    workflow = WorkflowBuilder().set_start_executor(executor).build()
+    workflow = WorkflowBuilder(start_executor=executor).build()

     # Create an async generator that will consume the stream slowly
     async def consume_stream_slowly():
@@ -814,7 +789,7 @@ async def test_workflow_concurrent_execution_prevention_mixed_methods():
     """Test that concurrent executions are prevented across different execution methods."""
     # Create a simple workflow
     executor = IncrementExecutor(id="slow_executor", limit=3, increment=1)
-    workflow = WorkflowBuilder().set_start_executor(executor).build()
+    workflow = WorkflowBuilder(start_executor=executor).build()

     # Start a streaming execution
     async def consume_stream():
@@ -884,7 +859,7 @@ async def test_agent_streaming_vs_non_streaming() -> None:
     agent = _StreamingTestAgent(id="test_agent", name="TestAgent", reply_text="Hello World")
     agent_exec = AgentExecutor(agent, id="agent_exec")

-    workflow = WorkflowBuilder().set_start_executor(agent_exec).build()
+    workflow = WorkflowBuilder(start_executor=agent_exec).build()

     # Test non-streaming mode with run()
     result = await workflow.run("test message")
@@ -934,7 +909,7 @@ async def test_agent_streaming_vs_non_streaming() -> None:
 async def test_workflow_run_parameter_validation(simple_executor: Executor) -> None:
-    """Test that stream properly validate parameter combinations."""
-    workflow = WorkflowBuilder().add_edge(simple_executor, simple_executor).set_start_executor(simple_executor).build()
+    """Test that run() properly validates parameter combinations."""
+    workflow = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build()

     test_message = Message(data="test", source_id="test", target_id=None)
@@ -965,7 +940,7 @@ async def test_workflow_run_stream_parameter_validation(
     simple_executor: Executor,
 ) -> None:
     """Test stream=True specific parameter validation scenarios."""
-    workflow = WorkflowBuilder().add_edge(simple_executor, simple_executor).set_start_executor(simple_executor).build()
+    workflow = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build()

     test_message = Message(data="test", source_id="test", target_id=None)
@@ -1014,7 +989,7 @@ async def test_output_executors_empty_yields_all_outputs() -> None:
     executor_b = OutputProducerExecutor(id="executor_b", output_value=20)

     # Build workflow with a -> b
-    workflow = WorkflowBuilder().set_start_executor(executor_a).add_edge(executor_a, executor_b).build()
+    workflow = WorkflowBuilder(start_executor=executor_a).add_edge(executor_a, executor_b).build()

     result = await workflow.run(NumberMessage(data=0))
     outputs = result.get_outputs()
@@ -1037,10 +1012,8 @@ async def test_output_executors_filters_outputs_non_streaming() -> None:
     # Build workflow with a -> b
     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(executor_a)
+        WorkflowBuilder(start_executor=executor_a, output_executors=[executor_b])
         .add_edge(executor_a, executor_b)
-        .with_output_from([executor_b])
         .build()
     )
@@ -1064,10 +1037,8 @@ async def test_output_executors_filters_outputs_streaming() -> None:
     # Build workflow with a -> b
     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(executor_a)
+        WorkflowBuilder(start_executor=executor_a, output_executors=[executor_a])
         .add_edge(executor_a, executor_b)
-        .with_output_from([executor_a])
         .build()
     )
@@ -1092,11 +1063,9 @@ async def test_output_executors_with_multiple_specified_executors() -> None:
     # Build workflow with a -> b -> c
     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(executor_a)
+        WorkflowBuilder(start_executor=executor_a, output_executors=[executor_a, executor_c])
         .add_edge(executor_a, executor_b)
         .add_edge(executor_b, executor_c)
-        .with_output_from([executor_a, executor_c])
         .build()
     )
@@ -1114,7 +1083,7 @@ async def test_output_executors_with_nonexistent_executor_id() -> None:
     """Test that specifying a non-existent executor ID doesn't break the workflow."""
     executor_a = OutputProducerExecutor(id="executor_a", output_value=42)

-    workflow = WorkflowBuilder().set_start_executor(executor_a).build()
+    workflow = WorkflowBuilder(start_executor=executor_a).build()

     # Set output_executors to an ID that doesn't exist
     workflow._output_executors = ["nonexistent_executor"]  # type: ignore
@@ -1157,11 +1126,9 @@ async def handle(self, message: NumberMessage, ctx: WorkflowContext[NumberMessag
     # Build fan-in workflow: start -> [a, b] -> aggregator
     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(executor_start)
+        WorkflowBuilder(start_executor=executor_start, output_executors=[aggregator])
         .add_fan_out_edges(executor_start, [executor_a, executor_b])
         .add_fan_in_edges([executor_a, executor_b], aggregator)
-        .with_output_from([aggregator])
         .build()
     )
@@ -1178,7 +1145,7 @@ async def test_output_executors_filtering_with_run_responses() -> None:
     """Test output filtering works correctly with run(responses=...) method."""
     executor = MockExecutorRequestApproval(id="approval_executor")

-    workflow = WorkflowBuilder().set_start_executor(executor).with_output_from([executor]).build()
+    workflow = WorkflowBuilder(start_executor=executor, output_executors=[executor]).build()

     # Run workflow which will request approval
     result = await workflow.run(NumberMessage(data=42))
@@ -1201,7 +1168,7 @@ async def test_output_executors_filtering_with_run_responses_streaming() -> None
     """Test output filtering works correctly with run(responses=..., stream=True) method."""
     executor = MockExecutorRequestApproval(id="approval_executor")

-    workflow = WorkflowBuilder().set_start_executor(executor).build()
+    workflow = WorkflowBuilder(start_executor=executor).build()

     # Run workflow which will request approval
     events_list: list[WorkflowEvent] = []
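One behavior these checkpoint tests pin down is worth restating outside test code: storage can be bound at build time through the constructor, or supplied per run, and the runtime value wins when both are present. A minimal sketch reusing names from the tests above (simple_executor, FileCheckpointStorage, temp_dir, and test_message are stand-ins):

    storage = FileCheckpointStorage(temp_dir)

    # Build-time binding: every run of this workflow persists checkpoints to `storage`.
    checkpointed = (
        WorkflowBuilder(start_executor=simple_executor, checkpoint_storage=storage)
        .add_edge(simple_executor, simple_executor)
        .build()
    )

    # Runtime-only binding: a workflow built without storage can still checkpoint for
    # a single run, and a runtime argument overrides any build-time storage.
    plain = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, simple_executor).build()
    result = await plain.run(test_message, checkpoint_storage=storage)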
method.""" executor = MockExecutorRequestApproval(id="approval_executor") - workflow = WorkflowBuilder().set_start_executor(executor).with_output_from([executor]).build() + workflow = WorkflowBuilder(start_executor=executor, output_executors=[executor]).build() # Run workflow which will request approval result = await workflow.run(NumberMessage(data=42)) @@ -1201,7 +1168,7 @@ async def test_output_executors_filtering_with_run_responses_streaming() -> None """Test output filtering works correctly with run(responses=..., stream=True) method.""" executor = MockExecutorRequestApproval(id="approval_executor") - workflow = WorkflowBuilder().set_start_executor(executor).build() + workflow = WorkflowBuilder(start_executor=executor).build() # Run workflow which will request approval events_list: list[WorkflowEvent] = [] diff --git a/python/packages/core/tests/workflow/test_workflow_agent.py b/python/packages/core/tests/workflow/test_workflow_agent.py index f0f0ff7660..c121f369fa 100644 --- a/python/packages/core/tests/workflow/test_workflow_agent.py +++ b/python/packages/core/tests/workflow/test_workflow_agent.py @@ -150,7 +150,7 @@ async def test_end_to_end_basic_workflow(self): executor1 = SimpleExecutor(id="executor1", response_text="Step1", streaming=False) executor2 = SimpleExecutor(id="executor2", response_text="Step2", streaming=False) - workflow = WorkflowBuilder().set_start_executor(executor1).add_edge(executor1, executor2).build() + workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build() agent = WorkflowAgent(workflow=workflow, name="Test Agent") @@ -194,7 +194,7 @@ async def test_end_to_end_basic_workflow_streaming(self): executor2 = SimpleExecutor(id="stream2", response_text="Streaming2") # Create workflow with just one executor - workflow = WorkflowBuilder().set_start_executor(executor1).add_edge(executor1, executor2).build() + workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build() agent = WorkflowAgent(workflow=workflow, name="Streaming Test Agent") @@ -224,7 +224,7 @@ async def test_end_to_end_request_info_handling(self): requesting_executor = RequestingExecutor(id="requester", streaming=False) workflow = ( - WorkflowBuilder().set_start_executor(simple_executor).add_edge(simple_executor, requesting_executor).build() + WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, requesting_executor).build() ) agent = WorkflowAgent(workflow=workflow, name="Request Test Agent") @@ -296,7 +296,7 @@ def test_workflow_as_agent_method(self) -> None: """Test that Workflow.as_agent() creates a properly configured WorkflowAgent.""" # Create a simple workflow executor = SimpleExecutor(id="executor1", response_text="Response") - workflow = WorkflowBuilder().set_start_executor(executor).build() + workflow = WorkflowBuilder(start_executor=executor).build() # Test as_agent with a name agent = workflow.as_agent(name="TestAgent") @@ -322,7 +322,7 @@ async def handle_bool(self, message: bool, context: WorkflowContext[Any]) -> Non # Create a simple workflow executor = _Executor(id="test") - workflow = WorkflowBuilder().set_start_executor(executor).build() + workflow = WorkflowBuilder(start_executor=executor).build() # Try to create an agent with unsupported input types with pytest.raises(ValueError, match="Workflow's start executor cannot handle list\\[ChatMessage\\]"): @@ -341,7 +341,7 @@ async def yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext[Ne input_text = messages[0].text if messages else 
"no input" await ctx.yield_output(f"processed: {input_text}") - workflow = WorkflowBuilder().set_start_executor(yielding_executor).build() + workflow = WorkflowBuilder(start_executor=yielding_executor).build() # Run directly - should return output event (type='output') in result direct_result = await workflow.run([ChatMessage(role="user", text="hello")]) @@ -365,7 +365,7 @@ async def yielding_executor(messages: list[ChatMessage], ctx: WorkflowContext[Ne await ctx.yield_output("first output") await ctx.yield_output("second output") - workflow = WorkflowBuilder().set_start_executor(yielding_executor).build() + workflow = WorkflowBuilder(start_executor=yielding_executor).build() agent = workflow.as_agent("test-agent") updates: list[AgentResponseUpdate] = [] @@ -387,7 +387,7 @@ async def content_yielding_executor(messages: list[ChatMessage], ctx: WorkflowCo await ctx.yield_output(Content.from_data(data=b"binary data", media_type="application/octet-stream")) await ctx.yield_output(Content.from_uri(uri="https://example.com/image.png", media_type="image/png")) - workflow = WorkflowBuilder().set_start_executor(content_yielding_executor).build() + workflow = WorkflowBuilder(start_executor=content_yielding_executor).build() agent = workflow.as_agent("content-test-agent") result = await agent.run("test") @@ -417,7 +417,7 @@ async def chat_message_executor(messages: list[ChatMessage], ctx: WorkflowContex ) await ctx.yield_output(msg) - workflow = WorkflowBuilder().set_start_executor(chat_message_executor).build() + workflow = WorkflowBuilder(start_executor=chat_message_executor).build() agent = workflow.as_agent("chat-msg-agent") result = await agent.run("test") @@ -448,7 +448,7 @@ async def raw_yielding_executor( custom = CustomData(42) await ctx.yield_output(custom) - workflow = WorkflowBuilder().set_start_executor(raw_yielding_executor).build() + workflow = WorkflowBuilder(start_executor=raw_yielding_executor).build() agent = workflow.as_agent("raw-test-agent") updates: list[AgentResponseUpdate] = [] @@ -490,7 +490,7 @@ async def list_yielding_executor( ] await ctx.yield_output(msg_list) - workflow = WorkflowBuilder().set_start_executor(list_yielding_executor).build() + workflow = WorkflowBuilder(start_executor=list_yielding_executor).build() agent = workflow.as_agent("list-msg-agent") # Verify streaming returns the update with all 4 contents before coalescing @@ -521,7 +521,7 @@ async def test_thread_conversation_history_included_in_workflow_run(self) -> Non """ # Create an executor that captures all received messages capturing_executor = ConversationHistoryCapturingExecutor(id="capturing", streaming=False) - workflow = WorkflowBuilder().set_start_executor(capturing_executor).build() + workflow = WorkflowBuilder(start_executor=capturing_executor).build() agent = WorkflowAgent(workflow=workflow, name="Thread History Test Agent") # Create a thread with existing conversation history @@ -551,7 +551,7 @@ async def test_thread_conversation_history_included_in_workflow_stream(self) -> """ # Create an executor that captures all received messages capturing_executor = ConversationHistoryCapturingExecutor(id="capturing_stream") - workflow = WorkflowBuilder().set_start_executor(capturing_executor).build() + workflow = WorkflowBuilder(start_executor=capturing_executor).build() agent = WorkflowAgent(workflow=workflow, name="Thread Stream Test Agent") # Create a thread with existing conversation history @@ -579,7 +579,7 @@ async def test_thread_conversation_history_included_in_workflow_stream(self) -> async def 
test_empty_thread_works_correctly(self) -> None: """Test that an empty thread (no message store) works correctly.""" capturing_executor = ConversationHistoryCapturingExecutor(id="empty_thread_test") - workflow = WorkflowBuilder().set_start_executor(capturing_executor).build() + workflow = WorkflowBuilder(start_executor=capturing_executor).build() agent = WorkflowAgent(workflow=workflow, name="Empty Thread Test Agent") # Create an empty thread @@ -597,7 +597,7 @@ async def test_checkpoint_storage_passed_to_workflow(self) -> None: from agent_framework import InMemoryCheckpointStorage capturing_executor = ConversationHistoryCapturingExecutor(id="checkpoint_test") - workflow = WorkflowBuilder().set_start_executor(capturing_executor).build() + workflow = WorkflowBuilder(start_executor=capturing_executor).build() agent = WorkflowAgent(workflow=workflow, name="Checkpoint Test Agent") # Create checkpoint storage @@ -675,17 +675,11 @@ async def start_executor(messages: list[ChatMessage], ctx: WorkflowContext[Agent await ctx.send_message(AgentExecutorRequest(messages=messages, should_respond=True)) # Build workflow: start -> agent1 (no output) -> agent2 (output_response=True) - workflow = ( - WorkflowBuilder() - .register_executor(lambda: start_executor, "start") - .register_agent(lambda: MockAgent("agent1", "Agent1 output - should NOT appear"), "agent1") - .register_agent(lambda: MockAgent("agent2", "Agent2 output - SHOULD appear"), "agent2") - .set_start_executor("start") - .add_edge("start", "agent1") - .add_edge("agent1", "agent2") - .with_output_from(["start", "agent2"]) - .build() - ) + builder = WorkflowBuilder(start_executor="start", output_executors=["start", "agent2"]) + builder.register_executor(lambda: start_executor, "start") + builder.register_agent(lambda: MockAgent("agent1", "Agent1 output - should NOT appear"), "agent1") + builder.register_agent(lambda: MockAgent("agent2", "Agent2 output - SHOULD appear"), "agent2") + workflow = builder.add_edge("start", "agent1").add_edge("agent1", "agent2").build() agent = WorkflowAgent(workflow=workflow, name="Test Agent") result = await agent.run("Test input") @@ -765,10 +759,9 @@ async def start_executor(messages: list[ChatMessage], ctx: WorkflowContext[Agent # Build workflow with single agent workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="start") .register_executor(lambda: start_executor, "start") .register_agent(lambda: MockAgent("agent", "Unique response text"), "agent") - .set_start_executor("start") .add_edge("start", "agent") .build() ) @@ -794,7 +787,7 @@ async def test_agent_response_update_gets_executor_id_as_author_name(self): """ # Create workflow with executor that emits AgentResponseUpdate without author_name executor1 = SimpleExecutor(id="my_executor_id", response_text="Response", streaming=True) - workflow = WorkflowBuilder().set_start_executor(executor1).build() + workflow = WorkflowBuilder(start_executor=executor1).build() agent = WorkflowAgent(workflow=workflow, name="Test Agent") # Collect streaming updates @@ -830,7 +823,7 @@ async def handle_message( await ctx.yield_output(update) executor = AuthorNameExecutor(id="executor_id") - workflow = WorkflowBuilder().set_start_executor(executor).build() + workflow = WorkflowBuilder(start_executor=executor).build() agent = WorkflowAgent(workflow=workflow, name="Test Agent") # Collect streaming updates @@ -848,7 +841,7 @@ async def test_multiple_executors_have_distinct_author_names(self): executor1 = SimpleExecutor(id="first_executor", response_text="First") 
executor2 = SimpleExecutor(id="second_executor", response_text="Second") - workflow = WorkflowBuilder().set_start_executor(executor1).add_edge(executor1, executor2).build() + workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build() agent = WorkflowAgent(workflow=workflow, name="Multi-Executor Agent") # Collect streaming updates diff --git a/python/packages/core/tests/workflow/test_workflow_builder.py b/python/packages/core/tests/workflow/test_workflow_builder.py index 9b504fbaa5..39c60717c2 100644 --- a/python/packages/core/tests/workflow/test_workflow_builder.py +++ b/python/packages/core/tests/workflow/test_workflow_builder.py @@ -45,7 +45,7 @@ def test_builder_accepts_agents_directly(): agent1 = DummyAgent(id="agent1", name="writer") agent2 = DummyAgent(id="agent2", name="reviewer") - wf = WorkflowBuilder().set_start_executor(agent1).add_edge(agent1, agent2).build() + wf = WorkflowBuilder(start_executor=agent1).add_edge(agent1, agent2).build() # Confirm auto-wrapped executors use agent names as IDs assert wf.start_executor_id == "writer" @@ -79,10 +79,8 @@ async def mock_handler(self, messages: list[MockMessage], ctx: WorkflowContext[M def test_workflow_builder_without_start_executor_throws(): """Test creating a workflow builder without a start executor.""" - - builder = WorkflowBuilder() - with pytest.raises(ValueError): - builder.build() + with pytest.raises(TypeError): + WorkflowBuilder() # type: ignore[call-arg] def test_workflow_builder_fluent_api(): @@ -95,13 +93,11 @@ def test_workflow_builder_fluent_api(): executor_f = MockExecutor(id="executor_f") workflow = ( - WorkflowBuilder() - .set_start_executor(executor_a) + WorkflowBuilder(max_iterations=5, start_executor=executor_a) .add_edge(executor_a, executor_b) .add_fan_out_edges(executor_b, [executor_c, executor_d]) .add_fan_in_edges([executor_c, executor_d], executor_e) .add_chain([executor_e, executor_f]) - .set_max_iterations(5) .build() ) @@ -115,9 +111,8 @@ def test_add_agent_reuses_same_wrapper(): reuse_agent = DummyAgent(id="agent_reuse", name="reuse_agent") agent_a = DummyAgent(id="agent_a", name="agent_a") - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor=reuse_agent) # Use the same agent instance in add_edge - should reuse the same wrapper - builder.set_start_executor(reuse_agent) builder.add_edge(reuse_agent, agent_a) builder.add_edge(agent_a, reuse_agent) @@ -133,10 +128,10 @@ def test_add_agent_duplicate_id_raises_error(): """Test that adding agents with duplicate IDs raises an error.""" agent1 = DummyAgent(id="agent1", name="first") agent2 = DummyAgent(id="agent2", name="first") # Same name as agent1 - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor=agent1) with pytest.raises(ValueError, match="Duplicate executor ID"): - builder.set_start_executor(agent1).add_edge(agent1, agent2).build() + builder.add_edge(agent1, agent2).build() # Tests for new executor registration patterns @@ -144,7 +139,7 @@ def test_add_agent_duplicate_id_raises_error(): def test_register_executor_basic(): """Test basic executor registration with lazy initialization.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="TestExecutor") # Register an executor factory - ID must match the registered name result = builder.register_executor(lambda: MockExecutor(id="TestExecutor"), name="TestExecutor") @@ -153,14 +148,14 @@ def test_register_executor_basic(): assert result is builder # Build workflow and verify executor is instantiated - workflow = 
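The dominant shape in this file: build a workflow, then drive it through the agent surface, either by constructing WorkflowAgent directly or via the as_agent() convenience. A minimal sketch using only calls that appear in these tests (SimpleExecutor is this test module's helper, so treat it as a placeholder):

    executor = SimpleExecutor(id="executor1", response_text="Response")
    workflow = WorkflowBuilder(start_executor=executor).build()

    # Two entry points exercised above; as_agent() returns a configured WorkflowAgent.
    agent = WorkflowAgent(workflow=workflow, name="Test Agent")
    agent = workflow.as_agent(name="TestAgent")

    # Thread history, kwargs, and checkpoint storage all flow through agent.run().
    result = await agent.run("test")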
builder.set_start_executor("TestExecutor").build() + workflow = builder.build() assert "TestExecutor" in workflow.executors assert isinstance(workflow.executors["TestExecutor"], MockExecutor) def test_register_multiple_executors(): """Test registering multiple executors and connecting them with edges.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="ExecutorA") # Register multiple executors - IDs must match registered names builder.register_executor(lambda: MockExecutor(id="ExecutorA"), name="ExecutorA") @@ -168,13 +163,7 @@ def test_register_multiple_executors(): builder.register_executor(lambda: MockExecutor(id="ExecutorC"), name="ExecutorC") # Build workflow with edges using registered names - workflow = ( - builder - .set_start_executor("ExecutorA") - .add_edge("ExecutorA", "ExecutorB") - .add_edge("ExecutorB", "ExecutorC") - .build() - ) + workflow = builder.add_edge("ExecutorA", "ExecutorB").add_edge("ExecutorB", "ExecutorC").build() # Verify all executors are present assert "ExecutorA" in workflow.executors @@ -185,7 +174,7 @@ def test_register_multiple_executors(): def test_register_with_multiple_names(): """Test registering the same factory function under multiple names.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="ExecutorA") # Register same executor factory under multiple names # Note: Each call creates a new instance, so IDs won't conflict @@ -198,7 +187,7 @@ def make_executor(): builder.register_executor(make_executor, name=["ExecutorA", "ExecutorB"]) # Set up workflow - workflow = builder.set_start_executor("ExecutorA").add_edge("ExecutorA", "ExecutorB").build() + workflow = builder.add_edge("ExecutorA", "ExecutorB").build() # Verify both executors are present assert "ExecutorA" in workflow.executors @@ -208,7 +197,7 @@ def make_executor(): def test_register_duplicate_name_raises_error(): """Test that registering duplicate names raises an error.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="MyExecutor") # Register first executor builder.register_executor(lambda: MockExecutor(id="executor_1"), name="MyExecutor") @@ -220,12 +209,11 @@ def test_register_duplicate_name_raises_error(): def test_register_duplicate_id_raises_error(): """Test that registering duplicate id raises an error.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="MyExecutor1") # Register first executor builder.register_executor(lambda: MockExecutor(id="executor"), name="MyExecutor1") builder.register_executor(lambda: MockExecutor(id="executor"), name="MyExecutor2") - builder.set_start_executor("MyExecutor1") # Registering second executor with same ID should raise ValueError with pytest.raises(ValueError, match="Executor with ID 'executor' has already been registered."): @@ -234,7 +222,7 @@ def test_register_duplicate_id_raises_error(): def test_register_agent_basic(): """Test basic agent registration with lazy initialization.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="TestAgent") # Register an agent factory result = builder.register_agent(lambda: DummyAgent(id="agent_test", name="test_agent"), name="TestAgent") @@ -243,14 +231,14 @@ def test_register_agent_basic(): assert result is builder # Build workflow and verify agent is wrapped in AgentExecutor - workflow = builder.set_start_executor("TestAgent").build() + workflow = builder.build() assert "test_agent" in workflow.executors assert isinstance(workflow.executors["test_agent"], AgentExecutor) def 
test_register_agent_with_thread(): """Test registering an agent with a custom thread.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="ThreadedAgent") custom_thread = AgentThread() # Register agent with custom thread @@ -261,7 +249,7 @@ def test_register_agent_with_thread(): ) # Build workflow and verify agent executor configuration - workflow = builder.set_start_executor("ThreadedAgent").build() + workflow = builder.build() executor = workflow.executors["threaded_agent"] assert isinstance(executor, AgentExecutor) @@ -271,7 +259,7 @@ def test_register_agent_with_thread(): def test_register_agent_duplicate_name_raises_error(): """Test that registering agents with duplicate names raises an error.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="MyAgent") # Register first agent builder.register_agent(lambda: DummyAgent(id="agent1", name="first"), name="MyAgent") @@ -283,14 +271,14 @@ def test_register_agent_duplicate_name_raises_error(): def test_register_and_add_edge_with_strings(): """Test that registered executors can be connected using string names.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="Source") # Register executors builder.register_executor(lambda: MockExecutor(id="source"), name="Source") builder.register_executor(lambda: MockExecutor(id="target"), name="Target") # Add edge using string names - workflow = builder.set_start_executor("Source").add_edge("Source", "Target").build() + workflow = builder.add_edge("Source", "Target").build() # Verify edge is created correctly assert workflow.start_executor_id == "source" @@ -300,14 +288,14 @@ def test_register_and_add_edge_with_strings(): def test_register_agent_and_add_edge_with_strings(): """Test that registered agents can be connected using string names.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="Writer") # Register agents builder.register_agent(lambda: DummyAgent(id="writer_id", name="writer"), name="Writer") builder.register_agent(lambda: DummyAgent(id="reviewer_id", name="reviewer"), name="Reviewer") # Add edge using string names - workflow = builder.set_start_executor("Writer").add_edge("Writer", "Reviewer").build() + workflow = builder.add_edge("Writer", "Reviewer").build() # Verify edge is created correctly assert workflow.start_executor_id == "writer" @@ -318,7 +306,7 @@ def test_register_agent_and_add_edge_with_strings(): def test_register_with_fan_out_edges(): """Test using registered names with fan-out edge groups.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="Source") # Register executors - IDs must match registered names builder.register_executor(lambda: MockExecutor(id="Source"), name="Source") @@ -326,7 +314,7 @@ def test_register_with_fan_out_edges(): builder.register_executor(lambda: MockExecutor(id="Target2"), name="Target2") # Add fan-out edges using registered names - workflow = builder.set_start_executor("Source").add_fan_out_edges("Source", ["Target1", "Target2"]).build() + workflow = builder.add_fan_out_edges("Source", ["Target1", "Target2"]).build() # Verify all executors are present assert "Source" in workflow.executors @@ -336,7 +324,7 @@ def test_register_with_fan_out_edges(): def test_register_with_fan_in_edges(): """Test using registered names with fan-in edge groups.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="Source1") # Register executors - IDs must match registered names builder.register_executor(lambda: 
MockExecutor(id="Source1"), name="Source1") @@ -345,13 +333,7 @@ def test_register_with_fan_in_edges(): # Add fan-in edges using registered names # Both Source1 and Source2 need to be reachable, so connect Source1 to Source2 - workflow = ( - builder - .set_start_executor("Source1") - .add_edge("Source1", "Source2") - .add_fan_in_edges(["Source1", "Source2"], "Aggregator") - .build() - ) + workflow = builder.add_edge("Source1", "Source2").add_fan_in_edges(["Source1", "Source2"], "Aggregator").build() # Verify all executors are present assert "Source1" in workflow.executors @@ -361,7 +343,7 @@ def test_register_with_fan_in_edges(): def test_register_with_chain(): """Test using registered names with add_chain.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="Step1") # Register executors - IDs must match registered names builder.register_executor(lambda: MockExecutor(id="Step1"), name="Step1") @@ -369,7 +351,7 @@ def test_register_with_chain(): builder.register_executor(lambda: MockExecutor(id="Step3"), name="Step3") # Add chain using registered names - workflow = builder.add_chain(["Step1", "Step2", "Step3"]).set_start_executor("Step1").build() + workflow = builder.add_chain(["Step1", "Step2", "Step3"]).build() # Verify all executors are present assert "Step1" in workflow.executors @@ -387,15 +369,12 @@ def factory(): call_count += 1 return MockExecutor(id="Test") - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="Test") builder.register_executor(factory, name="Test") # Factory should not be called yet assert call_count == 0 - # Add edge without building - builder.set_start_executor("Test") - # Factory should still not be called assert call_count == 0 @@ -409,7 +388,7 @@ def factory(): def test_mixing_eager_and_lazy_initialization_error(): """Test that mixing eager executor instances with lazy string names raises appropriate error.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="Lazy") # Create an eager executor instance eager_executor = MockExecutor(id="eager") @@ -430,7 +409,7 @@ def test_mixing_eager_and_lazy_initialization_error(): def test_register_with_condition(): """Test adding edges with conditions using registered names.""" - builder = WorkflowBuilder() + builder = WorkflowBuilder(start_executor="Source") def condition_func(msg: MockMessage) -> bool: return msg.data > 0 @@ -440,7 +419,7 @@ def condition_func(msg: MockMessage) -> bool: builder.register_executor(lambda: MockExecutor(id="Target"), name="Target") # Add edge with condition - workflow = builder.set_start_executor("Source").add_edge("Source", "Target", condition=condition_func).build() + workflow = builder.add_edge("Source", "Target", condition=condition_func).build() # Verify workflow is built correctly assert "Source" in workflow.executors @@ -457,14 +436,14 @@ def agent_factory() -> DummyAgent: return agent # Build first workflow - builder1 = WorkflowBuilder() + builder1 = WorkflowBuilder(start_executor="Agent") builder1.register_agent(agent_factory, name="Agent") - _ = builder1.set_start_executor("Agent").build() + _ = builder1.build() # Build second workflow - builder2 = WorkflowBuilder() + builder2 = WorkflowBuilder(start_executor="Agent") builder2.register_agent(agent_factory, name="Agent") - _ = builder2.set_start_executor("Agent").build() + _ = builder2.build() # Verify that two different agent instances were created assert len(instance_ids) == 2 @@ -477,11 +456,10 @@ def agent_factory() -> DummyAgent: def 
test_with_output_from_returns_builder(): """Test that with_output_from returns the builder for method chaining.""" executor_a = MockExecutor(id="executor_a") - builder = WorkflowBuilder() + builder = WorkflowBuilder(output_executors=[executor_a], start_executor=executor_a) - result = builder.with_output_from([executor_a]) - - assert result is builder + # Verify builder was created with output_executors + assert builder._output_executors == [executor_a] def test_with_output_from_with_executor_instances(): @@ -490,10 +468,8 @@ def test_with_output_from_with_executor_instances(): executor_b = MockExecutor(id="executor_b") workflow = ( - WorkflowBuilder() - .set_start_executor(executor_a) + WorkflowBuilder(start_executor=executor_a, output_executors=[executor_b]) .add_edge(executor_a, executor_b) - .with_output_from([executor_b]) .build() ) @@ -506,9 +482,7 @@ def test_with_output_from_with_agent_instances(): agent_a = DummyAgent(id="agent_a", name="writer") agent_b = DummyAgent(id="agent_b", name="reviewer") - workflow = ( - WorkflowBuilder().set_start_executor(agent_a).add_edge(agent_a, agent_b).with_output_from([agent_b]).build() - ) + workflow = WorkflowBuilder(start_executor=agent_a, output_executors=[agent_b]).add_edge(agent_a, agent_b).build() # Verify that the workflow was built with the agent's name as output executor assert workflow._output_executors == ["reviewer"] # type: ignore @@ -516,15 +490,10 @@ def test_with_output_from_with_agent_instances(): def test_with_output_from_with_registered_names(): """Test with_output_from with registered factory names (strings).""" - workflow = ( - WorkflowBuilder() - .register_executor(lambda: MockExecutor(id="ExecutorA"), name="ExecutorAFactory") - .register_executor(lambda: MockExecutor(id="ExecutorB"), name="ExecutorBFactory") - .set_start_executor("ExecutorAFactory") - .add_edge("ExecutorAFactory", "ExecutorBFactory") - .with_output_from(["ExecutorBFactory"]) - .build() - ) + builder = WorkflowBuilder(start_executor="ExecutorAFactory", output_executors=["ExecutorBFactory"]) + builder.register_executor(lambda: MockExecutor(id="ExecutorA"), name="ExecutorAFactory") + builder.register_executor(lambda: MockExecutor(id="ExecutorB"), name="ExecutorBFactory") + workflow = builder.add_edge("ExecutorAFactory", "ExecutorBFactory").build() # Verify that the workflow was built with the correct output executors assert workflow._output_executors == ["ExecutorB"] # type: ignore @@ -537,11 +506,9 @@ def test_with_output_from_with_multiple_executors(): executor_c = MockExecutor(id="executor_c") workflow = ( - WorkflowBuilder() - .set_start_executor(executor_a) + WorkflowBuilder(start_executor=executor_a, output_executors=[executor_a, executor_c]) .add_edge(executor_a, executor_b) .add_edge(executor_b, executor_c) - .with_output_from([executor_a, executor_c]) .build() ) @@ -549,51 +516,41 @@ def test_with_output_from_with_multiple_executors(): assert set(workflow._output_executors) == {"executor_a", "executor_c"} # type: ignore -def test_with_output_from_can_be_called_multiple_times(): - """Test that calling with_output_from multiple times overwrites the previous setting.""" +def test_with_output_from_can_be_set_to_different_value(): + """Test that output_executors can be set at construction time.""" executor_a = MockExecutor(id="executor_a") executor_b = MockExecutor(id="executor_b") workflow = ( - WorkflowBuilder() - .set_start_executor(executor_a) + WorkflowBuilder(start_executor=executor_a, output_executors=[executor_b]) .add_edge(executor_a, executor_b) 
- .with_output_from([executor_a]) - .with_output_from([executor_b]) # This should overwrite the previous setting .build() ) - # Verify that only the last setting is applied + # Verify that the setting is applied assert workflow._output_executors == ["executor_b"] # type: ignore def test_with_output_from_with_registered_agents(): """Test with_output_from with registered agent factory names.""" - workflow = ( - WorkflowBuilder() - .register_agent(lambda: DummyAgent(id="agent1", name="writer"), name="WriterAgent") - .register_agent(lambda: DummyAgent(id="agent2", name="reviewer"), name="ReviewerAgent") - .set_start_executor("WriterAgent") - .add_edge("WriterAgent", "ReviewerAgent") - .with_output_from(["ReviewerAgent"]) - .build() - ) + builder = WorkflowBuilder(start_executor="WriterAgent", output_executors=["ReviewerAgent"]) + builder.register_agent(lambda: DummyAgent(id="agent1", name="writer"), name="WriterAgent") + builder.register_agent(lambda: DummyAgent(id="agent2", name="reviewer"), name="ReviewerAgent") + workflow = builder.add_edge("WriterAgent", "ReviewerAgent").build() # Verify that the workflow was built with the agent's resolved name assert workflow._output_executors == ["reviewer"] # type: ignore -def test_with_output_from_in_fluent_chain(): - """Test that with_output_from works correctly in a fluent builder chain.""" +def test_with_output_from_in_constructor(): + """Test that output_executors works correctly when set in the constructor.""" executor_a = MockExecutor(id="executor_a") executor_b = MockExecutor(id="executor_b") executor_c = MockExecutor(id="executor_c") - # Build workflow with with_output_from in the middle of the chain + # Build workflow with output_executors in the constructor workflow = ( - WorkflowBuilder() - .set_start_executor(executor_a) - .with_output_from([executor_c]) # Set early in the chain + WorkflowBuilder(start_executor=executor_a, output_executors=[executor_c]) .add_edge(executor_a, executor_b) .add_edge(executor_b, executor_c) .build() @@ -607,13 +564,13 @@ def test_with_output_from_with_invalid_executor_raises_validation_error(): """Test that with_output_from with an invalid executor raises an error.""" executor_a = MockExecutor(id="executor_a") - builder = WorkflowBuilder().set_start_executor(executor_a) + builder = WorkflowBuilder(start_executor=executor_a, output_executors=[MockExecutor(id="executor_b")]) # Attempting to set output from an executor not in the workflow should raise an error with pytest.raises( WorkflowValidationError, match="Output executor 'executor_b' is not present in the workflow graph" ): - builder.with_output_from([MockExecutor(id="executor_b")]).build() + builder.build() # endregion diff --git a/python/packages/core/tests/workflow/test_workflow_context.py b/python/packages/core/tests/workflow/test_workflow_context.py index 03aa1d78d9..53a7e44903 100644 --- a/python/packages/core/tests/workflow/test_workflow_context.py +++ b/python/packages/core/tests/workflow/test_workflow_context.py @@ -93,7 +93,7 @@ async def test_workflow_context_type_annotations_no_parameter() -> None: async def func1(text: str, ctx: WorkflowContext) -> None: await ctx.add_event(_TestEvent()) - wf = WorkflowBuilder().set_start_executor(func1).build() + wf = WorkflowBuilder(start_executor=func1).build() events = await wf.run("hello") test_events = [e for e in events if isinstance(e, _TestEvent)] assert len(test_events) == 1 @@ -110,7 +110,7 @@ async def func1(self, text: str, ctx: WorkflowContext) -> None: assert executor1.output_types == [] assert 
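A pattern specific to this file: factories registered by name stay uninstantiated until build(), and the constructor's start_executor may name a factory that is registered afterwards. A minimal sketch assembled from calls that appear in these tests:

    builder = WorkflowBuilder(start_executor="Source")  # the name is resolved later, at build()
    builder.register_executor(lambda: MockExecutor(id="source"), name="Source")
    builder.register_executor(lambda: MockExecutor(id="target"), name="Target")

    # Factories run only here, once per registered name.
    workflow = builder.add_edge("Source", "Target").build()
    assert workflow.start_executor_id == "source"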
diff --git a/python/packages/core/tests/workflow/test_workflow_context.py b/python/packages/core/tests/workflow/test_workflow_context.py
index 03aa1d78d9..53a7e44903
--- a/python/packages/core/tests/workflow/test_workflow_context.py
+++ b/python/packages/core/tests/workflow/test_workflow_context.py
@@ -93,7 +93,7 @@ async def test_workflow_context_type_annotations_no_parameter() -> None:
     async def func1(text: str, ctx: WorkflowContext) -> None:
         await ctx.add_event(_TestEvent())

-    wf = WorkflowBuilder().set_start_executor(func1).build()
+    wf = WorkflowBuilder(start_executor=func1).build()
     events = await wf.run("hello")
     test_events = [e for e in events if isinstance(e, _TestEvent)]
     assert len(test_events) == 1
@@ -110,7 +110,7 @@ async def func1(self, text: str, ctx: WorkflowContext) -> None:
     assert executor1.output_types == []
     assert executor1.workflow_output_types == []

-    wf2 = WorkflowBuilder().set_start_executor(executor1).build()
+    wf2 = WorkflowBuilder(start_executor=executor1).build()
     events2 = await wf2.run("hello")
     test_events2 = [e for e in events2 if isinstance(e, _TestEvent)]
     assert len(test_events2) == 1
@@ -126,7 +126,7 @@ async def func1(text: str, ctx: WorkflowContext[str]) -> None:
     async def func2(text: str, ctx: WorkflowContext) -> None:
         await ctx.add_event(_TestEvent(data=text))

-    wf = WorkflowBuilder().add_edge(func1, func2).set_start_executor(func1).build()
+    wf = WorkflowBuilder(start_executor=func1).add_edge(func1, func2).build()
     events = await wf.run("hello")
     test_events = [e for e in events if isinstance(e, _TestEvent)]
     assert len(test_events) == 1
@@ -153,7 +153,7 @@ async def func2(self, text: str, ctx: WorkflowContext) -> None:
     assert executor2.output_types == []
     assert executor2.workflow_output_types == []

-    wf2 = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build()
+    wf2 = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build()
     events2 = await wf2.run("hello")
     test_events2 = [e for e in events2 if isinstance(e, _TestEvent)]
     assert len(test_events2) == 1
@@ -171,7 +171,7 @@ async def func2(text: str, ctx: WorkflowContext[Never, str]) -> None:
         await ctx.add_event(_TestEvent(data=text))
         await ctx.yield_output(text)

-    wf = WorkflowBuilder().add_edge(func1, func2).set_start_executor(func1).build()
+    wf = WorkflowBuilder(start_executor=func1).add_edge(func1, func2).build()
     events = await wf.run("hello")
     outputs = events.get_outputs()
     assert len(outputs) == 1
@@ -199,7 +199,7 @@ async def func2(self, text: str, ctx: WorkflowContext[Never, str]) -> None:
     assert executor2.output_types == []
     assert executor2.workflow_output_types == [str]

-    wf2 = WorkflowBuilder().add_edge(executor1, executor2).set_start_executor(executor1).build()
+    wf2 = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build()
     events2 = await wf2.run("hello")
     outputs2 = events2.get_outputs()
     assert len(outputs2) == 1
diff --git a/python/packages/core/tests/workflow/test_workflow_kwargs.py b/python/packages/core/tests/workflow/test_workflow_kwargs.py
index e35430f453..2e46454601
--- a/python/packages/core/tests/workflow/test_workflow_kwargs.py
+++ b/python/packages/core/tests/workflow/test_workflow_kwargs.py
@@ -78,7 +78,7 @@ async def _run() -> AgentResponse:
 async def test_sequential_kwargs_flow_to_agent() -> None:
     """Test that kwargs passed to SequentialBuilder workflow flow through to agent."""
     agent = _KwargsCapturingAgent(name="seq_agent")
-    workflow = SequentialBuilder().participants([agent]).build()
+    workflow = SequentialBuilder(participants=[agent]).build()

     custom_data = {"endpoint": "https://api.example.com", "version": "v1"}
     user_token = {"user_name": "alice", "access_level": "admin"}
@@ -105,7 +105,7 @@ async def test_sequential_kwargs_flow_to_multiple_agents() -> None:
     """Test that kwargs flow to all agents in a sequential workflow."""
     agent1 = _KwargsCapturingAgent(name="agent1")
     agent2 = _KwargsCapturingAgent(name="agent2")
-    workflow = SequentialBuilder().participants([agent1, agent2]).build()
+    workflow = SequentialBuilder(participants=[agent1, agent2]).build()

     custom_data = {"key": "value"}
@@ -123,7 +123,7 @@ async def test_sequential_run_kwargs_flow() -> None:
     """Test that kwargs flow through workflow.run() (non-streaming)."""
     agent = _KwargsCapturingAgent(name="run_agent")
-    workflow = SequentialBuilder().participants([agent]).build()
+    workflow = SequentialBuilder(participants=[agent]).build()

     _ = await workflow.run("test message", custom_data={"test": True})
@@ -141,7 +141,7 @@ async def test_concurrent_kwargs_flow_to_agents() -> None:
     """Test that kwargs flow to all agents in a concurrent workflow."""
     agent1 = _KwargsCapturingAgent(name="concurrent1")
     agent2 = _KwargsCapturingAgent(name="concurrent2")
-    workflow = ConcurrentBuilder().participants([agent1, agent2]).build()
+    workflow = ConcurrentBuilder(participants=[agent1, agent2]).build()

     custom_data = {"batch_id": "123"}
     user_token = {"user_name": "bob"}
@@ -188,13 +188,11 @@ def simple_selector(state: GroupChatState) -> str:
         names = list(state.participants.keys())
         return names[(turn_count - 1) % len(names)]

-    workflow = (
-        GroupChatBuilder()
-        .participants([agent1, agent2])
-        .with_orchestrator(selection_func=simple_selector)
-        .with_max_rounds(2)  # Limit rounds to prevent infinite loop
-        .build()
-    )
+    workflow = GroupChatBuilder(
+        participants=[agent1, agent2],
+        max_rounds=2,  # Limit rounds to prevent infinite loop
+        selection_func=simple_selector,
+    ).build()

     custom_data = {"session_id": "group123"}
@@ -230,7 +228,7 @@ async def inspect(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatM
         await ctx.send_message(msgs)

     inspector = _StateInspector(id="inspector")
-    workflow = SequentialBuilder().participants([inspector]).build()
+    workflow = SequentialBuilder(participants=[inspector]).build()

     async for event in workflow.run("test", my_kwarg="my_value", another=123, stream=True):
         if event.type == "status" and event.state == WorkflowRunState.IDLE:
@@ -255,7 +253,7 @@ async def check(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatMes
         await ctx.send_message(msgs)

     checker = _StateChecker(id="checker")
-    workflow = SequentialBuilder().participants([checker]).build()
+    workflow = SequentialBuilder(participants=[checker]).build()

     # Run without any kwargs
     async for event in workflow.run("test", stream=True):
@@ -275,7 +273,7 @@ async def check(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[ChatMes
 async def test_kwargs_with_none_values() -> None:
     """Test that kwargs with None values are passed through correctly."""
     agent = _KwargsCapturingAgent(name="none_test")
-    workflow = SequentialBuilder().participants([agent]).build()
+    workflow = SequentialBuilder(participants=[agent]).build()

     async for event in workflow.run("test", optional_param=None, other_param="value", stream=True):
         if event.type == "status" and event.state == WorkflowRunState.IDLE:
@@ -291,7 +289,7 @@ async def test_kwargs_with_none_values() -> None:
 async def test_kwargs_with_complex_nested_data() -> None:
     """Test that complex nested data structures flow through correctly."""
     agent = _KwargsCapturingAgent(name="nested_test")
-    workflow = SequentialBuilder().participants([agent]).build()
+    workflow = SequentialBuilder(participants=[agent]).build()

     complex_data = {
         "level1": {
@@ -318,8 +316,8 @@ async def test_kwargs_preserved_across_workflow_reruns() -> None:
     agent = _KwargsCapturingAgent(name="rerun_test")

     # Build separate workflows for each run to avoid "already running" error
-    workflow1 = SequentialBuilder().participants([agent]).build()
-    workflow2 = SequentialBuilder().participants([agent]).build()
+    workflow1 = SequentialBuilder(participants=[agent]).build()
+    workflow2 = SequentialBuilder(participants=[agent]).build()

     # First run
     async for event in workflow1.run("run1", run_id="first", stream=True):
@@ -349,11 +347,10 @@ async def test_handoff_kwargs_flow_to_agents() -> None:
     agent2 = _KwargsCapturingAgent(name="specialist")

     workflow = (
-        HandoffBuilder()
+        HandoffBuilder(termination_condition=lambda conv: len(conv) >= 4)
         .participants([agent1, agent2])
         .with_start_agent(agent1)
         .with_autonomous_mode()
-        .with_termination_condition(lambda conv: len(conv) >= 4)
         .build()
     )
@@ -413,7 +410,7 @@ async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatM
     agent = _KwargsCapturingAgent(name="agent1")
     manager = _MockManager()

-    workflow = MagenticBuilder().participants([agent]).with_manager(manager=manager).build()
+    workflow = MagenticBuilder(participants=[agent], manager=manager).build()

     custom_data = {"session_id": "magentic123"}
@@ -463,7 +460,7 @@ async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatM
     agent = _KwargsCapturingAgent(name="agent1")
     manager = _MockManager()

-    magentic_workflow = MagenticBuilder().participants([agent]).with_manager(manager=manager).build()
+    magentic_workflow = MagenticBuilder(participants=[agent], manager=manager).build()

     # Use MagenticWorkflow.run() which goes through the kwargs attachment path
     custom_data = {"magentic_key": "magentic_value"}
@@ -485,7 +482,7 @@ async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatM
 async def test_workflow_as_agent_run_propagates_kwargs_to_underlying_agent() -> None:
     """Test that kwargs passed to workflow_agent.run() flow through to the underlying agents."""
     agent = _KwargsCapturingAgent(name="inner_agent")
-    workflow = SequentialBuilder().participants([agent]).build()
+    workflow = SequentialBuilder(participants=[agent]).build()
     workflow_agent = workflow.as_agent(name="TestWorkflowAgent")

     custom_data = {"endpoint": "https://api.example.com", "version": "v1"}
@@ -509,7 +506,7 @@ async def test_workflow_as_agent_run_stream_propagates_kwargs_to_underlying_agen
     """Test that kwargs passed to workflow_agent.run() flow through to the underlying agents."""
     agent = _KwargsCapturingAgent(name="inner_agent")
-    workflow = SequentialBuilder().participants([agent]).build()
+    workflow = SequentialBuilder(participants=[agent]).build()
     workflow_agent = workflow.as_agent(name="TestWorkflowAgent")

     custom_data = {"session_id": "xyz123"}
@@ -536,7 +533,7 @@ async def test_workflow_as_agent_propagates_kwargs_to_multiple_agents() -> None:
     """Test that kwargs flow to all agents when using workflow.as_agent()."""
     agent1 = _KwargsCapturingAgent(name="agent1")
     agent2 = _KwargsCapturingAgent(name="agent2")
-    workflow = SequentialBuilder().participants([agent1, agent2]).build()
+    workflow = SequentialBuilder(participants=[agent1, agent2]).build()
     workflow_agent = workflow.as_agent(name="MultiAgentWorkflow")

     custom_data = {"batch_id": "batch-001"}
@@ -553,7 +550,7 @@ async def test_workflow_as_agent_kwargs_with_none_values() -> None:
     """Test that kwargs with None values are passed through correctly via as_agent()."""
     agent = _KwargsCapturingAgent(name="none_test_agent")
-    workflow = SequentialBuilder().participants([agent]).build()
+    workflow = SequentialBuilder(participants=[agent]).build()
     workflow_agent = workflow.as_agent(name="NoneTestWorkflow")

     _ = await workflow_agent.run("test", optional_param=None, other_param="value")
@@ -568,7 +565,7 @@ async def test_workflow_as_agent_kwargs_with_complex_nested_data() -> None:
     """Test that complex nested data structures flow through correctly via as_agent()."""
     agent = _KwargsCapturingAgent(name="nested_agent")
-    workflow = SequentialBuilder().participants([agent]).build()
+    workflow = SequentialBuilder(participants=[agent]).build()
     workflow_agent = workflow.as_agent(name="NestedDataWorkflow")

     complex_data = {
@@ -606,13 +603,13 @@ async def test_subworkflow_kwargs_propagation() -> None:
     inner_agent = _KwargsCapturingAgent(name="inner_agent")

     # Build the inner (sub) workflow with the agent
-    inner_workflow = SequentialBuilder().participants([inner_agent]).build()
+    inner_workflow = SequentialBuilder(participants=[inner_agent]).build()

     # Wrap the inner workflow in a WorkflowExecutor so it can be used as a subworkflow
     subworkflow_executor = WorkflowExecutor(workflow=inner_workflow, id="subworkflow_executor")

     # Build the outer (parent) workflow containing the subworkflow
-    outer_workflow = SequentialBuilder().participants([subworkflow_executor]).build()
+    outer_workflow = SequentialBuilder(participants=[subworkflow_executor]).build()

     # Define kwargs that should propagate to subworkflow
     custom_data = {"api_key": "secret123", "endpoint": "https://api.example.com"}
@@ -670,13 +667,13 @@ async def read_kwargs(self, msgs: list[ChatMessage], ctx: WorkflowContext[list[C
     # Build inner workflow with State reader
     state_reader = _StateReader(id="state_reader")
-    inner_workflow = SequentialBuilder().participants([state_reader]).build()
+    inner_workflow = SequentialBuilder(participants=[state_reader]).build()

     # Wrap as subworkflow
     subworkflow_executor = WorkflowExecutor(workflow=inner_workflow, id="subworkflow")

     # Build outer workflow
-    outer_workflow = SequentialBuilder().participants([subworkflow_executor]).build()
+    outer_workflow = SequentialBuilder(participants=[subworkflow_executor]).build()

     # Run with kwargs
     async for event in outer_workflow.run(
@@ -715,15 +712,15 @@ async def test_nested_subworkflow_kwargs_propagation() -> None:
     inner_agent = _KwargsCapturingAgent(name="deeply_nested_agent")

     # Build inner workflow
-    inner_workflow = SequentialBuilder().participants([inner_agent]).build()
+    inner_workflow = SequentialBuilder(participants=[inner_agent]).build()
     inner_executor = WorkflowExecutor(workflow=inner_workflow, id="inner_executor")

     # Build middle workflow containing inner
-    middle_workflow = SequentialBuilder().participants([inner_executor]).build()
+    middle_workflow = SequentialBuilder(participants=[inner_executor]).build()
     middle_executor = WorkflowExecutor(workflow=middle_workflow, id="middle_executor")

     # Build outer workflow containing middle
-    outer_workflow = SequentialBuilder().participants([middle_executor]).build()
+    outer_workflow = SequentialBuilder(participants=[middle_executor]).build()

     # Run with kwargs
     async for event in outer_workflow.run(
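The orchestration-builder tests above all lean on the same kwargs contract: any extra keyword argument passed to run() is forwarded to every participating agent, including agents inside nested subworkflows. A minimal sketch with names reused from these tests (_KwargsCapturingAgent is the local capture helper, so treat it as a placeholder):

    agent = _KwargsCapturingAgent(name="seq_agent")
    workflow = SequentialBuilder(participants=[agent]).build()

    # custom_data is an arbitrary keyword argument, not a framework parameter;
    # the assertions in this file show it arriving in the agent's run kwargs.
    _ = await workflow.run("test message", custom_data={"test": True})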
WorkflowBuilder(start_executor=executor1) .add_fan_out_edges(executor1, [executor2, executor3]) .add_fan_in_edges([executor2, executor3], aggregator) .build() @@ -297,11 +296,11 @@ async def test_end_to_end_workflow_tracing(span_exporter: InMemorySpanExporter) span_exporter.clear() # Test workflow with name and description - verify OTEL attributes - ( - WorkflowBuilder(name="Test Pipeline", description="Test workflow description") - .set_start_executor(MockExecutor("start")) - .build() - ) + WorkflowBuilder( + name="Test Pipeline", + description="Test workflow description", + start_executor=MockExecutor("start"), + ).build() build_spans_with_metadata = [s for s in span_exporter.get_finished_spans() if s.name == "workflow.build"] assert len(build_spans_with_metadata) == 1 @@ -412,7 +411,7 @@ async def handle_message(self, message: str, ctx: WorkflowContext) -> None: raise ValueError("Test error") failing_executor = FailingExecutor() - workflow = WorkflowBuilder().set_start_executor(failing_executor).build() + workflow = WorkflowBuilder(start_executor=failing_executor).build() # Run workflow and expect error with pytest.raises(ValueError, match="Test error"): @@ -475,10 +474,10 @@ async def test_message_trace_context_serialization(span_exporter: InMemorySpanEx async def test_workflow_build_error_tracing(span_exporter: InMemorySpanExporter) -> None: """Test that build errors are properly recorded in build spans.""" - # Test validation error by not setting start executor - builder = WorkflowBuilder() + # Test validation error by referencing a non-existent start executor + builder = WorkflowBuilder(start_executor="NonExistent") - with pytest.raises(ValueError, match="Starting executor must be set"): + with pytest.raises(ValueError): builder.build() spans = span_exporter.get_finished_spans() @@ -501,5 +500,5 @@ async def test_workflow_build_error_tracing(span_exporter: InMemorySpanExporter) error_event = error_events[0] assert error_event.attributes is not None - assert "Starting executor must be set" in str(error_event.attributes.get("build.error.message")) + assert "starting executor" in str(error_event.attributes.get("build.error.message")).lower() assert error_event.attributes.get("build.error.type") == "ValueError" diff --git a/python/packages/core/tests/workflow/test_workflow_states.py b/python/packages/core/tests/workflow/test_workflow_states.py index 90b4a8dd58..0ccf84b103 100644 --- a/python/packages/core/tests/workflow/test_workflow_states.py +++ b/python/packages/core/tests/workflow/test_workflow_states.py @@ -28,7 +28,7 @@ async def fail(self, msg: int, ctx: WorkflowContext) -> None: # pragma: no cove async def test_executor_failed_and_workflow_failed_events_streaming(): failing = FailingExecutor(id="f") - wf: Workflow = WorkflowBuilder().set_start_executor(failing).build() + wf: Workflow = WorkflowBuilder(start_executor=failing).build() events: list[object] = [] with pytest.raises(RuntimeError, match="boom"): @@ -86,7 +86,7 @@ async def test_executor_failed_event_from_second_executor_in_chain(): """Test that executor_failed event is emitted when a non-start executor fails.""" passthrough = PassthroughExecutor(id="passthrough") failing = FailingExecutor(id="failing") - wf: Workflow = WorkflowBuilder().set_start_executor(passthrough).add_edge(passthrough, failing).build() + wf: Workflow = WorkflowBuilder(start_executor=passthrough).add_edge(passthrough, failing).build() events: list[object] = [] with pytest.raises(RuntimeError, match="boom"): @@ -131,7 +131,7 @@ async def ask(self, _: 
str, ctx: WorkflowContext) -> None: # pragma: no cover async def test_idle_with_pending_requests_status_streaming(): simple_executor = SimpleExecutor(id="simple") requester = Requester(id="req") - wf = WorkflowBuilder().set_start_executor(simple_executor).add_edge(simple_executor, requester).build() + wf = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, requester).build() events = [ev async for ev in wf.run("start", stream=True)] # Consume stream fully @@ -153,7 +153,7 @@ async def run(self, msg: str, ctx: WorkflowContext[Never, str]) -> None: # prag async def test_completed_status_streaming(): c = Completer(id="c") - wf = WorkflowBuilder().set_start_executor(c).build() + wf = WorkflowBuilder(start_executor=c).build() events = [ev async for ev in wf.run("ok", stream=True)] # no raise # Last status should be IDLE status = [e for e in events if isinstance(e, WorkflowEvent) and e.type == "status"] @@ -163,7 +163,7 @@ async def test_completed_status_streaming(): async def test_started_and_completed_event_origins(): c = Completer(id="c-origin") - wf = WorkflowBuilder().set_start_executor(c).build() + wf = WorkflowBuilder(start_executor=c).build() events = [ev async for ev in wf.run("payload", stream=True)] started = next(e for e in events if isinstance(e, WorkflowEvent) and e.type == "started") @@ -181,21 +181,21 @@ async def test_started_and_completed_event_origins(): async def test_non_streaming_final_state_helpers(): # Completed case c = Completer(id="c") - wf1 = WorkflowBuilder().set_start_executor(c).build() + wf1 = WorkflowBuilder(start_executor=c).build() result1: WorkflowRunResult = await wf1.run("done") assert result1.get_final_state() == WorkflowRunState.IDLE # Idle-with-pending-request case simple_executor = SimpleExecutor(id="simple") requester = Requester(id="req") - wf2 = WorkflowBuilder().set_start_executor(simple_executor).add_edge(simple_executor, requester).build() + wf2 = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, requester).build() result2: WorkflowRunResult = await wf2.run("start") assert result2.get_final_state() == WorkflowRunState.IDLE_WITH_PENDING_REQUESTS async def test_run_includes_status_events_completed(): c = Completer(id="c2") - wf = WorkflowBuilder().set_start_executor(c).build() + wf = WorkflowBuilder(start_executor=c).build() result: WorkflowRunResult = await wf.run("ok") timeline = result.status_timeline() assert timeline, "Expected status timeline in non-streaming run() results" @@ -205,7 +205,7 @@ async def test_run_includes_status_events_completed(): async def test_run_includes_status_events_idle_with_requests(): simple_executor = SimpleExecutor(id="simple") requester = Requester(id="req2") - wf = WorkflowBuilder().set_start_executor(simple_executor).add_edge(simple_executor, requester).build() + wf = WorkflowBuilder(start_executor=simple_executor).add_edge(simple_executor, requester).build() result: WorkflowRunResult = await wf.run("start") timeline = result.status_timeline() assert timeline, "Expected status timeline in non-streaming run() results" diff --git a/python/packages/declarative/agent_framework_declarative/_workflows/_declarative_builder.py b/python/packages/declarative/agent_framework_declarative/_workflows/_declarative_builder.py index 84ecc8ea4e..4e649f8f04 100644 --- a/python/packages/declarative/agent_framework_declarative/_workflows/_declarative_builder.py +++ b/python/packages/declarative/agent_framework_declarative/_workflows/_declarative_builder.py @@ -137,12 +137,6 @@ def 
build(self) -> Workflow: Raises: ValueError: If no actions are defined (empty workflow), or validation fails """ - builder = WorkflowBuilder(name=self._workflow_id) - - # Enable checkpointing if storage is provided - if self._checkpoint_storage: - builder.with_checkpointing(self._checkpoint_storage) - actions = self._yaml_def.get("actions", []) if not actions: # Empty workflow - raise an error since we need at least one executor @@ -152,6 +146,13 @@ def build(self) -> Workflow: if self._validate: self._validate_workflow(actions) + # Use a placeholder for start_executor; it will be overwritten below via _set_start_executor + builder = WorkflowBuilder( + start_executor="_declarative_placeholder", + name=self._workflow_id, + checkpoint_storage=self._checkpoint_storage, + ) + # First pass: create all executors entry_executor = self._create_executors_for_actions(actions, builder) @@ -164,11 +165,11 @@ def build(self) -> Workflow: # Create an entry passthrough node and wire to the structure's branches entry_node = JoinExecutor({"kind": "Entry"}, id="_workflow_entry") self._executors[entry_node.id] = entry_node - builder.set_start_executor(entry_node) + builder._set_start_executor(entry_node) # Use _add_sequential_edge which knows how to wire to structures self._add_sequential_edge(builder, entry_node, entry_executor) else: - builder.set_start_executor(entry_executor) + builder._set_start_executor(entry_executor) else: raise ValueError("Failed to create any executors from actions.") diff --git a/python/packages/declarative/tests/test_graph_coverage.py b/python/packages/declarative/tests/test_graph_coverage.py index ad03fc9b97..fd01faf2a4 100644 --- a/python/packages/declarative/tests/test_graph_coverage.py +++ b/python/packages/declarative/tests/test_graph_coverage.py @@ -2012,7 +2012,7 @@ def test_create_goto_reference(self): # Create builder with minimal yaml definition yaml_def = {"name": "test_workflow", "actions": []} graph_builder = DeclarativeWorkflowBuilder(yaml_def) - wb = WorkflowBuilder() + wb = WorkflowBuilder(start_executor="dummy") action_def = { "kind": "GotoAction", @@ -2036,7 +2036,7 @@ def test_create_goto_reference_auto_id(self): yaml_def = {"name": "test_workflow", "actions": []} graph_builder = DeclarativeWorkflowBuilder(yaml_def) - wb = WorkflowBuilder() + wb = WorkflowBuilder(start_executor="dummy") action_def = { "kind": "GotoAction", @@ -2056,7 +2056,7 @@ def test_create_goto_reference_no_target(self): yaml_def = {"name": "test_workflow", "actions": []} graph_builder = DeclarativeWorkflowBuilder(yaml_def) - wb = WorkflowBuilder() + wb = WorkflowBuilder(start_executor="dummy") action_def = { "kind": "GotoAction", @@ -2094,7 +2094,7 @@ def test_create_break_executor(self): yaml_def = {"name": "test_workflow", "actions": []} graph_builder = DeclarativeWorkflowBuilder(yaml_def) - wb = WorkflowBuilder() + wb = WorkflowBuilder(start_executor="dummy") # Create a mock loop_next executor loop_next = ForeachNextExecutor( @@ -2124,7 +2124,7 @@ def test_create_break_executor_no_loop_context(self): yaml_def = {"name": "test_workflow", "actions": []} graph_builder = DeclarativeWorkflowBuilder(yaml_def) - wb = WorkflowBuilder() + wb = WorkflowBuilder(start_executor="dummy") action_def = { "kind": "BreakLoop", @@ -2149,7 +2149,7 @@ def test_create_continue_executor(self): yaml_def = {"name": "test_workflow", "actions": []} graph_builder = DeclarativeWorkflowBuilder(yaml_def) - wb = WorkflowBuilder() + wb = WorkflowBuilder(start_executor="dummy") # Create a mock loop_next executor 
loop_next = ForeachNextExecutor( @@ -2179,7 +2179,7 @@ def test_create_continue_executor_no_loop_context(self): yaml_def = {"name": "test_workflow", "actions": []} graph_builder = DeclarativeWorkflowBuilder(yaml_def) - wb = WorkflowBuilder() + wb = WorkflowBuilder(start_executor="dummy") action_def = { "kind": "ContinueLoop", @@ -2203,7 +2203,7 @@ def test_wire_to_target_with_if_structure(self): yaml_def = {"name": "test_workflow", "actions": []} graph_builder = DeclarativeWorkflowBuilder(yaml_def) - wb = WorkflowBuilder() + wb = WorkflowBuilder(start_executor="dummy") # Create a mock source executor source = SendActivityExecutor({"kind": "SendActivity", "activity": {"text": "test"}}, id="source") @@ -2236,7 +2236,7 @@ def test_wire_to_target_normal_executor(self): yaml_def = {"name": "test_workflow", "actions": []} graph_builder = DeclarativeWorkflowBuilder(yaml_def) - wb = WorkflowBuilder() + wb = WorkflowBuilder(start_executor="dummy") source = SendActivityExecutor({"kind": "SendActivity", "activity": {"text": "source"}}, id="source") target = SendActivityExecutor({"kind": "SendActivity", "activity": {"text": "target"}}, id="target") diff --git a/python/packages/devui/README.md b/python/packages/devui/README.md index fb14469905..f984c56799 100644 --- a/python/packages/devui/README.md +++ b/python/packages/devui/README.md @@ -96,7 +96,7 @@ agents/ │ ├── agent.py │ └── .env # Optional: API keys, config vars ├── my_workflow/ -│ ├── __init__.py # Must export: workflow = WorkflowBuilder()... +│ ├── __init__.py # Must export: workflow = WorkflowBuilder(start_executor=...)... │ ├── workflow.py │ └── .env # Optional: environment variables └── .env # Optional: shared environment variables diff --git a/python/packages/devui/agent_framework_devui/_discovery.py b/python/packages/devui/agent_framework_devui/_discovery.py index af185f8c3c..6bae42efac 100644 --- a/python/packages/devui/agent_framework_devui/_discovery.py +++ b/python/packages/devui/agent_framework_devui/_discovery.py @@ -540,7 +540,7 @@ def _has_entity_exports(self, file_path: Path) -> bool: This safely checks for module-level assignments like: - agent = ChatAgent(...) - - workflow = WorkflowBuilder()... + - workflow = WorkflowBuilder(start_executor=...)... Args: file_path: Python file to check diff --git a/python/packages/devui/agent_framework_devui/_executor.py b/python/packages/devui/agent_framework_devui/_executor.py index ee5537f2bd..0a487cbad3 100644 --- a/python/packages/devui/agent_framework_devui/_executor.py +++ b/python/packages/devui/agent_framework_devui/_executor.py @@ -441,7 +441,7 @@ async def _execute_workflow( if not checkpoint_id: error_msg = ( "Cannot process HIL responses without a checkpoint. " - "Workflows using HIL must be configured with .with_checkpointing() " + "Workflows using HIL must be configured with checkpoint_storage in the constructor, " "and a checkpoint must exist before sending responses." ) logger.error(error_msg) diff --git a/python/packages/devui/tests/devui/conftest.py b/python/packages/devui/tests/devui/conftest.py index a6240108c6..b229b0e9e6 100644 --- a/python/packages/devui/tests/devui/conftest.py +++ b/python/packages/devui/tests/devui/conftest.py @@ -488,7 +488,7 @@ async def sequential_workflow() -> tuple[AgentFrameworkExecutor, str, MockBaseCh system_message="You are a reviewer.
Provide constructive feedback.", ) - workflow = SequentialBuilder().participants([writer, reviewer]).build() + workflow = SequentialBuilder(participants=[writer, reviewer]).build() discovery = EntityDiscovery(None) mapper = MessageMapper() @@ -540,7 +540,7 @@ async def concurrent_workflow() -> tuple[AgentFrameworkExecutor, str, MockBaseCh system_message="You are a summarizer. Provide concise summaries.", ) - workflow = ConcurrentBuilder().participants([researcher, analyst, summarizer]).build() + workflow = ConcurrentBuilder(participants=[researcher, analyst, summarizer]).build() discovery = EntityDiscovery(None) mapper = MessageMapper() diff --git a/python/packages/devui/tests/devui/test_checkpoints.py b/python/packages/devui/tests/devui/test_checkpoints.py index dddb51cdb2..ffbbf93022 100644 --- a/python/packages/devui/tests/devui/test_checkpoints.py +++ b/python/packages/devui/tests/devui/test_checkpoints.py @@ -76,12 +76,12 @@ def test_workflow(): executor = WorkflowTestExecutor(id="test_executor") checkpoint_storage = InMemoryCheckpointStorage() - return ( - WorkflowBuilder(name="Test Workflow", description="Test checkpoint behavior") - .set_start_executor(executor) - .with_checkpointing(checkpoint_storage) - .build() - ) + return WorkflowBuilder( + name="Test Workflow", + description="Test checkpoint behavior", + start_executor=executor, + checkpoint_storage=checkpoint_storage, + ).build() class TestCheckpointConversationManager: @@ -335,7 +335,7 @@ async def test_manual_checkpoint_save_via_injected_storage(self, checkpoint_mana # Get checkpoint storage for this session checkpoint_storage = checkpoint_manager.get_checkpoint_storage(conversation_id) - # Set build-time storage (equivalent to .with_checkpointing() at build time) + # Set build-time storage (equivalent to checkpoint_storage= at build time) # Note: In production, DevUI uses runtime injection via run(stream=True) parameter if hasattr(test_workflow, "_runner") and hasattr(test_workflow._runner, "context"): test_workflow._runner.context._checkpoint_storage = checkpoint_storage @@ -399,7 +399,7 @@ async def test_workflow_auto_saves_checkpoints_to_injected_storage(self, checkpo """Test that workflows automatically save checkpoints to our conversation-backed storage. This is the critical end-to-end test that verifies the entire checkpoint flow: - 1. Storage is set as build-time storage (simulates .with_checkpointing()) + 1. Storage is set as build-time storage (simulates checkpoint_storage=...) 2. Workflow runs and pauses at HIL point (IDLE_WITH_PENDING_REQUESTS status) 3. Framework automatically saves checkpoint to our storage 4. 
Checkpoint is accessible via manager for UI to list/resume diff --git a/python/packages/devui/tests/devui/test_discovery.py b/python/packages/devui/tests/devui/test_discovery.py index ac88f3bf3d..c5e92b4645 100644 --- a/python/packages/devui/tests/devui/test_discovery.py +++ b/python/packages/devui/tests/devui/test_discovery.py @@ -135,10 +135,8 @@ async def test_lazy_loading(): def test_func(input: str) -> str: return f"Processed: {input}" -builder = WorkflowBuilder() executor = FunctionExecutor(id="test_executor", func=test_func) -builder.set_start_executor(executor) -workflow = builder.build() +workflow = WorkflowBuilder(start_executor=executor).build() """) discovery = EntityDiscovery(str(temp_path)) @@ -182,10 +180,8 @@ async def test_type_detection(): def test_func(input: str) -> str: return f"Processed: {input}" -builder = WorkflowBuilder() executor = FunctionExecutor(id="test_executor", func=test_func) -builder.set_start_executor(executor) -workflow = builder.build() +workflow = WorkflowBuilder(start_executor=executor).build() """) # Create agent with agent.py @@ -243,10 +239,8 @@ async def test_hot_reload(): def test_func(input: str) -> str: return "v1" -builder = WorkflowBuilder() executor = FunctionExecutor(id="test_executor", func=test_func) -builder.set_start_executor(executor) -workflow = builder.build() +workflow = WorkflowBuilder(start_executor=executor).build() """) discovery = EntityDiscovery(str(temp_path)) @@ -266,12 +260,9 @@ def test_func(input: str) -> str: def test_func2(input: str) -> str: return "v2_extra" -builder = WorkflowBuilder() executor1 = FunctionExecutor(id="test_executor", func=test_func) executor2 = FunctionExecutor(id="test_executor2", func=test_func2) -builder.set_start_executor(executor1) -builder.add_edge(executor1, executor2) -workflow = builder.build() +workflow = WorkflowBuilder(start_executor=executor1).add_edge(executor1, executor2).build() """) # Without invalidation, gets cached version @@ -298,10 +289,8 @@ async def test_in_memory_entities_bypass_lazy_loading(): def test_func(input: str) -> str: return f"Processed: {input}" - builder = WorkflowBuilder() executor = FunctionExecutor(id="test_executor", func=test_func) - builder.set_start_executor(executor) - workflow = builder.build() + workflow = WorkflowBuilder(start_executor=executor).build() discovery = EntityDiscovery() diff --git a/python/packages/devui/tests/devui/test_execution.py b/python/packages/devui/tests/devui/test_execution.py index 2a92f48486..3dd417cbf6 100644 --- a/python/packages/devui/tests/devui/test_execution.py +++ b/python/packages/devui/tests/devui/test_execution.py @@ -175,10 +175,12 @@ async def test_workflow_streaming_execution(): def process_input(input_data: str) -> str: return f"Processed: {input_data}" - builder = WorkflowBuilder(name="Test Workflow", description="Test workflow for execution") start_executor = FunctionExecutor(id="process", func=process_input) - builder.set_start_executor(start_executor) - workflow = builder.build() + workflow = WorkflowBuilder( + name="Test Workflow", + description="Test workflow for execution", + start_executor=start_executor, + ).build() # Create executor and register workflow discovery = EntityDiscovery(None) @@ -213,10 +215,12 @@ async def test_workflow_sync_execution(): def echo(text: str) -> str: return f"Echo: {text}" - builder = WorkflowBuilder(name="Echo Workflow", description="Simple echo workflow") start_executor = FunctionExecutor(id="echo", func=echo) - builder.set_start_executor(start_executor) - workflow = 
builder.build() + workflow = WorkflowBuilder( + name="Echo Workflow", + description="Simple echo workflow", + start_executor=start_executor, + ).build() # Create executor and register workflow discovery = EntityDiscovery(None) @@ -308,10 +312,12 @@ async def test_full_pipeline_workflow_events_are_json_serializable(): system_message="You are a test assistant.", ) - builder = WorkflowBuilder(name="Serialization Test Workflow", description="Test workflow") agent_executor = AgentExecutor(id="agent_node", agent=agent) - builder.set_start_executor(agent_executor) - workflow = builder.build() + workflow = WorkflowBuilder( + name="Serialization Test Workflow", + description="Test workflow", + start_executor=agent_executor, + ).build() # Create executor and register discovery = EntityDiscovery(None) @@ -420,11 +426,11 @@ class StringInputExecutor(Executor): async def process(self, text: str, ctx: WorkflowContext[Any, Any]) -> None: await ctx.yield_output(f"Got: {text}") - workflow = ( - WorkflowBuilder(name="String Workflow", description="Accepts string") - .set_start_executor(StringInputExecutor(id="str_exec")) - .build() - ) + workflow = WorkflowBuilder( + name="String Workflow", + description="Accepts string", + start_executor=StringInputExecutor(id="str_exec"), + ).build() executor = AgentFrameworkExecutor(EntityDiscovery(None), MessageMapper()) @@ -445,11 +451,11 @@ class StringInputExecutor(Executor): async def process(self, text: str, ctx: WorkflowContext[Any, Any]) -> None: await ctx.yield_output(f"Got: {text}") - workflow = ( - WorkflowBuilder(name="String Workflow", description="Accepts string") - .set_start_executor(StringInputExecutor(id="str_exec")) - .build() - ) + workflow = WorkflowBuilder( + name="String Workflow", + description="Accepts string", + start_executor=StringInputExecutor(id="str_exec"), + ).build() executor = AgentFrameworkExecutor(EntityDiscovery(None), MessageMapper()) @@ -490,11 +496,11 @@ async def process(self, data: WorkflowInput, ctx: WorkflowContext[Any, Any]) -> await ctx.yield_output(f"Got: {data.input}") # Build workflow with Pydantic input type - workflow = ( - WorkflowBuilder(name="Pydantic Workflow", description="Accepts Pydantic input") - .set_start_executor(PydanticInputExecutor(id="pydantic_exec")) - .build() - ) + workflow = WorkflowBuilder( + name="Pydantic Workflow", + description="Accepts Pydantic input", + start_executor=PydanticInputExecutor(id="pydantic_exec"), + ).build() executor = AgentFrameworkExecutor(EntityDiscovery(None), MessageMapper()) @@ -689,11 +695,11 @@ async def process(self, input_text: str, ctx: WorkflowContext[Any, Any]) -> None await ctx.yield_output({"final": "result", "data": [1, 2, 3]}) # Build workflow - workflow = ( - WorkflowBuilder(name="Output Workflow", description="Tests yield_output") - .set_start_executor(OutputtingExecutor(id="outputter")) - .build() - ) + workflow = WorkflowBuilder( + name="Output Workflow", + description="Tests yield_output", + start_executor=OutputtingExecutor(id="outputter"), + ).build() # Create DevUI executor and register workflow discovery = EntityDiscovery(None) diff --git a/python/packages/lab/lightning/tests/test_lightning.py b/python/packages/lab/lightning/tests/test_lightning.py index 6770f9d974..c528bd8d78 100644 --- a/python/packages/lab/lightning/tests/test_lightning.py +++ b/python/packages/lab/lightning/tests/test_lightning.py @@ -97,10 +97,7 @@ def workflow_two_agents(): # Build workflow: analyzer -> advisor workflow = ( - WorkflowBuilder() - .set_start_executor(analyzer_executor) 
- .add_edge(analyzer_executor, advisor_executor) - .build() + WorkflowBuilder(start_executor=analyzer_executor).add_edge(analyzer_executor, advisor_executor).build() ) yield workflow diff --git a/python/packages/lab/tau2/README.md b/python/packages/lab/tau2/README.md index d5d205de36..a0b587ea3c 100644 --- a/python/packages/lab/tau2/README.md +++ b/python/packages/lab/tau2/README.md @@ -165,15 +165,12 @@ from agent_framework.lab.tau2 import TaskRunner class WorkflowTaskRunner(TaskRunner): def build_conversation_workflow(self, assistant_agent, user_simulator_agent): - # Build a custom workflow - builder = WorkflowBuilder() - # Create agent executors assistant_executor = AgentExecutor(assistant_agent, id="assistant_agent") user_executor = AgentExecutor(user_simulator_agent, id="user_simulator") - # Add workflow edges and conditions - builder.set_start_executor(assistant_executor) + # Build a custom workflow with a start executor + builder = WorkflowBuilder(start_executor=assistant_executor) builder.add_edge(assistant_executor, user_executor) builder.add_edge(user_executor, assistant_executor, condition=self.should_not_stop) diff --git a/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py b/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py index 4822835316..c2e5ff6816 100644 --- a/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py +++ b/python/packages/lab/tau2/agent_framework_lab_tau2/runner.py @@ -288,8 +288,8 @@ def build_conversation_workflow(self, assistant_agent: ChatAgent, user_simulator # Creates a cyclic workflow: Orchestrator -> Assistant -> Orchestrator -> User -> Orchestrator... # The orchestrator acts as a message router that flips roles and routes to appropriate agent return ( - WorkflowBuilder(max_iterations=10000) # Unlimited - we control termination via should_not_stop - .set_start_executor(orchestrator) # Orchestrator manages the conversation flow + # Orchestrator manages the conversation flow; max_iterations=10000 is effectively unlimited - termination is controlled via should_not_stop + WorkflowBuilder(max_iterations=10000, start_executor=orchestrator) .add_edge(orchestrator, self._assistant_executor) # Route messages to assistant .add_edge( self._assistant_executor, orchestrator, condition=self.should_not_stop diff --git a/python/packages/orchestrations/README.md b/python/packages/orchestrations/README.md index 68ddebe267..7ffc75e00d 100644 --- a/python/packages/orchestrations/README.md +++ b/python/packages/orchestrations/README.md @@ -52,12 +52,10 @@ Orchestrator-directed multi-agent conversations: ```python from agent_framework_orchestrations import GroupChatBuilder -workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=my_selector) - .participants([agent1, agent2]) - .build() -) +workflow = GroupChatBuilder( + participants=[agent1, agent2], + selection_func=my_selector, +).build() ``` ### MagenticBuilder Sophisticated multi-agent orchestration using the Magentic One pattern: ```python from agent_framework_orchestrations import MagenticBuilder -workflow = ( - MagenticBuilder() - .participants([researcher, writer, reviewer]) - .with_manager(agent=manager_agent) - .build() -) +workflow = MagenticBuilder( + participants=[researcher, writer, reviewer], + manager_agent=manager_agent, +).build() ``` ## Usage with agent_framework diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py b/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py index 85ef566c11..9163168859 100644 ---
a/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_concurrent.py @@ -29,8 +29,8 @@ - a default aggregator that combines all agent conversations and completes the workflow Notes: -- Participants can be provided as SupportsAgentRun or Executor instances via `.participants()`, - or as factories returning SupportsAgentRun or Executor via `.register_participants()`. +- Participants can be provided as SupportsAgentRun or Executor instances via `participants=[...]`, + or as factories returning SupportsAgentRun or Executor via `participant_factories=[...]`. - A custom aggregator can be provided as: - an Executor instance (it should handle list[AgentExecutorResponse], yield output), or @@ -186,8 +186,8 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon class ConcurrentBuilder: r"""High-level builder for concurrent agent workflows. - - `participants([...])` accepts a list of SupportsAgentRun (recommended) or Executor. - - `register_participants([...])` accepts a list of factories for SupportsAgentRun (recommended) + - `participants=[...]` accepts a list of SupportsAgentRun (recommended) or Executor. + - `participant_factories=[...]` accepts a list of factories for SupportsAgentRun (recommended) or Executor factories - `build()` wires: dispatcher -> fan-out -> participants -> fan-in -> aggregator. - `with_aggregator(...)` overrides the default aggregator with an Executor or callback. @@ -200,10 +200,10 @@ class ConcurrentBuilder: from agent_framework_orchestrations import ConcurrentBuilder # Minimal: use default aggregator (returns list[ChatMessage]) - workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).build() + workflow = ConcurrentBuilder(participants=[agent1, agent2, agent3]).build() # With agent factories - workflow = ConcurrentBuilder().register_participants([create_agent1, create_agent2, create_agent3]).build() + workflow = ConcurrentBuilder(participant_factories=[create_agent1, create_agent2, create_agent3]).build() # Custom aggregator via callback (sync or async). 
The callback receives @@ -212,7 +212,7 @@ def summarize(results: list[AgentExecutorResponse]) -> str: return " | ".join(r.agent_response.messages[-1].text for r in results) - workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).with_aggregator(summarize).build() + workflow = ConcurrentBuilder(participants=[agent1, agent2, agent3]).with_aggregator(summarize).build() # Custom aggregator via a factory @@ -223,112 +223,76 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon workflow = ( - ConcurrentBuilder() - .register_participants([create_agent1, create_agent2, create_agent3]) + ConcurrentBuilder(participant_factories=[create_agent1, create_agent2, create_agent3]) .register_aggregator(lambda: MyAggregator(id="my_aggregator")) .build() ) # Enable checkpoint persistence so runs can resume - workflow = ConcurrentBuilder().participants([agent1, agent2, agent3]).with_checkpointing(storage).build() + workflow = ConcurrentBuilder(participants=[agent1, agent2, agent3], checkpoint_storage=storage).build() # Enable request info before aggregation - workflow = ConcurrentBuilder().participants([agent1, agent2]).with_request_info().build() + workflow = ConcurrentBuilder(participants=[agent1, agent2]).with_request_info().build() """ - def __init__(self) -> None: + def __init__( + self, + *, + participants: Sequence[SupportsAgentRun | Executor] | None = None, + participant_factories: Sequence[Callable[[], SupportsAgentRun | Executor]] | None = None, + checkpoint_storage: CheckpointStorage | None = None, + intermediate_outputs: bool = False, + ) -> None: + """Initialize the ConcurrentBuilder. + + Args: + participants: Optional sequence of agent or executor instances to run in parallel. + participant_factories: Optional sequence of callables returning agent or executor instances. + checkpoint_storage: Optional checkpoint storage for enabling workflow state persistence. + intermediate_outputs: If True, enables intermediate outputs from agent participants + before aggregation. + """ self._participants: list[SupportsAgentRun | Executor] = [] self._participant_factories: list[Callable[[], SupportsAgentRun | Executor]] = [] self._aggregator: Executor | None = None self._aggregator_factory: Callable[[], Executor] | None = None - self._checkpoint_storage: CheckpointStorage | None = None + self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage self._request_info_enabled: bool = False self._request_info_filter: set[str] | None = None - self._intermediate_outputs: bool = False - - def register_participants( - self, - participant_factories: Sequence[Callable[[], SupportsAgentRun | Executor]], - ) -> "ConcurrentBuilder": - r"""Define the parallel participants for this concurrent workflow. - - Accepts factories (callables) that return SupportsAgentRun instances (e.g., created - by a chat client) or Executor instances. Each participant created by a factory - is wired as a parallel branch using fan-out edges from an internal dispatcher. - - Args: - participant_factories: Sequence of callables returning SupportsAgentRun or Executor instances - - Raises: - ValueError: if `participant_factories` is empty or `.participants()` - or `.register_participants()` were already called + self._intermediate_outputs: bool = intermediate_outputs - Example: - - .. code-block:: python - - def create_researcher() -> ChatAgent: - return ... - - - def create_marketer() -> ChatAgent: - return ... - - - def create_legal() -> ChatAgent: - return ... 
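+        # At least one participant source is required; the internal setters invoked
+        # below additionally reject supplying both sources or an empty sequence.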
+ if participants is None and participant_factories is None: + raise ValueError("Either participants or participant_factories must be provided.") + if participant_factories is not None: + self._set_participant_factories(participant_factories) + if participants is not None: + self._set_participants(participants) - class MyCustomExecutor(Executor): ... - - - wf = ConcurrentBuilder().register_participants([create_researcher, create_marketer, create_legal]).build() - - # Mixing agent(s) and executor(s) is supported - wf2 = ConcurrentBuilder().register_participants([create_researcher, MyCustomExecutor]).build() - """ + def _set_participant_factories( + self, + participant_factories: Sequence[Callable[[], SupportsAgentRun | Executor]], + ) -> None: + """Set participant factories (internal).""" if self._participants: - raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + raise ValueError("Cannot provide both participants and participant_factories.") if self._participant_factories: - raise ValueError("register_participants() has already been called on this builder instance.") + raise ValueError("participant_factories already set.") if not participant_factories: raise ValueError("participant_factories cannot be empty") self._participant_factories = list(participant_factories) - return self - - def participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> "ConcurrentBuilder": - r"""Define the parallel participants for this concurrent workflow. - - Accepts SupportsAgentRun instances (e.g., created by a chat client) or Executor - instances. Each participant is wired as a parallel branch using fan-out edges - from an internal dispatcher. - - Args: - participants: Sequence of SupportsAgentRun or Executor instances - - Raises: - ValueError: if `participants` is empty, contains duplicates, or `.register_participants()` - or `.participants()` were already called - TypeError: if any entry is not SupportsAgentRun or Executor - Example: - - .. code-block:: python - - wf = ConcurrentBuilder().participants([researcher_agent, marketer_agent, legal_agent]).build() - - # Mixing agent(s) and executor(s) is supported - wf2 = ConcurrentBuilder().participants([researcher_agent, my_custom_executor]).build() - """ + def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> None: + """Set participants (internal).""" if self._participant_factories: - raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + raise ValueError("Cannot provide both participants and participant_factories.") if self._participants: - raise ValueError("participants() has already been called on this builder instance.") + raise ValueError("participants already set.") if not participants: raise ValueError("participants cannot be empty") @@ -350,7 +314,6 @@ def participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> " raise TypeError(f"participants must be SupportsAgentRun or Executor instances; got {type(p).__name__}") self._participants = list(participants) - return self def register_aggregator(self, aggregator_factory: Callable[[], Executor]) -> "ConcurrentBuilder": r"""Define a custom aggregator for this concurrent workflow. 
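For reference, a minimal sketch of the constructor-based surface these hunks document. It assumes `agent1` and `agent2` are existing ChatAgent instances and `create_agent1`/`create_agent2` are zero-argument factories returning fresh agents; the parameter names themselves come from the signature above.

```python
from agent_framework_orchestrations import ConcurrentBuilder

# Instance-based construction: each participant is wired as a parallel branch.
workflow = ConcurrentBuilder(participants=[agent1, agent2]).build()

# Factory-based construction: factories are invoked inside build(), when
# _resolve_participants() turns them into Executor instances.
workflow = ConcurrentBuilder(participant_factories=[create_agent1, create_agent2]).build()

# Passing neither source raises ValueError; passing both is also rejected.
```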
@@ -412,7 +375,7 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon await ctx.yield_output(" | ".join(r.agent_response.messages[-1].text for r in results)) - wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(CustomAggregator()).build() + wf = ConcurrentBuilder(participants=[a1, a2, a3]).with_aggregator(CustomAggregator()).build() # Callback-based aggregator (string result) @@ -420,7 +383,7 @@ async def summarize(results: list[AgentExecutorResponse]) -> str: return " | ".join(r.agent_response.messages[-1].text for r in results) - wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(summarize).build() + wf = ConcurrentBuilder(participants=[a1, a2, a3]).with_aggregator(summarize).build() # Callback-based aggregator (yield result) @@ -428,7 +391,7 @@ async def summarize(results: list[AgentExecutorResponse], ctx: WorkflowContext[N await ctx.yield_output(" | ".join(r.agent_response.messages[-1].text for r in results)) - wf = ConcurrentBuilder().participants([a1, a2, a3]).with_aggregator(summarize).build() + wf = ConcurrentBuilder(participants=[a1, a2, a3]).with_aggregator(summarize).build() """ if self._aggregator_factory is not None: raise ValueError( @@ -447,15 +410,6 @@ async def summarize(results: list[AgentExecutorResponse], ctx: WorkflowContext[N return self - def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "ConcurrentBuilder": - """Enable checkpoint persistence using the provided storage backend. - - Args: - checkpoint_storage: CheckpointStorage instance for persisting workflow state - """ - self._checkpoint_storage = checkpoint_storage - return self - def with_request_info( self, *, @@ -489,23 +443,10 @@ def with_request_info( return self - def with_intermediate_outputs(self) -> "ConcurrentBuilder": - """Enable intermediate outputs from agent participants before aggregation. - - When enabled, the workflow returns each agent participant's response or yields - streaming updates as they become available. The output of the aggregator will - always be available as the final output of the workflow. - - Returns: - Self for fluent chaining - """ - self._intermediate_outputs = True - return self - def _resolve_participants(self) -> list[Executor]: """Resolve participant instances into Executor objects.""" if not self._participants and not self._participant_factories: - raise ValueError("No participants provided. Call .participants() or .register_participants() first.") + raise ValueError("No participants provided. Pass participants or participant_factories to the constructor.") # We don't need to check if both are set since that is handled in the respective methods participants: list[Executor | SupportsAgentRun] = [] @@ -557,7 +498,7 @@ def build(self) -> Workflow: .. 
code-block:: python - workflow = ConcurrentBuilder().participants([agent1, agent2]).build() + workflow = ConcurrentBuilder(participants=[agent1, agent2]).build() """ # Internal nodes dispatcher = _DispatchToAllParticipants(id="dispatcher") @@ -574,18 +515,14 @@ def build(self) -> Workflow: # Resolve participants and participant factories to executors participants: list[Executor] = self._resolve_participants() - builder = WorkflowBuilder() - builder.set_start_executor(dispatcher) + builder = WorkflowBuilder( + start_executor=dispatcher, + checkpoint_storage=self._checkpoint_storage, + output_executors=[aggregator] if not self._intermediate_outputs else None, + ) # Fan-out for parallel execution builder.add_fan_out_edges(dispatcher, participants) # Direct fan-in to aggregator builder.add_fan_in_edges(participants, aggregator) - if not self._intermediate_outputs: - # Constrain output to aggregator only - builder = builder.with_output_from([aggregator]) - - if self._checkpoint_storage is not None: - builder = builder.with_checkpointing(self._checkpoint_storage) - return builder.build() diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py index 6ee764de20..3ed609c483 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_group_chat.py @@ -24,7 +24,7 @@ from collections import OrderedDict from collections.abc import Awaitable, Callable, Sequence from dataclasses import dataclass -from typing import Any, ClassVar, cast, overload +from typing import Any, ClassVar, cast from agent_framework import ChatAgent, SupportsAgentRun from agent_framework._threads import AgentThread @@ -521,8 +521,39 @@ class GroupChatBuilder: DEFAULT_ORCHESTRATOR_ID: ClassVar[str] = "group_chat_orchestrator" - def __init__(self) -> None: - """Initialize the GroupChatBuilder.""" + def __init__( + self, + *, + participants: Sequence[SupportsAgentRun | Executor] | None = None, + participant_factories: Sequence[Callable[[], SupportsAgentRun | Executor]] | None = None, + # Orchestrator config (exactly one required) + orchestrator_agent: ChatAgent | Callable[[], ChatAgent] | None = None, + orchestrator: BaseGroupChatOrchestrator | Callable[[], BaseGroupChatOrchestrator] | None = None, + selection_func: GroupChatSelectionFunction | None = None, + orchestrator_name: str | None = None, + # Existing params + termination_condition: TerminationCondition | None = None, + max_rounds: int | None = None, + checkpoint_storage: CheckpointStorage | None = None, + intermediate_outputs: bool = False, + ) -> None: + """Initialize the GroupChatBuilder. + + Args: + participants: Optional sequence of agent or executor instances for the group chat. + participant_factories: Optional sequence of callables returning agent or executor instances. + orchestrator_agent: An instance of ChatAgent or a callable that produces one to manage the group chat. + orchestrator: An instance of BaseGroupChatOrchestrator or a callable that produces one to manage the + group chat. + selection_func: Callable that receives the current GroupChatState and returns the name of the next + participant to speak. + orchestrator_name: Optional display name for the orchestrator when using a selection function. + termination_condition: Optional callable that receives the conversation history and returns + True to terminate the conversation, False to continue. 
+ max_rounds: Optional maximum number of orchestrator rounds to prevent infinite conversations. + checkpoint_storage: Optional checkpoint storage for enabling workflow state persistence. + intermediate_outputs: If True, enables intermediate outputs from agent participants. + """ self._participants: dict[str, SupportsAgentRun | Executor] = {} self._participant_factories: list[Callable[[], SupportsAgentRun | Executor]] = [] @@ -531,96 +562,49 @@ def __init__(self) -> None: self._orchestrator_factory: Callable[[], ChatAgent | BaseGroupChatOrchestrator] | None = None self._selection_func: GroupChatSelectionFunction | None = None self._agent_orchestrator: ChatAgent | None = None - self._termination_condition: TerminationCondition | None = None - self._max_rounds: int | None = None + self._termination_condition: TerminationCondition | None = termination_condition + self._max_rounds: int | None = max_rounds self._orchestrator_name: str | None = None # Checkpoint related members - self._checkpoint_storage: CheckpointStorage | None = None + self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage # Request info related members self._request_info_enabled: bool = False self._request_info_filter: set[str] = set() # Intermediate outputs - self._intermediate_outputs = False - - @overload - def with_orchestrator(self, *, agent: ChatAgent | Callable[[], ChatAgent]) -> "GroupChatBuilder": - """Set the orchestrator for this group chat workflow using a ChatAgent. - - Args: - agent: An instance of ChatAgent or a callable that produces one to manage the group chat. - - Returns: - Self for fluent chaining. - """ - ... - - @overload - def with_orchestrator( - self, *, orchestrator: BaseGroupChatOrchestrator | Callable[[], BaseGroupChatOrchestrator] - ) -> "GroupChatBuilder": - """Set the orchestrator for this group chat workflow using a custom orchestrator. - - Args: - orchestrator: An instance of BaseGroupChatOrchestrator or a callable that produces one to - manage the group chat. - - Returns: - Self for fluent chaining. - - Note: - When using a custom orchestrator that implements `BaseGroupChatOrchestrator`, setting - `termination_condition` and `max_rounds` on the builder will have no effect since the - orchestrator is already fully defined. - """ - ... - - @overload - def with_orchestrator( - self, - *, - selection_func: GroupChatSelectionFunction, - orchestrator_name: str | None = None, - ) -> "GroupChatBuilder": - """Set the orchestrator for this group chat workflow using a selection function. - - Args: - selection_func: Callable that receives the current GroupChatState and returns - the name of the next participant to speak, or None to finish. - orchestrator_name: Optional display name for the orchestrator in the workflow. - If not provided, defaults to `GroupChatBuilder.DEFAULT_ORCHESTRATOR_ID`. - - Returns: - Self for fluent chaining. - """ - ... 
+ self._intermediate_outputs = intermediate_outputs + + if participants is None and participant_factories is None: + raise ValueError("Either participants or participant_factories must be provided.") + + if participant_factories is not None: + self._set_participant_factories(participant_factories) + if participants is not None: + self._set_participants(participants) + + # Set orchestrator if provided + if any(x is not None for x in [orchestrator_agent, orchestrator, selection_func]): + self._set_orchestrator( + orchestrator_agent=orchestrator_agent, + orchestrator=orchestrator, + selection_func=selection_func, + orchestrator_name=orchestrator_name, + ) - def with_orchestrator( + def _set_orchestrator( self, *, - agent: ChatAgent | Callable[[], ChatAgent] | None = None, + orchestrator_agent: ChatAgent | Callable[[], ChatAgent] | None = None, orchestrator: BaseGroupChatOrchestrator | Callable[[], BaseGroupChatOrchestrator] | None = None, selection_func: GroupChatSelectionFunction | None = None, orchestrator_name: str | None = None, - ) -> "GroupChatBuilder": - """Set the orchestrator for this group chat workflow. - - An group chat orchestrator is responsible for managing the flow of conversation, making - sure all participants are synced and picking the next speaker according to the defined logic - until the termination conditions are met. - - There are a few ways to configure the orchestrator: - 1. Provide a ChatAgent instance or a factory function that produces one to use an agent-based orchestrator - 2. Provide a BaseGroupChatOrchestrator instance or a factory function that produces one to use a custom - orchestrator - 3. Provide a selection function to use that picks the next speaker based on the function logic - - You can only use one of the above methods to configure the orchestrator. + ) -> None: + """Set the orchestrator for this group chat workflow (internal). Args: - agent: An instance of ChatAgent or a callable that produces one to manage the group chat. + orchestrator_agent: An instance of ChatAgent or a callable that produces one to manage the group chat. orchestrator: An instance of BaseGroupChatOrchestrator or a callable that produces one to manage the group chat. selection_func: Callable that receives the current GroupChatState and returns @@ -630,121 +614,58 @@ def with_orchestrator( `GroupChatBuilder.DEFAULT_ORCHESTRATOR_ID`. This parameter is ignored if using an agent or custom orchestrator. - Returns: - Self for fluent chaining. - Raises: ValueError: If an orchestrator has already been set or if none or multiple of the parameters are provided. - - Note: - When using a custom orchestrator that implements `BaseGroupChatOrchestrator`, either - via the `orchestrator` or `orchestrator_factory` parameters, setting `termination_condition` - and `max_rounds` on the builder will have no effect since the orchestrator is already - fully defined. - - Example: - .. code-block:: python - - from agent_framework_orchestrations import GroupChatBuilder - - - orchestrator = CustomGroupChatOrchestrator(...) - workflow = GroupChatBuilder().with_orchestrator(orchestrator).participants([agent1, agent2]).build() """ if self._agent_orchestrator is not None: - raise ValueError( - "An agent orchestrator has already been configured. Call with_orchestrator(...) once only." - ) + raise ValueError("An agent orchestrator has already been configured. Set orchestrator config once only.") if self._orchestrator is not None: - raise ValueError("An orchestrator has already been configured. 
Call with_orchestrator(...) once only.") + raise ValueError("An orchestrator has already been configured. Set orchestrator config once only.") if self._orchestrator_factory is not None: - raise ValueError("A factory has already been configured. Call with_orchestrator(...) once only.") + raise ValueError("A factory has already been configured. Set orchestrator config once only.") if self._selection_func is not None: - raise ValueError("A selection function has already been configured. Call with_orchestrator(...) once only.") + raise ValueError("A selection function has already been configured. Set orchestrator config once only.") - if sum(x is not None for x in [agent, orchestrator, selection_func]) != 1: - raise ValueError("Exactly one of agent, orchestrator, or selection_func must be provided.") + if sum(x is not None for x in [orchestrator_agent, orchestrator, selection_func]) != 1: + raise ValueError("Exactly one of orchestrator_agent, orchestrator, or selection_func must be provided.") - if agent is not None and isinstance(agent, ChatAgent): - self._agent_orchestrator = agent + if orchestrator_agent is not None and isinstance(orchestrator_agent, ChatAgent): + self._agent_orchestrator = orchestrator_agent elif orchestrator is not None and isinstance(orchestrator, BaseGroupChatOrchestrator): self._orchestrator = orchestrator elif selection_func is not None: self._selection_func = selection_func self._orchestrator_name = orchestrator_name else: - self._orchestrator_factory = agent or orchestrator + self._orchestrator_factory = orchestrator_agent or orchestrator - return self - - def register_participants( + def _set_participant_factories( self, participant_factories: Sequence[Callable[[], SupportsAgentRun | Executor]], - ) -> "GroupChatBuilder": - """Register participant factories for this group chat workflow. - - Args: - participant_factories: Sequence of callables that produce participant definitions - when invoked. Each callable should return either an SupportsAgentRun instance - (auto-wrapped as AgentExecutor) or an Executor instance. - - Returns: - Self for fluent chaining - - Raises: - ValueError: If participant_factories is empty, or participants - or participant factories are already set - """ + ) -> None: + """Set participant factories (internal).""" if self._participants: - raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + raise ValueError("Cannot provide both participants and participant_factories.") if self._participant_factories: - raise ValueError("register_participants() has already been called on this builder instance.") + raise ValueError("participant_factories already set.") if not participant_factories: raise ValueError("participant_factories cannot be empty") self._participant_factories = list(participant_factories) - return self - - def participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> "GroupChatBuilder": - """Define participants for this group chat workflow. - - Accepts SupportsAgentRun instances (auto-wrapped as AgentExecutor) or Executor instances. - - Args: - participants: Sequence of participant definitions - - Returns: - Self for fluent chaining - - Raises: - ValueError: If participants are empty, names are duplicated, or participants - or participant factories are already set - TypeError: If any participant is not SupportsAgentRun or Executor instance - - Example: - .. 
code-block:: python - - from agent_framework_orchestrations import GroupChatBuilder - - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=my_selection_function) - .participants([agent1, agent2, custom_executor]) - .build() - ) - """ + def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> None: + """Set participants (internal).""" if self._participant_factories: - raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + raise ValueError("Cannot provide both participants and participant_factories.") if self._participants: - raise ValueError("participants have already been set. Call participants() at most once.") + raise ValueError("participants already set.") if not participants: raise ValueError("participants cannot be empty.") @@ -770,8 +691,6 @@ def participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> " self._participants = named - return self - def with_termination_condition(self, termination_condition: TerminationCondition) -> "GroupChatBuilder": """Set a custom termination condition for the group chat workflow. @@ -797,9 +716,10 @@ def stop_after_two_calls(conversation: list[ChatMessage]) -> bool: specialist_agent = ... workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=my_selection_function) - .participants([agent1, specialist_agent]) + GroupChatBuilder( + participants=[agent1, specialist_agent], + selection_func=my_selection_function, + ) .with_termination_condition(stop_after_two_calls) .build() ) @@ -851,9 +771,10 @@ def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "GroupCha storage = MemoryCheckpointStorage() workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=my_selection_function) - .participants([agent1, agent2]) + GroupChatBuilder( + participants=[agent1, agent2], + selection_func=my_selection_function, + ) .with_checkpointing(storage) .build() ) @@ -890,19 +811,6 @@ def with_request_info(self, *, agents: Sequence[str | SupportsAgentRun] | None = return self - def with_intermediate_outputs(self) -> "GroupChatBuilder": - """Enable intermediate outputs from agent participants. - - When enabled, the workflow returns each agent participant's response or yields - streaming updates as they become available. The output of the orchestrator will - always be available as the final output of the workflow. - - Returns: - Self for fluent chaining - """ - self._intermediate_outputs = True - return self - def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: """Determine the orchestrator to use for the workflow. @@ -913,8 +821,11 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: x is None for x in [self._agent_orchestrator, self._selection_func, self._orchestrator, self._orchestrator_factory] ): - raise ValueError("No orchestrator has been configured. Call with_orchestrator() to set one.") - # We don't need to check if multiple are set since that is handled in with_orchestrator() + raise ValueError( + "No orchestrator has been configured. " + "Pass orchestrator_agent, orchestrator, or selection_func to the constructor." 
+ ) + # We don't need to check if multiple are set since that is handled in _set_orchestrator() if self._agent_orchestrator: return AgentBasedGroupChatOrchestrator( @@ -954,12 +865,15 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: ) # This should never be reached due to the checks above - raise RuntimeError("Orchestrator could not be resolved. Please provide one via with_orchestrator()") + raise RuntimeError( + "Orchestrator could not be resolved. " + "Pass orchestrator_agent, orchestrator, or selection_func to the constructor." + ) def _resolve_participants(self) -> list[Executor]: """Resolve participant instances into Executor objects.""" if not self._participants and not self._participant_factories: - raise ValueError("No participants provided. Call .participants() or .register_participants() first.") + raise ValueError("No participants provided. Pass participants or participant_factories to the constructor.") # We don't need to check if both are set since that is handled in the respective methods participants: list[Executor | SupportsAgentRun] = [] @@ -1004,19 +918,16 @@ def build(self) -> Workflow: orchestrator: Executor = self._resolve_orchestrator(participants) # Build workflow graph - workflow_builder = WorkflowBuilder().set_start_executor(orchestrator) + workflow_builder = WorkflowBuilder( + start_executor=orchestrator, + checkpoint_storage=self._checkpoint_storage, + output_executors=[orchestrator] if not self._intermediate_outputs else None, + ) for participant in participants: # Orchestrator and participant bi-directional edges workflow_builder = workflow_builder.add_edge(orchestrator, participant) workflow_builder = workflow_builder.add_edge(participant, orchestrator) - if not self._intermediate_outputs: - # Constrain output to orchestrator only - workflow_builder = workflow_builder.with_output_from([orchestrator]) - - if self._checkpoint_storage is not None: - workflow_builder = workflow_builder.with_checkpointing(self._checkpoint_storage) - return workflow_builder.build() diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py index 3bbfccba8a..c31e468490 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_handoff.py @@ -577,6 +577,8 @@ def __init__( participants: Sequence[SupportsAgentRun] | None = None, participant_factories: Mapping[str, Callable[[], SupportsAgentRun]] | None = None, description: str | None = None, + checkpoint_storage: CheckpointStorage | None = None, + termination_condition: TerminationCondition | None = None, ) -> None: r"""Initialize a HandoffBuilder for creating conversational handoff workflows. @@ -599,6 +601,9 @@ def __init__( created by this builder. description: Optional human-readable description explaining the workflow's purpose. Useful for documentation and observability. + checkpoint_storage: Optional checkpoint storage for enabling workflow state persistence. + termination_condition: Optional callable that receives the full conversation and returns True + (or awaitable True) if the workflow should terminate. 
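+
+        Example (illustrative; assumes ``triage`` and ``specialist`` are ChatAgent
+            instances and ``storage`` is a CheckpointStorage implementation):
+
+            .. code-block:: python
+
+                workflow = (
+                    HandoffBuilder(
+                        participants=[triage, specialist],
+                        termination_condition=lambda conversation: len(conversation) >= 10,
+                        checkpoint_storage=storage,
+                    )
+                    .with_start_agent(triage)
+                    .build()
+                )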
""" self._name = name self._description = description @@ -617,7 +622,7 @@ def __init__( self._handoff_config: dict[str, set[HandoffConfiguration]] = {} # Checkpoint related members - self._checkpoint_storage: CheckpointStorage | None = None + self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage # Autonomous mode related self._autonomous_mode: bool = False @@ -626,7 +631,9 @@ def __init__( self._autonomous_mode_enabled_agents: list[str] = [] # Termination related members - self._termination_condition: Callable[[list[ChatMessage]], bool | Awaitable[bool]] | None = None + self._termination_condition: Callable[[list[ChatMessage]], bool | Awaitable[bool]] | None = ( + termination_condition + ) def register_participants( self, participant_factories: Mapping[str, Callable[[], SupportsAgentRun]] @@ -1060,7 +1067,9 @@ def build(self) -> Workflow: builder = WorkflowBuilder( name=self._name, description=self._description, - ).set_start_executor(start_executor) + start_executor=start_executor, + checkpoint_storage=self._checkpoint_storage, + ) # Add the appropriate edges # In handoff workflows, all executors are connected, making a fully connected graph. @@ -1076,10 +1085,6 @@ def build(self) -> Workflow: elif len(targets) == 1: builder = builder.add_edge(executor, targets[0]) - # Configure checkpointing if enabled - if self._checkpoint_storage: - builder.with_checkpointing(self._checkpoint_storage) - return builder.build() # region Internal Helper Methods diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py index 1f6f95a71b..779dad2d5a 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_magentic.py @@ -10,7 +10,7 @@ from collections.abc import Callable, Sequence from dataclasses import dataclass, field from enum import Enum -from typing import Any, ClassVar, TypeVar, cast, overload +from typing import Any, ClassVar, TypeVar, cast from agent_framework import ( AgentResponse, @@ -41,10 +41,6 @@ from typing import override # type: ignore # pragma: no cover else: from typing_extensions import override # type: ignore # pragma: no cover -if sys.version_info >= (3, 11): - from typing import Self # type: ignore # pragma: no cover -else: - from typing_extensions import Self # type: ignore # pragma: no cover logger = logging.getLogger(__name__) @@ -1366,7 +1362,7 @@ class MagenticBuilder: Human-in-the-loop Support: Magentic provides specialized HITL mechanisms via: - - `.with_plan_review()` - Review and approve/revise plans before execution + - `enable_plan_review=True` - Review and approve/revise plans before execution - `.with_human_input_on_stall()` - Intervene when workflow stalls - Tool approval via `function_approval_request` - Approve individual tool calls @@ -1375,8 +1371,57 @@ class MagenticBuilder: for Magentic's planning-based orchestration. 
""" - def __init__(self) -> None: - """Initialize the Magentic workflow builder.""" + def __init__( + self, + *, + participants: Sequence[SupportsAgentRun | Executor] | None = None, + participant_factories: Sequence[Callable[[], SupportsAgentRun | Executor]] | None = None, + # Manager config (exactly one required) + manager: MagenticManagerBase | None = None, + manager_factory: Callable[[], MagenticManagerBase] | None = None, + manager_agent: SupportsAgentRun | None = None, + manager_agent_factory: Callable[[], SupportsAgentRun] | None = None, + # StandardMagenticManager options (used with manager_agent/manager_agent_factory) + task_ledger: _MagenticTaskLedger | None = None, + task_ledger_facts_prompt: str | None = None, + task_ledger_plan_prompt: str | None = None, + task_ledger_full_prompt: str | None = None, + task_ledger_facts_update_prompt: str | None = None, + task_ledger_plan_update_prompt: str | None = None, + progress_ledger_prompt: str | None = None, + final_answer_prompt: str | None = None, + max_stall_count: int = 3, + max_reset_count: int | None = None, + max_round_count: int | None = None, + # Existing params + enable_plan_review: bool = False, + checkpoint_storage: CheckpointStorage | None = None, + intermediate_outputs: bool = False, + ) -> None: + """Initialize the Magentic workflow builder. + + Args: + participants: Optional sequence of agent or executor instances for the workflow. + participant_factories: Optional sequence of callables returning agent or executor instances. + manager: Pre-configured manager instance (subclass of MagenticManagerBase). + manager_factory: Callable that returns a new MagenticManagerBase instance. + manager_agent: Agent instance for creating a StandardMagenticManager. + manager_agent_factory: Callable that returns a new agent instance for creating a StandardMagenticManager. + task_ledger: Optional custom task ledger (used with manager_agent/manager_agent_factory). + task_ledger_facts_prompt: Custom prompt for extracting facts. + task_ledger_plan_prompt: Custom prompt for generating initial plan. + task_ledger_full_prompt: Custom prompt for complete task ledger. + task_ledger_facts_update_prompt: Custom prompt for updating facts. + task_ledger_plan_update_prompt: Custom prompt for replanning. + progress_ledger_prompt: Custom prompt for assessing progress. + final_answer_prompt: Custom prompt for synthesizing final response. + max_stall_count: Max consecutive rounds without progress before replan (default 3). + max_reset_count: Max number of resets allowed. None means unlimited. + max_round_count: Max total coordination rounds. None means unlimited. + enable_plan_review: If True, requires human approval of the initial plan before proceeding. + checkpoint_storage: Optional checkpoint storage for enabling workflow state persistence. + intermediate_outputs: If True, enables intermediate outputs from agent participants. 
+ """ self._participants: dict[str, SupportsAgentRun | Executor] = {} self._participant_factories: list[Callable[[], SupportsAgentRun | Executor]] = [] @@ -1385,78 +1430,64 @@ def __init__(self) -> None: self._manager_factory: Callable[[], MagenticManagerBase] | None = None self._manager_agent_factory: Callable[[], SupportsAgentRun] | None = None self._standard_manager_options: dict[str, Any] = {} - self._enable_plan_review: bool = False + self._enable_plan_review: bool = enable_plan_review - self._checkpoint_storage: CheckpointStorage | None = None + self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage # Intermediate outputs - self._intermediate_outputs = False + self._intermediate_outputs = intermediate_outputs + + if participants is None and participant_factories is None: + raise ValueError("Either participants or participant_factories must be provided.") + + if participant_factories is not None: + self._set_participant_factories(participant_factories) + if participants is not None: + self._set_participants(participants) + + # Set manager if provided + if any(x is not None for x in [manager, manager_factory, manager_agent, manager_agent_factory]): + self._set_manager( + manager=manager, + manager_factory=manager_factory, + manager_agent=manager_agent, + manager_agent_factory=manager_agent_factory, + task_ledger=task_ledger, + task_ledger_facts_prompt=task_ledger_facts_prompt, + task_ledger_plan_prompt=task_ledger_plan_prompt, + task_ledger_full_prompt=task_ledger_full_prompt, + task_ledger_facts_update_prompt=task_ledger_facts_update_prompt, + task_ledger_plan_update_prompt=task_ledger_plan_update_prompt, + progress_ledger_prompt=progress_ledger_prompt, + final_answer_prompt=final_answer_prompt, + max_stall_count=max_stall_count, + max_reset_count=max_reset_count, + max_round_count=max_round_count, + ) - def register_participants( + def _set_participant_factories( self, participant_factories: Sequence[Callable[[], SupportsAgentRun | Executor]], - ) -> "MagenticBuilder": - """Register participant factories for this Magentic workflow. - - Args: - participant_factories: Sequence of callables that return SupportsAgentRun or Executor instances. - - Returns: - Self for method chaining - - Raises: - ValueError: If participant_factories is empty, or participants - or participant factories are already set - """ + ) -> None: + """Set participant factories (internal).""" if self._participants: - raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + raise ValueError("Cannot provide both participants and participant_factories.") if self._participant_factories: - raise ValueError("register_participants() has already been called on this builder instance.") + raise ValueError("participant_factories already set.") if not participant_factories: raise ValueError("participant_factories cannot be empty") self._participant_factories = list(participant_factories) - return self - - def participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> Self: - """Define participants for this Magentic workflow. - Accepts SupportsAgentRun instances (auto-wrapped as AgentExecutor) or Executor instances. - - Args: - participants: Sequence of participant definitions - - Returns: - Self for method chaining - - Raises: - ValueError: If participants are empty, names are duplicated, or participants - or participant factories are already set - TypeError: If any participant is not SupportsAgentRun or Executor instance - - Example: - - .. 
code-block:: python - - workflow = ( - MagenticBuilder() - .participants([research_agent, writing_agent, coding_agent, review_agent]) - .with_manager(agent=manager_agent) - .build() - ) - - Notes: - - Participant names become part of the manager's context for selection - - Agent descriptions (if available) are extracted and provided to the manager - - Can be called multiple times to add participants incrementally - """ + def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> None: + """Set participants (internal).""" if self._participant_factories: - raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + raise ValueError("Cannot provide both participants and participant_factories.") if self._participants: - raise ValueError("participants have already been set. Call participants(...) at most once.") + raise ValueError("participants already set.") if not participants: raise ValueError("participants cannot be empty.") @@ -1482,8 +1513,6 @@ def participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> S self._participants = named - return self - def with_plan_review(self, enable: bool = True) -> "MagenticBuilder": """Enable or disable human-in-the-loop plan review before task execution. @@ -1509,9 +1538,7 @@ def with_plan_review(self, enable: bool = True) -> "MagenticBuilder": .. code-block:: python workflow = ( - MagenticBuilder() - .participants(agent1=agent1) - .with_manager(agent=manager_agent) + MagenticBuilder(participants=[agent1], manager_agent=manager_agent) .with_plan_review(enable=True) .build() ) @@ -1556,11 +1583,7 @@ def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "Magentic storage = InMemoryCheckpointStorage() workflow = ( - MagenticBuilder() - .participants([agent1]) - .with_manager(agent=manager_agent) - .with_checkpointing(storage) - .build() + MagenticBuilder(participants=[agent1], manager_agent=manager_agent).with_checkpointing(storage).build() ) # First run @@ -1580,144 +1603,14 @@ def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "Magentic self._checkpoint_storage = checkpoint_storage return self - @overload - def with_manager(self, *, manager: MagenticManagerBase) -> Self: - """Configure the workflow with a pre-defined Magentic manager instance. - - Args: - manager: A custom manager instance (subclass of MagenticManagerBase) - - Returns: - Self for method chaining - """ - ... - - @overload - def with_manager(self, *, manager_factory: Callable[[], MagenticManagerBase]) -> Self: - """Configure the workflow with a factory for creating custom Magentic manager instances. - - Args: - manager_factory: Callable that returns a new MagenticManagerBase instance - - Returns: - Self for method chaining - """ - ... - - @overload - def with_manager( - self, - *, - agent: SupportsAgentRun, - task_ledger: _MagenticTaskLedger | None = None, - # Prompt overrides - task_ledger_facts_prompt: str | None = None, - task_ledger_plan_prompt: str | None = None, - task_ledger_full_prompt: str | None = None, - task_ledger_facts_update_prompt: str | None = None, - task_ledger_plan_update_prompt: str | None = None, - progress_ledger_prompt: str | None = None, - final_answer_prompt: str | None = None, - # Limits - max_stall_count: int = 3, - max_reset_count: int | None = None, - max_round_count: int | None = None, - ) -> Self: - """Configure the workflow with an agent for creating a standard manager. 
- - This will create a StandardMagenticManager using the provided agent. - - Args: - agent: SupportsAgentRun instance for the standard magentic manager - (`StandardMagenticManager`) - task_ledger: Optional custom task ledger implementation for specialized - prompting or structured output requirements - task_ledger_facts_prompt: Custom prompt template for extracting facts from - task description - task_ledger_plan_prompt: Custom prompt template for generating initial plan - task_ledger_full_prompt: Custom prompt template for complete task ledger - (facts + plan combined) - task_ledger_facts_update_prompt: Custom prompt template for updating facts - based on agent progress - task_ledger_plan_update_prompt: Custom prompt template for replanning when - needed - progress_ledger_prompt: Custom prompt template for assessing progress and - determining next actions - final_answer_prompt: Custom prompt template for synthesizing final response - when task is complete - max_stall_count: Maximum consecutive rounds without progress before triggering - replan (default 3). Set to 0 to disable stall detection. - max_reset_count: Maximum number of complete resets allowed before failing. - None means unlimited resets. - max_round_count: Maximum total coordination rounds before stopping with - partial result. None means unlimited rounds. - - Returns: - Self for method chaining - """ - ... - - @overload - def with_manager( - self, - *, - agent_factory: Callable[[], SupportsAgentRun], - task_ledger: _MagenticTaskLedger | None = None, - # Prompt overrides - task_ledger_facts_prompt: str | None = None, - task_ledger_plan_prompt: str | None = None, - task_ledger_full_prompt: str | None = None, - task_ledger_facts_update_prompt: str | None = None, - task_ledger_plan_update_prompt: str | None = None, - progress_ledger_prompt: str | None = None, - final_answer_prompt: str | None = None, - # Limits - max_stall_count: int = 3, - max_reset_count: int | None = None, - max_round_count: int | None = None, - ) -> Self: - """Configure the workflow with a factory for creating the manager agent. - - This will create a StandardMagenticManager using the provided agent factory. - - Args: - agent_factory: Callable that returns a new SupportsAgentRun instance for the standard - magentic manager (`StandardMagenticManager`) - task_ledger: Optional custom task ledger implementation for specialized - prompting or structured output requirements - task_ledger_facts_prompt: Custom prompt template for extracting facts from - task description - task_ledger_plan_prompt: Custom prompt template for generating initial plan - task_ledger_full_prompt: Custom prompt template for complete task ledger - (facts + plan combined) - task_ledger_facts_update_prompt: Custom prompt template for updating facts - based on agent progress - task_ledger_plan_update_prompt: Custom prompt template for replanning when - needed - progress_ledger_prompt: Custom prompt template for assessing progress and - determining next actions - final_answer_prompt: Custom prompt template for synthesizing final response - when task is complete - max_stall_count: Maximum consecutive rounds without progress before triggering - replan (default 3). Set to 0 to disable stall detection. - max_reset_count: Maximum number of complete resets allowed before failing. - None means unlimited resets. - max_round_count: Maximum total coordination rounds before stopping with - partial result. None means unlimited rounds. - - Returns: - Self for method chaining - """ - ... 
- - def with_manager( + def _set_manager( self, *, manager: MagenticManagerBase | None = None, manager_factory: Callable[[], MagenticManagerBase] | None = None, - agent_factory: Callable[[], SupportsAgentRun] | None = None, + manager_agent: SupportsAgentRun | None = None, + manager_agent_factory: Callable[[], SupportsAgentRun] | None = None, # Constructor args for StandardMagenticManager when manager is not provided - agent: SupportsAgentRun | None = None, task_ledger: _MagenticTaskLedger | None = None, # Prompt overrides task_ledger_facts_prompt: str | None = None, @@ -1731,123 +1624,37 @@ def with_manager( max_stall_count: int = 3, max_reset_count: int | None = None, max_round_count: int | None = None, - ) -> Self: - """Configure the workflow manager for task planning and agent coordination. - - The manager is responsible for creating plans, selecting agents, tracking progress, - and deciding when to replan or complete. This method supports four usage patterns: - - 1. **Provide existing manager**: Pass a pre-configured manager instance (custom - or standard) for full control over behavior - 2. **Factory for custom manager**: Pass a callable that returns a new manager - instance for more advanced scenarios so that the builder can be reused - 3. **Factory for agent**: Pass a callable that returns a new agent instance to - automatically create a `StandardMagenticManager` - 4. **Auto-create with agent**: Pass an agent to automatically create a `StandardMagenticManager` + ) -> None: + """Configure the workflow manager for task planning and agent coordination (internal). Args: - manager: Pre-configured manager instance (`StandardMagenticManager` or custom - `MagenticManagerBase` subclass). If provided, all other arguments are ignored. + manager: Pre-configured manager instance. manager_factory: Callable that returns a new manager instance. - agent_factory: Callable that returns a new agent instance. - agent: Agent instance for generating plans and decisions. The agent's - configured instructions and options (temperature, seed, etc.) will be - applied. - task_ledger: Optional custom task ledger implementation for specialized - prompting or structured output requirements - task_ledger_facts_prompt: Custom prompt template for extracting facts from - task description - task_ledger_plan_prompt: Custom prompt template for generating initial plan - task_ledger_full_prompt: Custom prompt template for complete task ledger - (facts + plan combined) - task_ledger_facts_update_prompt: Custom prompt template for updating facts - based on agent progress - task_ledger_plan_update_prompt: Custom prompt template for replanning when - needed - progress_ledger_prompt: Custom prompt template for assessing progress and - determining next actions - final_answer_prompt: Custom prompt template for synthesizing final response - when task is complete - max_stall_count: Maximum consecutive rounds without progress before triggering - replan (default 3). Set to 0 to disable stall detection. - max_reset_count: Maximum number of complete resets allowed before failing. - None means unlimited resets. - max_round_count: Maximum total coordination rounds before stopping with - partial result. None means unlimited rounds. - - Returns: - Self for method chaining + manager_agent: Agent instance for creating a StandardMagenticManager. + manager_agent_factory: Callable that returns a new agent instance for creating a StandardMagenticManager. + task_ledger: Optional custom task ledger implementation. 
+ task_ledger_facts_prompt: Custom prompt for extracting facts. + task_ledger_plan_prompt: Custom prompt for generating initial plan. + task_ledger_full_prompt: Custom prompt for complete task ledger. + task_ledger_facts_update_prompt: Custom prompt for updating facts. + task_ledger_plan_update_prompt: Custom prompt for replanning. + progress_ledger_prompt: Custom prompt for assessing progress. + final_answer_prompt: Custom prompt for synthesizing final response. + max_stall_count: Max consecutive rounds without progress before replan (default 3). + max_reset_count: Max number of resets allowed. None means unlimited. + max_round_count: Max total coordination rounds. None means unlimited. Raises: - ValueError: If manager is None and agent is not provided. - - Usage with agent (recommended): - - .. code-block:: python - - from agent_framework import ChatAgent, ChatOptions - from agent_framework.openai import OpenAIChatClient - - # Configure manager agent with specific options and instructions - manager_agent = ChatAgent( - name="Coordinator", - chat_client=OpenAIChatClient(model_id="gpt-4o"), - options=ChatOptions(temperature=0.3, seed=42), - instructions="Be concise and focus on accuracy", - ) - - workflow = ( - MagenticBuilder() - .participants(agent1=agent1, agent2=agent2) - .with_manager( - agent=manager_agent, - max_round_count=20, - max_stall_count=3, - ) - .build() - ) - - Usage with custom manager: - - .. code-block:: python - - class MyManager(MagenticManagerBase): - async def plan(self, context: MagenticContext) -> ChatMessage: - # Custom planning logic - return ChatMessage(role="assistant", text="...") - - - manager = MyManager() - workflow = MagenticBuilder().participants(agent1=agent1).with_manager(manager).build() - - Usage with prompt customization: - - .. code-block:: python - - workflow = ( - MagenticBuilder() - .participants(coder=coder_agent, reviewer=reviewer_agent) - .with_manager( - agent=manager_agent, - task_ledger_plan_prompt="Create a detailed step-by-step plan...", - progress_ledger_prompt="Assess progress and decide next action...", - max_stall_count=2, - ) - .build() - ) - - Notes: - - StandardMagenticManager uses structured LLM calls for all decisions - - Custom managers can implement alternative selection strategies - - Prompt templates support Jinja2-style variable substitution - - Stall detection helps prevent infinite loops in stuck scenarios - - The agent's instructions are used as system instructions for all manager prompts + ValueError: If a manager has already been set or if none or multiple + of the primary parameters are provided. """ if any([self._manager, self._manager_factory, self._manager_agent_factory]): - raise ValueError("with_manager() has already been called on this builder instance.") + raise ValueError("Manager has already been configured. Set manager config once only.") - if sum(x is not None for x in [manager, agent, manager_factory, agent_factory]) != 1: - raise ValueError("Exactly one of manager, agent, manager_factory, or agent_factory must be provided.") + if sum(x is not None for x in [manager, manager_agent, manager_factory, manager_agent_factory]) != 1: + raise ValueError( + "Exactly one of manager, manager_agent, manager_factory, or manager_agent_factory must be provided." 
+ ) def _log_warning_if_constructor_args_provided() -> None: if any( @@ -1866,14 +1673,14 @@ def _log_warning_if_constructor_args_provided() -> None: max_round_count, ] ): - logger.warning("Customer manager provided; all other with_manager() arguments will be ignored.") + logger.warning("Custom manager provided; all other manager arguments will be ignored.") if manager is not None: self._manager = manager _log_warning_if_constructor_args_provided() - elif agent is not None: + elif manager_agent is not None: self._manager = StandardMagenticManager( - agent=agent, + agent=manager_agent, task_ledger=task_ledger, task_ledger_facts_prompt=task_ledger_facts_prompt, task_ledger_plan_prompt=task_ledger_plan_prompt, @@ -1889,8 +1696,8 @@ def _log_warning_if_constructor_args_provided() -> None: elif manager_factory is not None: self._manager_factory = manager_factory _log_warning_if_constructor_args_provided() - elif agent_factory is not None: - self._manager_agent_factory = agent_factory + elif manager_agent_factory is not None: + self._manager_agent_factory = manager_agent_factory self._standard_manager_options = { "task_ledger": task_ledger, "task_ledger_facts_prompt": task_ledger_facts_prompt, @@ -1905,21 +1712,6 @@ def _log_warning_if_constructor_args_provided() -> None: "max_round_count": max_round_count, } - return self - - def with_intermediate_outputs(self) -> Self: - """Enable intermediate outputs from agent participants before aggregation. - - When enabled, the workflow returns each agent participant's response or yields - streaming updates as they become available. The output of the orchestrator will - always be available as the final output of the workflow. - - Returns: - Self for fluent chaining - """ - self._intermediate_outputs = True - return self - def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: """Determine the orchestrator to use for the workflow. @@ -1927,8 +1719,11 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: participants: List of resolved participant executors """ if all(x is None for x in [self._manager, self._manager_factory, self._manager_agent_factory]): - raise ValueError("No manager configured. Call with_manager(...) before building the orchestrator.") - # We don't need to check if multiple are set since that is handled in with_orchestrator() + raise ValueError( + "No manager configured. " + "Pass manager, manager_factory, manager_agent, or manager_agent_factory to the constructor." + ) + # We don't need to check if multiple are set since that is handled in _set_manager() if self._manager: manager = self._manager @@ -1942,7 +1737,10 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: ) else: # This should never be reached due to the checks above - raise RuntimeError("Manager could not be resolved. Please set the manager properly with with_manager().") + raise RuntimeError( + "Manager could not be resolved. " + "Pass manager, manager_factory, manager_agent, or manager_agent_factory to the constructor." + ) return MagenticOrchestrator( manager=manager, @@ -1953,7 +1751,7 @@ def _resolve_orchestrator(self, participants: Sequence[Executor]) -> Executor: def _resolve_participants(self) -> list[Executor]: """Resolve participant instances into Executor objects.""" if not self._participants and not self._participant_factories: - raise ValueError("No participants provided. Call .participants() or .register_participants() first.") + raise ValueError("No participants provided. 
Pass participants or participant_factories to the constructor.") # We don't need to check if both are set since that is handled in the respective methods participants: list[Executor | SupportsAgentRun] = [] @@ -1985,17 +1783,15 @@ def build(self) -> Workflow: orchestrator: Executor = self._resolve_orchestrator(participants) # Build workflow graph - workflow_builder = WorkflowBuilder().set_start_executor(orchestrator) + workflow_builder = WorkflowBuilder( + start_executor=orchestrator, + checkpoint_storage=self._checkpoint_storage, + output_executors=[orchestrator] if not self._intermediate_outputs else None, + ) for participant in participants: # Orchestrator and participant bi-directional edges workflow_builder = workflow_builder.add_edge(orchestrator, participant) workflow_builder = workflow_builder.add_edge(participant, orchestrator) - if self._checkpoint_storage is not None: - workflow_builder = workflow_builder.with_checkpointing(self._checkpoint_storage) - - if not self._intermediate_outputs: - # Constrain output to orchestrator only - workflow_builder = workflow_builder.with_output_from([orchestrator]) return workflow_builder.build() diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_request_info.py b/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_request_info.py index 9fb22d908b..51f4e27898 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_request_info.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_orchestration_request_info.py @@ -132,11 +132,10 @@ def _build_workflow(self, agent: SupportsAgentRun) -> Workflow: request_info_executor = AgentRequestInfoExecutor(id="agent_request_info_executor") return ( - WorkflowBuilder() + WorkflowBuilder(start_executor=agent_executor) # Create a loop between agent executor and request info executor .add_edge(agent_executor, request_info_executor) .add_edge(request_info_executor, agent_executor) - .set_start_executor(agent_executor) .build() ) diff --git a/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py b/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py index 3546824033..3ddecd56dc 100644 --- a/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py +++ b/python/packages/orchestrations/agent_framework_orchestrations/_sequential.py @@ -4,8 +4,8 @@ This module provides a high-level, agent-focused API to assemble a sequential workflow where: -- Participants can be provided as SupportsAgentRun or Executor instances via `.participants()`, - or as factories returning SupportsAgentRun or Executor via `.register_participants()` +- Participants can be provided as SupportsAgentRun or Executor instances via `participants=[...]`, + or as factories returning SupportsAgentRun or Executor via `participant_factories=[...]` - A shared conversation context (list[ChatMessage]) is passed along the chain - Agents append their assistant messages to the context - Custom executors can transform or summarize and return a refined context @@ -109,8 +109,8 @@ async def end_with_agent_executor_response( class SequentialBuilder: r"""High-level builder for sequential agent/executor workflows with shared context. 
- - `participants([...])` accepts a list of SupportsAgentRun (recommended) or Executor instances - - `register_participants([...])` accepts a list of factories for SupportsAgentRun (recommended) + - `participants=[...]` accepts a list of SupportsAgentRun (recommended) or Executor instances + - `participant_factories=[...]` accepts a list of factories for SupportsAgentRun (recommended) or Executor factories - Executors must define a handler that consumes list[ChatMessage] and sends out a list[ChatMessage] - The workflow wires participants in order, passing a list[ChatMessage] down the chain @@ -125,64 +125,81 @@ class SequentialBuilder: from agent_framework_orchestrations import SequentialBuilder # With agent instances - workflow = SequentialBuilder().participants([agent1, agent2, summarizer_exec]).build() + workflow = SequentialBuilder(participants=[agent1, agent2, summarizer_exec]).build() # With agent factories - workflow = ( - SequentialBuilder().register_participants([create_agent1, create_agent2, create_summarizer_exec]).build() - ) + workflow = SequentialBuilder( + participant_factories=[create_agent1, create_agent2, create_summarizer_exec] + ).build() # Enable checkpoint persistence - workflow = SequentialBuilder().participants([agent1, agent2]).with_checkpointing(storage).build() + workflow = SequentialBuilder(participants=[agent1, agent2], checkpoint_storage=storage).build() # Enable request info for mid-workflow feedback (pauses before each agent) - workflow = SequentialBuilder().participants([agent1, agent2]).with_request_info().build() + workflow = SequentialBuilder(participants=[agent1, agent2]).with_request_info().build() # Enable request info only for specific agents workflow = ( - SequentialBuilder() - .participants([agent1, agent2, agent3]) + SequentialBuilder(participants=[agent1, agent2, agent3]) .with_request_info(agents=[agent2]) # Only pause before agent2 .build() ) """ - def __init__(self) -> None: + def __init__( + self, + *, + participants: Sequence[SupportsAgentRun | Executor] | None = None, + participant_factories: Sequence[Callable[[], SupportsAgentRun | Executor]] | None = None, + checkpoint_storage: CheckpointStorage | None = None, + intermediate_outputs: bool = False, + ) -> None: + """Initialize the SequentialBuilder. + + Args: + participants: Optional sequence of agent or executor instances to run sequentially. + participant_factories: Optional sequence of callables returning agent or executor instances. + checkpoint_storage: Optional checkpoint storage for enabling workflow state persistence. + intermediate_outputs: If True, enables intermediate outputs from agent participants. 
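+
+        Example (illustrative sketch; ``make_agent1`` and ``make_agent2`` are assumed to be
+            factories defined elsewhere):
+
+            .. code-block:: python
+
+                workflow = SequentialBuilder(
+                    participant_factories=[make_agent1, make_agent2],
+                    intermediate_outputs=True,
+                ).build()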
+ """ self._participants: list[SupportsAgentRun | Executor] = [] self._participant_factories: list[Callable[[], SupportsAgentRun | Executor]] = [] - self._checkpoint_storage: CheckpointStorage | None = None + self._checkpoint_storage: CheckpointStorage | None = checkpoint_storage self._request_info_enabled: bool = False self._request_info_filter: set[str] | None = None - self._intermediate_outputs: bool = False + self._intermediate_outputs: bool = intermediate_outputs + + if participants is None and participant_factories is None: + raise ValueError("Either participants or participant_factories must be provided.") - def register_participants( + if participant_factories is not None: + self._set_participant_factories(participant_factories) + if participants is not None: + self._set_participants(participants) + + def _set_participant_factories( self, participant_factories: Sequence[Callable[[], SupportsAgentRun | Executor]], - ) -> "SequentialBuilder": - """Register participant factories for this sequential workflow.""" + ) -> None: + """Set participant factories (internal).""" if self._participants: - raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + raise ValueError("Cannot provide both participants and participant_factories.") if self._participant_factories: - raise ValueError("register_participants() has already been called on this builder instance.") + raise ValueError("participant_factories already set.") if not participant_factories: raise ValueError("participant_factories cannot be empty") self._participant_factories = list(participant_factories) - return self - - def participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> "SequentialBuilder": - """Define the ordered participants for this sequential workflow. - Accepts SupportsAgentRun instances (auto-wrapped as AgentExecutor) or Executor instances. - Raises if empty or duplicates are provided for clarity. - """ + def _set_participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> None: + """Set participants (internal).""" if self._participant_factories: - raise ValueError("Cannot mix .participants() and .register_participants() in the same builder instance.") + raise ValueError("Cannot provide both participants and participant_factories.") if self._participants: - raise ValueError("participants() has already been called on this builder instance.") + raise ValueError("participants already set.") if not participants: raise ValueError("participants cannot be empty") @@ -203,12 +220,6 @@ def participants(self, participants: Sequence[SupportsAgentRun | Executor]) -> " seen_agent_ids.add(pid) self._participants = list(participants) - return self - - def with_checkpointing(self, checkpoint_storage: CheckpointStorage) -> "SequentialBuilder": - """Enable checkpointing for the built workflow using the provided storage.""" - self._checkpoint_storage = checkpoint_storage - return self def with_request_info( self, @@ -243,23 +254,10 @@ def with_request_info( return self - def with_intermediate_outputs(self) -> "SequentialBuilder": - """Enable intermediate outputs from agent participants. - - When enabled, the workflow returns each agent participant's response or yields - streaming updates as they become available. The output of the last participant - will always be available as the final output of the workflow. 
- - Returns: - Self for fluent chaining - """ - self._intermediate_outputs = True - return self - def _resolve_participants(self) -> list[Executor]: """Resolve participant instances into Executor objects.""" if not self._participants and not self._participant_factories: - raise ValueError("No participants provided. Call .participants() or .register_participants() first.") + raise ValueError("No participants provided. Pass participants or participant_factories to the constructor.") # We don't need to check if both are set since that is handled in the respective methods participants: list[Executor | SupportsAgentRun] = [] @@ -308,8 +306,11 @@ def build(self) -> Workflow: # Resolve participants and participant factories to executors participants: list[Executor] = self._resolve_participants() - builder = WorkflowBuilder() - builder.set_start_executor(input_conv) + builder = WorkflowBuilder( + start_executor=input_conv, + checkpoint_storage=self._checkpoint_storage, + output_executors=[end] if not self._intermediate_outputs else None, + ) # Start of the chain is the input normalizer prior: Executor | SupportsAgentRun = input_conv @@ -319,11 +320,4 @@ def build(self) -> Workflow: # Terminate with the final conversation builder.add_edge(prior, end) - if not self._intermediate_outputs: - # Constrain output to end only - builder = builder.with_output_from([end]) - - if self._checkpoint_storage is not None: - builder = builder.with_checkpointing(self._checkpoint_storage) - return builder.build() diff --git a/python/packages/orchestrations/tests/test_concurrent.py b/python/packages/orchestrations/tests/test_concurrent.py index 0b0c279b14..cecc8500c8 100644 --- a/python/packages/orchestrations/tests/test_concurrent.py +++ b/python/packages/orchestrations/tests/test_concurrent.py @@ -39,14 +39,14 @@ async def run(self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExe def test_concurrent_builder_rejects_empty_participants() -> None: with pytest.raises(ValueError): - ConcurrentBuilder().participants([]) + ConcurrentBuilder(participants=[]) def test_concurrent_builder_rejects_duplicate_executors() -> None: a = _FakeAgentExec("dup", "A") b = _FakeAgentExec("dup", "B") # same executor id with pytest.raises(ValueError): - ConcurrentBuilder().participants([a, b]) + ConcurrentBuilder(participants=[a, b]) def test_concurrent_builder_rejects_duplicate_executors_from_factories() -> None: @@ -58,43 +58,35 @@ def create_dup1() -> Executor: def create_dup2() -> Executor: return _FakeAgentExec("dup", "B") # same executor id - builder = ConcurrentBuilder().register_participants([create_dup1, create_dup2]) + builder = ConcurrentBuilder(participant_factories=[create_dup1, create_dup2]) with pytest.raises(ValueError, match="Duplicate executor ID 'dup' detected in workflow."): builder.build() def test_concurrent_builder_rejects_mixed_participants_and_factories() -> None: - """Test that mixing .participants() and .register_participants() raises an error.""" - # Case 1: participants first, then register_participants - with pytest.raises(ValueError, match="Cannot mix .participants"): - ( - ConcurrentBuilder() - .participants([_FakeAgentExec("a", "A")]) - .register_participants([lambda: _FakeAgentExec("b", "B")]) - ) - - # Case 2: register_participants first, then participants - with pytest.raises(ValueError, match="Cannot mix .participants"): - ( - ConcurrentBuilder() - .register_participants([lambda: _FakeAgentExec("a", "A")]) - .participants([_FakeAgentExec("b", "B")]) + """Test that passing both participants 
and participant_factories to the constructor raises an error."""
+    with pytest.raises(ValueError, match="Cannot provide both participants and participant_factories"):
+        ConcurrentBuilder(
+            participants=[_FakeAgentExec("a", "A")],
+            participant_factories=[lambda: _FakeAgentExec("b", "B")],
         )


-def test_concurrent_builder_rejects_multiple_calls_to_participants() -> None:
-    """Test that multiple calls to .participants() raises an error."""
-    with pytest.raises(ValueError, match=r"participants\(\) has already been called"):
-        (ConcurrentBuilder().participants([_FakeAgentExec("a", "A")]).participants([_FakeAgentExec("b", "B")]))
-
-
-def test_concurrent_builder_rejects_multiple_calls_to_register_participants() -> None:
-    """Test that multiple calls to .register_participants() raises an error."""
-    with pytest.raises(ValueError, match=r"register_participants\(\) has already been called"):
-        (
-            ConcurrentBuilder()
-            .register_participants([lambda: _FakeAgentExec("a", "A")])
-            .register_participants([lambda: _FakeAgentExec("b", "B")])
+def test_concurrent_builder_rejects_both_factories_and_participants() -> None:
+    """Test that passing both participant_factories and participants raises an error."""
+    with pytest.raises(ValueError, match="Cannot provide both participants and participant_factories"):
+        ConcurrentBuilder(
+            participant_factories=[lambda: _FakeAgentExec("a", "A")],
+            participants=[_FakeAgentExec("b", "B")],
         )


@@ -104,7 +96,7 @@ async def test_concurrent_default_aggregator_emits_single_user_and_assistants()
     e2 = _FakeAgentExec("agentB", "Beta")
     e3 = _FakeAgentExec("agentC", "Gamma")

-    wf = ConcurrentBuilder().participants([e1, e2, e3]).build()
+    wf = ConcurrentBuilder(participants=[e1, e2, e3]).build()

     completed = False
     output: list[ChatMessage] | None = None
@@ -142,7 +134,7 @@ async def summarize(results: list[AgentExecutorResponse]) -> str:
             texts.append(msgs[-1].text if msgs else "")
         return " | ".join(sorted(texts))

-    wf = ConcurrentBuilder().participants([e1, e2]).with_aggregator(summarize).build()
+    wf = ConcurrentBuilder(participants=[e1, e2]).with_aggregator(summarize).build()

     completed = False
     output: str | None = None
@@ -173,7 +165,7 @@ def summarize_sync(results: list[AgentExecutorResponse], _ctx: WorkflowContext[A
             texts.append(msgs[-1].text if msgs else "")
         return " | ".join(sorted(texts))

-    wf = ConcurrentBuilder().participants([e1, e2]).with_aggregator(summarize_sync).build()
+    wf = ConcurrentBuilder(participants=[e1, e2]).with_aggregator(summarize_sync).build()

     completed = False
     output: str | None = None
@@ -198,7 +190,7 @@ def test_concurrent_custom_aggregator_uses_callback_name_for_id() -> None:
     def summarize(results: list[AgentExecutorResponse]) -> str:  # type: ignore[override]
         return str(len(results))

-    wf = ConcurrentBuilder().participants([e1, e2]).with_aggregator(summarize).build()
+    wf = ConcurrentBuilder(participants=[e1, e2]).with_aggregator(summarize).build()

     assert "summarize" in wf.executors
     aggregator = wf.executors["summarize"]
@@ -221,7 +213,7 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon
     e2 = _FakeAgentExec("agentB", "Two")
aggregator_instance = CustomAggregator(id="instance_aggregator") - wf = ConcurrentBuilder().participants([e1, e2]).with_aggregator(aggregator_instance).build() + wf = ConcurrentBuilder(participants=[e1, e2]).with_aggregator(aggregator_instance).build() completed = False output: str | None = None @@ -255,8 +247,7 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon e2 = _FakeAgentExec("agentB", "Two") wf = ( - ConcurrentBuilder() - .participants([e1, e2]) + ConcurrentBuilder(participants=[e1, e2]) .register_aggregator(lambda: CustomAggregator(id="custom_aggregator")) .build() ) @@ -295,7 +286,7 @@ async def aggregate(self, results: list[AgentExecutorResponse], ctx: WorkflowCon e1 = _FakeAgentExec("agentA", "One") e2 = _FakeAgentExec("agentB", "Two") - wf = ConcurrentBuilder().participants([e1, e2]).register_aggregator(CustomAggregator).build() + wf = ConcurrentBuilder(participants=[e1, e2]).register_aggregator(CustomAggregator).build() completed = False output: str | None = None @@ -320,7 +311,11 @@ def summarize(results: list[AgentExecutorResponse]) -> str: # type: ignore[over return str(len(results)) with pytest.raises(ValueError, match=r"with_aggregator\(\) has already been called"): - (ConcurrentBuilder().with_aggregator(summarize).with_aggregator(summarize)) + ( + ConcurrentBuilder(participants=[_FakeAgentExec("a", "A")]) + .with_aggregator(summarize) + .with_aggregator(summarize) + ) def test_concurrent_builder_rejects_multiple_calls_to_register_aggregator() -> None: @@ -331,7 +326,7 @@ class CustomAggregator(Executor): with pytest.raises(ValueError, match=r"register_aggregator\(\) has already been called"): ( - ConcurrentBuilder() + ConcurrentBuilder(participants=[_FakeAgentExec("a", "A")]) .register_aggregator(lambda: CustomAggregator(id="agg1")) .register_aggregator(lambda: CustomAggregator(id="agg2")) ) @@ -346,7 +341,7 @@ async def test_concurrent_checkpoint_resume_round_trip() -> None: _FakeAgentExec("agentC", "Gamma"), ) - wf = ConcurrentBuilder().participants(list(participants)).with_checkpointing(storage).build() + wf = ConcurrentBuilder(participants=list(participants), checkpoint_storage=storage).build() baseline_output: list[ChatMessage] | None = None async for ev in wf.run("checkpoint concurrent", stream=True): @@ -370,7 +365,7 @@ async def test_concurrent_checkpoint_resume_round_trip() -> None: _FakeAgentExec("agentB", "Beta"), _FakeAgentExec("agentC", "Gamma"), ) - wf_resume = ConcurrentBuilder().participants(list(resumed_participants)).with_checkpointing(storage).build() + wf_resume = ConcurrentBuilder(participants=list(resumed_participants), checkpoint_storage=storage).build() resumed_output: list[ChatMessage] | None = None async for ev in wf_resume.run(checkpoint_id=resume_checkpoint.checkpoint_id, stream=True): @@ -392,7 +387,7 @@ async def test_concurrent_checkpoint_runtime_only() -> None: storage = InMemoryCheckpointStorage() agents = [_FakeAgentExec(id="agent1", reply_text="A1"), _FakeAgentExec(id="agent2", reply_text="A2")] - wf = ConcurrentBuilder().participants(agents).build() + wf = ConcurrentBuilder(participants=agents).build() baseline_output: list[ChatMessage] | None = None async for ev in wf.run("runtime checkpoint test", checkpoint_storage=storage, stream=True): @@ -413,7 +408,7 @@ async def test_concurrent_checkpoint_runtime_only() -> None: ) resumed_agents = [_FakeAgentExec(id="agent1", reply_text="A1"), _FakeAgentExec(id="agent2", reply_text="A2")] - wf_resume = ConcurrentBuilder().participants(resumed_agents).build() + 
wf_resume = ConcurrentBuilder(participants=resumed_agents).build() resumed_output: list[ChatMessage] | None = None async for ev in wf_resume.run( @@ -442,7 +437,7 @@ async def test_concurrent_checkpoint_runtime_overrides_buildtime() -> None: runtime_storage = FileCheckpointStorage(temp_dir2) agents = [_FakeAgentExec(id="agent1", reply_text="A1"), _FakeAgentExec(id="agent2", reply_text="A2")] - wf = ConcurrentBuilder().participants(agents).with_checkpointing(buildtime_storage).build() + wf = ConcurrentBuilder(participants=agents, checkpoint_storage=buildtime_storage).build() baseline_output: list[ChatMessage] | None = None async for ev in wf.run("override test", checkpoint_storage=runtime_storage, stream=True): @@ -462,7 +457,7 @@ async def test_concurrent_checkpoint_runtime_overrides_buildtime() -> None: def test_concurrent_builder_rejects_empty_participant_factories() -> None: with pytest.raises(ValueError): - ConcurrentBuilder().register_participants([]) + ConcurrentBuilder(participant_factories=[]) async def test_concurrent_builder_reusable_after_build_with_participants() -> None: @@ -470,7 +465,7 @@ async def test_concurrent_builder_reusable_after_build_with_participants() -> No e1 = _FakeAgentExec("agentA", "One") e2 = _FakeAgentExec("agentB", "Two") - builder = ConcurrentBuilder().participants([e1, e2]) + builder = ConcurrentBuilder(participants=[e1, e2]) builder.build() @@ -493,7 +488,7 @@ def create_agent_executor_b() -> Executor: call_count += 1 return _FakeAgentExec("agentB", "Two") - builder = ConcurrentBuilder().register_participants([create_agent_executor_a, create_agent_executor_b]) + builder = ConcurrentBuilder(participant_factories=[create_agent_executor_a, create_agent_executor_b]) # Build the first workflow wf1 = builder.build() @@ -523,7 +518,7 @@ def create_agent2() -> Executor: def create_agent3() -> Executor: return _FakeAgentExec("agentC", "Gamma") - wf = ConcurrentBuilder().register_participants([create_agent1, create_agent2, create_agent3]).build() + wf = ConcurrentBuilder(participant_factories=[create_agent1, create_agent2, create_agent3]).build() completed = False output: list[ChatMessage] | None = None diff --git a/python/packages/orchestrations/tests/test_group_chat.py b/python/packages/orchestrations/tests/test_group_chat.py index 306d4eda44..718b8eb3a7 100644 --- a/python/packages/orchestrations/tests/test_group_chat.py +++ b/python/packages/orchestrations/tests/test_group_chat.py @@ -178,13 +178,12 @@ async def test_group_chat_builder_basic_flow() -> None: alpha = StubAgent("alpha", "ack from alpha") beta = StubAgent("beta", "ack from beta") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector, orchestrator_name="manager") - .participants([alpha, beta]) - .with_max_rounds(2) # Limit rounds to prevent infinite loop - .build() - ) + workflow = GroupChatBuilder( + participants=[alpha, beta], + max_rounds=2, # Limit rounds to prevent infinite loop + selection_func=selector, + orchestrator_name="manager", + ).build() outputs: list[list[ChatMessage]] = [] async for event in workflow.run("coordinate task", stream=True): @@ -205,13 +204,12 @@ async def test_group_chat_as_agent_accepts_conversation() -> None: alpha = StubAgent("alpha", "ack from alpha") beta = StubAgent("beta", "ack from beta") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector, orchestrator_name="manager") - .participants([alpha, beta]) - .with_max_rounds(2) # Limit rounds to prevent infinite loop - .build() - ) + workflow = GroupChatBuilder( 
+ participants=[alpha, beta], + max_rounds=2, # Limit rounds to prevent infinite loop + selection_func=selector, + orchestrator_name="manager", + ).build() agent = workflow.as_agent(name="group-chat-agent") conversation = [ @@ -233,64 +231,47 @@ def test_build_without_manager_raises_error(self) -> None: """Test that building without a manager raises ValueError.""" agent = StubAgent("test", "response") - builder = GroupChatBuilder().participants([agent]) + builder = GroupChatBuilder(participants=[agent]) with pytest.raises( - ValueError, match=r"No orchestrator has been configured\. Call with_orchestrator\(\) to set one\." + ValueError, + match=r"No orchestrator has been configured\.", ): builder.build() def test_build_without_participants_raises_error(self) -> None: - """Test that building without participants raises ValueError.""" - - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - + """Test that constructing without participants raises ValueError.""" with pytest.raises( ValueError, - match=r"No participants provided\. Call \.participants\(\) or \.register_participants\(\) first\.", + match=r"Either participants or participant_factories must be provided\.", ): - builder.build() + GroupChatBuilder() def test_duplicate_manager_configuration_raises_error(self) -> None: - """Test that configuring multiple managers raises ValueError.""" + """Test that configuring multiple orchestrator options raises ValueError.""" + agent = StubAgent("test", "response") def selector(state: GroupChatState) -> str: return "agent" - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - with pytest.raises( ValueError, - match=r"A selection function has already been configured\. 
Call with_orchestrator\(\.\.\.\) once only\.", + match=r"Exactly one of", ): - builder.with_orchestrator(selection_func=selector) + GroupChatBuilder(participants=[agent], selection_func=selector, orchestrator_agent=StubManagerAgent()) def test_empty_participants_raises_error(self) -> None: """Test that empty participants list raises ValueError.""" - - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - with pytest.raises(ValueError, match="participants cannot be empty"): - builder.participants([]) + GroupChatBuilder(participants=[]) def test_duplicate_participant_names_raises_error(self) -> None: """Test that duplicate participant names raise ValueError.""" agent1 = StubAgent("test", "response1") agent2 = StubAgent("test", "response2") - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - with pytest.raises(ValueError, match="Duplicate participant name 'test'"): - builder.participants([agent1, agent2]) + GroupChatBuilder(participants=[agent1, agent2]) def test_agent_without_name_raises_error(self) -> None: """Test that agent without name attribute raises ValueError.""" @@ -315,25 +296,15 @@ async def _run_impl(self) -> AgentResponse: agent = AgentWithoutName() - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - with pytest.raises(ValueError, match="SupportsAgentRun participants must have a non-empty name"): - builder.participants([agent]) + GroupChatBuilder(participants=[agent]) def test_empty_participant_name_raises_error(self) -> None: """Test that empty participant name raises ValueError.""" agent = StubAgent("", "response") # Agent with empty name - def selector(state: GroupChatState) -> str: - return "agent" - - builder = GroupChatBuilder().with_orchestrator(selection_func=selector) - with pytest.raises(ValueError, match="SupportsAgentRun participants must have a non-empty name"): - builder.participants([agent]) + GroupChatBuilder(participants=[agent]) class TestGroupChatWorkflow: @@ -350,13 +321,11 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(2) # Limit to 2 rounds - .build() - ) + workflow = GroupChatBuilder( + participants=[agent], + max_rounds=2, # Limit to 2 rounds + selection_func=selector, + ).build() outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test task", stream=True): @@ -385,13 +354,11 @@ def termination_condition(conversation: list[ChatMessage]) -> bool: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_termination_condition(termination_condition) - .build() - ) + workflow = GroupChatBuilder( + participants=[agent], + termination_condition=termination_condition, + selection_func=selector, + ).build() outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test task", stream=True): @@ -413,13 +380,11 @@ async def test_termination_condition_agent_manager_finalizes(self) -> None: manager = StubManagerAgent() worker = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(agent=manager) - .participants([worker]) - .with_termination_condition(lambda conv: any(msg.author_name == 
"agent" for msg in conv)) - .build() - ) + workflow = GroupChatBuilder( + participants=[worker], + termination_condition=lambda conv: any(msg.author_name == "agent" for msg in conv), + orchestrator_agent=manager, + ).build() outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test task", stream=True): @@ -441,7 +406,7 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = GroupChatBuilder().with_orchestrator(selection_func=selector).participants([agent]).build() + workflow = GroupChatBuilder(participants=[agent], selection_func=selector).build() with pytest.raises(RuntimeError, match="Selection function returned unknown participant 'unknown_agent'"): async for _ in workflow.run("test task", stream=True): @@ -460,14 +425,12 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") storage = InMemoryCheckpointStorage() - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .with_checkpointing(storage) - .build() - ) + workflow = GroupChatBuilder( + participants=[agent], + max_rounds=1, + checkpoint_storage=storage, + selection_func=selector, + ).build() outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test task", stream=True): @@ -490,13 +453,7 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .build() - ) + workflow = GroupChatBuilder(participants=[agent], max_rounds=1, selection_func=selector).build() with pytest.raises(ValueError, match="At least one ChatMessage is required to start the group chat workflow."): async for _ in workflow.run([], stream=True): @@ -514,13 +471,7 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .build() - ) + workflow = GroupChatBuilder(participants=[agent], max_rounds=1, selection_func=selector).build() outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test string", stream=True): @@ -543,13 +494,7 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .build() - ) + workflow = GroupChatBuilder(participants=[agent], max_rounds=1, selection_func=selector).build() outputs: list[list[ChatMessage]] = [] async for event in workflow.run(task_message, stream=True): @@ -575,13 +520,7 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) - .build() - ) + workflow = GroupChatBuilder(participants=[agent], max_rounds=1, selection_func=selector).build() outputs: list[list[ChatMessage]] = [] async for event in workflow.run(conversation, stream=True): @@ -607,13 +546,11 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) # Very low limit - .build() - ) + workflow = GroupChatBuilder( + participants=[agent], + max_rounds=1, # Very low limit + selection_func=selector, 
+ ).build() outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test", stream=True): @@ -642,13 +579,11 @@ def selector(state: GroupChatState) -> str: agent = StubAgent("agent", "response from agent") - workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector) - .participants([agent]) - .with_max_rounds(1) # Hit limit after first response - .build() - ) + workflow = GroupChatBuilder( + participants=[agent], + max_rounds=1, # Hit limit after first response + selection_func=selector, + ).build() outputs: list[list[ChatMessage]] = [] async for event in workflow.run("test", stream=True): @@ -674,13 +609,7 @@ async def test_group_chat_checkpoint_runtime_only() -> None: agent_b = StubAgent("agentB", "Reply from B") selector = make_sequence_selector() - wf = ( - GroupChatBuilder() - .participants([agent_a, agent_b]) - .with_orchestrator(selection_func=selector) - .with_max_rounds(2) - .build() - ) + wf = GroupChatBuilder(participants=[agent_a, agent_b], max_rounds=2, selection_func=selector).build() baseline_output: list[ChatMessage] | None = None async for ev in wf.run("runtime checkpoint test", checkpoint_storage=storage, stream=True): @@ -712,14 +641,12 @@ async def test_group_chat_checkpoint_runtime_overrides_buildtime() -> None: agent_b = StubAgent("agentB", "Reply from B") selector = make_sequence_selector() - wf = ( - GroupChatBuilder() - .participants([agent_a, agent_b]) - .with_orchestrator(selection_func=selector) - .with_max_rounds(2) - .with_checkpointing(buildtime_storage) - .build() - ) + wf = GroupChatBuilder( + participants=[agent_a, agent_b], + max_rounds=2, + checkpoint_storage=buildtime_storage, + selection_func=selector, + ).build() baseline_output: list[ChatMessage] | None = None async for ev in wf.run("override test", checkpoint_storage=runtime_storage, stream=True): if ev.type == "output": @@ -759,10 +686,12 @@ async def selector(state: GroupChatState) -> str: return "alpha" workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector, orchestrator_name="manager") - .participants([alpha, beta]) - .with_max_rounds(2) + GroupChatBuilder( + participants=[alpha, beta], + max_rounds=2, + selection_func=selector, + orchestrator_name="manager", + ) .with_request_info(agents=["beta"]) # Only pause before beta runs .build() ) @@ -811,10 +740,12 @@ async def selector(state: GroupChatState) -> str: return "alpha" workflow = ( - GroupChatBuilder() - .with_orchestrator(selection_func=selector, orchestrator_name="manager") - .participants([alpha]) - .with_max_rounds(1) + GroupChatBuilder( + participants=[alpha], + max_rounds=1, + selection_func=selector, + orchestrator_name="manager", + ) .with_request_info() # No filter - pause for all .build() ) @@ -833,12 +764,13 @@ async def selector(state: GroupChatState) -> str: def test_group_chat_builder_with_request_info_returns_self(): """Test that with_request_info() returns self for method chaining.""" - builder = GroupChatBuilder() + agent = StubAgent("test", "response") + builder = GroupChatBuilder(participants=[agent]) result = builder.with_request_info() assert result is builder # Also test with agents parameter - builder2 = GroupChatBuilder() + builder2 = GroupChatBuilder(participants=[agent]) result2 = builder2.with_request_info(agents=["test"]) assert result2 is builder2 @@ -853,47 +785,41 @@ def selector(state: GroupChatState) -> str: return list(state.participants.keys())[0] with pytest.raises(ValueError, match=r"participant_factories cannot be empty"): - 
GroupChatBuilder().register_participants([]) + GroupChatBuilder(participant_factories=[]) with pytest.raises( ValueError, - match=r"No participants provided\. Call \.participants\(\) or \.register_participants\(\) first\.", + match=r"Either participants or participant_factories must be provided\.", ): - GroupChatBuilder().with_orchestrator(selection_func=selector).build() + GroupChatBuilder() def test_group_chat_builder_rejects_mixing_participants_and_factories(): - """Test that mixing .participants() and .register_participants() raises an error.""" + """Test that passing both participants and participant_factories to the constructor raises an error.""" alpha = StubAgent("alpha", "reply from alpha") - # Case 1: participants first, then register_participants - with pytest.raises(ValueError, match="Cannot mix .participants"): - GroupChatBuilder().participants([alpha]).register_participants([lambda: StubAgent("beta", "reply from beta")]) - - # Case 2: register_participants first, then participants - with pytest.raises(ValueError, match="Cannot mix .participants"): - GroupChatBuilder().register_participants([lambda: alpha]).participants([StubAgent("beta", "reply from beta")]) + with pytest.raises(ValueError, match="Cannot provide both participants and participant_factories"): + GroupChatBuilder( + participants=[alpha], + participant_factories=[lambda: StubAgent("beta", "reply from beta")], + ) -def test_group_chat_builder_rejects_multiple_calls_to_register_participants(): - """Test that multiple calls to .register_participants() raises an error.""" - with pytest.raises( - ValueError, match=r"register_participants\(\) has already been called on this builder instance." - ): - ( - GroupChatBuilder() - .register_participants([lambda: StubAgent("alpha", "reply from alpha")]) - .register_participants([lambda: StubAgent("beta", "reply from beta")]) +def test_group_chat_builder_rejects_both_factories_and_participants(): + """Test that passing both participant_factories and participants raises an error.""" + with pytest.raises(ValueError, match="Cannot provide both participants and participant_factories"): + GroupChatBuilder( + participant_factories=[lambda: StubAgent("alpha", "reply from alpha")], + participants=[StubAgent("beta", "reply from beta")], ) -def test_group_chat_builder_rejects_multiple_calls_to_participants(): - """Test that multiple calls to .participants() raises an error.""" - with pytest.raises(ValueError, match="participants have already been set"): - ( - GroupChatBuilder() - .participants([StubAgent("alpha", "reply from alpha")]) - .participants([StubAgent("beta", "reply from beta")]) +def test_group_chat_builder_rejects_both_participants_and_factories(): + """Test that passing both participants and participant_factories raises an error.""" + with pytest.raises(ValueError, match="Cannot provide both participants and participant_factories"): + GroupChatBuilder( + participants=[StubAgent("alpha", "reply from alpha")], + participant_factories=[lambda: StubAgent("beta", "reply from beta")], ) @@ -913,13 +839,11 @@ def create_beta() -> StubAgent: selector = make_sequence_selector() - workflow = ( - GroupChatBuilder() - .register_participants([create_alpha, create_beta]) - .with_orchestrator(selection_func=selector) - .with_max_rounds(2) - .build() - ) + workflow = GroupChatBuilder( + participant_factories=[create_alpha, create_beta], + max_rounds=2, + selection_func=selector, + ).build() # Factories should be called during build assert call_count == 2 @@ -948,12 +872,7 @@ def 
create_beta() -> StubAgent:
 
     selector = make_sequence_selector()
 
-    builder = (
-        GroupChatBuilder()
-        .register_participants([create_alpha, create_beta])
-        .with_orchestrator(selection_func=selector)
-        .with_max_rounds(2)
-    )
+    builder = GroupChatBuilder(participant_factories=[create_alpha, create_beta], max_rounds=2, selection_func=selector)
 
     # Build first workflow
     wf1 = builder.build()
@@ -980,14 +899,12 @@ def create_beta() -> StubAgent:
 
     selector = make_sequence_selector()
 
-    workflow = (
-        GroupChatBuilder()
-        .register_participants([create_alpha, create_beta])
-        .with_orchestrator(selection_func=selector)
-        .with_checkpointing(storage)
-        .with_max_rounds(2)
-        .build()
-    )
+    workflow = GroupChatBuilder(
+        participant_factories=[create_alpha, create_beta],
+        checkpoint_storage=storage,
+        max_rounds=2,
+        selection_func=selector,
+    ).build()
 
     outputs: list[WorkflowEvent] = []
     async for event in workflow.run("checkpoint test", stream=True):
@@ -1014,16 +931,15 @@ def selector(state: GroupChatState) -> str:
 
     def agent_factory() -> ChatAgent:
         return cast(ChatAgent, StubManagerAgent())
 
-    builder = GroupChatBuilder().with_orchestrator(selection_func=selector)
+    agent = StubAgent("test", "response")
 
-    # Already has a selection_func, should fail on second call
-    with pytest.raises(ValueError, match=r"A selection function has already been configured"):
-        builder.with_orchestrator(selection_func=selector)
+    # Both selection_func and orchestrator_agent provided simultaneously - should fail
+    with pytest.raises(ValueError, match=r"Exactly one of"):
+        GroupChatBuilder(participants=[agent], selection_func=selector, orchestrator_agent=StubManagerAgent())
 
-    # Test with agent_factory
-    builder2 = GroupChatBuilder().with_orchestrator(agent=agent_factory)
-    with pytest.raises(ValueError, match=r"A factory has already been configured"):
-        builder2.with_orchestrator(agent=agent_factory)
+    # Same rule with an agent factory: orchestrator_agent combined with selection_func also fails
+    with pytest.raises(ValueError, match=r"Exactly one of"):
+        GroupChatBuilder(participants=[agent], orchestrator_agent=agent_factory, selection_func=selector)
 
 
 def test_group_chat_builder_requires_exactly_one_orchestrator_option():
@@ -1035,13 +951,15 @@ def selector(state: GroupChatState) -> str:
 
     def agent_factory() -> ChatAgent:
         return cast(ChatAgent, StubManagerAgent())
 
-    # No options provided
-    with pytest.raises(ValueError, match="Exactly one of"):
-        GroupChatBuilder().with_orchestrator()  # type: ignore
+    agent = StubAgent("test", "response")
+
+    # No orchestrator options provided - only fails at build() time
+    with pytest.raises(ValueError, match="No orchestrator has been configured"):
+        GroupChatBuilder(participants=[agent]).build()
 
     # Multiple options provided
     with pytest.raises(ValueError, match="Exactly one of"):
-        GroupChatBuilder().with_orchestrator(selection_func=selector, agent=agent_factory)  # type: ignore
+        GroupChatBuilder(participants=[agent], selection_func=selector, orchestrator_agent=agent_factory)
 
 
 async def test_group_chat_with_orchestrator_factory_returning_chat_agent():
@@ -1112,7 +1030,7 @@ def agent_factory() -> ChatAgent:
     alpha = StubAgent("alpha", "reply from alpha")
     beta = StubAgent("beta", "reply from beta")
 
-    workflow = GroupChatBuilder().participants([alpha, beta]).with_orchestrator(agent=agent_factory).build()
+    workflow = GroupChatBuilder(participants=[alpha, beta], orchestrator_agent=agent_factory).build()
 
     # Factory should be called during build
     assert factory_call_count == 1
@@ -1156,7 +1074,7 @@ def 
orchestrator_factory() -> BaseGroupChatOrchestrator: alpha = StubAgent("alpha", "reply from alpha") - workflow = GroupChatBuilder().participants([alpha]).with_orchestrator(orchestrator=orchestrator_factory).build() + workflow = GroupChatBuilder(participants=[alpha], orchestrator=orchestrator_factory).build() # Factory should be called during build assert factory_call_count == 1 @@ -1176,7 +1094,7 @@ def agent_factory() -> ChatAgent: alpha = StubAgent("alpha", "reply from alpha") beta = StubAgent("beta", "reply from beta") - builder = GroupChatBuilder().participants([alpha, beta]).with_orchestrator(agent=agent_factory) + builder = GroupChatBuilder(participants=[alpha, beta], orchestrator_agent=agent_factory) # Build first workflow wf1 = builder.build() @@ -1202,13 +1120,13 @@ def invalid_factory() -> Any: TypeError, match=r"Orchestrator factory must return ChatAgent or BaseGroupChatOrchestrator instance", ): - (GroupChatBuilder().participants([alpha]).with_orchestrator(orchestrator=invalid_factory).build()) + GroupChatBuilder(participants=[alpha], orchestrator=invalid_factory).build() with pytest.raises( TypeError, match=r"Orchestrator factory must return ChatAgent or BaseGroupChatOrchestrator instance", ): - (GroupChatBuilder().participants([alpha]).with_orchestrator(agent=invalid_factory).build()) + GroupChatBuilder(participants=[alpha], orchestrator_agent=invalid_factory).build() def test_group_chat_with_both_participant_and_orchestrator_factories(): @@ -1231,12 +1149,10 @@ def agent_factory() -> ChatAgent: agent_factory_call_count += 1 return cast(ChatAgent, StubManagerAgent()) - workflow = ( - GroupChatBuilder() - .register_participants([create_alpha, create_beta]) - .with_orchestrator(agent=agent_factory) - .build() - ) + workflow = GroupChatBuilder( + participant_factories=[create_alpha, create_beta], + orchestrator_agent=agent_factory, + ).build() # All factories should be called during build assert participant_factory_call_count == 2 @@ -1268,9 +1184,7 @@ def agent_factory() -> ChatAgent: agent_factory_call_count += 1 return cast(ChatAgent, StubManagerAgent()) - builder = ( - GroupChatBuilder().register_participants([create_alpha, create_beta]).with_orchestrator(agent=agent_factory) - ) + builder = GroupChatBuilder(participant_factories=[create_alpha, create_beta], orchestrator_agent=agent_factory) # Build first workflow wf1 = builder.build() diff --git a/python/packages/orchestrations/tests/test_handoff.py b/python/packages/orchestrations/tests/test_handoff.py index 18e3b6e06c..6247afaa61 100644 --- a/python/packages/orchestrations/tests/test_handoff.py +++ b/python/packages/orchestrations/tests/test_handoff.py @@ -138,9 +138,11 @@ async def test_handoff(): # Without explicitly defining handoffs, the builder will create connections # between all agents. workflow = ( - HandoffBuilder(participants=[triage, specialist, escalation]) + HandoffBuilder( + participants=[triage, specialist, escalation], + termination_condition=lambda conv: sum(1 for m in conv if m.role == "user") >= 2, + ) .with_start_agent(triage) - .with_termination_condition(lambda conv: sum(1 for m in conv if m.role == "user") >= 2) .build() ) @@ -164,7 +166,15 @@ async def test_autonomous_mode_yields_output_without_user_request(): specialist = MockHandoffAgent(name="specialist") workflow = ( - HandoffBuilder(participants=[triage, specialist]) + HandoffBuilder( + participants=[triage, specialist], + # This termination condition ensures the workflow runs through both agents. 
+ # First message is the user message to triage, second is triage's response, which + # is a handoff to specialist, third is specialist's response that should not request + # user input due to autonomous mode. Fourth message will come from the specialist + # again and will trigger termination. + termination_condition=lambda conv: len(conv) >= 4, + ) .with_start_agent(triage) # Since specialist has no handoff, the specialist will be generating normal responses. # With autonomous mode, this should continue until the termination condition is met. @@ -172,12 +182,6 @@ async def test_autonomous_mode_yields_output_without_user_request(): agents=[specialist], turn_limits={resolve_agent_id(specialist): 1}, ) - # This termination condition ensures the workflow runs through both agents. - # First message is the user message to triage, second is triage's response, which - # is a handoff to specialist, third is specialist's response that should not request - # user input due to autonomous mode. Fourth message will come from the specialist - # again and will trigger termination. - .with_termination_condition(lambda conv: len(conv) >= 4) .build() ) @@ -200,10 +204,9 @@ async def test_autonomous_mode_resumes_user_input_on_turn_limit(): worker = MockHandoffAgent(name="worker") workflow = ( - HandoffBuilder(participants=[triage, worker]) + HandoffBuilder(participants=[triage, worker], termination_condition=lambda conv: False) .with_start_agent(triage) .with_autonomous_mode(agents=[worker], turn_limits={resolve_agent_id(worker): 2}) - .with_termination_condition(lambda conv: False) .build() ) @@ -244,9 +247,8 @@ async def async_termination(conv: list[ChatMessage]) -> bool: worker = MockHandoffAgent(name="worker") workflow = ( - HandoffBuilder(participants=[coordinator, worker]) + HandoffBuilder(participants=[coordinator, worker], termination_condition=async_termination) .with_start_agent(coordinator) - .with_termination_condition(async_termination) .build() ) @@ -493,9 +495,11 @@ def create_specialist() -> MockHandoffAgent: return MockHandoffAgent(name="specialist") workflow = ( - HandoffBuilder(participant_factories={"triage": create_triage, "specialist": create_specialist}) + HandoffBuilder( + participant_factories={"triage": create_triage, "specialist": create_specialist}, + termination_condition=lambda conv: sum(1 for m in conv if m.role == "user") >= 2, + ) .with_start_agent("triage") - .with_termination_condition(lambda conv: sum(1 for m in conv if m.role == "user") >= 2) .build() ) @@ -563,12 +567,12 @@ def create_specialist_b() -> MockHandoffAgent: "triage": create_triage, "specialist_a": create_specialist_a, "specialist_b": create_specialist_b, - } + }, + termination_condition=lambda conv: sum(1 for m in conv if m.role == "user") >= 3, ) .with_start_agent("triage") .add_handoff("triage", ["specialist_a", "specialist_b"]) .add_handoff("specialist_a", ["specialist_b"]) - .with_termination_condition(lambda conv: sum(1 for m in conv if m.role == "user") >= 3) .build() ) @@ -606,10 +610,12 @@ def create_specialist() -> MockHandoffAgent: return MockHandoffAgent(name="specialist") workflow = ( - HandoffBuilder(participant_factories={"triage": create_triage, "specialist": create_specialist}) + HandoffBuilder( + participant_factories={"triage": create_triage, "specialist": create_specialist}, + checkpoint_storage=storage, + termination_condition=lambda conv: sum(1 for m in conv if m.role == "user") >= 2, + ) .with_start_agent("triage") - .with_checkpointing(storage) - .with_termination_condition(lambda 
conv: sum(1 for m in conv if m.role == "user") >= 2) .build() ) diff --git a/python/packages/orchestrations/tests/test_magentic.py b/python/packages/orchestrations/tests/test_magentic.py index f237385d1b..5846b56ae4 100644 --- a/python/packages/orchestrations/tests/test_magentic.py +++ b/python/packages/orchestrations/tests/test_magentic.py @@ -186,7 +186,7 @@ async def test_magentic_builder_returns_workflow_and_runs() -> None: manager = FakeManager() agent = StubAgent(manager.next_speaker_name, "first draft") - workflow = MagenticBuilder().participants([agent]).with_manager(manager=manager).build() + workflow = MagenticBuilder(participants=[agent], manager=manager).build() assert isinstance(workflow, Workflow) @@ -212,7 +212,7 @@ async def test_magentic_as_agent_does_not_accept_conversation() -> None: manager = FakeManager() writer = StubAgent(manager.next_speaker_name, "summary response") - workflow = MagenticBuilder().participants([writer]).with_manager(manager=manager).build() + workflow = MagenticBuilder(participants=[writer], manager=manager).build() agent = workflow.as_agent(name="magentic-agent") conversation = [ @@ -240,7 +240,7 @@ async def test_standard_manager_plan_and_replan_combined_ledger(): async def test_magentic_workflow_plan_review_approval_to_completion(): manager = FakeManager() - wf = MagenticBuilder().participants([DummyExec("agentA")]).with_manager(manager=manager).with_plan_review().build() + wf = MagenticBuilder(participants=[DummyExec("agentA")], enable_plan_review=True, manager=manager).build() req_event: WorkflowEvent | None = None async for ev in wf.run("do work", stream=True): @@ -278,13 +278,11 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: # typ return await super().replan(magentic_context) manager = CountingManager() - wf = ( - MagenticBuilder() - .participants([DummyExec(name=manager.next_speaker_name)]) - .with_manager(manager=manager) - .with_plan_review() - .build() - ) + wf = MagenticBuilder( + participants=[DummyExec(name=manager.next_speaker_name)], + enable_plan_review=True, + manager=manager, + ).build() # Wait for the initial plan review request req_event: WorkflowEvent | None = None @@ -324,12 +322,7 @@ async def replan(self, magentic_context: MagenticContext) -> ChatMessage: # typ async def test_magentic_orchestrator_round_limit_produces_partial_result(): manager = FakeManager(max_round_count=1) - wf = ( - MagenticBuilder() - .participants([DummyExec(name=manager.next_speaker_name)]) - .with_manager(manager=manager) - .build() - ) + wf = MagenticBuilder(participants=[DummyExec(name=manager.next_speaker_name)], manager=manager).build() events: list[WorkflowEvent] = [] async for ev in wf.run("round limit test", stream=True): @@ -354,14 +347,12 @@ async def test_magentic_checkpoint_resume_round_trip(): storage = InMemoryCheckpointStorage() manager1 = FakeManager() - wf = ( - MagenticBuilder() - .participants([DummyExec(name=manager1.next_speaker_name)]) - .with_manager(manager=manager1) - .with_plan_review() - .with_checkpointing(storage) - .build() - ) + wf = MagenticBuilder( + participants=[DummyExec(name=manager1.next_speaker_name)], + enable_plan_review=True, + checkpoint_storage=storage, + manager=manager1, + ).build() task_text = "checkpoint task" req_event: WorkflowEvent | None = None @@ -377,14 +368,12 @@ async def test_magentic_checkpoint_resume_round_trip(): resume_checkpoint = checkpoints[-1] manager2 = FakeManager() - wf_resume = ( - MagenticBuilder() - 
.participants([DummyExec(name=manager2.next_speaker_name)]) - .with_manager(manager=manager2) - .with_plan_review() - .with_checkpointing(storage) - .build() - ) + wf_resume = MagenticBuilder( + participants=[DummyExec(name=manager2.next_speaker_name)], + enable_plan_review=True, + checkpoint_storage=storage, + manager=manager2, + ).build() completed: WorkflowEvent | None = None req_event = None @@ -580,13 +569,7 @@ async def _run_stream(self): async def _collect_agent_responses_setup(participant: SupportsAgentRun) -> list[ChatMessage]: captured: list[ChatMessage] = [] - wf = ( - MagenticBuilder() - .participants([participant]) - .with_manager(manager=InvokeOnceManager()) - .with_intermediate_outputs() - .build() - ) + wf = MagenticBuilder(participants=[participant], intermediate_outputs=True, manager=InvokeOnceManager()).build() # Run a bounded stream to allow one invoke and then completion events: list[WorkflowEvent] = [] @@ -632,13 +615,9 @@ async def _collect_checkpoints( async def test_magentic_checkpoint_resume_inner_loop_superstep(): storage = InMemoryCheckpointStorage() - workflow = ( - MagenticBuilder() - .participants([StubThreadAgent()]) - .with_manager(manager=InvokeOnceManager()) - .with_checkpointing(storage) - .build() - ) + workflow = MagenticBuilder( + participants=[StubThreadAgent()], checkpoint_storage=storage, manager=InvokeOnceManager() + ).build() async for event in workflow.run("inner-loop task", stream=True): if event.type == "output": @@ -647,13 +626,9 @@ async def test_magentic_checkpoint_resume_inner_loop_superstep(): checkpoints = await _collect_checkpoints(storage) inner_loop_checkpoint = next(cp for cp in checkpoints if cp.metadata.get("superstep") == 1) # type: ignore[reportUnknownMemberType] - resumed = ( - MagenticBuilder() - .participants([StubThreadAgent()]) - .with_manager(manager=InvokeOnceManager()) - .with_checkpointing(storage) - .build() - ) + resumed = MagenticBuilder( + participants=[StubThreadAgent()], checkpoint_storage=storage, manager=InvokeOnceManager() + ).build() completed: WorkflowEvent | None = None async for event in resumed.run(checkpoint_id=inner_loop_checkpoint.checkpoint_id, stream=True): # type: ignore[reportUnknownMemberType] @@ -670,13 +645,7 @@ async def test_magentic_checkpoint_resume_from_saved_state(): # Use the working InvokeOnceManager first to get a completed workflow manager = InvokeOnceManager() - workflow = ( - MagenticBuilder() - .participants([StubThreadAgent()]) - .with_manager(manager=manager) - .with_checkpointing(storage) - .build() - ) + workflow = MagenticBuilder(participants=[StubThreadAgent()], checkpoint_storage=storage, manager=manager).build() async for event in workflow.run("checkpoint resume task", stream=True): if event.type == "output": @@ -687,13 +656,9 @@ async def test_magentic_checkpoint_resume_from_saved_state(): # Verify we can resume from the last saved checkpoint resumed_state = checkpoints[-1] # Use the last checkpoint - resumed_workflow = ( - MagenticBuilder() - .participants([StubThreadAgent()]) - .with_manager(manager=InvokeOnceManager()) - .with_checkpointing(storage) - .build() - ) + resumed_workflow = MagenticBuilder( + participants=[StubThreadAgent()], checkpoint_storage=storage, manager=InvokeOnceManager() + ).build() completed: WorkflowEvent | None = None async for event in resumed_workflow.run(checkpoint_id=resumed_state.checkpoint_id, stream=True): @@ -708,14 +673,12 @@ async def test_magentic_checkpoint_resume_rejects_participant_renames(): manager = InvokeOnceManager() - workflow = 
( - MagenticBuilder() - .participants([StubThreadAgent()]) - .with_manager(manager=manager) - .with_plan_review() - .with_checkpointing(storage) - .build() - ) + workflow = MagenticBuilder( + participants=[StubThreadAgent()], + enable_plan_review=True, + checkpoint_storage=storage, + manager=manager, + ).build() req_event: WorkflowEvent | None = None async for event in workflow.run("task", stream=True): @@ -728,14 +691,12 @@ async def test_magentic_checkpoint_resume_rejects_participant_renames(): checkpoints = await _collect_checkpoints(storage) target_checkpoint = checkpoints[-1] - renamed_workflow = ( - MagenticBuilder() - .participants([StubThreadAgent(name="renamedAgent")]) - .with_manager(manager=InvokeOnceManager()) - .with_plan_review() - .with_checkpointing(storage) - .build() - ) + renamed_workflow = MagenticBuilder( + participants=[StubThreadAgent(name="renamedAgent")], + enable_plan_review=True, + checkpoint_storage=storage, + manager=InvokeOnceManager(), + ).build() with pytest.raises(WorkflowCheckpointException, match="Workflow graph has changed"): async for _ in renamed_workflow.run( @@ -772,7 +733,7 @@ async def prepare_final_answer(self, magentic_context: MagenticContext) -> ChatM async def test_magentic_stall_and_reset_reach_limits(): manager = NotProgressingManager(max_round_count=10, max_stall_count=0, max_reset_count=1) - wf = MagenticBuilder().participants([DummyExec("agentA")]).with_manager(manager=manager).build() + wf = MagenticBuilder(participants=[DummyExec("agentA")], manager=manager).build() events: list[WorkflowEvent] = [] async for ev in wf.run("test limits", stream=True): @@ -797,7 +758,7 @@ async def test_magentic_checkpoint_runtime_only() -> None: storage = InMemoryCheckpointStorage() manager = FakeManager(max_round_count=10) - wf = MagenticBuilder().participants([DummyExec("agentA")]).with_manager(manager=manager).build() + wf = MagenticBuilder(participants=[DummyExec("agentA")], manager=manager).build() baseline_output: ChatMessage | None = None async for ev in wf.run("runtime checkpoint test", checkpoint_storage=storage, stream=True): @@ -829,13 +790,9 @@ async def test_magentic_checkpoint_runtime_overrides_buildtime() -> None: runtime_storage = FileCheckpointStorage(temp_dir2) manager = FakeManager(max_round_count=10) - wf = ( - MagenticBuilder() - .participants([DummyExec("agentA")]) - .with_manager(manager=manager) - .with_checkpointing(buildtime_storage) - .build() - ) + wf = MagenticBuilder( + participants=[DummyExec("agentA")], checkpoint_storage=buildtime_storage, manager=manager + ).build() baseline_output: ChatMessage | None = None async for ev in wf.run("override test", checkpoint_storage=runtime_storage, stream=True): @@ -884,13 +841,7 @@ async def test_magentic_checkpoint_restore_no_duplicate_history(): manager = FakeManager(max_round_count=10) storage = InMemoryCheckpointStorage() - wf = ( - MagenticBuilder() - .participants([DummyExec("agentA")]) - .with_manager(manager=manager) - .with_checkpointing(storage) - .build() - ) + wf = MagenticBuilder(participants=[DummyExec("agentA")], checkpoint_storage=storage, manager=manager).build() # Run with conversation history to create initial checkpoint conversation: list[ChatMessage] = [ @@ -947,47 +898,41 @@ async def test_magentic_checkpoint_restore_no_duplicate_history(): def test_magentic_builder_rejects_empty_participant_factories(): """Test that MagenticBuilder rejects empty participant_factories list.""" with pytest.raises(ValueError, match=r"participant_factories cannot be empty"): - 
MagenticBuilder().register_participants([]) + MagenticBuilder(participant_factories=[]) with pytest.raises( ValueError, - match=r"No participants provided\. Call \.participants\(\) or \.register_participants\(\) first\.", + match=r"Either participants or participant_factories must be provided\.", ): - MagenticBuilder().with_manager(manager=FakeManager()).build() + MagenticBuilder() def test_magentic_builder_rejects_mixing_participants_and_factories(): - """Test that mixing .participants() and .register_participants() raises an error.""" + """Test that passing both participants and participant_factories to the constructor raises an error.""" agent = StubAgent("agentA", "reply from agentA") - # Case 1: participants first, then register_participants - with pytest.raises(ValueError, match="Cannot mix .participants"): - MagenticBuilder().participants([agent]).register_participants([lambda: StubAgent("agentB", "reply")]) - - # Case 2: register_participants first, then participants - with pytest.raises(ValueError, match="Cannot mix .participants"): - MagenticBuilder().register_participants([lambda: agent]).participants([StubAgent("agentB", "reply")]) + with pytest.raises(ValueError, match="Cannot provide both participants and participant_factories"): + MagenticBuilder( + participants=[agent], + participant_factories=[lambda: StubAgent("agentB", "reply")], + ) -def test_magentic_builder_rejects_multiple_calls_to_register_participants(): - """Test that multiple calls to .register_participants() raises an error.""" - with pytest.raises( - ValueError, match=r"register_participants\(\) has already been called on this builder instance." - ): - ( - MagenticBuilder() - .register_participants([lambda: StubAgent("agentA", "reply from agentA")]) - .register_participants([lambda: StubAgent("agentB", "reply from agentB")]) +def test_magentic_builder_rejects_both_factories_and_participants(): + """Test that passing both participant_factories and participants raises an error.""" + with pytest.raises(ValueError, match="Cannot provide both participants and participant_factories"): + MagenticBuilder( + participant_factories=[lambda: StubAgent("agentA", "reply from agentA")], + participants=[StubAgent("agentB", "reply from agentB")], ) -def test_magentic_builder_rejects_multiple_calls_to_participants(): - """Test that multiple calls to .participants() raises an error.""" - with pytest.raises(ValueError, match="participants have already been set"): - ( - MagenticBuilder() - .participants([StubAgent("agentA", "reply from agentA")]) - .participants([StubAgent("agentB", "reply from agentB")]) +def test_magentic_builder_rejects_both_participants_and_factories(): + """Test that passing both participants and participant_factories raises an error.""" + with pytest.raises(ValueError, match="Cannot provide both participants and participant_factories"): + MagenticBuilder( + participants=[StubAgent("agentA", "reply from agentA")], + participant_factories=[lambda: StubAgent("agentB", "reply from agentB")], ) @@ -1001,7 +946,7 @@ def create_agent() -> StubAgent: return StubAgent("agentA", "reply from agentA") manager = FakeManager() - workflow = MagenticBuilder().register_participants([create_agent]).with_manager(manager=manager).build() + workflow = MagenticBuilder(participant_factories=[create_agent], manager=manager).build() # Factory should be called during build assert call_count == 1 @@ -1023,7 +968,7 @@ def create_agent() -> StubAgent: call_count += 1 return StubAgent("agentA", "reply from agentA") - builder = 
MagenticBuilder().register_participants([create_agent]).with_manager(manager=FakeManager()) + builder = MagenticBuilder(participant_factories=[create_agent], manager=FakeManager()) # Build first workflow wf1 = builder.build() @@ -1045,13 +990,9 @@ def create_agent() -> StubAgent: return StubAgent("agentA", "reply from agentA") manager = FakeManager() - workflow = ( - MagenticBuilder() - .register_participants([create_agent]) - .with_manager(manager=manager) - .with_checkpointing(storage) - .build() - ) + workflow = MagenticBuilder( + participant_factories=[create_agent], checkpoint_storage=storage, manager=manager + ).build() outputs: list[WorkflowEvent] = [] async for event in workflow.run("checkpoint test", stream=True): @@ -1072,27 +1013,27 @@ def create_agent() -> StubAgent: def test_magentic_builder_rejects_multiple_manager_configurations(): """Test that configuring multiple managers raises ValueError.""" manager = FakeManager() + agent = StubAgent("agentA", "reply") - builder = MagenticBuilder().with_manager(manager=manager) - - with pytest.raises(ValueError, match=r"with_manager\(\) has already been called"): - builder.with_manager(manager=manager) + with pytest.raises(ValueError, match=r"Exactly one of"): + MagenticBuilder(participants=[agent], manager=manager, manager_agent=StubManagerAgent()) def test_magentic_builder_requires_exactly_one_manager_option(): """Test that exactly one manager option must be provided.""" manager = FakeManager() + agent = StubAgent("agentA", "reply") def manager_factory() -> MagenticManagerBase: return FakeManager() - # No options provided - with pytest.raises(ValueError, match="Exactly one of"): - MagenticBuilder().with_manager() # type: ignore + # No options provided - only fails at build() time + with pytest.raises(ValueError, match="No manager configured"): + MagenticBuilder(participants=[agent]).build() # Multiple options provided with pytest.raises(ValueError, match="Exactly one of"): - MagenticBuilder().with_manager(manager=manager, manager_factory=manager_factory) # type: ignore + MagenticBuilder(participants=[agent], manager=manager, manager_factory=manager_factory) async def test_magentic_with_manager_factory(): @@ -1105,7 +1046,7 @@ def manager_factory() -> MagenticManagerBase: return FakeManager() agent = StubAgent("agentA", "reply from agentA") - workflow = MagenticBuilder().participants([agent]).with_manager(manager_factory=manager_factory).build() + workflow = MagenticBuilder(participants=[agent], manager_factory=manager_factory).build() # Factory should be called during build assert factory_call_count == 1 @@ -1128,12 +1069,9 @@ def agent_factory() -> SupportsAgentRun: return cast(SupportsAgentRun, StubManagerAgent()) participant = StubAgent("agentA", "reply from agentA") - workflow = ( - MagenticBuilder() - .participants([participant]) - .with_manager(agent_factory=agent_factory, max_round_count=1) - .build() - ) + workflow = MagenticBuilder( + participants=[participant], manager_agent_factory=agent_factory, max_round_count=1 + ).build() # Factory should be called during build assert factory_call_count == 1 @@ -1158,7 +1096,7 @@ def manager_factory() -> MagenticManagerBase: return FakeManager() agent = StubAgent("agentA", "reply from agentA") - builder = MagenticBuilder().participants([agent]).with_manager(manager_factory=manager_factory) + builder = MagenticBuilder(participants=[agent], manager_factory=manager_factory) # Build first workflow wf1 = builder.build() @@ -1189,9 +1127,7 @@ def manager_factory() -> MagenticManagerBase: 
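
The builder-reuse tests on either side of this hunk all pin down the same contract: factories passed to the constructor are stored, not called, and each build() invokes them again, so one builder can produce several independent workflows. A minimal self-contained sketch of that behavior follows; TinyBuilder and its fields are illustrative stand-ins inferred from this diff, not agent_framework code.

from dataclasses import dataclass, field
from typing import Any, Callable


@dataclass
class TinyBuilder:
    # Factories are stored at construction time and invoked only in build(),
    # mirroring participant_factories / manager_factory in the tests above.
    participant_factories: list[Callable[[], Any]] = field(default_factory=list)
    manager_factory: Callable[[], Any] | None = None

    def build(self) -> dict[str, Any]:
        participants = [factory() for factory in self.participant_factories]
        manager = self.manager_factory() if self.manager_factory else None
        return {"participants": participants, "manager": manager}


calls = 0


def create_agent() -> object:
    global calls
    calls += 1
    return object()


builder = TinyBuilder(participant_factories=[create_agent], manager_factory=object)
wf1 = builder.build()
wf2 = builder.build()
assert calls == 2  # one factory call per build(), as the reuse tests assert
assert wf1["participants"][0] is not wf2["participants"][0]  # fresh instances per build
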
manager_factory_call_count += 1 return FakeManager() - workflow = ( - MagenticBuilder().register_participants([create_agent]).with_manager(manager_factory=manager_factory).build() - ) + workflow = MagenticBuilder(participant_factories=[create_agent], manager_factory=manager_factory).build() # All factories should be called during build assert participant_factory_call_count == 1 @@ -1216,7 +1152,7 @@ def manager_factory() -> MagenticManagerBase: manager_factory_call_count += 1 return FakeManager() - builder = MagenticBuilder().register_participants([create_agent]).with_manager(manager_factory=manager_factory) + builder = MagenticBuilder(participant_factories=[create_agent], manager_factory=manager_factory) # Build first workflow wf1 = builder.build() @@ -1266,25 +1202,21 @@ def agent_factory() -> SupportsAgentRun: ) participant = StubAgent("agentA", "reply from agentA") - workflow = ( - MagenticBuilder() - .participants([participant]) - .with_manager( - agent_factory=agent_factory, - task_ledger=custom_task_ledger, - max_stall_count=custom_max_stall_count, - max_reset_count=custom_max_reset_count, - max_round_count=custom_max_round_count, - task_ledger_facts_prompt=custom_facts_prompt, - task_ledger_plan_prompt=custom_plan_prompt, - task_ledger_full_prompt=custom_full_prompt, - task_ledger_facts_update_prompt=custom_facts_update_prompt, - task_ledger_plan_update_prompt=custom_plan_update_prompt, - progress_ledger_prompt=custom_progress_prompt, - final_answer_prompt=custom_final_prompt, - ) - .build() - ) + workflow = MagenticBuilder( + participants=[participant], + manager_agent_factory=agent_factory, + task_ledger=custom_task_ledger, + max_stall_count=custom_max_stall_count, + max_reset_count=custom_max_reset_count, + max_round_count=custom_max_round_count, + task_ledger_facts_prompt=custom_facts_prompt, + task_ledger_plan_prompt=custom_plan_prompt, + task_ledger_full_prompt=custom_full_prompt, + task_ledger_facts_update_prompt=custom_facts_update_prompt, + task_ledger_plan_update_prompt=custom_plan_update_prompt, + progress_ledger_prompt=custom_progress_prompt, + final_answer_prompt=custom_final_prompt, + ).build() # Factory should be called during build assert factory_call_count == 1 diff --git a/python/packages/orchestrations/tests/test_sequential.py b/python/packages/orchestrations/tests/test_sequential.py index 68d78b1fa9..cb6f3b0872 100644 --- a/python/packages/orchestrations/tests/test_sequential.py +++ b/python/packages/orchestrations/tests/test_sequential.py @@ -68,38 +68,36 @@ async def summarize(self, conversation: list[str], ctx: WorkflowContext[list[Cha def test_sequential_builder_rejects_empty_participants() -> None: with pytest.raises(ValueError): - SequentialBuilder().participants([]) + SequentialBuilder(participants=[]) def test_sequential_builder_rejects_empty_participant_factories() -> None: with pytest.raises(ValueError): - SequentialBuilder().register_participants([]) + SequentialBuilder(participant_factories=[]) def test_sequential_builder_rejects_mixing_participants_and_factories() -> None: - """Test that mixing .participants() and .register_participants() raises an error.""" + """Test that passing both participants and participant_factories to the constructor raises an error.""" a1 = _EchoAgent(id="agent1", name="A1") - # Try .participants() then .register_participants() - with pytest.raises(ValueError, match="Cannot mix"): - SequentialBuilder().participants([a1]).register_participants([lambda: _EchoAgent(id="agent2", name="A2")]) - - # Try .register_participants() 
then .participants() - with pytest.raises(ValueError, match="Cannot mix"): - SequentialBuilder().register_participants([lambda: _EchoAgent(id="agent1", name="A1")]).participants([a1]) + with pytest.raises(ValueError, match="Cannot provide both participants and participant_factories"): + SequentialBuilder( + participants=[a1], + participant_factories=[lambda: _EchoAgent(id="agent2", name="A2")], + ) def test_sequential_builder_validation_rejects_invalid_executor() -> None: """Test that adding an invalid executor to the builder raises an error.""" with pytest.raises(TypeCompatibilityError): - SequentialBuilder().participants([_EchoAgent(id="agent1", name="A1"), _InvalidExecutor(id="invalid")]).build() + SequentialBuilder(participants=[_EchoAgent(id="agent1", name="A1"), _InvalidExecutor(id="invalid")]).build() async def test_sequential_agents_append_to_context() -> None: a1 = _EchoAgent(id="agent1", name="A1") a2 = _EchoAgent(id="agent2", name="A2") - wf = SequentialBuilder().participants([a1, a2]).build() + wf = SequentialBuilder(participants=[a1, a2]).build() completed = False output: list[ChatMessage] | None = None @@ -132,7 +130,7 @@ def create_agent1() -> _EchoAgent: def create_agent2() -> _EchoAgent: return _EchoAgent(id="agent2", name="A2") - wf = SequentialBuilder().register_participants([create_agent1, create_agent2]).build() + wf = SequentialBuilder(participant_factories=[create_agent1, create_agent2]).build() completed = False output: list[ChatMessage] | None = None @@ -158,7 +156,7 @@ async def test_sequential_with_custom_executor_summary() -> None: a1 = _EchoAgent(id="agent1", name="A1") summarizer = _SummarizerExec(id="summarizer") - wf = SequentialBuilder().participants([a1, summarizer]).build() + wf = SequentialBuilder(participants=[a1, summarizer]).build() completed = False output: list[ChatMessage] | None = None @@ -189,7 +187,7 @@ def create_agent() -> _EchoAgent: def create_summarizer() -> _SummarizerExec: return _SummarizerExec(id="summarizer") - wf = SequentialBuilder().register_participants([create_agent, create_summarizer]).build() + wf = SequentialBuilder(participant_factories=[create_agent, create_summarizer]).build() completed = False output: list[ChatMessage] | None = None @@ -215,7 +213,7 @@ async def test_sequential_checkpoint_resume_round_trip() -> None: storage = InMemoryCheckpointStorage() initial_agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) - wf = SequentialBuilder().participants(list(initial_agents)).with_checkpointing(storage).build() + wf = SequentialBuilder(participants=list(initial_agents), checkpoint_storage=storage).build() baseline_output: list[ChatMessage] | None = None async for ev in wf.run("checkpoint sequential", stream=True): @@ -236,7 +234,7 @@ async def test_sequential_checkpoint_resume_round_trip() -> None: ) resumed_agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) - wf_resume = SequentialBuilder().participants(list(resumed_agents)).with_checkpointing(storage).build() + wf_resume = SequentialBuilder(participants=list(resumed_agents), checkpoint_storage=storage).build() resumed_output: list[ChatMessage] | None = None async for ev in wf_resume.run(checkpoint_id=resume_checkpoint.checkpoint_id, stream=True): @@ -258,7 +256,7 @@ async def test_sequential_checkpoint_runtime_only() -> None: storage = InMemoryCheckpointStorage() agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) - wf = SequentialBuilder().participants(list(agents)).build() + wf = 
SequentialBuilder(participants=list(agents)).build() baseline_output: list[ChatMessage] | None = None async for ev in wf.run("runtime checkpoint test", checkpoint_storage=storage, stream=True): @@ -279,7 +277,7 @@ async def test_sequential_checkpoint_runtime_only() -> None: ) resumed_agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) - wf_resume = SequentialBuilder().participants(list(resumed_agents)).build() + wf_resume = SequentialBuilder(participants=list(resumed_agents)).build() resumed_output: list[ChatMessage] | None = None async for ev in wf_resume.run( @@ -309,7 +307,7 @@ async def test_sequential_checkpoint_runtime_overrides_buildtime() -> None: runtime_storage = FileCheckpointStorage(temp_dir2) agents = (_EchoAgent(id="agent1", name="A1"), _EchoAgent(id="agent2", name="A2")) - wf = SequentialBuilder().participants(list(agents)).with_checkpointing(buildtime_storage).build() + wf = SequentialBuilder(participants=list(agents), checkpoint_storage=buildtime_storage).build() baseline_output: list[ChatMessage] | None = None async for ev in wf.run("override test", checkpoint_storage=runtime_storage, stream=True): @@ -337,7 +335,7 @@ def create_agent1() -> _EchoAgent: def create_agent2() -> _EchoAgent: return _EchoAgent(id="agent2", name="A2") - wf = SequentialBuilder().register_participants([create_agent1, create_agent2]).with_checkpointing(storage).build() + wf = SequentialBuilder(participant_factories=[create_agent1, create_agent2], checkpoint_storage=storage).build() baseline_output: list[ChatMessage] | None = None async for ev in wf.run("checkpoint with factories", stream=True): @@ -357,9 +355,9 @@ def create_agent2() -> _EchoAgent: checkpoints[-1], ) - wf_resume = ( - SequentialBuilder().register_participants([create_agent1, create_agent2]).with_checkpointing(storage).build() - ) + wf_resume = SequentialBuilder( + participant_factories=[create_agent1, create_agent2], checkpoint_storage=storage + ).build() resumed_output: list[ChatMessage] | None = None async for ev in wf_resume.run(checkpoint_id=resume_checkpoint.checkpoint_id, stream=True): @@ -385,7 +383,7 @@ def create_agent() -> _EchoAgent: call_count += 1 return _EchoAgent(id=f"agent{call_count}", name=f"A{call_count}") - builder = SequentialBuilder().register_participants([create_agent, create_agent]) + builder = SequentialBuilder(participant_factories=[create_agent, create_agent]) # Factories should not be called yet assert call_count == 0 @@ -418,7 +416,7 @@ async def test_sequential_builder_reusable_after_build_with_participants() -> No a1 = _EchoAgent(id="agent1", name="A1") a2 = _EchoAgent(id="agent2", name="A2") - builder = SequentialBuilder().participants([a1, a2]) + builder = SequentialBuilder(participants=[a1, a2]) # Build first workflow builder.build() @@ -442,7 +440,7 @@ def create_agent2() -> _EchoAgent: call_count += 1 return _EchoAgent(id="agent2", name="A2") - builder = SequentialBuilder().register_participants([create_agent1, create_agent2]) + builder = SequentialBuilder(participant_factories=[create_agent1, create_agent2]) # Build first workflow - factories should be called builder.build() diff --git a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py index f00aafe91e..0f7827deae 100644 --- a/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py +++ b/python/samples/autogen-migration/orchestrations/01_round_robin_group_chat.py @@ -77,7 +77,7 @@ async 
def run_agent_framework() -> None: ) # Create sequential workflow - workflow = SequentialBuilder().participants([researcher, writer, editor]).build() + workflow = SequentialBuilder(participants=[researcher, writer, editor]).build() # Run the workflow print("[Agent Framework] Sequential conversation:") @@ -137,7 +137,7 @@ async def check_approval( await context.send_message(AgentExecutorRequest(messages=response.full_conversation, should_respond=True)) workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor=researcher) .add_edge(researcher, writer) .add_edge(writer, editor) .add_edge( @@ -145,7 +145,6 @@ async def check_approval( check_approval, ) .add_edge(check_approval, researcher) - .set_start_executor(researcher) .build() ) diff --git a/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py b/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py index 476d8008e9..2cb34bb4d2 100644 --- a/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py +++ b/python/samples/autogen-migration/orchestrations/02_selector_group_chat.py @@ -85,18 +85,14 @@ async def run_agent_framework() -> None: description="Expert in databases and SQL", ) - workflow = ( - GroupChatBuilder() - .participants([python_expert, javascript_expert, database_expert]) - .with_orchestrator( - agent=client.as_agent( - name="selector_manager", - instructions="Based on the conversation, select the most appropriate expert to respond next.", - ), - ) - .with_max_rounds(1) - .build() - ) + workflow = GroupChatBuilder( + participants=[python_expert, javascript_expert, database_expert], + max_rounds=1, + orchestrator_agent=client.as_agent( + name="selector_manager", + instructions="Based on the conversation, select the most appropriate expert to respond next.", + ), + ).build() # Run with a question that requires expert selection print("[Agent Framework] Group chat conversation:") diff --git a/python/samples/autogen-migration/orchestrations/03_swarm.py b/python/samples/autogen-migration/orchestrations/03_swarm.py index 7559fbac1e..fd6085bbef 100644 --- a/python/samples/autogen-migration/orchestrations/03_swarm.py +++ b/python/samples/autogen-migration/orchestrations/03_swarm.py @@ -138,10 +138,10 @@ async def run_agent_framework() -> None: HandoffBuilder( name="support_handoff", participants=[triage_agent, billing_agent, tech_support], + termination_condition=lambda conv: sum(1 for msg in conv if msg.role == "user") > 3, ) .with_start_agent(triage_agent) .add_handoff(triage_agent, [billing_agent, tech_support]) - .with_termination_condition(lambda conv: sum(1 for msg in conv if msg.role == "user") > 3) .build() ) diff --git a/python/samples/autogen-migration/orchestrations/04_magentic_one.py b/python/samples/autogen-migration/orchestrations/04_magentic_one.py index 201e653693..caddaa3b43 100644 --- a/python/samples/autogen-migration/orchestrations/04_magentic_one.py +++ b/python/samples/autogen-migration/orchestrations/04_magentic_one.py @@ -91,21 +91,17 @@ async def run_agent_framework() -> None: ) # Create Magentic workflow - workflow = ( - MagenticBuilder() - .participants([researcher, coder, reviewer]) - .with_manager( - agent=client.as_agent( - name="magentic_manager", - instructions="You coordinate a team to complete complex tasks efficiently.", - description="Orchestrator for team coordination", - ), - max_round_count=20, - max_stall_count=3, - max_reset_count=1, - ) - .build() - ) + workflow = MagenticBuilder( + participants=[researcher, coder, reviewer], + 
manager_agent=client.as_agent( + name="magentic_manager", + instructions="You coordinate a team to complete complex tasks efficiently.", + description="Orchestrator for team coordination", + ), + max_round_count=20, + max_stall_count=3, + max_reset_count=1, + ).build() # Run complex task last_message_id: str | None = None diff --git a/python/samples/demos/hosted_agents/agents_in_workflow/main.py b/python/samples/demos/hosted_agents/agents_in_workflow/main.py index be2035c847..5402e962ac 100644 --- a/python/samples/demos/hosted_agents/agents_in_workflow/main.py +++ b/python/samples/demos/hosted_agents/agents_in_workflow/main.py @@ -31,7 +31,7 @@ def main(): ) # Build a concurrent workflow - workflow = ConcurrentBuilder().participants([researcher, marketer, legal]).build() + workflow = ConcurrentBuilder(participants=[researcher, marketer, legal]).build() # Convert the workflow to an agent workflow_agent = workflow.as_agent() diff --git a/python/samples/demos/workflow_evaluation/create_workflow.py b/python/samples/demos/workflow_evaluation/create_workflow.py index c8033fd2ae..7e87a499da 100644 --- a/python/samples/demos/workflow_evaluation/create_workflow.py +++ b/python/samples/demos/workflow_evaluation/create_workflow.py @@ -319,8 +319,7 @@ async def _create_workflow(project_client, credential): # 7. booking_info_aggregation, booking_payment, activity_search → final_coordinator (final aggregation, fan-in) workflow = ( - WorkflowBuilder(name="Travel Planning Workflow") - .set_start_executor(start_executor) + WorkflowBuilder(name="Travel Planning Workflow", start_executor=start_executor) .add_edge(start_executor, travel_request_handler) .add_fan_out_edges(travel_request_handler, [hotel_search_agent, flight_search_agent, activity_search_agent]) .add_edge(hotel_search_agent, booking_info_aggregation_agent) diff --git a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py index 7e2b13635f..19223c5195 100644 --- a/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py +++ b/python/samples/getting_started/agents/azure_ai/azure_ai_with_code_interpreter_file_download.py @@ -8,7 +8,6 @@ AgentResponseUpdate, ChatAgent, CitationAnnotation, - Content, HostedCodeInterpreterTool, HostedFileContent, TextContent, diff --git a/python/samples/getting_started/agents/openai/openai_responses_client_streaming_image_generation.py b/python/samples/getting_started/agents/openai/openai_responses_client_streaming_image_generation.py index 52e1e42eda..4fbf2b0da5 100644 --- a/python/samples/getting_started/agents/openai/openai_responses_client_streaming_image_generation.py +++ b/python/samples/getting_started/agents/openai/openai_responses_client_streaming_image_generation.py @@ -4,7 +4,7 @@ import base64 import anyio -from agent_framework import Content, HostedImageGenerationTool +from agent_framework import HostedImageGenerationTool from agent_framework.openai import OpenAIResponsesClient """OpenAI Responses Client Streaming Image Generation Example diff --git a/python/samples/getting_started/devui/fanout_workflow/workflow.py b/python/samples/getting_started/devui/fanout_workflow/workflow.py index 9a5f99a26b..00dc92b3e0 100644 --- a/python/samples/getting_started/devui/fanout_workflow/workflow.py +++ b/python/samples/getting_started/devui/fanout_workflow/workflow.py @@ -662,8 +662,8 @@ def create_complex_workflow(): WorkflowBuilder( 
name="Data Processing Pipeline", description="Complex workflow with parallel validation, transformation, and quality assurance stages", + start_executor=data_ingestion, ) - .set_start_executor(data_ingestion) # Fan-out to validation stage .add_fan_out_edges(data_ingestion, [schema_validator, quality_validator, security_validator]) # Fan-in from validation to aggregator diff --git a/python/samples/getting_started/devui/in_memory_mode.py b/python/samples/getting_started/devui/in_memory_mode.py index e8441e9eb5..9f98d9be50 100644 --- a/python/samples/getting_started/devui/in_memory_mode.py +++ b/python/samples/getting_started/devui/in_memory_mode.py @@ -102,8 +102,8 @@ def main(): WorkflowBuilder( name="Text Transformer", description="Simple 2-step workflow that converts text to uppercase and adds exclamation", + start_executor=upper_executor, ) - .set_start_executor(upper_executor) .add_edge(upper_executor, exclaim_executor) .build() ) diff --git a/python/samples/getting_started/devui/spam_workflow/workflow.py b/python/samples/getting_started/devui/spam_workflow/workflow.py index 73be349cc6..af95af2f92 100644 --- a/python/samples/getting_started/devui/spam_workflow/workflow.py +++ b/python/samples/getting_started/devui/spam_workflow/workflow.py @@ -392,13 +392,13 @@ async def handle_processing_result( final_processor = FinalProcessor(id="final_processor") # Build the comprehensive 4-step workflow with branching logic and HIL support -# Note: No .with_checkpointing() call - DevUI will pass checkpoint_storage at runtime +# Note: No checkpoint_storage in constructor - DevUI will pass checkpoint_storage at runtime workflow = ( WorkflowBuilder( name="Email Spam Detector", description="4-step email classification workflow with human-in-the-loop spam approval", + start_executor=email_preprocessor, ) - .set_start_executor(email_preprocessor) .add_edge(email_preprocessor, spam_detector) # HIL handled within spam_detector via @response_handler # Continue with branching logic after human approval diff --git a/python/samples/getting_started/devui/workflow_agents/workflow.py b/python/samples/getting_started/devui/workflow_agents/workflow.py index c4f7ca1440..288c9d5279 100644 --- a/python/samples/getting_started/devui/workflow_agents/workflow.py +++ b/python/samples/getting_started/devui/workflow_agents/workflow.py @@ -132,8 +132,8 @@ def is_approved(message: Any) -> bool: WorkflowBuilder( name="Content Review Workflow", description="Multi-agent content creation workflow with quality-based routing (Writer → Reviewer → Editor/Publisher)", + start_executor=writer, ) - .set_start_executor(writer) .add_edge(writer, reviewer) # Branch 1: High quality (>= 80) goes directly to publisher .add_edge(reviewer, publisher, condition=is_approved) diff --git a/python/samples/getting_started/observability/workflow_observability.py b/python/samples/getting_started/observability/workflow_observability.py index e08eaa37af..1726117178 100644 --- a/python/samples/getting_started/observability/workflow_observability.py +++ b/python/samples/getting_started/observability/workflow_observability.py @@ -81,9 +81,8 @@ async def run_sequential_workflow() -> None: # Step 2: Build the workflow with the defined edges. 
workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor=upper_case_executor) .add_edge(upper_case_executor, reverse_text_executor) - .set_start_executor(upper_case_executor) .build() ) diff --git a/python/samples/getting_started/orchestrations/concurrent_agents.py b/python/samples/getting_started/orchestrations/concurrent_agents.py index cdfc5de05e..8333b91c89 100644 --- a/python/samples/getting_started/orchestrations/concurrent_agents.py +++ b/python/samples/getting_started/orchestrations/concurrent_agents.py @@ -17,7 +17,7 @@ a list[ChatMessage] representing the concatenated conversations from all agents. Demonstrates: -- Minimal wiring with ConcurrentBuilder().participants([...]).build() +- Minimal wiring with ConcurrentBuilder(participants=[...]).build() - Fan-out to multiple agents, fan-in aggregation of final ChatMessages - Workflow completion when idle with no pending work @@ -57,7 +57,7 @@ async def main() -> None: # 2) Build a concurrent workflow # Participants are either Agents (type of SupportsAgentRun) or Executors - workflow = ConcurrentBuilder().participants([researcher, marketer, legal]).build() + workflow = ConcurrentBuilder(participants=[researcher, marketer, legal]).build() # 3) Run with a single prompt and pretty-print the final combined messages events = await workflow.run("We are launching a new budget-friendly electric bike for urban commuters.") diff --git a/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py b/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py index 55512ecc6e..9463ba1915 100644 --- a/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py +++ b/python/samples/getting_started/orchestrations/concurrent_custom_agent_executors.py @@ -27,7 +27,7 @@ Demonstrates: - Executors that create their ChatAgent in __init__ (via AzureOpenAIChatClient) - A @handler that converts AgentExecutorRequest -> AgentExecutorResponse -- ConcurrentBuilder().participants([...]) to build fan-out/fan-in +- ConcurrentBuilder(participants=[...]) to build fan-out/fan-in - Default aggregator returning list[ChatMessage] (one user + one assistant per agent) - Workflow completion when all participants become idle @@ -103,7 +103,7 @@ async def main() -> None: marketer = MarketerExec(chat_client) legal = LegalExec(chat_client) - workflow = ConcurrentBuilder().participants([researcher, marketer, legal]).build() + workflow = ConcurrentBuilder(participants=[researcher, marketer, legal]).build() events = await workflow.run("We are launching a new budget-friendly electric bike for urban commuters.") outputs = events.get_outputs() diff --git a/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py b/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py index 4a5865021b..a15cae06fd 100644 --- a/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py +++ b/python/samples/getting_started/orchestrations/concurrent_custom_aggregator.py @@ -18,7 +18,7 @@ The workflow completes when all participants become idle. 
Demonstrates: -- ConcurrentBuilder().participants([...]).with_aggregator(callback) +- ConcurrentBuilder(participants=[...]).with_aggregator(callback) - Fan-out to agents and fan-in at an aggregator - Aggregation implemented via an LLM call (chat_client.get_response) - Workflow output yielded with the synthesized summary string @@ -87,7 +87,7 @@ async def summarize_results(results: list[Any]) -> str: # • Custom callback -> return value becomes workflow output (string here) # The callback can be sync or async; it receives list[AgentExecutorResponse]. workflow = ( - ConcurrentBuilder().participants([researcher, marketer, legal]).with_aggregator(summarize_results).build() + ConcurrentBuilder(participants=[researcher, marketer, legal]).with_aggregator(summarize_results).build() ) events = await workflow.run("We are launching a new budget-friendly electric bike for urban commuters.") diff --git a/python/samples/getting_started/orchestrations/concurrent_participant_factory.py b/python/samples/getting_started/orchestrations/concurrent_participant_factory.py index 1b31027c55..8d1da7f0fd 100644 --- a/python/samples/getting_started/orchestrations/concurrent_participant_factory.py +++ b/python/samples/getting_started/orchestrations/concurrent_participant_factory.py @@ -33,7 +33,7 @@ requests or tasks in parallel with stateful participants. Demonstrates: -- ConcurrentBuilder().register_participants([...]).with_aggregator(callback) +- ConcurrentBuilder(participant_factories=[...]).with_aggregator(callback) - Fan-out to agents and fan-in at an aggregator - Aggregation implemented via an LLM call (chat_client.get_response) - Workflow output yielded with the synthesized summary string @@ -125,8 +125,7 @@ async def main() -> None: # SupportsAgentRun (agents) or Executor instances. # - register_aggregator(...) takes a factory function that returns an Executor instance. 
    concurrent_builder = (
-        ConcurrentBuilder()
-        .register_participants([create_researcher, create_marketer, create_legal])
+        ConcurrentBuilder(participant_factories=[create_researcher, create_marketer, create_legal])
         .register_aggregator(SummarizationExecutor)
     )
diff --git a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py
index 9624e2ed5b..33d62d98da 100644
--- a/python/samples/getting_started/orchestrations/group_chat_agent_manager.py
+++ b/python/samples/getting_started/orchestrations/group_chat_agent_manager.py
@@ -65,16 +65,17 @@ async def main() -> None:
     )

     # Build the group chat workflow
+    # termination_condition: stop after 4 assistant messages
+    # (The agent orchestrator will usually decide to end before this limit; the hard cap is a safeguard.)
+    # intermediate_outputs=True: Enable intermediate outputs to observe the conversation as it unfolds
+    # (Intermediate outputs will be emitted as WorkflowOutputEvent events)
     workflow = (
-        GroupChatBuilder()
-        .with_orchestrator(agent=orchestrator_agent)
-        .participants([researcher, writer])
+        GroupChatBuilder(
+            participants=[researcher, writer],
+            termination_condition=lambda messages: sum(1 for msg in messages if msg.role == "assistant") >= 4,
+            intermediate_outputs=True,
+            orchestrator_agent=orchestrator_agent,
+        )
-        # Set a hard termination condition: stop after 4 assistant messages
-        # The agent orchestrator will intelligently decide when to end before this limit but just in case
-        .with_termination_condition(lambda messages: sum(1 for msg in messages if msg.role == "assistant") >= 4)
-        # Enable intermediate outputs to observe the conversation as it unfolds
-        # Intermediate outputs will be emitted as WorkflowEvent with type "output" events
-        .with_intermediate_outputs()
         .build()
     )
diff --git a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py
index a8e06e55d7..be2579f496 100644
--- a/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py
+++ b/python/samples/getting_started/orchestrations/group_chat_philosophical_debate.py
@@ -207,14 +207,16 @@ async def main() -> None:
         chat_client=_get_chat_client(),
     )

+    # termination_condition: stop after 10 assistant messages
+    # intermediate_outputs=True: Enable intermediate outputs to observe the conversation as it unfolds
+    # (Intermediate outputs will be emitted as WorkflowOutputEvent events)
     workflow = (
-        GroupChatBuilder()
-        .with_orchestrator(agent=moderator)
-        .participants([farmer, developer, teacher, activist, spiritual_leader, artist, immigrant, doctor])
+        GroupChatBuilder(
+            participants=[farmer, developer, teacher, activist, spiritual_leader, artist, immigrant, doctor],
+            termination_condition=lambda messages: sum(1 for msg in messages if msg.role == "assistant") >= 10,
+            intermediate_outputs=True,
+            orchestrator_agent=moderator,
+        )
-        .with_termination_condition(lambda messages: sum(1 for msg in messages if msg.role == "assistant") >= 10)
-        # Enable intermediate outputs to observe the conversation as it unfolds
-        # Intermediate outputs will be emitted as WorkflowEvent with type "output" events
-        .with_intermediate_outputs()
         .build()
     )
diff --git a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py
index 3e7ea3fe11..bb76e97de1 100644
--- a/python/samples/getting_started/orchestrations/group_chat_simple_selector.py
+++ b/python/samples/getting_started/orchestrations/group_chat_simple_selector.py
@@ -16,7 +16,7 @@
 Sample: Group Chat with a round-robin speaker selector

 What it does:
-- Demonstrates the with_orchestrator() API for GroupChat orchestration
+- Demonstrates the selection_func parameter for GroupChat orchestration
 - Uses a pure Python function to control speaker selection based on conversation state

 Prerequisites:
@@ -80,19 +80,20 @@ async def main() -> None:
     )

     # Build the group chat workflow
+    # termination_condition: stop after 6 messages (user task + one full round + 1)
+    # One round is expert -> verifier -> clarifier -> skeptic, after which the expert gets to respond again.
+    # This will end the conversation after the expert has spoken twice (one iteration of the loop).
+    # Note: it's possible that the expert gets it right the first time and the other participants
+    # have nothing to add, but for demo purposes we want to see at least one full round of interaction.
+    # intermediate_outputs=True: Enable intermediate outputs to observe the conversation as it unfolds
+    # (Intermediate outputs will be emitted as WorkflowOutputEvent events)
     workflow = (
-        GroupChatBuilder()
-        .participants([expert, verifier, clarifier, skeptic])
-        .with_orchestrator(selection_func=round_robin_selector)
+        GroupChatBuilder(
+            participants=[expert, verifier, clarifier, skeptic],
+            termination_condition=lambda conversation: len(conversation) >= 6,
+            intermediate_outputs=True,
+            selection_func=round_robin_selector,
+        )
-        # Set a hard termination condition: stop after 6 messages (user task + one full rounds + 1)
-        # One round is expert -> verifier -> clarifier -> skeptic, after which the expert gets to respond again.
-        # This will end the conversation after the expert has spoken 2 times (one iteration loop)
-        # Note: it's possible that the expert gets it right the first time and the other participants
-        # have nothing to add, but for demo purposes we want to see at least one full round of interaction.
-        .with_termination_condition(lambda conversation: len(conversation) >= 6)
-        # Enable intermediate outputs to observe the conversation as it unfolds
-        # Intermediate outputs will be emitted as WorkflowEvent with type "output" events
-        .with_intermediate_outputs()
         .build()
     )
diff --git a/python/samples/getting_started/orchestrations/handoff_autonomous.py b/python/samples/getting_started/orchestrations/handoff_autonomous.py
index faadd8486e..9b151b656a 100644
--- a/python/samples/getting_started/orchestrations/handoff_autonomous.py
+++ b/python/samples/getting_started/orchestrations/handoff_autonomous.py
@@ -78,10 +78,15 @@ async def main() -> None:

     # Build the workflow with autonomous mode
     # In autonomous mode, agents continue iterating until they invoke a handoff tool
+    # termination_condition: Terminate after coordinator provides 5 assistant responses
     workflow = (
         HandoffBuilder(
             name="autonomous_iteration_handoff",
             participants=[coordinator, research_agent, summary_agent],
+            termination_condition=lambda conv: sum(
+                1 for msg in conv if msg.author_name == "coordinator" and msg.role == "assistant"
+            )
+            >= 5,
         )
         .with_start_agent(coordinator)
         .add_handoff(coordinator, [research_agent, summary_agent])
@@ -98,10 +103,6 @@ async def main() -> None:
             resolve_agent_id(summary_agent): 5,
         }
     )
-        .with_termination_condition(
-            # Terminate after coordinator provides 5 assistant responses
-            lambda conv: sum(1 for msg in conv if msg.author_name == "coordinator" and msg.role == "assistant") >= 5
-        )
         .build()
     )
diff --git a/python/samples/getting_started/orchestrations/handoff_participant_factory.py b/python/samples/getting_started/orchestrations/handoff_participant_factory.py
index bab1611244..7abb7b59c6 100644
--- a/python/samples/getting_started/orchestrations/handoff_participant_factory.py
+++ b/python/samples/getting_started/orchestrations/handoff_participant_factory.py
@@ -217,6 +217,9 @@ async def _run_workflow(workflow: Workflow, user_inputs: list[str]) -> None:
 async def main() -> None:
     """Run the autonomous handoff workflow with participant factories."""
     # Build the handoff workflow using participant factories
+    # termination_condition: Custom termination that checks if the triage agent has provided a closing message.
+    # This looks for the last message being from triage_agent and containing "welcome",
+    # which indicates the conversation has concluded naturally.
     workflow_builder = (
         HandoffBuilder(
             name="Autonomous Handoff with Participant Factories",
@@ -226,18 +229,13 @@ async def main() -> None:
             "order_status": create_order_status_agent,
             "return": create_return_agent,
         },
-        )
-        .with_start_agent("triage")
-        .with_termination_condition(
-            # Custom termination: Check if the triage agent has provided a closing message.
-            # This looks for the last message being from triage_agent and containing "welcome",
-            # which indicates the conversation has concluded naturally.
- lambda conversation: ( + termination_condition=lambda conversation: ( len(conversation) > 0 and conversation[-1].author_name == "triage_agent" and "welcome" in conversation[-1].text.lower() - ) + ), ) + .with_start_agent("triage") ) # Scripted user responses for reproducible demo diff --git a/python/samples/getting_started/orchestrations/handoff_simple.py b/python/samples/getting_started/orchestrations/handoff_simple.py index 3be912ab6b..53e6bbcd60 100644 --- a/python/samples/getting_started/orchestrations/handoff_simple.py +++ b/python/samples/getting_started/orchestrations/handoff_simple.py @@ -198,7 +198,7 @@ async def main() -> None: # - participants: All agents that can participate in the workflow # - with_start_agent: The triage agent is designated as the start agent, which means # it receives all user input first and orchestrates handoffs to specialists - # - with_termination_condition: Custom logic to stop the request/response loop. + # - termination_condition: Custom logic to stop the request/response loop. # Without this, the default behavior continues requesting user input until max_turns # is reached. Here we use a custom condition that checks if the conversation has ended # naturally (when one of the agents says something like "you're welcome"). @@ -206,14 +206,14 @@ async def main() -> None: HandoffBuilder( name="customer_support_handoff", participants=[triage, refund, order, support], - ) - .with_start_agent(triage) - .with_termination_condition( # Custom termination: Check if one of the agents has provided a closing message. # This looks for the last message containing "welcome", which indicates the # conversation has concluded naturally. - lambda conversation: len(conversation) > 0 and "welcome" in conversation[-1].text.lower() + termination_condition=lambda conversation: ( + len(conversation) > 0 and "welcome" in conversation[-1].text.lower() + ), ) + .with_start_agent(triage) .build() ) diff --git a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py index 046da851c0..159105d54c 100644 --- a/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py +++ b/python/samples/getting_started/orchestrations/handoff_with_code_interpreter_file.py @@ -163,10 +163,11 @@ async def main() -> None: async with create_agents(credential) as (triage, code_specialist): workflow = ( - HandoffBuilder() + HandoffBuilder( + termination_condition=lambda conv: sum(1 for msg in conv if msg.role == "user") >= 2, + ) .participants([triage, code_specialist]) .with_start_agent(triage) - .with_termination_condition(lambda conv: sum(1 for msg in conv if msg.role == "user") >= 2) .build() ) diff --git a/python/samples/getting_started/orchestrations/magentic.py b/python/samples/getting_started/orchestrations/magentic.py index 35ca98b617..d0e4f13703 100644 --- a/python/samples/getting_started/orchestrations/magentic.py +++ b/python/samples/getting_started/orchestrations/magentic.py @@ -72,20 +72,16 @@ async def main() -> None: print("\nBuilding Magentic Workflow...") - workflow = ( - MagenticBuilder() - .participants([researcher_agent, coder_agent]) - .with_manager( - agent=manager_agent, - max_round_count=10, - max_stall_count=3, - max_reset_count=2, - ) - # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowEvent events - .with_intermediate_outputs() - .build() - ) + # 
intermediate_outputs=True: Enable intermediate outputs to observe the conversation as it unfolds + # (Intermediate outputs will be emitted as WorkflowOutputEvent events) + workflow = MagenticBuilder( + participants=[researcher_agent, coder_agent], + intermediate_outputs=True, + manager_agent=manager_agent, + max_round_count=10, + max_stall_count=3, + max_reset_count=2, + ).build() task = ( "I am preparing a report on the energy efficiency of different machine learning model architectures. " diff --git a/python/samples/getting_started/orchestrations/magentic_checkpoint.py b/python/samples/getting_started/orchestrations/magentic_checkpoint.py index ab2114a2db..08e26909e0 100644 --- a/python/samples/getting_started/orchestrations/magentic_checkpoint.py +++ b/python/samples/getting_started/orchestrations/magentic_checkpoint.py @@ -76,18 +76,14 @@ def build_workflow(checkpoint_storage: FileCheckpointStorage): # The builder wires in the Magentic orchestrator, sets the plan review path, and # stores the checkpoint backend so the runtime knows where to persist snapshots. - return ( - MagenticBuilder() - .participants([researcher, writer]) - .with_plan_review() - .with_manager( - agent=manager_agent, - max_round_count=10, - max_stall_count=3, - ) - .with_checkpointing(checkpoint_storage) - .build() - ) + return MagenticBuilder( + participants=[researcher, writer], + enable_plan_review=True, + checkpoint_storage=checkpoint_storage, + manager_agent=manager_agent, + max_round_count=10, + max_stall_count=3, + ).build() async def main() -> None: diff --git a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py index 9a38507efb..24757a1692 100644 --- a/python/samples/getting_started/orchestrations/magentic_human_plan_review.py +++ b/python/samples/getting_started/orchestrations/magentic_human_plan_review.py @@ -115,22 +115,18 @@ async def main() -> None: print("\nBuilding Magentic Workflow with Human Plan Review...") - workflow = ( - MagenticBuilder() - .participants([researcher_agent, analyst_agent]) - .with_manager( - agent=manager_agent, - max_round_count=10, - max_stall_count=1, - max_reset_count=2, - ) - # Request human input for plan review - .with_plan_review() - # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowEvent with type "output" - .with_intermediate_outputs() - .build() - ) + # enable_plan_review=True: Request human input for plan review + # intermediate_outputs=True: Enable intermediate outputs to observe the conversation as it unfolds + # (Intermediate outputs will be emitted as WorkflowOutputEvent events) + workflow = MagenticBuilder( + participants=[researcher_agent, analyst_agent], + enable_plan_review=True, + intermediate_outputs=True, + manager_agent=manager_agent, + max_round_count=10, + max_stall_count=1, + max_reset_count=2, + ).build() task = "Research sustainable aviation fuel technology and summarize the findings." 
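Across the three Magentic samples above, the consolidation is uniform: .participants(), .with_manager(), .with_plan_review(), .with_intermediate_outputs(), and .with_checkpointing() all become constructor arguments. A minimal sketch of the combined shape, for reference only; researcher, writer, manager_agent, and storage stand in for the objects each sample constructs, and no single sample uses every kwarg at once:

    workflow = MagenticBuilder(
        participants=[researcher, writer],   # was .participants([...])
        manager_agent=manager_agent,         # was .with_manager(agent=...)
        max_round_count=10,                  # manager limits are plain kwargs now
        max_stall_count=3,
        max_reset_count=2,
        enable_plan_review=True,             # was .with_plan_review()
        intermediate_outputs=True,           # was .with_intermediate_outputs()
        checkpoint_storage=storage,          # was .with_checkpointing(storage)
    ).build()
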
diff --git a/python/samples/getting_started/orchestrations/sequential_agents.py b/python/samples/getting_started/orchestrations/sequential_agents.py index 03e5c42e9a..37c9afe975 100644 --- a/python/samples/getting_started/orchestrations/sequential_agents.py +++ b/python/samples/getting_started/orchestrations/sequential_agents.py @@ -43,7 +43,7 @@ async def main() -> None: ) # 2) Build sequential workflow: writer -> reviewer - workflow = SequentialBuilder().participants([writer, reviewer]).build() + workflow = SequentialBuilder(participants=[writer, reviewer]).build() # 3) Run and collect outputs outputs: list[list[ChatMessage]] = [] diff --git a/python/samples/getting_started/orchestrations/sequential_custom_executors.py b/python/samples/getting_started/orchestrations/sequential_custom_executors.py index 8b1cc8d8eb..d421e85f1c 100644 --- a/python/samples/getting_started/orchestrations/sequential_custom_executors.py +++ b/python/samples/getting_started/orchestrations/sequential_custom_executors.py @@ -66,7 +66,7 @@ async def main() -> None: # 2) Build sequential workflow: content -> summarizer summarizer = Summarizer(id="summarizer") - workflow = SequentialBuilder().participants([content, summarizer]).build() + workflow = SequentialBuilder(participants=[content, summarizer]).build() # 3) Run workflow and extract final conversation events = await workflow.run("Explain the benefits of budget eBikes for commuters.") diff --git a/python/samples/getting_started/orchestrations/sequential_participant_factory.py b/python/samples/getting_started/orchestrations/sequential_participant_factory.py index 243c4b145a..38cacfffcd 100644 --- a/python/samples/getting_started/orchestrations/sequential_participant_factory.py +++ b/python/samples/getting_started/orchestrations/sequential_participant_factory.py @@ -70,7 +70,7 @@ async def run_workflow(workflow: Workflow, query: str) -> None: async def main() -> None: # 1) Create a builder with participant factories - builder = SequentialBuilder().register_participants([ + builder = SequentialBuilder(participant_factories=[ lambda: Accumulate("accumulator"), create_agent, ]) diff --git a/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py b/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py index 98460844f6..8975795e35 100644 --- a/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py +++ b/python/samples/getting_started/workflows/_start-here/step1_executors_and_edges.py @@ -160,10 +160,10 @@ async def main(): upper_case = UpperCase(id="upper_case_executor") # Build the workflow using a fluent pattern: - # 1) add_edge(from_node, to_node) defines a directed edge upper_case -> reverse_text - # 2) set_start_executor(node) declares the entry point + # 1) start_executor=... in constructor declares the entry point + # 2) add_edge(from_node, to_node) defines a directed edge upper_case -> reverse_text # 3) build() finalizes and returns an immutable Workflow object - workflow1 = WorkflowBuilder().add_edge(upper_case, reverse_text).set_start_executor(upper_case).build() + workflow1 = WorkflowBuilder(start_executor=upper_case).add_edge(upper_case, reverse_text).build() # Run the workflow by sending the initial message to the start node. # The run(...) call returns an event collection; its get_outputs() method @@ -181,10 +181,9 @@ async def main(): # exclamation_adder uses @handler(input=str, output=str) to # explicitly declare types instead of relying on introspection. 
    workflow2 = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor=upper_case)
         .add_edge(upper_case, exclamation_adder)
         .add_edge(exclamation_adder, reverse_text)
-        .set_start_executor(upper_case)
         .build()
     )
diff --git a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py
index b2fcbb1aa0..aa6378c433 100644
--- a/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py
+++ b/python/samples/getting_started/workflows/_start-here/step2_agents_in_a_workflow.py
@@ -45,8 +45,8 @@ async def main():
     )

     # Build the workflow using the fluent builder.
-    # Set the start node and connect an edge from writer to reviewer.
-    workflow = WorkflowBuilder().set_start_executor(writer_agent).add_edge(writer_agent, reviewer_agent).build()
+    # Set the start node via constructor and connect an edge from writer to reviewer.
+    workflow = WorkflowBuilder(start_executor=writer_agent).add_edge(writer_agent, reviewer_agent).build()

     # Run the workflow with the user's initial message.
     # For foundational clarity, use run (non streaming) and print the terminal event.
diff --git a/python/samples/getting_started/workflows/_start-here/step3_streaming.py b/python/samples/getting_started/workflows/_start-here/step3_streaming.py
index 8ca951aa0a..c9cfa6843d 100644
--- a/python/samples/getting_started/workflows/_start-here/step3_streaming.py
+++ b/python/samples/getting_started/workflows/_start-here/step3_streaming.py
@@ -44,8 +44,8 @@ async def main():
     )

     # Build the workflow using the fluent builder.
-    # Set the start node and connect an edge from writer to reviewer.
-    workflow = WorkflowBuilder().set_start_executor(writer_agent).add_edge(writer_agent, reviewer_agent).build()
+    # Set the start node via constructor and connect an edge from writer to reviewer.
+    workflow = WorkflowBuilder(start_executor=writer_agent).add_edge(writer_agent, reviewer_agent).build()

     # Track the last author to format streaming output.
     last_author: str | None = None
diff --git a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py
index 166514f7ac..b5554fae81 100644
--- a/python/samples/getting_started/workflows/_start-here/step4_using_factories.py
+++ b/python/samples/getting_started/workflows/_start-here/step4_using_factories.py
@@ -73,12 +73,11 @@ async def main():
-    # 4) set_start_executor(node) declares the entry point
+    # 4) start_executor=... in the constructor declares the entry point
     # 5) build() finalizes and returns an immutable Workflow object
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor="UpperCase")
         .register_executor(lambda: UpperCase(id="upper_case_executor"), name="UpperCase")
         .register_executor(lambda: reverse_text, name="ReverseText")
         .register_agent(create_agent, name="DecoderAgent")
         .add_chain(["UpperCase", "ReverseText", "DecoderAgent"])
-        .set_start_executor("UpperCase")
         .build()
     )
diff --git a/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py b/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py
index 43c35a8082..d05fcbf319 100644
--- a/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py
+++ b/python/samples/getting_started/workflows/agents/azure_ai_agents_streaming.py
@@ -38,8 +38,8 @@ async def main() -> None:
     )

     # Build the workflow by adding agents directly as edges.
- # Agents adapt to workflow mode: run(stream=True) for complete responses, run() for incremental updates. - workflow = WorkflowBuilder().set_start_executor(writer_agent).add_edge(writer_agent, reviewer_agent).build() + # Agents adapt to workflow mode: run(stream=True) for incremental updates, run() for complete responses. + workflow = WorkflowBuilder(start_executor=writer_agent).add_edge(writer_agent, reviewer_agent).build() # Track the last author to format streaming output. last_author: str | None = None diff --git a/python/samples/getting_started/workflows/agents/azure_ai_agents_with_shared_thread.py b/python/samples/getting_started/workflows/agents/azure_ai_agents_with_shared_thread.py index 874cb2956a..890dbe396f 100644 --- a/python/samples/getting_started/workflows/agents/azure_ai_agents_with_shared_thread.py +++ b/python/samples/getting_started/workflows/agents/azure_ai_agents_with_shared_thread.py @@ -71,7 +71,7 @@ async def main() -> None: shared_thread.message_store = ChatMessageStore() workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="writer") .register_agent(factory_func=lambda: writer, name="writer", agent_thread=shared_thread) .register_agent(factory_func=lambda: reviewer, name="reviewer", agent_thread=shared_thread) .register_executor( @@ -79,7 +79,6 @@ async def main() -> None: name="intercept_agent_response", ) .add_chain(["writer", "intercept_agent_response", "reviewer"]) - .set_start_executor("writer") .build() ) diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py index c9a31cf6f7..3e3751fd86 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_and_executor.py @@ -110,8 +110,7 @@ async def main() -> None: ) workflow = ( - WorkflowBuilder() - .set_start_executor(research_agent) + WorkflowBuilder(start_executor=research_agent) .add_edge(research_agent, enrich_with_references) .add_edge(enrich_with_references, final_editor_agent) .build() diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py index 73d520b182..04c08a0602 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_streaming.py @@ -40,7 +40,7 @@ async def main(): # Build the workflow using the fluent builder. # Set the start node and connect an edge from writer to reviewer. # Agents adapt to workflow mode: run(stream=True) for incremental updates, run() for complete responses. - workflow = WorkflowBuilder().set_start_executor(writer_agent).add_edge(writer_agent, reviewer_agent).build() + workflow = WorkflowBuilder(start_executor=writer_agent).add_edge(writer_agent, reviewer_agent).build() # Track the last author to format streaming output. 
last_author: str | None = None diff --git a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py index ae0f442771..3515709157 100644 --- a/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py +++ b/python/samples/getting_started/workflows/agents/azure_chat_agents_tool_calls_with_feedback.py @@ -240,7 +240,7 @@ async def main() -> None: # Build the workflow. workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="writer_agent") .register_agent(create_writer_agent, name="writer_agent") .register_agent(create_final_editor_agent, name="final_editor_agent") .register_executor( @@ -251,7 +251,6 @@ async def main() -> None: ), name="coordinator", ) - .set_start_executor("writer_agent") .add_edge("writer_agent", "coordinator") .add_edge("coordinator", "writer_agent") .add_edge("final_editor_agent", "coordinator") diff --git a/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py index 89b003dd5f..7c10455eaa 100644 --- a/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/concurrent_workflow_as_agent.py @@ -65,7 +65,7 @@ async def main() -> None: ) # 2) Build a concurrent workflow - workflow = ConcurrentBuilder().participants([researcher, marketer, legal]).build() + workflow = ConcurrentBuilder(participants=[researcher, marketer, legal]).build() # 3) Expose the concurrent workflow as an agent for easy reuse agent = workflow.as_agent(name="ConcurrentWorkflowAgent") diff --git a/python/samples/getting_started/workflows/agents/custom_agent_executors.py b/python/samples/getting_started/workflows/agents/custom_agent_executors.py index cab73bc761..c193e7368d 100644 --- a/python/samples/getting_started/workflows/agents/custom_agent_executors.py +++ b/python/samples/getting_started/workflows/agents/custom_agent_executors.py @@ -113,7 +113,7 @@ async def main(): # Build the workflow using the fluent builder. # Set the start node and connect an edge from writer to reviewer. - workflow = WorkflowBuilder().set_start_executor(writer).add_edge(writer, reviewer).build() + workflow = WorkflowBuilder(start_executor=writer).add_edge(writer, reviewer).build() # Run the workflow with the user's initial message. # For foundational clarity, use run (non streaming) and print the workflow output. 
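Every WorkflowBuilder migration in this batch is the same mechanical move: the required entry point shifts from a late .set_start_executor(...) call to the start_executor constructor argument, while edge wiring and build() are untouched. A minimal before/after sketch, assuming the writer/reviewer agents from the samples above and an enclosing async function:

    # Before: the entry point was declared with a fluent call.
    # workflow = WorkflowBuilder().set_start_executor(writer).add_edge(writer, reviewer).build()

    # After: the entry point is a required constructor argument.
    workflow = WorkflowBuilder(start_executor=writer).add_edge(writer, reviewer).build()

    # Running a workflow is unaffected by the builder change.
    events = await workflow.run("Draft a short product announcement.")  # illustrative prompt
    for output in events.get_outputs():
        print(output)
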
diff --git a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py index 4193d1fdfc..1693aeb642 100644 --- a/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/group_chat_workflow_as_agent.py @@ -33,20 +33,16 @@ async def main() -> None: chat_client=OpenAIResponsesClient(), ) - workflow = ( - GroupChatBuilder() - .with_orchestrator( - agent=OpenAIChatClient().as_agent( - name="Orchestrator", - instructions="You coordinate a team conversation to solve the user's task.", - ) - ) - .participants([researcher, writer]) - # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowEvent with type "output" events - .with_intermediate_outputs() - .build() - ) + # intermediate_outputs=True: Enable intermediate outputs to observe the conversation as it unfolds + # (Intermediate outputs will be emitted as WorkflowOutputEvent events) + workflow = GroupChatBuilder( + participants=[researcher, writer], + intermediate_outputs=True, + orchestrator_agent=OpenAIChatClient().as_agent( + name="Orchestrator", + instructions="You coordinate a team conversation to solve the user's task.", + ), + ).build() task = "Outline the core considerations for planning a community hackathon, and finish with a concise action plan." diff --git a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py index e083cf7d60..f3dcefab7a 100644 --- a/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/handoff_workflow_as_agent.py @@ -156,7 +156,7 @@ async def main() -> None: # - participants: All agents that can participate in the workflow # - with_start_agent: The triage agent is designated as the start agent, which means # it receives all user input first and orchestrates handoffs to specialists - # - with_termination_condition: Custom logic to stop the request/response loop. + # - termination_condition: Custom logic to stop the request/response loop. # Without this, the default behavior continues requesting user input until max_turns # is reached. Here we use a custom condition that checks if the conversation has ended # naturally (when one of the agents says something like "you're welcome"). @@ -164,14 +164,14 @@ async def main() -> None: HandoffBuilder( name="customer_support_handoff", participants=[triage, refund, order, support], - ) - .with_start_agent(triage) - .with_termination_condition( # Custom termination: Check if one of the agents has provided a closing message. # This looks for the last message containing "welcome", which indicates the # conversation has concluded naturally. 
- lambda conversation: len(conversation) > 0 and "welcome" in conversation[-1].text.lower() + termination_condition=lambda conversation: ( + len(conversation) > 0 and "welcome" in conversation[-1].text.lower() + ), ) + .with_start_agent(triage) .build() .as_agent() # Convert workflow to agent interface ) diff --git a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py index 4ea460e64b..4d687514c1 100644 --- a/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/magentic_workflow_as_agent.py @@ -50,20 +50,16 @@ async def main() -> None: print("\nBuilding Magentic Workflow...") - workflow = ( - MagenticBuilder() - .participants([researcher_agent, coder_agent]) - .with_manager( - agent=manager_agent, - max_round_count=10, - max_stall_count=3, - max_reset_count=2, - ) - # Enable intermediate outputs to observe the conversation as it unfolds - # Intermediate outputs will be emitted as WorkflowEvent with type "output" events - .with_intermediate_outputs() - .build() - ) + # intermediate_outputs=True: Enable intermediate outputs to observe the conversation as it unfolds + # (Intermediate outputs will be emitted as WorkflowOutputEvent events) + workflow = MagenticBuilder( + participants=[researcher_agent, coder_agent], + intermediate_outputs=True, + manager_agent=manager_agent, + max_round_count=10, + max_stall_count=3, + max_reset_count=2, + ).build() task = ( "I am preparing a report on the energy efficiency of different machine learning model architectures. " diff --git a/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py b/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py index ba09785f0c..7fc1720cbc 100644 --- a/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py +++ b/python/samples/getting_started/workflows/agents/sequential_workflow_as_agent.py @@ -40,7 +40,7 @@ async def main() -> None: ) # 2) Build sequential workflow: writer -> reviewer - workflow = SequentialBuilder().participants([writer, reviewer]).build() + workflow = SequentialBuilder(participants=[writer, reviewer]).build() # 3) Treat the workflow itself as an agent for follow-up invocations agent = workflow.as_agent(name="SequentialWorkflowAgent") diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py index d1bdcb71ba..af405084dc 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_human_in_the_loop.py @@ -99,7 +99,7 @@ async def main() -> None: # Build a workflow with bidirectional communication between Worker and Reviewer, # and escalation paths for human review. 
agent = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="worker") .register_executor( lambda: Worker( id="sub-worker", @@ -113,7 +113,6 @@ async def main() -> None: ) .add_edge("worker", "reviewer") # Worker sends requests to Reviewer .add_edge("reviewer", "worker") # Reviewer sends feedback to Worker - .set_start_executor("worker") .build() .as_agent() # Convert workflow into an agent interface ) diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py index 23b4d1e5ee..aefcf9b1e5 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_kwargs.py @@ -94,7 +94,7 @@ async def main() -> None: ) # Build a sequential workflow - workflow = SequentialBuilder().participants([agent]).build() + workflow = SequentialBuilder(participants=[agent]).build() # Expose the workflow as an agent using .as_agent() workflow_agent = workflow.as_agent(name="WorkflowAgent") diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py index 2db380ea77..3d205cbbb2 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_reflection_pattern.py @@ -187,7 +187,7 @@ async def main() -> None: print("Building workflow with Worker ↔ Reviewer cycle...") agent = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="worker") .register_executor( lambda: Worker(id="worker", chat_client=OpenAIChatClient(model_id="gpt-4.1-nano")), name="worker", @@ -198,7 +198,6 @@ async def main() -> None: ) .add_edge("worker", "reviewer") # Worker sends responses to Reviewer .add_edge("reviewer", "worker") # Reviewer provides feedback to Worker - .set_start_executor("worker") .build() .as_agent() # Wrap workflow as an agent ) diff --git a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py index 01d5626589..621d54216f 100644 --- a/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py +++ b/python/samples/getting_started/workflows/agents/workflow_as_agent_with_thread.py @@ -59,7 +59,7 @@ def create_summarizer() -> ChatAgent: ) # Build a sequential workflow: assistant -> summarizer - workflow = SequentialBuilder().register_participants([create_assistant, create_summarizer]).build() + workflow = SequentialBuilder(participant_factories=[create_assistant, create_summarizer]).build() # Wrap the workflow as an agent agent = workflow.as_agent(name="ConversationalWorkflowAgent") @@ -130,7 +130,7 @@ def create_assistant() -> ChatAgent: instructions="You are a helpful assistant with good memory. 
Remember details from our conversation.", ) - workflow = SequentialBuilder().register_participants([create_assistant]).build() + workflow = SequentialBuilder(participant_factories=[create_assistant]).build() agent = workflow.as_agent(name="MemoryWorkflowAgent") # Create initial thread and have a conversation diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py index b6fa97539a..fd5bda8551 100644 --- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py +++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_human_in_the_loop.py @@ -179,7 +179,9 @@ def create_workflow(checkpoint_storage: FileCheckpointStorage) -> Workflow: # module docstring. Because `WorkflowBuilder` is declarative, reading these # edges is often the quickest way to understand execution order. workflow_builder = ( - WorkflowBuilder(max_iterations=6) + WorkflowBuilder( + max_iterations=6, start_executor="prepare_brief", checkpoint_storage=checkpoint_storage + ) .register_agent( lambda: AzureOpenAIChatClient(credential=AzureCliCredential()).as_agent( instructions="Write concise, warm release notes that sound human and helpful.", @@ -190,11 +192,9 @@ def create_workflow(checkpoint_storage: FileCheckpointStorage) -> Workflow: ) .register_executor(lambda: ReviewGateway(id="review_gateway", writer_id="writer"), name="review_gateway") .register_executor(lambda: BriefPreparer(id="prepare_brief", agent_id="writer"), name="prepare_brief") - .set_start_executor("prepare_brief") .add_edge("prepare_brief", "writer") .add_edge("writer", "review_gateway") .add_edge("review_gateway", "writer") # revisions loop - .with_checkpointing(checkpoint_storage=checkpoint_storage) ) return workflow_builder.build() diff --git a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py index ff23b1af5b..7d453b6126 100644 --- a/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py +++ b/python/samples/getting_started/workflows/checkpoint/checkpoint_with_resume.py @@ -104,16 +104,14 @@ async def on_checkpoint_restore(self, state: dict[str, Any]) -> None: async def main(): # Build workflow with checkpointing enabled + checkpoint_storage = InMemoryCheckpointStorage() workflow_builder = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="start", checkpoint_storage=checkpoint_storage) .register_executor(lambda: StartExecutor(id="start"), name="start") .register_executor(lambda: WorkerExecutor(id="worker"), name="worker") - .set_start_executor("start") .add_edge("start", "worker") .add_edge("worker", "worker") # Self-loop for iterative processing ) - checkpoint_storage = InMemoryCheckpointStorage() - workflow_builder = workflow_builder.with_checkpointing(checkpoint_storage=checkpoint_storage) # Run workflow with automatic checkpoint recovery latest_checkpoint: WorkflowCheckpoint | None = None diff --git a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py index a89a848257..99875c94c6 100644 --- a/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py +++ 
b/python/samples/getting_started/workflows/checkpoint/handoff_with_tool_approval_checkpoint_resume.py @@ -97,17 +97,16 @@ def create_workflow(checkpoint_storage: FileCheckpointStorage) -> tuple[Workflow client = AzureOpenAIChatClient(credential=AzureCliCredential()) triage, refund, order = create_agents(client) + # checkpoint_storage: Enable checkpointing for resume + # termination_condition: Terminate after 5 user messages for this demo workflow = ( HandoffBuilder( name="checkpoint_handoff_demo", participants=[triage, refund, order], + checkpoint_storage=checkpoint_storage, + termination_condition=lambda conv: sum(1 for msg in conv if msg.role == "user") >= 5, ) .with_start_agent(triage) - .with_checkpointing(checkpoint_storage) - .with_termination_condition( - # Terminate after 5 user messages for this demo - lambda conv: sum(1 for msg in conv if msg.role == "user") >= 5 - ) .build() ) diff --git a/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py index 770a4ee81c..c975a10ae1 100644 --- a/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py +++ b/python/samples/getting_started/workflows/checkpoint/sub_workflow_checkpoint.py @@ -298,11 +298,10 @@ async def on_checkpoint_restore(self, state: dict[str, Any]) -> None: def build_sub_workflow() -> WorkflowExecutor: """Assemble the sub-workflow used by the parent workflow executor.""" sub_workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="writer") .register_executor(DraftWriter, name="writer") .register_executor(DraftReviewRouter, name="router") .register_executor(DraftFinaliser, name="finaliser") - .set_start_executor("writer") .add_edge("writer", "router") .add_edge("router", "finaliser") .add_edge("finaliser", "writer") # permits revision loops @@ -315,13 +314,11 @@ def build_sub_workflow() -> WorkflowExecutor: def build_parent_workflow(storage: FileCheckpointStorage) -> Workflow: """Assemble the parent workflow that embeds the sub-workflow.""" return ( - WorkflowBuilder() + WorkflowBuilder(start_executor="coordinator", checkpoint_storage=storage) .register_executor(LaunchCoordinator, name="coordinator") .register_executor(build_sub_workflow, name="sub_executor") - .set_start_executor("coordinator") .add_edge("coordinator", "sub_executor") .add_edge("sub_executor", "coordinator") - .with_checkpointing(storage) .build() ) diff --git a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py index 52d2f99843..18a0cf9258 100644 --- a/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py +++ b/python/samples/getting_started/workflows/checkpoint/workflow_as_agent_checkpoint.py @@ -56,7 +56,7 @@ def create_reviewer() -> ChatAgent: ) # Build sequential workflow with participant factories - workflow = SequentialBuilder().register_participants([create_assistant, create_reviewer]).build() + workflow = SequentialBuilder(participant_factories=[create_assistant, create_reviewer]).build() agent = workflow.as_agent(name="CheckpointedAgent") # Create checkpoint storage @@ -93,7 +93,7 @@ def create_assistant() -> ChatAgent: instructions="You are a helpful assistant with good memory. 
Reference previous conversation when relevant.", ) - workflow = SequentialBuilder().register_participants([create_assistant]).build() + workflow = SequentialBuilder(participant_factories=[create_assistant]).build() agent = workflow.as_agent(name="MemoryAgent") # Create both thread (for conversation) and checkpoint storage (for workflow state) @@ -137,7 +137,7 @@ def create_assistant() -> ChatAgent: instructions="You are a helpful assistant.", ) - workflow = SequentialBuilder().register_participants([create_assistant]).build() + workflow = SequentialBuilder(participant_factories=[create_assistant]).build() agent = workflow.as_agent(name="StreamingCheckpointAgent") checkpoint_storage = InMemoryCheckpointStorage() diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_basics.py b/python/samples/getting_started/workflows/composition/sub_workflow_basics.py index 826425a0ae..9d5168db80 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_basics.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_basics.py @@ -141,9 +141,8 @@ def create_sub_workflow() -> WorkflowExecutor: print("🚀 Setting up sub-workflow...") processing_workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="text_processor") .register_executor(TextProcessor, name="text_processor") - .set_start_executor("text_processor") .build() ) @@ -155,10 +154,9 @@ async def main(): print("🔧 Setting up parent workflow...") # Step 1: Create the parent workflow main_workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="text_orchestrator") .register_executor(TextProcessingOrchestrator, name="text_orchestrator") .register_executor(create_sub_workflow, name="text_processor_workflow") - .set_start_executor("text_orchestrator") .add_edge("text_orchestrator", "text_processor_workflow") .add_edge("text_processor_workflow", "text_orchestrator") .build() diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py index 4c77fc5202..5d74ec42d3 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_kwargs.py @@ -88,7 +88,7 @@ async def main() -> None: ) # Build the inner (sub) workflow with the agent - inner_workflow = SequentialBuilder().participants([inner_agent]).build() + inner_workflow = SequentialBuilder(participants=[inner_agent]).build() # Wrap the inner workflow in a WorkflowExecutor to use it as a sub-workflow subworkflow_executor = WorkflowExecutor( @@ -97,7 +97,7 @@ async def main() -> None: ) # Build the outer (parent) workflow containing the sub-workflow - outer_workflow = SequentialBuilder().participants([subworkflow_executor]).build() + outer_workflow = SequentialBuilder(participants=[subworkflow_executor]).build() # Define custom context that will flow through to the sub-workflow's agent user_token = { diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py b/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py index e3c067fcb8..c272d7d21c 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_parallel_requests.py @@ -170,12 +170,11 @@ async def collect(self, response: ResourceResponse | PolicyResponse, ctx: Workfl raise ValueError("Received 
more responses than expected") return ( - WorkflowBuilder() + WorkflowBuilder(start_executor="orchestrator") .register_executor(lambda: RequestDistribution("orchestrator"), name="orchestrator") .register_executor(lambda: ResourceRequester("resource_requester"), name="resource_requester") .register_executor(lambda: PolicyChecker("policy_checker"), name="policy_checker") .register_executor(lambda: ResultCollector("result_collector"), name="result_collector") - .set_start_executor("orchestrator") .add_edge("orchestrator", "resource_requester") .add_edge("orchestrator", "policy_checker") .add_edge("resource_requester", "result_collector") @@ -289,7 +288,7 @@ async def handle_external_response( async def main() -> None: # Build the main workflow main_workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="sub_workflow_executor") .register_executor(lambda: ResourceAllocator("resource_allocator"), name="resource_allocator") .register_executor(lambda: PolicyEngine("policy_engine"), name="policy_engine") .register_executor( @@ -303,7 +302,6 @@ async def main() -> None: ), name="sub_workflow_executor", ) - .set_start_executor("sub_workflow_executor") .add_edge("sub_workflow_executor", "resource_allocator") .add_edge("resource_allocator", "sub_workflow_executor") .add_edge("sub_workflow_executor", "policy_engine") diff --git a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py index 9b0637652b..b5fe3fb7b4 100644 --- a/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py +++ b/python/samples/getting_started/workflows/composition/sub_workflow_request_interception.py @@ -154,11 +154,10 @@ async def handle_domain_validation_response( # Build the workflow return ( - WorkflowBuilder() + WorkflowBuilder(start_executor="email_sanitizer") .register_executor(lambda: EmailSanitizer(id="email_sanitizer"), name="email_sanitizer") .register_executor(lambda: EmailFormatValidator(id="email_format_validator"), name="email_format_validator") .register_executor(lambda: DomainValidator(id="domain_validator"), name="domain_validator") - .set_start_executor("email_sanitizer") .add_edge("email_sanitizer", "email_format_validator") .add_edge("email_format_validator", "domain_validator") .build() @@ -270,7 +269,7 @@ async def main() -> None: # Build the main workflow workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="smart_email_orchestrator") .register_executor( lambda: SmartEmailOrchestrator(id="smart_email_orchestrator", approved_domains=approved_domains), name="smart_email_orchestrator", @@ -280,7 +279,6 @@ async def main() -> None: lambda: WorkflowExecutor(build_email_address_validation_workflow(), id="email_validation_workflow"), name="email_validation_workflow", ) - .set_start_executor("smart_email_orchestrator") .add_edge("smart_email_orchestrator", "email_validation_workflow") .add_edge("email_validation_workflow", "smart_email_orchestrator") .add_edge("smart_email_orchestrator", "email_delivery") diff --git a/python/samples/getting_started/workflows/control-flow/edge_condition.py b/python/samples/getting_started/workflows/control-flow/edge_condition.py index 8c7dc4b760..1f5636764d 100644 --- a/python/samples/getting_started/workflows/control-flow/edge_condition.py +++ b/python/samples/getting_started/workflows/control-flow/edge_condition.py @@ -162,13 +162,12 @@ async def main() -> None: # then call the email assistant, 
then finalize. # If spam, go directly to the spam handler and finalize. workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="spam_detection_agent") .register_agent(create_spam_detector_agent, name="spam_detection_agent") .register_agent(create_email_assistant_agent, name="email_assistant_agent") .register_executor(lambda: to_email_assistant_request, name="to_email_assistant_request") .register_executor(lambda: handle_email_response, name="send_email") .register_executor(lambda: handle_spam_classifier_response, name="handle_spam") - .set_start_executor("spam_detection_agent") # Not spam path: transform response -> request for assistant -> assistant -> send email .add_edge("spam_detection_agent", "to_email_assistant_request", condition=get_condition(False)) .add_edge("to_email_assistant_request", "email_assistant_agent") diff --git a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py index 67058435c9..d2739b410e 100644 --- a/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/multi_selection_edge_group.py @@ -225,7 +225,7 @@ def select_targets(analysis: AnalysisResult, target_ids: list[str]) -> list[str] return [handle_uncertain_id] workflow_builder = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="store_email") .register_agent(create_email_analysis_agent, name="email_analysis_agent") .register_agent(create_email_assistant_agent, name="email_assistant_agent") .register_agent(create_email_summary_agent, name="email_summary_agent") @@ -242,7 +242,6 @@ def select_targets(analysis: AnalysisResult, target_ids: list[str]) -> list[str] workflow = ( workflow_builder - .set_start_executor("store_email") .add_edge("store_email", "email_analysis_agent") .add_edge("email_analysis_agent", "to_analysis_result") .add_multi_selection_edge_group( diff --git a/python/samples/getting_started/workflows/control-flow/sequential_executors.py b/python/samples/getting_started/workflows/control-flow/sequential_executors.py index d69aafcfe9..bae05bf302 100644 --- a/python/samples/getting_started/workflows/control-flow/sequential_executors.py +++ b/python/samples/getting_started/workflows/control-flow/sequential_executors.py @@ -63,11 +63,10 @@ async def main() -> None: # Step 1: Build the workflow graph. # Order matters. We connect upper_case_executor -> reverse_text_executor and set the start. workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="upper_case_executor") .register_executor(lambda: UpperCaseExecutor(id="upper_case_executor"), name="upper_case_executor") .register_executor(lambda: ReverseTextExecutor(id="reverse_text_executor"), name="reverse_text_executor") .add_edge("upper_case_executor", "reverse_text_executor") - .set_start_executor("upper_case_executor") .build() ) diff --git a/python/samples/getting_started/workflows/control-flow/sequential_streaming.py b/python/samples/getting_started/workflows/control-flow/sequential_streaming.py index cb06157d1a..3be1a4ef8d 100644 --- a/python/samples/getting_started/workflows/control-flow/sequential_streaming.py +++ b/python/samples/getting_started/workflows/control-flow/sequential_streaming.py @@ -56,11 +56,10 @@ async def main(): # Step 1: Build the workflow with the defined edges. # Order matters. upper_case_executor runs first, then reverse_text_executor. 
workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="upper_case_executor") .register_executor(lambda: to_upper_case, name="upper_case_executor") .register_executor(lambda: reverse_text, name="reverse_text_executor") .add_edge("upper_case_executor", "reverse_text_executor") - .set_start_executor("upper_case_executor") .build() ) diff --git a/python/samples/getting_started/workflows/control-flow/simple_loop.py b/python/samples/getting_started/workflows/control-flow/simple_loop.py index e9fca78510..21e7907a5f 100644 --- a/python/samples/getting_started/workflows/control-flow/simple_loop.py +++ b/python/samples/getting_started/workflows/control-flow/simple_loop.py @@ -126,7 +126,7 @@ async def main(): # Step 1: Build the workflow with the defined edges. # This time we are creating a loop in the workflow. workflow = ( - WorkflowBuilder() + WorkflowBuilder(start_executor="guess_number") .register_executor(lambda: GuessNumberExecutor((1, 100), "guess_number"), name="guess_number") .register_agent(create_judge_agent, name="judge_agent") .register_executor(lambda: SubmitToJudgeAgent(judge_agent_id="judge_agent", target=30), name="submit_judge") @@ -135,7 +135,6 @@ async def main(): .add_edge("submit_judge", "judge_agent") .add_edge("judge_agent", "parse_judge") .add_edge("parse_judge", "guess_number") - .set_start_executor("guess_number") .build() ) diff --git a/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py b/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py index b4d1852e9a..640119347c 100644 --- a/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py +++ b/python/samples/getting_started/workflows/control-flow/switch_case_edge_group.py @@ -179,7 +179,7 @@ async def main(): # Build workflow: store -> detection agent -> to_detection_result -> switch (NotSpam or Spam or Default). # The switch-case group evaluates cases in order, then falls back to Default when none match. 
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor="store_email")
         .register_agent(create_spam_detection_agent, name="spam_detection_agent")
         .register_agent(create_email_assistant_agent, name="email_assistant_agent")
         .register_executor(lambda: store_email, name="store_email")
@@ -188,7 +188,6 @@
         .register_executor(lambda: finalize_and_send, name="finalize_and_send")
         .register_executor(lambda: handle_spam, name="handle_spam")
         .register_executor(lambda: handle_uncertain, name="handle_uncertain")
-        .set_start_executor("store_email")
         .add_edge("store_email", "spam_detection_agent")
         .add_edge("spam_detection_agent", "to_detection_result")
         .add_switch_case_edge_group(
diff --git a/python/samples/getting_started/workflows/control-flow/workflow_cancellation.py b/python/samples/getting_started/workflows/control-flow/workflow_cancellation.py
index e921fbe9cf..d553331fad 100644
--- a/python/samples/getting_started/workflows/control-flow/workflow_cancellation.py
+++ b/python/samples/getting_started/workflows/control-flow/workflow_cancellation.py
@@ -51,13 +51,12 @@ async def step3(text: str, ctx: WorkflowContext[Never, str]) -> None:
 def build_workflow():
     """Build a simple 3-step sequential workflow (~6 seconds total)."""
     return (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor="step1")
         .register_executor(lambda: step1, name="step1")
         .register_executor(lambda: step2, name="step2")
         .register_executor(lambda: step3, name="step3")
         .add_edge("step1", "step2")
         .add_edge("step2", "step3")
-        .set_start_executor("step1")
         .build()
     )
diff --git a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py
index e49642ac72..16810b68a9 100644
--- a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py
+++ b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_HITL.py
@@ -184,8 +184,7 @@ async def main() -> None:
     # Build the workflow.
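+    # Graph shape: writer_agent and coordinator form a revision loop, and the
+    # final editor's output is also routed back through the coordinator.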
     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(writer_agent)
+        WorkflowBuilder(start_executor=writer_agent)
         .add_edge(writer_agent, coordinator)
         .add_edge(coordinator, writer_agent)
         .add_edge(final_editor_agent, coordinator)
diff --git a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py
index 72d4f11501..c0d935bc03 100644
--- a/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py
+++ b/python/samples/getting_started/workflows/human-in-the-loop/agents_with_approval_requests.py
@@ -233,11 +233,9 @@ async def main() -> None:
     # Build the workflow
     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(email_processor)
+        WorkflowBuilder(start_executor=email_processor, output_executors=[conclude_workflow])
         .add_edge(email_processor, email_writer_agent)
         .add_edge(email_writer_agent, conclude_workflow)
-        .with_output_from([conclude_workflow])
         .build()
     )
diff --git a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py
index 3575610676..fbc996038c 100644
--- a/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py
+++ b/python/samples/getting_started/workflows/human-in-the-loop/concurrent_request_info.py
@@ -174,8 +174,7 @@ async def main() -> None:
     # Build workflow with request info enabled and custom aggregator
     workflow = (
-        ConcurrentBuilder()
-        .participants([technical_analyst, business_analyst, user_experience_analyst])
+        ConcurrentBuilder(participants=[technical_analyst, business_analyst, user_experience_analyst])
         .with_aggregator(aggregate_with_synthesis)
         # Only enable request info for the technical analyst agent
         .with_request_info(agents=["technical_analyst"])
diff --git a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py
index 7552bcf8e0..6a400a5bab 100644
--- a/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py
+++ b/python/samples/getting_started/workflows/human-in-the-loop/group_chat_request_info.py
@@ -137,11 +137,13 @@ async def main() -> None:
     # Build workflow with request info enabled
     # Using agents= filter to only pause before pragmatist speaks (not every turn)
+    # max_rounds=6: cap the conversation at 6 rounds
     workflow = (
-        GroupChatBuilder()
-        .with_orchestrator(agent=orchestrator)
-        .participants([optimist, pragmatist, creative])
-        .with_max_rounds(6)
+        GroupChatBuilder(
+            participants=[optimist, pragmatist, creative],
+            max_rounds=6,
+            orchestrator_agent=orchestrator,
+        )
         .with_request_info(agents=[pragmatist])  # Only pause before pragmatist speaks
         .build()
     )
diff --git a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py
index 68c7cd912f..fcadfe1575 100644
--- a/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py
+++ b/python/samples/getting_started/workflows/human-in-the-loop/guessing_game_with_human_input.py
@@ -198,8 +198,7 @@ async def main() -> None:
     # Build a simple loop: TurnManager <-> AgentExecutor.
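+    # The turn manager prompts the agent for a guess and the agent's reply loops
+    # straight back to the turn manager, so the two executors simply alternate.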
     workflow = (
-        WorkflowBuilder()
-        .set_start_executor(turn_manager)
+        WorkflowBuilder(start_executor=turn_manager)
         .add_edge(turn_manager, guessing_agent)  # Ask agent to make/adjust a guess
         .add_edge(guessing_agent, turn_manager)  # Agent's response comes back to coordinator
     ).build()
diff --git a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py
index 2e0424d410..503f016a71 100644
--- a/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py
+++ b/python/samples/getting_started/workflows/human-in-the-loop/sequential_request_info.py
@@ -114,8 +114,7 @@ async def main() -> None:
     # Build workflow with request info enabled (pauses after each agent responds)
     workflow = (
-        SequentialBuilder()
-        .participants([drafter, editor, finalizer])
+        SequentialBuilder(participants=[drafter, editor, finalizer])
         # Only enable request info for the editor agent
         .with_request_info(agents=["editor"])
         .build()
diff --git a/python/samples/getting_started/workflows/observability/executor_io_observation.py b/python/samples/getting_started/workflows/observability/executor_io_observation.py
index 822d0a7c72..3129fcf158 100644
--- a/python/samples/getting_started/workflows/observability/executor_io_observation.py
+++ b/python/samples/getting_started/workflows/observability/executor_io_observation.py
@@ -84,7 +84,7 @@ async def main() -> None:
     upper_case = UpperCaseExecutor()
     reverse_text = ReverseTextExecutor()
 
-    workflow = WorkflowBuilder().add_edge(upper_case, reverse_text).set_start_executor(upper_case).build()
+    workflow = WorkflowBuilder(start_executor=upper_case).add_edge(upper_case, reverse_text).build()
 
     print("Running workflow with executor I/O observation...\n")
diff --git a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py b/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py
deleted file mode 100644
index 8107b387a8..0000000000
--- a/python/samples/getting_started/workflows/orchestration/magentic_human_plan_review.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright (c) Microsoft. All rights reserved.
-
-import asyncio
-import json
-from typing import cast
-
-from agent_framework import (
-    AgentRunUpdateEvent,
-    ChatAgent,
-    ChatMessage,
-    MagenticBuilder,
-    MagenticPlanReviewRequest,
-    WorkflowEvent,
-)
-from agent_framework.openai import OpenAIChatClient
-
-"""
-Sample: Magentic Orchestration with Human Plan Review
-
-This sample demonstrates how humans can review and provide feedback on plans
-generated by the Magentic workflow orchestrator. When plan review is enabled,
-the workflow requests human approval or revision before executing each plan.
-
-Key concepts:
-- with_plan_review(): Enables human review of generated plans
-- MagenticPlanReviewRequest: The event type for plan review requests
-- Human can choose to: approve the plan or provide revision feedback
-
-Plan review options:
-- approve(): Accept the proposed plan and continue execution
-- revise(feedback): Provide textual feedback to modify the plan
-
-Prerequisites:
-- OpenAI credentials configured for `OpenAIChatClient`.
-"""
-
-
-async def main() -> None:
-    researcher_agent = ChatAgent(
-        name="ResearcherAgent",
-        description="Specialist in research and information gathering",
-        instructions="You are a Researcher. You find information and gather facts.",
-        chat_client=OpenAIChatClient(model_id="gpt-4o"),
-    )
-
-    analyst_agent = ChatAgent(
-        name="AnalystAgent",
-        description="Data analyst who processes and summarizes research findings",
-        instructions="You are an Analyst. You analyze findings and create summaries.",
-        chat_client=OpenAIChatClient(model_id="gpt-4o"),
-    )
-
-    manager_agent = ChatAgent(
-        name="MagenticManager",
-        description="Orchestrator that coordinates the workflow",
-        instructions="You coordinate a team to complete tasks efficiently.",
-        chat_client=OpenAIChatClient(model_id="gpt-4o"),
-    )
-
-    print("\nBuilding Magentic Workflow with Human Plan Review...")
-
-    workflow = (
-        MagenticBuilder()
-        .participants([researcher_agent, analyst_agent])
-        .with_manager(
-            agent=manager_agent,
-            max_round_count=10,
-            max_stall_count=1,
-            max_reset_count=2,
-        )
-        .with_plan_review()  # Request human input for plan review
-        .build()
-    )
-
-    task = "Research sustainable aviation fuel technology and summarize the findings."
-
-    print(f"\nTask: {task}")
-    print("\nStarting workflow execution...")
-    print("=" * 60)
-
-    pending_request: WorkflowEvent | None = None
-    pending_responses: dict[str, object] | None = None
-    output_event: WorkflowEvent | None = None
-
-    while not output_event:
-        if pending_responses is not None:
-            stream = workflow.run(stream=True, responses=pending_responses)
-        else:
-            stream = workflow.run(task, stream=True)
-
-        last_message_id: str | None = None
-        async for event in stream:
-            if isinstance(event, AgentRunUpdateEvent):
-                message_id = event.data.message_id
-                if message_id != last_message_id:
-                    if last_message_id is not None:
-                        print("\n")
-                    print(f"- {event.executor_id}:", end=" ", flush=True)
-                    last_message_id = message_id
-                print(event.data, end="", flush=True)
-
-            elif event.type == "request_info" and event.request_type is MagenticPlanReviewRequest:
-                pending_request = event
-
-            elif event.type == "output":
-                output_event = event
-
-        pending_responses = None
-
-        # Handle plan review request if any
-        if pending_request is not None:
-            event_data = cast(MagenticPlanReviewRequest, pending_request.data)
-
-            print("\n\n[Magentic Plan Review Request]")
-            if event_data.current_progress is not None:
-                print("Current Progress Ledger:")
-                print(json.dumps(event_data.current_progress.to_dict(), indent=2))
-                print()
-            print(f"Proposed Plan:\n{event_data.plan.text}\n")
-            print("Please provide your feedback (press Enter to approve):")
-
-            reply = await asyncio.get_event_loop().run_in_executor(None, input, "> ")
-            if reply.strip() == "":
-                print("Plan approved.\n")
-                pending_responses = {pending_request.request_id: event_data.approve()}
-            else:
-                print("Plan revised by human.\n")
-                pending_responses = {pending_request.request_id: event_data.revise(reply)}
-            pending_request = None
-
-    print("\n" + "=" * 60)
-    print("WORKFLOW COMPLETED")
-    print("=" * 60)
-    print("Final Output:")
-    # The output of the Magentic workflow is a list of ChatMessages with only one final message
-    # generated by the orchestrator.
-    output_messages = cast(list[ChatMessage], output_event.data)
-    if output_messages:
-        output = output_messages[-1].text
-        print(output)
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py b/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py
index e4550c1ab2..6338b35c04 100644
--- a/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py
+++ b/python/samples/getting_started/workflows/parallelism/aggregate_results_of_different_types.py
@@ -73,12 +73,11 @@ async def handle(self, results: list[int | float], ctx: WorkflowContext[Never, l
 async def main() -> None:
     # 1) Build a simple fan out and fan in workflow
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor="dispatcher")
         .register_executor(lambda: Dispatcher(id="dispatcher"), name="dispatcher")
         .register_executor(lambda: Average(id="average"), name="average")
         .register_executor(lambda: Sum(id="summation"), name="summation")
         .register_executor(lambda: Aggregator(id="aggregator"), name="aggregator")
-        .set_start_executor("dispatcher")
         .add_fan_out_edges("dispatcher", ["average", "summation"])
         .add_fan_in_edges(["average", "summation"], "aggregator")
         .build()
diff --git a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py
index 2be9bc09f7..bb359262db 100644
--- a/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py
+++ b/python/samples/getting_started/workflows/parallelism/fan_out_fan_in_edges.py
@@ -123,13 +123,12 @@ def create_legal_agent() -> ChatAgent:
 async def main() -> None:
     # 1) Build a simple fan out and fan in workflow
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor="dispatcher")
         .register_agent(create_researcher_agent, name="researcher")
         .register_agent(create_marketer_agent, name="marketer")
         .register_agent(create_legal_agent, name="legal")
         .register_executor(lambda: DispatchToExperts(id="dispatcher"), name="dispatcher")
         .register_executor(lambda: AggregateInsights(id="aggregator"), name="aggregator")
-        .set_start_executor("dispatcher")
         .add_fan_out_edges("dispatcher", ["researcher", "marketer", "legal"])  # Parallel branches
         .add_fan_in_edges(["researcher", "marketer", "legal"], "aggregator")  # Join at the aggregator
         .build()
diff --git a/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py b/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py
index 99494c59f4..1450399952 100644
--- a/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py
+++ b/python/samples/getting_started/workflows/parallelism/map_reduce_and_visualization.py
@@ -261,7 +261,7 @@ async def main():
     # Step 1: Create the workflow builder and register executors.
     workflow_builder = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor="split_data_executor")
         .register_executor(lambda: Map(id="map_executor_0"), name="map_executor_0")
         .register_executor(lambda: Map(id="map_executor_1"), name="map_executor_1")
         .register_executor(lambda: Map(id="map_executor_2"), name="map_executor_2")
@@ -286,7 +286,6 @@ async def main():
     # Step 2: Build the workflow graph using fan out and fan in edges.
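+    # split_data_executor fans out to the three map executors registered above, so
+    # each mapper processes its shard in parallel before results are joined again.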
     workflow = (
         workflow_builder
-        .set_start_executor("split_data_executor")
         .add_fan_out_edges(
             "split_data_executor",
             ["map_executor_0", "map_executor_1", "map_executor_2"],
diff --git a/python/samples/getting_started/workflows/state-management/state_with_agents.py b/python/samples/getting_started/workflows/state-management/state_with_agents.py
index 1844ae40e3..929dc40362 100644
--- a/python/samples/getting_started/workflows/state-management/state_with_agents.py
+++ b/python/samples/getting_started/workflows/state-management/state_with_agents.py
@@ -189,7 +189,7 @@ async def main() -> None:
     #   False -> submit_to_email_assistant -> email_assistant_agent -> finalize_and_send
     #   True -> handle_spam
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor="store_email")
         .register_agent(create_spam_detection_agent, name="spam_detection_agent")
         .register_agent(create_email_assistant_agent, name="email_assistant_agent")
         .register_executor(lambda: store_email, name="store_email")
@@ -197,7 +197,6 @@ async def main() -> None:
         .register_executor(lambda: submit_to_email_assistant, name="submit_to_email_assistant")
         .register_executor(lambda: finalize_and_send, name="finalize_and_send")
         .register_executor(lambda: handle_spam, name="handle_spam")
-        .set_start_executor("store_email")
         .add_edge("store_email", "spam_detection_agent")
         .add_edge("spam_detection_agent", "to_detection_result")
         .add_edge("to_detection_result", "submit_to_email_assistant", condition=get_condition(False))
diff --git a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py
index 25e46ab343..d89115463f 100644
--- a/python/samples/getting_started/workflows/state-management/workflow_kwargs.py
+++ b/python/samples/getting_started/workflows/state-management/workflow_kwargs.py
@@ -88,7 +88,7 @@ async def main() -> None:
     )
 
     # Build a simple sequential workflow
-    workflow = SequentialBuilder().participants([agent]).build()
+    workflow = SequentialBuilder(participants=[agent]).build()
 
     # Define custom context that will flow to tools via kwargs
     custom_data = {
diff --git a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py
index 56ffe96484..6eb6e2bc6a 100644
--- a/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py
+++ b/python/samples/getting_started/workflows/tool-approval/concurrent_builder_tool_approval.py
@@ -148,7 +148,7 @@ async def main() -> None:
     # 4. Build a concurrent workflow with both agents
     # ConcurrentBuilder requires at least 2 participants for fan-out
-    workflow = ConcurrentBuilder().participants([microsoft_agent, google_agent]).build()
+    workflow = ConcurrentBuilder(participants=[microsoft_agent, google_agent]).build()
 
     # 5. Start the workflow - both agents will process the same task in parallel
     print("Starting concurrent workflow with tool approval...")
diff --git a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py
index 7dad8c93a3..f00f79698a 100644
--- a/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py
+++ b/python/samples/getting_started/workflows/tool-approval/group_chat_builder_tool_approval.py
@@ -146,18 +146,16 @@ async def main() -> None:
     )
 
     # 4. Build a group chat workflow with the selector function
-    workflow = (
-        GroupChatBuilder()
-        .with_orchestrator(selection_func=select_next_speaker)
-        .participants([qa_engineer, devops_engineer])
-        # Set a hard limit to 4 rounds
-        # First round: QAEngineer speaks
-        # Second round: DevOpsEngineer speaks (check staging + create rollback)
-        # Third round: DevOpsEngineer speaks with an approval request (deploy to production)
-        # Fourth round: DevOpsEngineer speaks again after approval
-        .with_max_rounds(4)
-        .build()
-    )
+    # max_rounds=4: Set a hard limit to 4 rounds
+    # First round: QAEngineer speaks
+    # Second round: DevOpsEngineer speaks (check staging + create rollback)
+    # Third round: DevOpsEngineer speaks with an approval request (deploy to production)
+    # Fourth round: DevOpsEngineer speaks again after approval
+    workflow = GroupChatBuilder(
+        participants=[qa_engineer, devops_engineer],
+        max_rounds=4,
+        selection_func=select_next_speaker,
+    ).build()
 
     # 5. Start the workflow
     print("Starting group chat workflow for software deployment...")
diff --git a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py
index ee2d4b3988..c203ecc084 100644
--- a/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py
+++ b/python/samples/getting_started/workflows/tool-approval/sequential_builder_tool_approval.py
@@ -111,7 +111,7 @@ async def main() -> None:
     )
 
     # 3. Build a sequential workflow with the agent
-    workflow = SequentialBuilder().participants([database_agent]).build()
+    workflow = SequentialBuilder(participants=[database_agent]).build()
 
     # 4. Start the workflow with a user task
     print("Starting sequential workflow with tool approval...")
diff --git a/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py b/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py
index 68b68c4a7a..a1c1086eec 100644
--- a/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py
+++ b/python/samples/getting_started/workflows/visualization/concurrent_with_visualization.py
@@ -123,13 +123,12 @@ async def main() -> None:
     # Build a simple fan-out/fan-in workflow
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor="dispatcher")
         .register_agent(create_researcher_agent, name="researcher")
         .register_agent(create_marketer_agent, name="marketer")
         .register_agent(create_legal_agent, name="legal")
         .register_executor(lambda: DispatchToExperts(id="dispatcher"), name="dispatcher")
         .register_executor(lambda: AggregateInsights(id="aggregator"), name="aggregator")
-        .set_start_executor("dispatcher")
         .add_fan_out_edges("dispatcher", ["researcher", "marketer", "legal"])
         .add_fan_in_edges(["researcher", "marketer", "legal"], "aggregator")
         .build()
diff --git a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py
index 18afcda4d0..a5b012da94 100644
--- a/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py
+++ b/python/samples/semantic-kernel-migration/orchestrations/concurrent_basic.py
@@ -87,7 +87,7 @@ async def run_agent_framework_example(prompt: str) -> Sequence[list[ChatMessage]
         name="chemistry",
     )
 
-    workflow = ConcurrentBuilder().participants([physics, chemistry]).build()
+    workflow = ConcurrentBuilder(participants=[physics, chemistry]).build()
 
     outputs: list[list[ChatMessage]] = []
     async for event in workflow.run(prompt, stream=True):
diff --git a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py
index 2c8e82e9bd..dda7e7922c 100644
--- a/python/samples/semantic-kernel-migration/orchestrations/group_chat.py
+++ b/python/samples/semantic-kernel-migration/orchestrations/group_chat.py
@@ -7,8 +7,9 @@
 from collections.abc import Sequence
 from typing import Any, cast
 
-from agent_framework import ChatAgent, ChatMessage, GroupChatBuilder, WorkflowEvent
+from agent_framework import ChatAgent, ChatMessage
 from agent_framework.azure import AzureOpenAIChatClient, AzureOpenAIResponsesClient
+from agent_framework.orchestrations import GroupChatBuilder
 from azure.identity import AzureCliCredential
 from semantic_kernel.agents import Agent, ChatCompletionAgent, GroupChatOrchestration
 from semantic_kernel.agents.orchestration.group_chat import (
@@ -231,12 +232,10 @@ async def run_agent_framework_example(task: str) -> str:
         chat_client=AzureOpenAIResponsesClient(credential=credential),
     )
 
-    workflow = (
-        GroupChatBuilder()
-        .with_orchestrator(agent=AzureOpenAIChatClient(credential=credential).as_agent())
-        .participants([researcher, planner])
-        .build()
-    )
+    workflow = GroupChatBuilder(
+        participants=[researcher, planner],
+        orchestrator_agent=AzureOpenAIChatClient(credential=credential).as_agent(),
+    ).build()
 
     final_response = ""
     async for event in workflow.run(task, stream=True):
diff --git a/python/samples/semantic-kernel-migration/orchestrations/magentic.py b/python/samples/semantic-kernel-migration/orchestrations/magentic.py
index 9c4aea6187..4eef2e9dec 100644
--- a/python/samples/semantic-kernel-migration/orchestrations/magentic.py
+++ b/python/samples/semantic-kernel-migration/orchestrations/magentic.py
@@ -6,8 +6,9 @@
 from collections.abc import Sequence
 from typing import cast
 
-from agent_framework import ChatAgent, HostedCodeInterpreterTool, MagenticBuilder, WorkflowEvent
+from agent_framework import ChatAgent, HostedCodeInterpreterTool
 from agent_framework.openai import OpenAIChatClient, OpenAIResponsesClient
+from agent_framework.orchestrations import MagenticBuilder
 from semantic_kernel.agents import (
     Agent,
     ChatCompletionAgent,
@@ -144,7 +145,7 @@ async def run_agent_framework_example(prompt: str) -> str | None:
         chat_client=OpenAIChatClient(),
     )
 
-    workflow = MagenticBuilder().participants([researcher, coder]).with_manager(agent=manager_agent).build()
+    workflow = MagenticBuilder(participants=[researcher, coder], manager_agent=manager_agent).build()
 
     final_text: str | None = None
     async for event in workflow.run(prompt, stream=True):
diff --git a/python/samples/semantic-kernel-migration/orchestrations/sequential.py b/python/samples/semantic-kernel-migration/orchestrations/sequential.py
index 91d23b02c8..a810b3178b 100644
--- a/python/samples/semantic-kernel-migration/orchestrations/sequential.py
+++ b/python/samples/semantic-kernel-migration/orchestrations/sequential.py
@@ -74,7 +74,7 @@ async def run_agent_framework_example(prompt: str) -> list[ChatMessage]:
         name="reviewer",
     )
 
-    workflow = SequentialBuilder().participants([writer, reviewer]).build()
+    workflow = SequentialBuilder(participants=[writer, reviewer]).build()
 
     conversation_outputs: list[list[ChatMessage]] = []
     async for event in workflow.run(prompt, stream=True):
diff --git a/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py b/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py
index 3ddb656abf..efd2253323 100644
--- a/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py
+++ b/python/samples/semantic-kernel-migration/processes/fan_out_fan_in_process.py
@@ -221,12 +221,11 @@ async def run_agent_framework_workflow_example() -> str | None:
     aggregate = FanInExecutor(required_cycles=3)
 
     workflow = (
-        WorkflowBuilder()
+        WorkflowBuilder(start_executor=kickoff)
         .add_edge(kickoff, step_a)
         .add_edge(kickoff, step_b)
         .add_fan_in_edges([step_a, step_b], aggregate)
         .add_edge(aggregate, kickoff)
-        .set_start_executor(kickoff)
         .build()
     )
diff --git a/python/samples/semantic-kernel-migration/processes/nested_process.py b/python/samples/semantic-kernel-migration/processes/nested_process.py
index 849457d324..ab1b2bb64c 100644
--- a/python/samples/semantic-kernel-migration/processes/nested_process.py
+++ b/python/samples/semantic-kernel-migration/processes/nested_process.py
@@ -232,7 +232,7 @@ def _build_inner_workflow() -> WorkflowExecutor:
     inner_echo = InnerEchoExecutor()
     inner_repeat = InnerRepeatExecutor()
 
-    inner_workflow = WorkflowBuilder().set_start_executor(inner_echo).add_edge(inner_echo, inner_repeat).build()
+    inner_workflow = WorkflowBuilder(start_executor=inner_echo).add_edge(inner_echo, inner_repeat).build()
 
     return WorkflowExecutor(inner_workflow, id="inner_workflow")
@@ -246,8 +246,7 @@ async def run_agent_framework_nested_workflow(initial_message: str) -> Sequence[
     collector = CollectResultExecutor()
 
     outer_workflow = (
-        WorkflowBuilder()
-        .set_start_executor(kickoff)
+        WorkflowBuilder(start_executor=kickoff)
         .add_edge(kickoff, outer_echo)
         .add_edge(outer_echo, outer_repeat)
         .add_edge(outer_repeat, inner_executor)