
Commit 3811b50

fix(router): change router block content to prompt (#1261)
* fix(router): remove prompt from router content
* fixed router

1 parent abb835d · commit 3811b50


4 files changed: +10 −11 lines


apps/docs/content/docs/blocks/router.mdx (2 additions, 2 deletions)

```diff
@@ -117,7 +117,7 @@ Your API key for the selected LLM provider. This is securely stored and used for

 After a router makes a decision, you can access its outputs:

-- **`<router.content>`**: Summary of the routing decision made
+- **`<router.prompt>`**: Summary of the routing prompt used
 - **`<router.selected_path>`**: Details of the chosen destination block
 - **`<router.tokens>`**: Token usage statistics from the LLM
 - **`<router.model>`**: The model used for decision-making
@@ -182,7 +182,7 @@ Confidence Threshold: 0.7 // Minimum confidence for routing
   <Tab>
     <ul className="list-disc space-y-2 pl-6">
       <li>
-        <strong>router.content</strong>: Summary of routing decision
+        <strong>router.prompt</strong>: Summary of routing prompt used
       </li>
       <li>
         <strong>router.selected_path</strong>: Details of chosen destination
```
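
For context, a hedged illustration of what the rename means for a consumer of the block's outputs. Only the key names are taken from the documented tags above; the variable name and the sample values are invented for illustration and are not Sim's actual runtime shape:

```ts
// Hypothetical shape of a router block's outputs after this commit,
// assembled from the documented tags above; sample values are invented.
const routerOutputs = {
  prompt: 'Route support questions to the FAQ branch', // was `content`
  selected_path: { blockId: 'faq-branch' },            // chosen destination block
  tokens: { prompt: 100, completion: 5, total: 105 },  // LLM token usage
  model: 'gpt-4o',                                     // model used for the decision
}

// A downstream block template would now reference <router.prompt>
// where it previously referenced <router.content>.
```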

apps/sim/blocks/blocks/router.ts (2 additions, 3 deletions)

```diff
@@ -18,7 +18,7 @@ const getCurrentOllamaModels = () => {

 interface RouterResponse extends ToolResponse {
   output: {
-    content: string
+    prompt: string
     model: string
     tokens?: {
       prompt?: number
@@ -198,7 +198,6 @@ export const RouterBlock: BlockConfig<RouterResponse> = {
       hidden: true,
       min: 0,
       max: 2,
-      value: () => '0.1',
     },
     {
       id: 'systemPrompt',
@@ -246,7 +245,7 @@ export const RouterBlock: BlockConfig<RouterResponse> = {
     },
   },
   outputs: {
-    content: { type: 'string', description: 'Routing response content' },
+    prompt: { type: 'string', description: 'Routing prompt used' },
     model: { type: 'string', description: 'Model used' },
     tokens: { type: 'json', description: 'Token usage' },
     cost: { type: 'json', description: 'Cost information' },
```
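
Read together, the three hunks rename the typed output field, drop the block-level temperature default, and update the declared outputs to match. A minimal sketch of the resulting response type: only `prompt`, `model`, and `tokens.prompt` appear verbatim in the hunk; the stub `ToolResponse` base and the `completion`/`total` token fields are assumptions inferred from the test expectations below:

```ts
// Stub base type, assumed for illustration; the real ToolResponse lives
// elsewhere in apps/sim and may carry more fields.
interface ToolResponse {
  success?: boolean
}

// RouterResponse as it reads after this commit: the output's primary
// string field is now `prompt` instead of `content`.
interface RouterResponse extends ToolResponse {
  output: {
    prompt: string // formerly `content: string`
    model: string
    tokens?: {
      prompt?: number
      completion?: number // assumed from the test expectations below
      total?: number      // assumed from the test expectations below
    }
  }
}
```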

apps/sim/executor/handlers/router/router-handler.test.ts (4 additions, 4 deletions)

```diff
@@ -119,7 +119,7 @@ describe('RouterBlockHandler', () => {
     const inputs = {
       prompt: 'Choose the best option.',
       model: 'gpt-4o',
-      temperature: 0.5,
+      temperature: 0.1,
     }

     const expectedTargetBlocks = [
@@ -168,11 +168,11 @@ describe('RouterBlockHandler', () => {
       model: 'gpt-4o',
       systemPrompt: 'Generated System Prompt',
       context: JSON.stringify([{ role: 'user', content: 'Choose the best option.' }]),
-      temperature: 0.5,
+      temperature: 0.1,
     })

     expect(result).toEqual({
-      content: 'Choose the best option.',
+      prompt: 'Choose the best option.',
       model: 'mock-model',
       tokens: { prompt: 100, completion: 5, total: 105 },
       cost: {
@@ -233,7 +233,7 @@ describe('RouterBlockHandler', () => {
     const requestBody = JSON.parse(fetchCallArgs[1].body)
     expect(requestBody).toMatchObject({
       model: 'gpt-4o',
-      temperature: 0,
+      temperature: 0.1,
     })
   })
```
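
All three test hunks pin the expected temperature to the new fixed `0.1` and rename the asserted output field. A minimal vitest sketch of the same assertion pattern, with `buildProviderRequest` as a hypothetical stand-in for the handler's request construction (not the repo's actual helper):

```ts
import { describe, expect, it } from 'vitest'

// Hypothetical stand-in for the handler's provider-request construction:
// after this commit the temperature is fixed at 0.1 regardless of inputs.
const buildProviderRequest = (config: { model: string }) => ({
  model: config.model,
  temperature: 0.1,
})

describe('router temperature', () => {
  it('pins temperature to 0.1', () => {
    expect(buildProviderRequest({ model: 'gpt-4o' })).toMatchObject({
      model: 'gpt-4o',
      temperature: 0.1,
    })
  })
})
```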

apps/sim/executor/handlers/router/router-handler.ts (2 additions, 2 deletions)

```diff
@@ -51,7 +51,7 @@ export class RouterBlockHandler implements BlockHandler {
       model: routerConfig.model,
       systemPrompt: systemPrompt,
       context: JSON.stringify(messages),
-      temperature: routerConfig.temperature,
+      temperature: 0.1,
       apiKey: routerConfig.apiKey,
       workflowId: context.workflowId,
     }
@@ -102,7 +102,7 @@ export class RouterBlockHandler implements BlockHandler {
     )

     return {
-      content: inputs.prompt,
+      prompt: inputs.prompt,
       model: result.model,
       tokens: {
         prompt: tokens.prompt || 0,
```
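
The handler now ignores any configured temperature in favor of a fixed `0.1`, and echoes the routing prompt under the renamed key. A hedged sketch of the output assembly after this change: `inputs.prompt`, `result.model`, and the `tokens.prompt || 0` fallback come from the hunk above, while the type annotations and the `completion` fallback are assumptions for illustration:

```ts
type RouterInputs = { prompt: string }
type ProviderResult = {
  model: string
  tokens?: { prompt?: number; completion?: number }
}

// Assembles the router block's output object as the handler does after
// this commit: the prompt is echoed under `prompt` (formerly `content`).
function buildRouterOutput(inputs: RouterInputs, result: ProviderResult) {
  const tokens: { prompt?: number; completion?: number } = result.tokens ?? {}
  return {
    prompt: inputs.prompt, // was `content: inputs.prompt`
    model: result.model,
    tokens: {
      prompt: tokens.prompt || 0,
      completion: tokens.completion || 0, // assumed symmetric fallback
    },
  }
}
```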
