Random Routing Example
Thanks to @Borsuc for providing this example!
Randomly choosing between LLMs or other tasks
{
"type": "automata",
"id": "random_llm_example",
"initial": "home_state",
"inputs": {},
"outputs": {},
"context": {
"user_prompt": "",
"llm_result": ""
},
"transitions": {},
"states": {
"home_state": {
"render": {
"text": "Start by saying something..."
},
"transitions": {
"CHAT": "chat_state"
}
},
"chat_state": {
"inputs": {
"user_msg": {
"type": "IM",
"user_input": false
}
},
"outputs": {
"context.user_prompt": "{{user_msg}}"
},
"transitions": {
"ALWAYS": "random_llm_state"
}
},
"random_llm_state": {
"outputs": {
"rng": "{{3*Math.random()}}"
},
"transitions": {
"ALWAYS": [
{ "target": "llm_a_state", "condition": "{{rng<1}}" },
{ "target": "llm_b_state", "condition": "{{rng<2}}" },
{ "target": "llm_c_state", "condition": "{{true}}" }
]
}
},
"llm_a_state": {
"tasks": [
{
"name": "mixtral8x7b_instruct",
"module_type": "LlmWidgetModule",
"module_config": {
"widget_id": "1744218061138825216",
"system_prompt": "You are a friendly assistant.",
"user_prompt": "{{context.user_prompt}}",
"memory": "",
"top_p": 1.0,
"temperature": 0.5,
"frequency_penalty": 0,
"presence_penalty": 0,
"output_name": "result"
}
}
],
"outputs": { "context.llm_result": "{{result}}" },
"transitions": { "ALWAYS": "post_llm_state" }
},
"llm_b_state": {
"tasks": [
{
"name": "slerp_l2_13b",
"module_type": "LlmWidgetModule",
"module_config": {
"widget_id": "1744214446286311424",
"system_prompt": "You are an annoying tsundere assistant.",
"user_prompt": "{{context.user_prompt}}",
"memory": "",
"top_p": 1.0,
"temperature": 0.75,
"frequency_penalty": 0,
"presence_penalty": 0,
"output_name": "result"
}
}
],
"outputs": { "context.llm_result": "{{result}}" },
"transitions": { "ALWAYS": "post_llm_state" }
},
"llm_c_state": {
"tasks": [
{
"name": "airoboros_70b",
"module_type": "LlmWidgetModule",
"module_config": {
"widget_id": "1744214372646916096",
"system_prompt": "You are a cool dude answering the user with swag.",
"user_prompt": "{{context.user_prompt}}",
"memory": "",
"top_p": 1.0,
"temperature": 0.5,
"frequency_penalty": 0,
"presence_penalty": 0,
"output_name": "result"
}
}
],
"outputs": { "context.llm_result": "{{result}}" },
"transitions": { "ALWAYS": "post_llm_state" }
},
"post_llm_state": {
"render": {
"text": "{{context.llm_result.trim().replace(/[áàãâäå]/g, 'a').replace(/ç/g, 'c').replace(/ð/g, 'd').replace(/éèêë/g, 'e').replace(/íìîï/g, 'i').replace(/ñ/g, 'n').replace(/óòôöõø/g, 'o').replace(/úùûü/g, 'u').replace(/ýÿ/g, 'y').replace(/æ/g, 'ae').replace(/œ/g, 'oe').replace(/ß/g, 'ss')}}"
},
"transitions": {
"CHAT": "chat_state"
}
}
}
}
The random chooser state
The LLM states
The post-processing state
Last updated