diff --git a/strix/llm/llm.py b/strix/llm/llm.py
index 0cace0e..507e8c8 100644
--- a/strix/llm/llm.py
+++ b/strix/llm/llm.py
@@ -188,6 +188,9 @@ class LLM:
             conversation_history.extend(compressed)
             messages.extend(compressed)
 
+        if messages and messages[-1].get("role") == "assistant":
+            messages.append({"role": "user", "content": "Continue the task."})
+
         if self._is_anthropic() and self.config.enable_prompt_caching:
             messages = self._add_cache_control(messages)
 
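Editor's note on the llm.py hunk (a sketch, not part of the patch): after compression the transcript can end on the assistant-role summary, and some chat APIs reject a request whose final message is from the assistant or, as with Anthropic's Messages API, treat a trailing assistant message as a prefill to be continued rather than a turn to answer. The new guard appends a neutral user turn so the next completion proceeds normally. A minimal standalone illustration; the helper name ensure_user_turn is hypothetical:

    def ensure_user_turn(messages: list[dict]) -> list[dict]:
        # Append a neutral user turn when the transcript ends on an assistant message.
        if messages and messages[-1].get("role") == "assistant":
            return [*messages, {"role": "user", "content": "Continue the task."}]
        return messages

    # The empty-list guard matters: messages[-1] alone would raise IndexError.
    assert ensure_user_turn([]) == []
    assert ensure_user_turn([{"role": "assistant", "content": "..."}])[-1]["role"] == "user"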
diff --git a/strix/llm/memory_compressor.py b/strix/llm/memory_compressor.py
index 28730e8..8cad510 100644
--- a/strix/llm/memory_compressor.py
+++ b/strix/llm/memory_compressor.py
@@ -91,7 +91,7 @@ def _summarize_messages(
     if not messages:
         empty_summary = "{text}"
         return {
-            "role": "assistant",
+            "role": "user",
             "content": empty_summary.format(text="No messages to summarize"),
         }
 
@@ -123,7 +123,7 @@ def _summarize_messages(
             return messages[0]
         summary_msg = "{text}"
         return {
-            "role": "assistant",
+            "role": "user",
             "content": summary_msg.format(count=len(messages), text=summary),
         }
     except Exception:
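Editor's note on the memory_compressor.py hunks (a sketch, not part of the patch): the likely motivation for switching the summary role from "assistant" to "user" is role alternation. An assistant-role summary followed by the model's next reply yields two consecutive assistant messages, which strict providers reject (Anthropic's Messages API, for example, has historically required user/assistant alternation); it also pairs with the llm.py guard above, which no longer needs to fire when the summary itself is a user turn. A small self-check, assuming plain role/content dicts:

    def roles_alternate(messages: list[dict]) -> bool:
        # True when no two adjacent non-system messages share a role.
        roles = [m["role"] for m in messages if m.get("role") != "system"]
        return all(a != b for a, b in zip(roles, roles[1:]))

    history = [
        {"role": "user", "content": "Summary of 42 earlier messages: ..."},  # summary emitted as a user turn
        {"role": "assistant", "content": "Continuing from the summary."},
    ]
    assert roles_alternate(history)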