mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-06 22:01:35 +00:00
fix: avoid stale followup drain callbacks (#31902) (thanks @Lanfei)
This commit is contained in:
@@ -47,6 +47,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Auto-reply/followup queue: avoid stale callback reuse across idle-window restarts by caching the followup runner only when a drain actually starts, preserving enqueue ordering after empty-finalize paths. (#31902) Thanks @Lanfei.
- Gateway/Heartbeat model reload: treat `models.*` and `agents.defaults.model` config updates as heartbeat hot-reload triggers so heartbeat picks up model changes without a full gateway restart. (#32046) Thanks @stakeswky.
- Slack/inbound debounce routing: isolate top-level non-DM message debounce keys by message timestamp to avoid cross-thread collisions, preserve DM batching, and flush pending top-level buffers before immediate non-debounce follow-ups to keep ordering stable. (#31951) Thanks @scoootscooob.
- OpenRouter/x-ai compatibility: skip `reasoning.effort` injection for `x-ai/*` models (for example Grok) so OpenRouter requests no longer fail with invalid-arguments errors on unsupported reasoning params. (#32054) Thanks @scoootscooob.
@@ -67,13 +67,13 @@ export function scheduleFollowupDrain(
|
|||||||
key: string,
|
key: string,
|
||||||
runFollowup: (run: FollowupRun) => Promise<void>,
|
runFollowup: (run: FollowupRun) => Promise<void>,
|
||||||
): void {
|
): void {
|
||||||
// Cache the callback so enqueueFollowupRun can restart drain after the queue
|
|
||||||
// has been deleted and recreated (the post-drain idle window race condition).
|
|
||||||
FOLLOWUP_RUN_CALLBACKS.set(key, runFollowup);
|
|
||||||
const queue = beginQueueDrain(FOLLOWUP_QUEUES, key);
|
const queue = beginQueueDrain(FOLLOWUP_QUEUES, key);
|
||||||
if (!queue) {
|
if (!queue) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
// Cache callback only when a drain actually starts. Avoid keeping stale
|
||||||
|
// callbacks around from finalize calls where no queue work is pending.
|
||||||
|
FOLLOWUP_RUN_CALLBACKS.set(key, runFollowup);
|
||||||
void (async () => {
|
void (async () => {
|
||||||
try {
|
try {
|
||||||
const collectState = { forceIndividualCollect: false };
|
const collectState = { forceIndividualCollect: false };
|
||||||
|
|||||||
@@ -1097,6 +1097,33 @@ describe("followup queue collect routing", () => {
|
|||||||
});
|
});
|
||||||
|
|
||||||
describe("followup queue drain restart after idle window", () => {
|
describe("followup queue drain restart after idle window", () => {
|
||||||
|
it("does not retain stale callbacks when scheduleFollowupDrain runs with an empty queue", async () => {
|
||||||
|
const key = `test-no-stale-callback-${Date.now()}`;
|
||||||
|
const settings: QueueSettings = { mode: "followup", debounceMs: 0, cap: 50 };
|
||||||
|
const staleCalls: FollowupRun[] = [];
|
||||||
|
const freshCalls: FollowupRun[] = [];
|
||||||
|
const drained = createDeferred<void>();
|
||||||
|
|
||||||
|
// Simulate finalizeWithFollowup calling schedule without pending queue items.
|
||||||
|
scheduleFollowupDrain(key, async (run) => {
|
||||||
|
staleCalls.push(run);
|
||||||
|
});
|
||||||
|
|
||||||
|
enqueueFollowupRun(key, createRun({ prompt: "after-empty-schedule" }), settings);
|
||||||
|
await new Promise<void>((resolve) => setImmediate(resolve));
|
||||||
|
expect(staleCalls).toHaveLength(0);
|
||||||
|
|
||||||
|
scheduleFollowupDrain(key, async (run) => {
|
||||||
|
freshCalls.push(run);
|
||||||
|
drained.resolve();
|
||||||
|
});
|
||||||
|
await drained.promise;
|
||||||
|
|
||||||
|
expect(staleCalls).toHaveLength(0);
|
||||||
|
expect(freshCalls).toHaveLength(1);
|
||||||
|
expect(freshCalls[0]?.prompt).toBe("after-empty-schedule");
|
||||||
|
});
|
||||||
|
|
||||||
it("processes a message enqueued after the drain empties and deletes the queue", async () => {
|
it("processes a message enqueued after the drain empties and deletes the queue", async () => {
|
||||||
const key = `test-idle-window-race-${Date.now()}`;
|
const key = `test-idle-window-race-${Date.now()}`;
|
||||||
const calls: FollowupRun[] = [];
|
const calls: FollowupRun[] = [];
|
||||||
|
|||||||
Reference in New Issue
Block a user