Skip to content

Commit

Permalink
proxy: fix race condition leading to hang
Browse files Browse the repository at this point in the history
If a Lua function schedules extra requests after an initial set of
requests, we need to use a workaround to ensure the second set of
requests is actually submitted to the proxy backends.

This workaround was in the wrong place. If connections are being cut and
reconnected between batches of requests, it's possible for the workaround
to trigger on a connection object that has since moved to another
thread.

This patch tightens up the workaround to run before the client
connection has a chance to resume.
  • Loading branch information
dormando committed May 2, 2024
1 parent cf132f5 commit 4d9b0e0
Showing 1 changed file with 3 additions and 10 deletions.
13 changes: 3 additions & 10 deletions proxy_luafgen.c
Original file line number Diff line number Diff line change
Expand Up @@ -64,9 +64,6 @@ static void mcp_funcgen_wait_handler(const int fd, const short which, void *arg)
rctx->wait_mode = QWAIT_IDLE;

mcp_resume_rctx_from_cb(rctx);

// like proxy_return_rqu_dummy_cb, need the HACK section.
_mcp_queue_hack(rctx->c);
}

// For describing functions which generate functions which can execute
Expand Down Expand Up @@ -907,6 +904,7 @@ static void mcp_resume_rctx_from_cb(mcp_rcontext_t *rctx) {
mcp_funcgen_return_rctx(rctx);
} else if (res == LUA_YIELD) {
// normal.
_mcp_queue_hack(rctx->c);
} else {
lua_pop(rctx->Lc, 1); // drop the error message.
_mcp_resume_rctx_process_error(rctx, rqu);
Expand All @@ -930,6 +928,8 @@ static void mcp_resume_rctx_from_cb(mcp_rcontext_t *rctx) {
// call re-add directly since we're already in the worker thread.
conn_worker_readd(rctx->c);
}
} else if (res == LUA_YIELD) {
_mcp_queue_hack(rctx->c);
}
}
}
Expand All @@ -939,7 +939,6 @@ static void mcp_resume_rctx_from_cb(mcp_rcontext_t *rctx) {
static void proxy_return_rqu_dummy_cb(io_pending_t *pending) {
io_pending_proxy_t *p = (io_pending_proxy_t *)pending;
mcp_rcontext_t *rctx = p->rctx;
conn *c = rctx->c;

rctx->pending_reqs--;
assert(rctx->pending_reqs > -1);
Expand All @@ -949,8 +948,6 @@ static void proxy_return_rqu_dummy_cb(io_pending_t *pending) {
mcp_resume_rctx_from_cb(rctx);

do_cache_free(p->thread->io_cache, p);

_mcp_queue_hack(c);
}

void mcp_process_rctx_wait(mcp_rcontext_t *rctx, int handle) {
Expand Down Expand Up @@ -1089,8 +1086,6 @@ int mcp_process_rqueue_return(mcp_rcontext_t *rctx, int handle, mcp_resp_t *res)
static void proxy_return_rqu_cb(io_pending_t *pending) {
io_pending_proxy_t *p = (io_pending_proxy_t *)pending;
mcp_rcontext_t *rctx = p->rctx;
// Hold the client object before we potentially return the rctx below.
conn *c = rctx->c;

if (p->client_resp) {
mcp_process_rqueue_return(rctx, p->queue_handle, p->client_resp);
Expand All @@ -1105,8 +1100,6 @@ static void proxy_return_rqu_cb(io_pending_t *pending) {
}

do_cache_free(p->thread->io_cache, p);

_mcp_queue_hack(c);
}

void mcp_run_rcontext_handle(mcp_rcontext_t *rctx, int handle) {
Expand Down

0 comments on commit 4d9b0e0

Please sign in to comment.