Previously, requests for remote files were simply added to the queue (pointed
to by request_queue_head) without any transfer actually taking place[1], even
though subsequent code may rely on those remote files being present (e.g. the
setup_revisions invocation).

The code that sends out the requests on the request queue is refactored into
the function run_request_queue. After the get_dav_remote_heads invocation
(i.e. after the fetch requests have been added to the queue), the requests on
the queue are sent out by calling run_request_queue.

Since run_request_queue registers a fill function before the pushing checks
take place, requests could be sent out prematurely, leading to accidental,
unwanted pushes. The flag is_running_queue is introduced to prevent this from
occurring: fill_active_slot checks is_running_queue and does not proceed to
send out requests unless it is set.

Signed-off-by: Tay Ray Chuan <rctay89@xxxxxxxxx>
---
 http-push.c          |   37 ++++++++++++++++++++++++++-----------
 t/t5540-http-push.sh |    4 ++--
 2 files changed, 28 insertions(+), 13 deletions(-)

diff --git a/http-push.c b/http-push.c
index 0b12ffe..218e263 100644
--- a/http-push.c
+++ b/http-push.c
@@ -846,11 +846,12 @@ static void finish_request(struct transfer_request *request)
 }
 
 #ifdef USE_CURL_MULTI
+static int is_running_queue;
 static int fill_active_slot(void *unused)
 {
         struct transfer_request *request;
 
-        if (aborted)
+        if (aborted || !is_running_queue)
                 return 0;
 
         for (request = request_queue_head; request; request = request->next) {
@@ -2174,6 +2175,25 @@ static int delete_remote_branch(char *pattern, int force)
         return 0;
 }
 
+void run_request_queue()
+{
+#ifdef USE_CURL_MULTI
+        is_running_queue = 1;
+        fill_active_slots();
+        add_fill_function(NULL, fill_active_slot);
+#endif
+        do {
+                finish_all_active_slots();
+#ifdef USE_CURL_MULTI
+                fill_active_slots();
+#endif
+        } while (request_queue_head && !aborted);
+
+#ifdef USE_CURL_MULTI
+        is_running_queue = 0;
+#endif
+}
+
 int main(int argc, char **argv)
 {
         struct transfer_request *request;
@@ -2278,6 +2298,8 @@ int main(int argc, char **argv)
                 repo->url = rewritten_url;
         }
 
+        is_running_queue = 0;
+
         /* Verify DAV compliance/lock support */
         if (!locking_available()) {
                 rc = 1;
@@ -2307,6 +2329,7 @@ int main(int argc, char **argv)
         local_refs = get_local_heads();
         fprintf(stderr, "Fetching remote heads...\n");
         get_dav_remote_heads();
+        run_request_queue();
 
         /* Remove a remote branch if -d or -D was specified */
         if (delete_branch) {
@@ -2436,16 +2459,8 @@ int main(int argc, char **argv)
                 if (objects_to_send)
                         fprintf(stderr, "    sending %d objects\n",
                                 objects_to_send);
-#ifdef USE_CURL_MULTI
-                fill_active_slots();
-                add_fill_function(NULL, fill_active_slot);
-#endif
-                do {
-                        finish_all_active_slots();
-#ifdef USE_CURL_MULTI
-                        fill_active_slots();
-#endif
-                } while (request_queue_head && !aborted);
+
+                run_request_queue();
 
                 /* Update the remote branch if all went well */
                 if (aborted || !update_remote(ref->new_sha1, ref_lock))
diff --git a/t/t5540-http-push.sh b/t/t5540-http-push.sh
index ad0f14b..f4a2cf6 100755
--- a/t/t5540-http-push.sh
+++ b/t/t5540-http-push.sh
@@ -67,7 +67,7 @@ test_expect_success ' push to remote repository with unpacked refs' '
          test $HEAD = $(git rev-parse --verify HEAD))
 '
 
-test_expect_failure 'http-push fetches unpacked objects' '
+test_expect_success 'http-push fetches unpacked objects' '
 
         cp -R "$HTTPD_DOCUMENT_ROOT_PATH"/test_repo.git \
         "$HTTPD_DOCUMENT_ROOT_PATH"/test_repo_unpacked.git &&
@@ -83,7 +83,7 @@ test_expect_failure 'http-push fetches unpacked objects' '
          git push -f -v $HTTPD_URL/test_repo_unpacked.git master)
 '
 
-test_expect_failure 'http-push fetches packed objects' '
+test_expect_success 'http-push fetches packed objects' '
 
         cp -R "$HTTPD_DOCUMENT_ROOT_PATH"/test_repo.git \
         "$HTTPD_DOCUMENT_ROOT_PATH"/test_repo_packed.git &&
--
1.6.3.1
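
For readers skimming the thread, here is a minimal, self-contained sketch of
the queue-gating pattern the patch introduces. It is not git's actual code:
the curl slot machinery is replaced by a toy single-threaded queue, and
add_fetch_request() plus the printf "transfer" are hypothetical stand-ins
used only for illustration.

/*
 * Sketch of the is_running_queue gating pattern (assumptions noted above;
 * not http-push.c itself).
 */
#include <stdio.h>
#include <stdlib.h>

struct transfer_request {
        const char *url;
        struct transfer_request *next;
};

static struct transfer_request *request_queue_head;
static int aborted;
static int is_running_queue;

/* Analogue of fill_active_slot(): refuses to start any transfer unless
 * the queue is explicitly being run. */
static int fill_active_slot(void)
{
        struct transfer_request *request;

        if (aborted || !is_running_queue)
                return 0;

        request = request_queue_head;
        if (!request)
                return 0;
        request_queue_head = request->next;
        printf("fetching %s\n", request->url); /* stand-in for the transfer */
        free(request);
        return 1;
}

/* Analogue of run_request_queue(): only here is the flag raised, so
 * requests that were merely queued earlier cannot go out by accident. */
static void run_request_queue(void)
{
        is_running_queue = 1;
        while (request_queue_head && !aborted)
                fill_active_slot();
        is_running_queue = 0;
}

/* Hypothetical helper that queues a request without sending it. */
static void add_fetch_request(const char *url)
{
        struct transfer_request *request = malloc(sizeof(*request));
        if (!request)
                exit(1);
        request->url = url;
        request->next = request_queue_head;
        request_queue_head = request;
}

int main(void)
{
        add_fetch_request("objects/info/packs");
        fill_active_slot();     /* no-op: is_running_queue is still 0 */
        run_request_queue();    /* the queued request is sent out here */
        return 0;
}

The point of the flag is that the fill function can be registered early, yet
nothing is drained from the queue until run_request_queue() explicitly turns
the queue on, which mirrors why the accidental-push window is closed.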