From d43df21d1a4068d320e3e01e92323e87ad54f171 Mon Sep 17 00:00:00 2001
From: Victoria Dye <vdye@github.com>
Date: Tue, 19 Sep 2023 18:20:32 -0700
Subject: [PATCH 001/207] sparse-index.c: fix use of index hashes in
 expand_index

In ac8acb4f2c7 (sparse-index: complete partial expansion, 2022-05-23),
'expand_index()' was updated to expand the index to a given pathspec.
However, the 'path_matches_pattern_list()' method used to facilitate this
has the side effect of initializing or updating the index hash variables
('name_hash', 'dir_hash', and 'name_hash_initialized'). This operation is
performed on 'istate', though, not 'full'; as a result, the initialized
hashes are later overwritten when copied from 'full'. To ensure the correct
hashes are in 'istate' after the index expansion, change the argument
passed to 'path_matches_pattern_list()' from 'istate' to 'full'.

Note that this does not fully solve the problem. If 'istate' does not have
an initialized 'name_hash' when its contents are copied to 'full',
initialized hashes will be copied back into 'istate' but
'name_hash_initialized' will be 0. Therefore, we also need to copy
'full->name_hash_initialized' back to 'istate' after the index expansion is
complete.

Signed-off-by: Victoria Dye <vdye@github.com>
---
 sparse-index.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/sparse-index.c b/sparse-index.c
index 5634abafaa07ed..0717f6260fc845 100644
--- a/sparse-index.c
+++ b/sparse-index.c
@@ -405,7 +405,7 @@ void expand_index(struct index_state *istate, struct pattern_list *pl)
 		if (pl &&
 		    path_matches_pattern_list(ce->name, ce->ce_namelen,
 					      NULL, &dtype,
-					      pl, istate) == NOT_MATCHED) {
+					      pl, full) == NOT_MATCHED) {
 			set_index_entry(full, full->cache_nr++, ce);
 			continue;
 		}
@@ -433,6 +433,7 @@ void expand_index(struct index_state *istate, struct pattern_list *pl)
 	}
 
 	/* Copy back into original index. */
+	istate->name_hash_initialized = full->name_hash_initialized;
 	memcpy(&istate->name_hash, &full->name_hash, sizeof(full->name_hash));
 	memcpy(&istate->dir_hash, &full->dir_hash, sizeof(full->dir_hash));
 	istate->sparse_index = pl ? INDEX_PARTIALLY_SPARSE : INDEX_EXPANDED;

From ed3fe89027b6b13f81027486b1253fd587de18e6 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Tue, 16 Jul 2024 09:17:15 -0400
Subject: [PATCH 002/207] t: remove advice from some tests

These appear to be tests customized for microsoft/git, since they break
there without these changes; the changes are not needed upstream.

Signed-off-by: Derrick Stolee <stolee@gmail.com>
---
 t/t1091-sparse-checkout-builtin.sh | 1 +
 t/t7002-mv-sparse-checkout.sh      | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/t/t1091-sparse-checkout-builtin.sh b/t/t1091-sparse-checkout-builtin.sh
index ab3a105ffff253..29838259bb4fae 100755
--- a/t/t1091-sparse-checkout-builtin.sh
+++ b/t/t1091-sparse-checkout-builtin.sh
@@ -701,6 +701,7 @@ test_expect_success 'pattern-checks: contained glob characters' '
 
 test_expect_success BSLASHPSPEC 'pattern-checks: escaped characters' '
 	git clone repo escaped &&
+	git -C escaped config advice.sparseIndexExpanded false &&
 	TREEOID=$(git -C escaped rev-parse HEAD:folder1) &&
 	NEWTREE=$(git -C escaped mktree <<-EOF
 	$(git -C escaped ls-tree HEAD)
diff --git a/t/t7002-mv-sparse-checkout.sh b/t/t7002-mv-sparse-checkout.sh
index 4d3f221224fb39..602c6a064b2ce6 100755
--- a/t/t7002-mv-sparse-checkout.sh
+++ b/t/t7002-mv-sparse-checkout.sh
@@ -155,6 +155,9 @@ test_expect_success 'mv refuses to move sparse-to-non-sparse' '
 
 test_expect_success 'recursive mv refuses to move (possible) sparse' '
 	test_when_finished rm -rf b c e sub2 &&
+
+	git config advice.sparseIndexExpanded false &&
+
 	git reset --hard &&
 	# Without cone mode, "sub" and "sub2" do not match
 	git sparse-checkout set sub/dir sub2/dir &&

From 9a3602c684b939d6a7289b34539be09cd04b4db8 Mon Sep 17 00:00:00 2001
From: Victoria Dye <vdye@github.com>
Date: Wed, 20 Sep 2023 13:12:30 -0700
Subject: [PATCH 003/207] t1092: add test for untracked files and directories

Add a test verifying that sparse-checkout (with and without sparse index
enabled) treats untracked files & directories correctly when changing sparse
patterns. Specifically, it ensures that 'git sparse-checkout set'

* deletes empty directories outside the sparse cone
* does _not_ delete untracked files outside the sparse cone

Signed-off-by: Victoria Dye <vdye@github.com>
---
 t/t1092-sparse-checkout-compatibility.sh | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh
index a4c7c41fc00aa3..c9d1a2c642adc7 100755
--- a/t/t1092-sparse-checkout-compatibility.sh
+++ b/t/t1092-sparse-checkout-compatibility.sh
@@ -317,6 +317,22 @@ test_expect_success 'root directory cannot be sparse' '
 	test_cmp expect actual
 '
 
+test_expect_success 'sparse-checkout with untracked files and dirs' '
+	init_repos &&
+
+	# Empty directories outside sparse cone are deleted
+	run_on_sparse mkdir -p deep/empty &&
+	test_sparse_match git sparse-checkout set folder1 &&
+	test_must_be_empty sparse-checkout-err &&
+	run_on_sparse test_path_is_missing deep &&
+
+	# Untracked files outside sparse cone are not deleted
+	run_on_sparse touch folder1/another &&
+	test_sparse_match git sparse-checkout set folder2 &&
+	grep "directory ${SQ}folder1/${SQ} contains untracked files" sparse-checkout-err &&
+	run_on_sparse test_path_exists folder1/another
+'
+
 test_expect_success 'status with options' '
 	init_repos &&
 	test_sparse_match ls &&

From d5da60b467aca3a8d37978da10d48caceba54a66 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Fri, 25 Aug 2023 09:58:27 -0400
Subject: [PATCH 004/207] t5300: confirm failure of git index-pack when non-idx
 suffix requested

Add a test case demonstrating that `git index-pack -o <idx-path> <pack-path>`
fails if <idx-path> does not end in ".idx" when `--rev-index` is
enabled.

In e37d0b8730b (builtin/index-pack.c: write reverse indexes, 2021-01-25)
we learned to create `.rev` reverse indexes in addition to `.idx` index
files.  The `.rev` file pathname is constructed by replacing the suffix
on the `.idx` file.  The code assumes a hard-coded "idx" suffix.

In a8dd7e05b1c (config: enable `pack.writeReverseIndex` by default, 2023-04-12)
reverse indexes were enabled by default.

If the `-o <idx-path>` argument is used, the index file may have a
different suffix.  This causes an error when index-pack tries to create the
reverse index pathname.

The test here demonstrates the failure.  (The test forces `--rev-index`
to avoid interaction with `GIT_TEST_NO_WRITE_REV_INDEX` during CI runs.)

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 t/t5300-pack-object.sh | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/t/t5300-pack-object.sh b/t/t5300-pack-object.sh
index 1fffa7209b785b..29c0b3dd2bf78a 100755
--- a/t/t5300-pack-object.sh
+++ b/t/t5300-pack-object.sh
@@ -357,6 +357,31 @@ test_expect_success 'build pack index for an existing pack' '
 	:
 '
 
+# The `--rev-index` option of `git index-pack` is now the default, so
+# a `foo.rev` REV file will be created when a `foo.idx` IDX file is
+# created.  Normally, these pathnames are based upon the `foo.pack`
+# PACK file pathname.
+#
+# However, the `-o` option lets you set the pathname of the IDX file
+# independent of the PACK file.
+#
+# Verify what happens if these suffixes are changed.
+#
+test_expect_success 'complain about index name' '
+	# Normal case { .pack, .idx, .rev }
+	cat test-1-${packname_1}.pack >test-complain-0.pack &&
+	git index-pack -o test-complain-0.idx --rev-index test-complain-0.pack &&
+	test -f test-complain-0.idx &&
+	test -f test-complain-0.rev &&
+
+	# Non .idx suffix
+	cat test-1-${packname_1}.pack >test-complain-1.pack &&
+	test_must_fail git index-pack -o test-complain-1.idx-suffix --rev-index test-complain-1.pack 2>err &&
+	grep "does not end" err &&
+	! test -f test-complain-1.idx-suffix &&
+	! test -f test-complain-1.rev
+'
+
 test_expect_success 'unpacking with --strict' '
 
 	for j in a b c d e f g

From f27fd55b79c2ae6b027bc44d90a1ca1b87593768 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Fri, 25 Aug 2023 11:06:28 -0400
Subject: [PATCH 005/207] index-pack: disable rev-index if index file has non
 .idx suffix

Teach index-pack to silently omit the reverse index if the
index file does not have the standard ".idx" suffix.

In e37d0b8730b (builtin/index-pack.c: write reverse indexes, 2021-01-25)
we learned to create `.rev` reverse indexes in addition to `.idx` index
files.  The `.rev` file pathname is constructed by replacing the suffix
on the `.idx` file.  The code assumes a hard-coded "idx" suffix.

In a8dd7e05b1c (config: enable `pack.writeReverseIndex` by default, 2023-04-12)
reverse indexes were enabled by default.

If the `-o <idx-path>` argument is used, the index file may have a
different suffix.  This causes an error when index-pack tries to create the
reverse index pathname.

Since we do not know why the user requested a non-standard suffix for
the index, we cannot guess what the proper corresponding suffix should
be for the reverse index.  So we disable it.
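
For illustration, assuming a pack file named "foo.pack" (the file
names here are only examples), the behavior after this change is:

    # with a ".idx" output name, both the index and the reverse
    # index are written
    $ git index-pack -o foo.idx --rev-index foo.pack
    # writes foo.idx and foo.rev

    # with a non-".idx" output name, the reverse index is now
    # silently skipped instead of causing an error
    $ git index-pack -o foo.index --rev-index foo.pack
    # writes foo.index; no .rev file is created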

The t5300 test has been updated to verify that we no longer error
out and that the .rev file is not created.

TODO We could warn the user that we skipped it (perhaps only if they
TODO explicitly requested `--rev-index` on the command line).
TODO
TODO Ideally, we should add an `--rev-index-path=<path>` argument
TODO or change `--rev-index` to take a pathname.
TODO
TODO I'll leave these questions for a future series.

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 builtin/index-pack.c   | 4 ++++
 t/t5300-pack-object.sh | 7 +++----
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index d773809c4c9660..99c7fdc744d305 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -1890,6 +1890,7 @@ int cmd_index_pack(int argc,
 	unsigned foreign_nr = 1;	/* zero is a "good" value, assume bad */
 	int report_end_of_input = 0;
 	int hash_algo = 0;
+	int dash_o = 0;
 
 	/*
 	 * index-pack never needs to fetch missing objects except when
@@ -1983,6 +1984,7 @@ int cmd_index_pack(int argc,
 				if (index_name || (i+1) >= argc)
 					usage(index_pack_usage);
 				index_name = argv[++i];
+				dash_o = 1;
 			} else if (starts_with(arg, "--index-version=")) {
 				char *c;
 				opts.version = strtoul(arg + 16, &c, 10);
@@ -2036,6 +2038,8 @@ int cmd_index_pack(int argc,
 		repo_set_hash_algo(the_repository, GIT_HASH_SHA1);
 
 	opts.flags &= ~(WRITE_REV | WRITE_REV_VERIFY);
+	if (rev_index && dash_o && !ends_with(index_name, ".idx"))
+		rev_index = 0;
 	if (rev_index) {
 		opts.flags |= verify ? WRITE_REV_VERIFY : WRITE_REV;
 		if (index_name)
diff --git a/t/t5300-pack-object.sh b/t/t5300-pack-object.sh
index 29c0b3dd2bf78a..d4952970c315a9 100755
--- a/t/t5300-pack-object.sh
+++ b/t/t5300-pack-object.sh
@@ -374,11 +374,10 @@ test_expect_success 'complain about index name' '
 	test -f test-complain-0.idx &&
 	test -f test-complain-0.rev &&
 
-	# Non .idx suffix
+	# Non .idx suffix -- implicitly omits the .rev
 	cat test-1-${packname_1}.pack >test-complain-1.pack &&
-	test_must_fail git index-pack -o test-complain-1.idx-suffix --rev-index test-complain-1.pack 2>err &&
-	grep "does not end" err &&
-	! test -f test-complain-1.idx-suffix &&
+	git index-pack -o test-complain-1.idx-suffix --rev-index test-complain-1.pack &&
+	test -f test-complain-1.idx-suffix &&
 	! test -f test-complain-1.rev
 '
 

From a83676d9c97ec8513f78ed1e52700fa5b539fc5c Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Wed, 26 Jun 2024 12:27:41 -0400
Subject: [PATCH 006/207] trace2: prefetch value of GIT_TRACE2_DST_DEBUG at
 startup

Prefetch the value of GIT_TRACE2_DST_DEBUG during startup and before
we try to open any Trace2 destination pathnames.

Normally, Trace2 always silently fails if a destination target
cannot be opened so that it doesn't affect the execution of a
Git command.  The command should run normally, but just not
generate any trace data.  This can make it difficult to debug
a telemetry setup, since the user doesn't know why telemetry
isn't being generated.  If the environment variable
GIT_TRACE2_DST_DEBUG is true, the Trace2 startup will print
a warning message with the `errno` to make debugging easier.

However, on Windows, looking up the env variable resets `errno`
so the warning message always ends with `...tracing: No error`
which is not very helpful.

Prefetch the env variable at startup.  This avoids the need
to update each call-site to capture `errno` in the usual
`saved-errno` variable.
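
To see the warning in action, one can point a Trace2 target at a path
that cannot be opened (the path below is only an example):

    $ export GIT_TRACE2_DST_DEBUG=1
    $ GIT_TRACE2_EVENT=/nonexistent/dir/trace.json git status
    # a warning explaining why the trace target could not be opened
    # is printed; without this prefetch, the message on Windows would
    # end in "No error" instead of reporting the real errno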

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 trace2.c         | 10 ++++++++++
 trace2/tr2_dst.c |  2 +-
 trace2/tr2_dst.h | 12 ++++++++++++
 3 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/trace2.c b/trace2.c
index 82d16e2783d986..95bcd809e9eee3 100644
--- a/trace2.c
+++ b/trace2.c
@@ -227,6 +227,16 @@ void trace2_initialize_fl(const char *file, int line)
 	if (!tr2_tgt_want_builtins())
 		return;
 	trace2_enabled = 1;
+
+	/*
+	 * getenv() on Windows stomps on `errno` and the code in
+	 * tr2_dst.c verifies that warnings are enabled before
+	 * formatting the warning message (and calling strerror()).
+	 * So prefetch the value from the environment before we need
+	 * it.
+	 */
+	tr2_dst_want_warning();
+
 	if (!git_env_bool("GIT_TRACE2_REDACT", 1))
 		trace2_redact = 0;
 
diff --git a/trace2/tr2_dst.c b/trace2/tr2_dst.c
index 5be892cd5cdefa..61579f24bdbde3 100644
--- a/trace2/tr2_dst.c
+++ b/trace2/tr2_dst.c
@@ -24,7 +24,7 @@
  */
 static int tr2env_max_files = 0;
 
-static int tr2_dst_want_warning(void)
+int tr2_dst_want_warning(void)
 {
 	static int tr2env_dst_debug = -1;
 
diff --git a/trace2/tr2_dst.h b/trace2/tr2_dst.h
index b1a8c144e073ba..4166539eb9e100 100644
--- a/trace2/tr2_dst.h
+++ b/trace2/tr2_dst.h
@@ -35,4 +35,16 @@ int tr2_dst_trace_want(struct tr2_dst *dst);
  */
 void tr2_dst_write_line(struct tr2_dst *dst, struct strbuf *buf_line);
 
+/*
+ * Return true if we want warning messages when trying to open a
+ * destination.
+ *
+ * (Trace2 always silently fails if a target cannot be opened so that
+ * we don't affect the execution of the Git command, but it is helpful
+ * for debugging telemetry configuration if we log warning messages
+ * when trying to open a target. This is controlled by another config
+ * value.)
+ */
+int tr2_dst_want_warning(void);
+
 #endif /* TR2_DST_H */

From 067993673df9d00f59196255abe824d00f219e6e Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Mon, 29 Apr 2024 12:47:27 -0400
Subject: [PATCH 007/207] survey: calculate more stats on refs

Calculate the number of symrefs, the counts of loose vs. packed refs,
and the maximum/accumulated refname lengths for local vs. remote refs.

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
Signed-off-by: Johannes Schindelin <Johannes.Schindelin@gmx.de>
---
 builtin/survey.c      | 82 +++++++++++++++++++++++++++++++++++++++++--
 t/t8100-git-survey.sh | 21 +++++++----
 2 files changed, 94 insertions(+), 9 deletions(-)

diff --git a/builtin/survey.c b/builtin/survey.c
index a86b728d6a2671..4ea1218e39646d 100644
--- a/builtin/survey.c
+++ b/builtin/survey.c
@@ -15,9 +15,7 @@
 #include "revision.h"
 #include "strbuf.h"
 #include "strvec.h"
-#include "tag.h"
 #include "trace2.h"
-#include "color.h"
 
 static const char * const survey_usage[] = {
 	N_("(EXPERIMENTAL!) git survey <options>"),
@@ -53,6 +51,22 @@ struct survey_report_ref_summary {
 	size_t tags_annotated_nr;
 	size_t others_nr;
 	size_t unknown_nr;
+
+	size_t cnt_symref;
+
+	size_t cnt_packed;
+	size_t cnt_loose;
+
+	/*
+	 * Measure the length of the refnames.  We can look for
+	 * potential platform limits.  The partial sums may help us
+	 * estimate the size of a haves/wants conversation, since each
+	 * refname and a SHA must be transmitted.
+	 */
+	size_t len_max_local_refname;
+	size_t len_sum_local_refnames;
+	size_t len_max_remote_refname;
+	size_t len_sum_remote_refnames;
 };
 
 struct survey_report_object_summary {
@@ -380,6 +394,42 @@ static void survey_report_plaintext_refs(struct survey_context *ctx)
 		free(fmt);
 	}
 
+	/*
+	 * SymRefs are somewhat orthogonal to the above classification (e.g.
+	 * "HEAD" --> detached and "refs/remotes/origin/HEAD" --> remote) so the
+	 * above classified counts will already include them, but it is less
+	 * confusing to display them here than to create a whole new section.
+	 */
+	if (ctx->report.refs.cnt_symref) {
+		char *fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)refs->cnt_symref);
+		insert_table_rowv(&table, _("Symbolic refs"), fmt, NULL);
+		free(fmt);
+	}
+
+	if (ctx->report.refs.cnt_loose || ctx->report.refs.cnt_packed) {
+		char *fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)refs->cnt_loose);
+		insert_table_rowv(&table, _("Loose refs"), fmt, NULL);
+		free(fmt);
+		fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)refs->cnt_packed);
+		insert_table_rowv(&table, _("Packed refs"), fmt, NULL);
+		free(fmt);
+	}
+
+	if (ctx->report.refs.len_max_local_refname || ctx->report.refs.len_max_remote_refname) {
+		char *fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)refs->len_max_local_refname);
+		insert_table_rowv(&table, _("Max local refname length"), fmt, NULL);
+		free(fmt);
+		fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)refs->len_sum_local_refnames);
+		insert_table_rowv(&table, _("Sum local refnames length"), fmt, NULL);
+		free(fmt);
+		fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)refs->len_max_remote_refname);
+		insert_table_rowv(&table, _("Max remote refname length"), fmt, NULL);
+		free(fmt);
+		fmt = xstrfmt("%"PRIuMAX"", (uintmax_t)refs->len_sum_remote_refnames);
+		insert_table_rowv(&table, _("Sum remote refnames length"), fmt, NULL);
+		free(fmt);
+	}
+
 	print_table_plaintext(&table);
 	clear_table(&table);
 }
@@ -637,6 +687,7 @@ static void survey_phase_refs(struct survey_context *ctx)
 	for (int i = 0; i < ctx->ref_array.nr; i++) {
 		unsigned long size;
 		struct ref_array_item *item = ctx->ref_array.items[i];
+		size_t len = strlen(item->refname);
 
 		switch (item->kind) {
 		case FILTER_REFS_TAGS:
@@ -663,6 +714,33 @@ static void survey_phase_refs(struct survey_context *ctx)
 			ctx->report.refs.unknown_nr++;
 			break;
 		}
+
+		/*
+		 * SymRefs are somewhat orthogonal to the above
+		 * classification (e.g. "HEAD" --> detached
+		 * and "refs/remotes/origin/HEAD" --> remote) so
+		 * our totals will already include them.
+		 */
+		if (item->flag & REF_ISSYMREF)
+			ctx->report.refs.cnt_symref++;
+
+		/*
+		 * Where/how is the ref stored in GITDIR.
+		 */
+		if (item->flag & REF_ISPACKED)
+			ctx->report.refs.cnt_packed++;
+		else
+			ctx->report.refs.cnt_loose++;
+
+		if (item->kind == FILTER_REFS_REMOTES) {
+			ctx->report.refs.len_sum_remote_refnames += len;
+			if (len > ctx->report.refs.len_max_remote_refname)
+				ctx->report.refs.len_max_remote_refname = len;
+		} else {
+			ctx->report.refs.len_sum_local_refnames += len;
+			if (len > ctx->report.refs.len_max_local_refname)
+				ctx->report.refs.len_max_local_refname = len;
+		}
 	}
 
 	trace2_region_leave("survey", "phase/refs", ctx->repo);
diff --git a/t/t8100-git-survey.sh b/t/t8100-git-survey.sh
index 8c6edfcae0c6c2..0d35dfcf311827 100755
--- a/t/t8100-git-survey.sh
+++ b/t/t8100-git-survey.sh
@@ -59,13 +59,20 @@ test_expect_success 'git survey (default)' '
 	-----------------------------------------------------
 
 	REFERENCES SUMMARY
-	========================
-	,       Ref Type | Count
-	-----------------+------
-	,       Branches |     1
-	     Remote refs |     0
-	      Tags (all) |     2
-	Tags (annotated) |     2
+	==================================
+	,                 Ref Type | Count
+	---------------------------+------
+	,                 Branches |     1
+	,              Remote refs |     0
+	,               Tags (all) |     2
+	,         Tags (annotated) |     2
+	,            Symbolic refs |     1
+	,               Loose refs |     4
+	,              Packed refs |     0
+	  Max local refname length |    15
+	 Sum local refnames length |    46
+	 Max remote refname length |     0
+	Sum remote refnames length |     0
 
 	REACHABLE OBJECT SUMMARY
 	========================

From 97b6900156876653cdc416bf3e852da8132ab793 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Mon, 29 Apr 2024 15:40:00 -0400
Subject: [PATCH 008/207] survey: show some commits/trees/blobs histograms

With this commit, we gather statistics about the sizes of commits,
trees, and blobs in the repository, and then present them in the form
of "hexbins", i.e. log(16) histograms that show how many objects fall
into the 0..15 bytes range, the 16..255 range, the 256..4095 range, etc.

For commits, we also show the total count grouped by the number of
parents, and for trees we additionally show the total count grouped by
number of entries in the form of "qbins", i.e. log(4) histograms.
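
For example, under this scheme a 300-byte object falls into the
256..4095 hexbin bucket, while a tree with 9 entries falls into the
4..15 qbin bucket.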

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 builtin/survey.c | 338 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 334 insertions(+), 4 deletions(-)

diff --git a/builtin/survey.c b/builtin/survey.c
index 4ea1218e39646d..dbaa1fee92114e 100644
--- a/builtin/survey.c
+++ b/builtin/survey.c
@@ -16,6 +16,8 @@
 #include "strbuf.h"
 #include "strvec.h"
 #include "trace2.h"
+#include "tree.h"
+#include "tree-walk.h"
 
 static const char * const survey_usage[] = {
 	N_("(EXPERIMENTAL!) git survey <options>"),
@@ -69,11 +71,162 @@ struct survey_report_ref_summary {
 	size_t len_sum_remote_refnames;
 };
 
+/*
+ * HBIN -- hex binning (histogram bucketing).
+ *
+ * We create histograms for various counts and sums.  Since we have a
+ * wide range of values (objects range in size from 1 to 4G bytes), a
+ * linear bucketing is not interesting.  Instead, let's use a
+ * log16()-based bucketing.  This gives us a better spread on the low
+ * and middle range and a coarse bucketing on the high end.
+ *
+ * The idea here is that it doesn't matter if you have n 1GB blobs or
+ * n/2 1GB blobs and n/2 1.5GB blobs -- either way you have a scaling
+ * problem that we want to report on.
+ */
+#define HBIN_LEN (sizeof(unsigned long) * 2)
+#define HBIN_MASK (0xF)
+#define HBIN_SHIFT (4)
+
+static int hbin(unsigned long value)
+{
+	for (size_t k = 0; k < HBIN_LEN; k++) {
+		if ((value & ~(HBIN_MASK)) == 0)
+			return k;
+		value >>= HBIN_SHIFT;
+	}
+
+	return 0; /* should not happen */
+}
+
+/*
+ * QBIN -- base4 binning (histogram bucketing).
+ *
+ * This is the same idea as the above, but we want better granularity
+ * in the low end and don't expect as many large values.
+ */
+#define QBIN_LEN (sizeof(unsigned long) * 4)
+#define QBIN_MASK (0x3)
+#define QBIN_SHIFT (2)
+
+static int qbin(unsigned long value)
+{
+	for (size_t k = 0; k < QBIN_LEN; k++) {
+		if ((value & ~(QBIN_MASK)) == 0)
+			return k;
+		value >>= (QBIN_SHIFT);
+	}
+
+	return 0; /* should not happen */
+}
+
+/*
+ * histogram bin for objects.
+ */
+struct obj_hist_bin {
+	uint64_t sum_size;      /* sum(object_size) for all objects in this bin */
+	uint64_t sum_disk_size; /* sum(on_disk_size) for all objects in this bin */
+	uint32_t cnt_seen;      /* number seen in this bin */
+};
+
+static void incr_obj_hist_bin(struct obj_hist_bin *pbin,
+			       unsigned long object_length,
+			       off_t disk_sizep)
+{
+	pbin->sum_size += object_length;
+	pbin->sum_disk_size += disk_sizep;
+	pbin->cnt_seen++;
+}
+
+/*
+ * Common fields for any type of object.
+ */
+struct survey_stats_base_object {
+	uint32_t cnt_seen;
+
+	uint32_t cnt_missing; /* we may have a partial clone. */
+
+	/*
+	 * Number of objects grouped by where they are stored on disk.
+	 * This is a function of how the ODB is packed.
+	 */
+	uint32_t cnt_cached;   /* see oi.whence */
+	uint32_t cnt_loose;    /* see oi.whence */
+	uint32_t cnt_packed;   /* see oi.whence */
+	uint32_t cnt_dbcached; /* see oi.whence */
+
+	uint64_t sum_size; /* sum(object_size) */
+	uint64_t sum_disk_size; /* sum(disk_size) */
+
+	/*
+	 * A histogram of the count of objects, the observed size, and
+	 * the on-disk size grouped by the observed size.
+	 */
+	struct obj_hist_bin size_hbin[HBIN_LEN];
+};
+
+/*
+ * PBIN -- parent vector binning (histogram bucketing).
+ *
+ * We create a histogram based upon the number of parents
+ * in a commit.  This is a simple linear vector.  It starts
+ * at zero for "initial" commits.
+ *
+ * If a commit has more parents, just put it in the last bin.
+ */
+#define PBIN_VEC_LEN (32)
+
+struct survey_stats_commits {
+	struct survey_stats_base_object base;
+
+	/*
+	 * Count of commits with k parents.
+	 */
+	uint32_t parent_cnt_pbin[PBIN_VEC_LEN];
+};
+
+/*
+ * Stats for reachable trees.
+ */
+struct survey_stats_trees {
+	struct survey_stats_base_object base;
+
+	/*
+	 * In the following, nr_entries refers to the number of files or
+	 * subdirectories in a tree.  We are interested in how wide the
+	 * tree is and if the repo has gigantic directories.
+	 */
+	uint64_t max_entries; /* max(nr_entries) -- the width of the largest tree */
+
+	/*
+	 * Computing the sum of the number of entries across all trees
+	 * is probably not that interesting.
+	 */
+	uint64_t sum_entries; /* sum(nr_entries) -- sum across all trees */
+
+	/*
+	 * A histogram of the count of trees, the observed size, and
+	 * the on-disk size grouped by the number of entries in the tree.
+	 */
+	struct obj_hist_bin entry_qbin[QBIN_LEN];
+};
+
+/*
+ * Stats for reachable blobs.
+ */
+struct survey_stats_blobs {
+	struct survey_stats_base_object base;
+};
+
 struct survey_report_object_summary {
 	size_t commits_nr;
 	size_t tags_nr;
 	size_t trees_nr;
 	size_t blobs_nr;
+
+	struct survey_stats_commits commits;
+	struct survey_stats_trees   trees;
+	struct survey_stats_blobs   blobs;
 };
 
 /**
@@ -363,6 +516,98 @@ static void print_table_plaintext(struct survey_table *table)
 	free(column_widths);
 }
 
+static void pretty_print_bin_table(const char *title_caption,
+				   const char *bucket_header,
+				   struct obj_hist_bin *bin,
+				   uint64_t bin_len, int bin_shift, uint64_t bin_mask)
+{
+	struct survey_table table = SURVEY_TABLE_INIT;
+	struct strbuf bucket = STRBUF_INIT, cnt_seen = STRBUF_INIT;
+	struct strbuf sum_size = STRBUF_INIT, sum_disk_size = STRBUF_INIT;
+	uint64_t lower = 0;
+	uint64_t upper = bin_mask;
+
+	table.table_name = title_caption;
+	strvec_pushl(&table.header, bucket_header, "Count", "Size", "Disk Size", NULL);
+
+	for (size_t k = 0; k < bin_len; k++) {
+		struct obj_hist_bin *p = bin + k;
+		uintmax_t lower_k = lower;
+		uintmax_t upper_k = upper;
+
+		lower = upper+1;
+		upper = (upper << bin_shift) + bin_mask;
+
+		if (!p->cnt_seen)
+			continue;
+
+		strbuf_reset(&bucket);
+		strbuf_addf(&bucket, "%"PRIuMAX"..%"PRIuMAX, lower_k, upper_k);
+
+		strbuf_reset(&cnt_seen);
+		strbuf_addf(&cnt_seen, "%"PRIuMAX, (uintmax_t)p->cnt_seen);
+
+		strbuf_reset(&sum_size);
+		strbuf_addf(&sum_size, "%"PRIuMAX, (uintmax_t)p->sum_size);
+
+		strbuf_reset(&sum_disk_size);
+		strbuf_addf(&sum_disk_size, "%"PRIuMAX, (uintmax_t)p->sum_disk_size);
+
+		insert_table_rowv(&table, bucket.buf,
+			     cnt_seen.buf, sum_size.buf, sum_disk_size.buf, NULL);
+	}
+	strbuf_release(&bucket);
+	strbuf_release(&cnt_seen);
+	strbuf_release(&sum_size);
+	strbuf_release(&sum_disk_size);
+
+	print_table_plaintext(&table);
+	clear_table(&table);
+}
+
+static void survey_report_hbin(const char *title_caption,
+			       struct obj_hist_bin *bin)
+{
+	pretty_print_bin_table(title_caption,
+			       "Byte Range",
+			       bin,
+			       HBIN_LEN, HBIN_SHIFT, HBIN_MASK);
+}
+
+static void survey_report_tree_lengths(struct survey_context *ctx)
+{
+	pretty_print_bin_table(_("TREE HISTOGRAM BY NUMBER OF ENTRIES"),
+			       "Entry Range",
+			       ctx->report.reachable_objects.trees.entry_qbin,
+			       QBIN_LEN, QBIN_SHIFT, QBIN_MASK);
+}
+
+static void survey_report_commit_parents(struct survey_context *ctx)
+{
+	struct survey_stats_commits *psc = &ctx->report.reachable_objects.commits;
+	struct survey_table table = SURVEY_TABLE_INIT;
+	struct strbuf parents = STRBUF_INIT, counts = STRBUF_INIT;
+
+	table.table_name = _("HISTOGRAM BY NUMBER OF COMMIT PARENTS");
+	strvec_pushl(&table.header, "Parents", "Counts", NULL);
+
+	for (int k = 0; k < PBIN_VEC_LEN; k++)
+		if (psc->parent_cnt_pbin[k]) {
+			strbuf_reset(&parents);
+			strbuf_addf(&parents, "%02d", k);
+
+			strbuf_reset(&counts);
+			strbuf_addf(&counts, "%14"PRIuMAX, (uintmax_t)psc->parent_cnt_pbin[k]);
+
+			insert_table_rowv(&table, parents.buf, counts.buf, NULL);
+		}
+	strbuf_release(&parents);
+	strbuf_release(&counts);
+
+	print_table_plaintext(&table);
+	clear_table(&table);
+}
+
 static void survey_report_plaintext_refs(struct survey_context *ctx)
 {
 	struct survey_report_ref_summary *refs = &ctx->report.refs;
@@ -515,6 +760,19 @@ static void survey_report_plaintext(struct survey_context *ctx)
 				   ctx->report.by_type,
 				   REPORT_TYPE_COUNT);
 
+	survey_report_commit_parents(ctx);
+
+	survey_report_hbin(_("COMMITS HISTOGRAM BY SIZE IN BYTES"),
+			   ctx->report.reachable_objects.commits.base.size_hbin);
+
+	survey_report_tree_lengths(ctx);
+
+	survey_report_hbin(_("TREES HISTOGRAM BY SIZE IN BYTES"),
+			   ctx->report.reachable_objects.trees.base.size_hbin);
+
+	survey_report_hbin(_("BLOBS HISTOGRAM BY SIZE IN BYTES"),
+			   ctx->report.reachable_objects.blobs.base.size_hbin);
+
 	survey_report_plaintext_sorted_size(
 		&ctx->report.top_paths_by_count[REPORT_TYPE_TREE]);
 	survey_report_plaintext_sorted_size(
@@ -783,6 +1041,8 @@ static void increment_totals(struct survey_context *ctx,
 		unsigned long object_length = 0;
 		off_t disk_sizep = 0;
 		enum object_type type;
+		struct survey_stats_base_object *base;
+		int hb;
 
 		oi.typep = &type;
 		oi.sizep = &object_length;
@@ -791,11 +1051,81 @@ static void increment_totals(struct survey_context *ctx,
 		if (oid_object_info_extended(ctx->repo, &oids->oid[i],
 					     &oi, oi_flags) < 0) {
 			summary->num_missing++;
-		} else {
-			summary->nr++;
-			summary->disk_size += disk_sizep;
-			summary->inflated_size += object_length;
+			continue;
+		}
+
+		summary->nr++;
+		summary->disk_size += disk_sizep;
+		summary->inflated_size += object_length;
+
+		switch (type) {
+		case OBJ_COMMIT: {
+			struct commit *commit = lookup_commit(ctx->repo, &oids->oid[i]);
+			unsigned k = commit_list_count(commit->parents);
+
+			if (k >= PBIN_VEC_LEN)
+				k = PBIN_VEC_LEN - 1;
+
+			ctx->report.reachable_objects.commits.parent_cnt_pbin[k]++;
+			base = &ctx->report.reachable_objects.commits.base;
+			break;
 		}
+		case OBJ_TREE: {
+			struct tree *tree = lookup_tree(ctx->repo, &oids->oid[i]);
+			if (tree) {
+				struct survey_stats_trees *pst = &ctx->report.reachable_objects.trees;
+				struct tree_desc desc;
+				struct name_entry entry;
+				int nr_entries;
+				int qb;
+
+				parse_tree(tree);
+				init_tree_desc(&desc, &oids->oid[i], tree->buffer, tree->size);
+				nr_entries = 0;
+				while (tree_entry(&desc, &entry))
+					nr_entries++;
+
+				pst->sum_entries += nr_entries;
+
+				if (nr_entries > pst->max_entries)
+					pst->max_entries = nr_entries;
+
+				qb = qbin(nr_entries);
+				incr_obj_hist_bin(&pst->entry_qbin[qb], object_length, disk_sizep);
+			}
+			base = &ctx->report.reachable_objects.trees.base;
+			break;
+		}
+		case OBJ_BLOB:
+			base = &ctx->report.reachable_objects.blobs.base;
+			break;
+		default:
+			continue;
+		}
+
+		switch (oi.whence) {
+		case OI_CACHED:
+			base->cnt_cached++;
+			break;
+		case OI_LOOSE:
+			base->cnt_loose++;
+			break;
+		case OI_PACKED:
+			base->cnt_packed++;
+			break;
+		case OI_DBCACHED:
+			base->cnt_dbcached++;
+			break;
+		default:
+			break;
+		}
+
+		base->sum_size += object_length;
+		base->sum_disk_size += disk_sizep;
+
+		hb = hbin(object_length);
+		incr_obj_hist_bin(&base->size_hbin[hb], object_length, disk_sizep);
+
 	}
 }
 

From 73e6cd5191a939cc9cf76e5d2fdbc333a74bc5d6 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Wed, 1 May 2024 12:56:38 -0400
Subject: [PATCH 009/207] survey: add vector of largest objects for various
 scaling dimensions

Create `struct large_item` and `struct large_item_vec` to capture the
n largest commits, trees, and blobs under various scaling dimensions,
such as size in bytes, number of commit parents, or number of entries
in a tree.

Each of these has a command-line option to set it independently.
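
As an illustration of the insertion behavior: with `--tree-entries=3`
and a current vector of trees with {900, 400, 25} entries, a newly
visited tree with 500 entries is inserted after the 900-entry tree,
the 400-entry tree shifts down one slot, and the 25-entry tree drops
off the end.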

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 Documentation/config/survey.txt |  29 +++++
 Documentation/git-survey.txt    |  31 +++++
 builtin/survey.c                | 222 +++++++++++++++++++++++++++++++-
 3 files changed, 276 insertions(+), 6 deletions(-)

diff --git a/Documentation/config/survey.txt b/Documentation/config/survey.txt
index 9e594a2092f225..fd2d7e153b38ce 100644
--- a/Documentation/config/survey.txt
+++ b/Documentation/config/survey.txt
@@ -11,4 +11,33 @@ survey.*::
 	top::
 		This integer value implies `--top=<N>`, specifying the
 		number of entries in the detail tables.
+	showBlobSizes::
+		A non-negative integer value.  Requests details on the
+		<n> largest file blobs by size in bytes.  Provides a
+		default value for `--blob-sizes=<n>` in
+		linkgit:git-survey[1].
+	showCommitParents::
+		A non-negative integer value.  Requests details on the
+		<n> commits with the largest number of parents.  Provides a
+		default value for `--commit-parents=<n>` in
+		linkgit:git-survey[1].
+	showCommitSizes::
+		A non-negative integer value.  Requests details on the
+		<n> largest commits by size in bytes.  Generally, these
+		are the commits with the largest commit messages.
+		Provides a default value for `--commit-sizes=<n>` in
+		linkgit:git-survey[1].
+	showTreeEntries::
+		A non-negative integer value.  Requests details on the
+		<n> trees (directories) with the largest number of entries
+		(files and subdirectories).  Provides a default value
+		for `--tree-entries=<n>` in linkgit:git-survey[1].
+	showTreeSizes::
+		A non-negative integer value.  Requests details on the
+		<n> largest trees (directories) by size in bytes.  This
+		set will usually be equal to the
+		`survey.showTreeEntries` set, but may be skewed by very
+		long file or subdirectory entry names.  Provides a
+		default value for `--tree-sizes=<n>` in
+		linkgit:git-survey[1].
 --
diff --git a/Documentation/git-survey.txt b/Documentation/git-survey.txt
index 44f3a0568b7697..d174ffd4164840 100644
--- a/Documentation/git-survey.txt
+++ b/Documentation/git-survey.txt
@@ -59,6 +59,32 @@ only refs for the given options are added.
 --other::
 	Add notes (`refs/notes/`) and stashes (`refs/stash/`) to the set.
 
+Large Item Selection
+~~~~~~~~~~~~~~~~~~~~
+
+The following options control the optional display of large items under
+various dimensions of scale.  The OIDs of the largest `n` objects will be
+displayed in reverse sorted order.  For each, `n` defaults to 10.
+
+--commit-parents::
+	Shows the OIDs of the commits with the most parent commits.
+
+--commit-sizes::
+	Shows the OIDs of the largest commits by size in bytes.  These are
+	usually the ones with the largest commit messages.
+
+--tree-entries::
+	Shows the OIDs of the trees with the largest number of entries.  These
+	are the directories with the most files or subdirectories.
+
+--tree-sizes::
+	Shows the OIDs of the largest trees by size in bytes.  This set
+	will usually be the same as the set selected by `--tree-entries` unless
+	skewed by very long entry names.
+
+--blob-sizes::
+	Shows the OIDs of the largest blobs by size in bytes.
+
 OUTPUT
 ------
 
@@ -78,6 +104,11 @@ Reachable Object Summary
 The reachable object summary shows the total number of each kind of Git
 object, including tags, commits, trees, and blobs.
 
+CONFIGURATION
+-------------
+
+include::config/survey.txt[]
+
 GIT
 ---
 Part of the linkgit:git[1] suite
diff --git a/builtin/survey.c b/builtin/survey.c
index dbaa1fee92114e..7387cda8154116 100644
--- a/builtin/survey.c
+++ b/builtin/survey.c
@@ -41,6 +41,15 @@ static struct survey_refs_wanted default_ref_options = {
 struct survey_opts {
 	int verbose;
 	int show_progress;
+
+	int show_largest_commits_by_nr_parents;
+	int show_largest_commits_by_size_bytes;
+
+	int show_largest_trees_by_nr_entries;
+	int show_largest_trees_by_size_bytes;
+
+	int show_largest_blobs_by_size_bytes;
+
 	int top_nr;
 	struct survey_refs_wanted refs;
 };
@@ -138,6 +147,87 @@ static void incr_obj_hist_bin(struct obj_hist_bin *pbin,
 	pbin->cnt_seen++;
 }
 
+/*
+ * Remember the largest n objects for some scaling dimension.  This
+ * could be the observed object size or number of entries in a tree.
+ * We'll use this to generate a sorted vector in the output for that
+ * dimension.
+ */
+struct large_item {
+	uint64_t size;
+	struct object_id oid;
+};
+
+struct large_item_vec {
+	char *dimension_label;
+	char *item_label;
+	uint64_t nr_items;
+	struct large_item items[FLEX_ARRAY]; /* nr_items */
+};
+
+static struct large_item_vec *alloc_large_item_vec(const char *dimension_label,
+						   const char *item_label,
+						   uint64_t nr_items)
+{
+	struct large_item_vec *vec;
+	size_t flex_len = nr_items * sizeof(struct large_item);
+
+	if (!nr_items)
+		return NULL;
+
+	vec = xcalloc(1, (sizeof(struct large_item_vec) + flex_len));
+	vec->dimension_label = strdup(dimension_label);
+	vec->item_label = strdup(item_label);
+	vec->nr_items = nr_items;
+
+	return vec;
+}
+
+static void free_large_item_vec(struct large_item_vec *vec)
+{
+	if (!vec)
+		return;
+
+	free(vec->dimension_label);
+	free(vec->item_label);
+	free(vec);
+}
+
+static void maybe_insert_large_item(struct large_item_vec *vec,
+				    uint64_t size,
+				    struct object_id *oid)
+{
+	size_t rest_len;
+	size_t k;
+
+	if (!vec || !vec->nr_items)
+		return;
+
+	/*
+	 * Since the odds of an object being among the largest n
+	 * are small, shortcut and see if it is smaller than
+	 * the smallest one in our set and quickly reject it.
+	 */
+	if (size < vec->items[vec->nr_items - 1].size)
+		return;
+
+	for (k = 0; k < vec->nr_items; k++) {
+		if (size < vec->items[k].size)
+			continue;
+
+		/* push items[k..] down one and insert it here */
+
+		rest_len = (vec->nr_items - k - 1) * sizeof(struct large_item);
+		if (rest_len)
+			memmove(&vec->items[k + 1], &vec->items[k], rest_len);
+
+		memset(&vec->items[k], 0, sizeof(struct large_item));
+		vec->items[k].size = size;
+		oidcpy(&vec->items[k].oid, oid);
+		return;
+	}
+}
+
 /*
  * Common fields for any type of object.
  */
@@ -183,6 +273,9 @@ struct survey_stats_commits {
 	 * Count of commits with k parents.
 	 */
 	uint32_t parent_cnt_pbin[PBIN_VEC_LEN];
+
+	struct large_item_vec *vec_largest_by_nr_parents;
+	struct large_item_vec *vec_largest_by_size_bytes;
 };
 
 /*
@@ -192,11 +285,18 @@ struct survey_stats_trees {
 	struct survey_stats_base_object base;
 
 	/*
-	 * In the following, nr_entries refers to the number of files or
-	 * subdirectories in a tree.  We are interested in how wide the
-	 * tree is and if the repo has gigantic directories.
+	 * Keep a vector of the trees with the most number of entries.
+	 * This gives us a feel for the width of a tree when there are
+	 * gigantic directories.
 	 */
-	uint64_t max_entries; /* max(nr_entries) -- the width of the largest tree */
+	struct large_item_vec *vec_largest_by_nr_entries;
+
+	/*
+	 * Keep a vector of the trees with the largest size in bytes.
+	 * The contents of this may or may not match items in the other
+	 * vector, since entryname length can alter the results.
+	 */
+	struct large_item_vec *vec_largest_by_size_bytes;
 
 	/*
 	 * Computing the sum of the number of entries across all trees
@@ -216,6 +316,11 @@ struct survey_stats_trees {
  */
 struct survey_stats_blobs {
 	struct survey_stats_base_object base;
+
+	/*
+	 * Remember the OIDs of the largest n blobs.
+	 */
+	struct large_item_vec *vec_largest_by_size_bytes;
 };
 
 struct survey_report_object_summary {
@@ -396,6 +501,12 @@ struct survey_context {
 
 static void clear_survey_context(struct survey_context *ctx)
 {
+	free_large_item_vec(ctx->report.reachable_objects.commits.vec_largest_by_nr_parents);
+	free_large_item_vec(ctx->report.reachable_objects.commits.vec_largest_by_size_bytes);
+	free_large_item_vec(ctx->report.reachable_objects.trees.vec_largest_by_nr_entries);
+	free_large_item_vec(ctx->report.reachable_objects.trees.vec_largest_by_size_bytes);
+	free_large_item_vec(ctx->report.reachable_objects.blobs.vec_largest_by_size_bytes);
+
 	ref_array_clear(&ctx->ref_array);
 	strvec_clear(&ctx->refs);
 }
@@ -608,6 +719,32 @@ static void survey_report_commit_parents(struct survey_context *ctx)
 	clear_table(&table);
 }
 
+static void survey_report_largest_vec(struct large_item_vec *vec)
+{
+	struct survey_table table = SURVEY_TABLE_INIT;
+	struct strbuf size = STRBUF_INIT;
+
+	if (!vec || !vec->nr_items)
+		return;
+
+	table.table_name = vec->dimension_label;
+	strvec_pushl(&table.header, "Size", "OID", NULL);
+
+	for (size_t k = 0; k < vec->nr_items; k++) {
+		struct large_item *pk = &vec->items[k];
+		if (!is_null_oid(&pk->oid)) {
+			strbuf_reset(&size);
+			strbuf_addf(&size, "%"PRIuMAX, (uintmax_t)pk->size);
+
+			insert_table_rowv(&table, size.buf, oid_to_hex(&pk->oid), NULL);
+		}
+	}
+	strbuf_release(&size);
+
+	print_table_plaintext(&table);
+	clear_table(&table);
+}
+
 static void survey_report_plaintext_refs(struct survey_context *ctx)
 {
 	struct survey_report_ref_summary *refs = &ctx->report.refs;
@@ -787,6 +924,12 @@ static void survey_report_plaintext(struct survey_context *ctx)
 		&ctx->report.top_paths_by_inflate[REPORT_TYPE_TREE]);
 	survey_report_plaintext_sorted_size(
 		&ctx->report.top_paths_by_inflate[REPORT_TYPE_BLOB]);
+
+	survey_report_largest_vec(ctx->report.reachable_objects.commits.vec_largest_by_nr_parents);
+	survey_report_largest_vec(ctx->report.reachable_objects.commits.vec_largest_by_size_bytes);
+	survey_report_largest_vec(ctx->report.reachable_objects.trees.vec_largest_by_nr_entries);
+	survey_report_largest_vec(ctx->report.reachable_objects.trees.vec_largest_by_size_bytes);
+	survey_report_largest_vec(ctx->report.reachable_objects.blobs.vec_largest_by_size_bytes);
 }
 
 /*
@@ -858,6 +1001,27 @@ static int survey_load_config_cb(const char *var, const char *value,
 		ctx->opts.show_progress = git_config_bool(var, value);
 		return 0;
 	}
+	if (!strcmp(var, "survey.showcommitparents")) {
+		ctx->opts.show_largest_commits_by_nr_parents = git_config_ulong(var, value, cctx->kvi);
+		return 0;
+	}
+	if (!strcmp(var, "survey.showcommitsizes")) {
+		ctx->opts.show_largest_commits_by_size_bytes = git_config_ulong(var, value, cctx->kvi);
+		return 0;
+	}
+
+	if (!strcmp(var, "survey.showtreeentries")) {
+		ctx->opts.show_largest_trees_by_nr_entries = git_config_ulong(var, value, cctx->kvi);
+		return 0;
+	}
+	if (!strcmp(var, "survey.showtreesizes")) {
+		ctx->opts.show_largest_trees_by_size_bytes = git_config_ulong(var, value, cctx->kvi);
+		return 0;
+	}
+	if (!strcmp(var, "survey.showblobsizes")) {
+		ctx->opts.show_largest_blobs_by_size_bytes = git_config_ulong(var, value, cctx->kvi);
+		return 0;
+	}
 	if (!strcmp(var, "survey.top")) {
 		ctx->opts.top_nr = git_config_bool(var, value);
 		return 0;
@@ -1068,6 +1232,9 @@ static void increment_totals(struct survey_context *ctx,
 
 			ctx->report.reachable_objects.commits.parent_cnt_pbin[k]++;
 			base = &ctx->report.reachable_objects.commits.base;
+
+			maybe_insert_large_item(ctx->report.reachable_objects.commits.vec_largest_by_nr_parents, k, &commit->object.oid);
+			maybe_insert_large_item(ctx->report.reachable_objects.commits.vec_largest_by_size_bytes, object_length, &commit->object.oid);
 			break;
 		}
 		case OBJ_TREE: {
@@ -1087,8 +1254,8 @@ static void increment_totals(struct survey_context *ctx,
 
 				pst->sum_entries += nr_entries;
 
-				if (nr_entries > pst->max_entries)
-					pst->max_entries = nr_entries;
+				maybe_insert_large_item(pst->vec_largest_by_nr_entries, nr_entries, &tree->object.oid);
+				maybe_insert_large_item(pst->vec_largest_by_size_bytes, object_length, &tree->object.oid);
 
 				qb = qbin(nr_entries);
 				incr_obj_hist_bin(&pst->entry_qbin[qb], object_length, disk_sizep);
@@ -1098,6 +1265,8 @@ static void increment_totals(struct survey_context *ctx,
 		}
 		case OBJ_BLOB:
 			base = &ctx->report.reachable_objects.blobs.base;
+
+			maybe_insert_large_item(ctx->report.reachable_objects.blobs.vec_largest_by_size_bytes, object_length, &oids->oid[i]);
 			break;
 		default:
 			continue;
@@ -1304,6 +1473,14 @@ int cmd_survey(int argc, const char **argv, const char *prefix, struct repositor
 		OPT_BOOL_F(0, "detached", &ctx.opts.refs.want_detached, N_("include detached HEAD"),     PARSE_OPT_NONEG),
 		OPT_BOOL_F(0, "other",    &ctx.opts.refs.want_other,    N_("include notes and stashes"), PARSE_OPT_NONEG),
 
+		OPT_INTEGER_F(0, "commit-parents", &ctx.opts.show_largest_commits_by_nr_parents, N_("show N largest commits by parent count"),  PARSE_OPT_NONEG),
+		OPT_INTEGER_F(0, "commit-sizes",   &ctx.opts.show_largest_commits_by_size_bytes, N_("show N largest commits by size in bytes"), PARSE_OPT_NONEG),
+
+		OPT_INTEGER_F(0, "tree-entries",   &ctx.opts.show_largest_trees_by_nr_entries,   N_("show N largest trees by entry count"),     PARSE_OPT_NONEG),
+		OPT_INTEGER_F(0, "tree-sizes",     &ctx.opts.show_largest_trees_by_size_bytes,   N_("show N largest trees by size in bytes"),   PARSE_OPT_NONEG),
+
+		OPT_INTEGER_F(0, "blob-sizes",     &ctx.opts.show_largest_blobs_by_size_bytes,   N_("show N largest blobs by size in bytes"),   PARSE_OPT_NONEG),
+
 		OPT_END(),
 	};
 
@@ -1327,6 +1504,39 @@ int cmd_survey(int argc, const char **argv, const char *prefix, struct repositor
 
 	fixup_refs_wanted(&ctx);
 
+	if (ctx.opts.show_largest_commits_by_nr_parents)
+		ctx.report.reachable_objects.commits.vec_largest_by_nr_parents =
+			alloc_large_item_vec(
+				"largest_commits_by_nr_parents",
+				"nr_parents",
+				ctx.opts.show_largest_commits_by_nr_parents);
+	if (ctx.opts.show_largest_commits_by_size_bytes)
+		ctx.report.reachable_objects.commits.vec_largest_by_size_bytes =
+			alloc_large_item_vec(
+				"largest_commits_by_size_bytes",
+				"size",
+				ctx.opts.show_largest_commits_by_size_bytes);
+
+	if (ctx.opts.show_largest_trees_by_nr_entries)
+		ctx.report.reachable_objects.trees.vec_largest_by_nr_entries =
+			alloc_large_item_vec(
+				"largest_trees_by_nr_entries",
+				"nr_entries",
+				ctx.opts.show_largest_trees_by_nr_entries);
+	if (ctx.opts.show_largest_trees_by_size_bytes)
+		ctx.report.reachable_objects.trees.vec_largest_by_size_bytes =
+			alloc_large_item_vec(
+				"largest_trees_by_size_bytes",
+				"size",
+				ctx.opts.show_largest_trees_by_size_bytes);
+
+	if (ctx.opts.show_largest_blobs_by_size_bytes)
+		ctx.report.reachable_objects.blobs.vec_largest_by_size_bytes =
+			alloc_large_item_vec(
+				"largest_blobs_by_size_bytes",
+				"size",
+				ctx.opts.show_largest_blobs_by_size_bytes);
+
 	survey_phase_refs(&ctx);
 
 	survey_phase_objects(&ctx);

From f35a2a121034bbef020ce65ec8a6bfba0f82d208 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Wed, 15 May 2024 15:56:36 -0400
Subject: [PATCH 010/207] survey: add pathname of blob or tree to
 large_item_vec

Include the pathname of each blob or tree in the large_item_vec
to help identify the file or directory associated with the OID
and size information.

This pathname is computed during the path walk, so it reflects the
first pathname observed for that OID during the traversal over all of
the refs.  Since the file or directory could have moved (without being
modified), there may be multiple "correct" pathnames for a particular
OID.  Because we do not control the ref traversal order, we should
consider it a "suggested pathname" for the OID.
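
For example (the paths here are purely hypothetical), if a large blob
is first reached as "data/big.bin" on one branch but also exists as
"archive/big.bin" on another, whichever path the traversal happens to
visit first is the one reported.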

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 builtin/survey.c | 43 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 32 insertions(+), 11 deletions(-)

diff --git a/builtin/survey.c b/builtin/survey.c
index 7387cda8154116..9b5c002ca68973 100644
--- a/builtin/survey.c
+++ b/builtin/survey.c
@@ -156,6 +156,7 @@ static void incr_obj_hist_bin(struct obj_hist_bin *pbin,
 struct large_item {
 	uint64_t size;
 	struct object_id oid;
+	struct strbuf name;
 };
 
 struct large_item_vec {
@@ -171,6 +172,7 @@ static struct large_item_vec *alloc_large_item_vec(const char *dimension_label,
 {
 	struct large_item_vec *vec;
 	size_t flex_len = nr_items * sizeof(struct large_item);
+	size_t k;
 
 	if (!nr_items)
 		return NULL;
@@ -180,6 +182,9 @@ static struct large_item_vec *alloc_large_item_vec(const char *dimension_label,
 	vec->item_label = strdup(item_label);
 	vec->nr_items = nr_items;
 
+	for (k = 0; k < nr_items; k++)
+		strbuf_init(&vec->items[k].name, 0);
+
 	return vec;
 }
 
@@ -188,6 +193,9 @@ static void free_large_item_vec(struct large_item_vec *vec)
 	if (!vec)
 		return;
 
+	for (size_t k = 0; k < vec->nr_items; k++)
+		strbuf_release(&vec->items[k].name);
+
 	free(vec->dimension_label);
 	free(vec->item_label);
 	free(vec);
@@ -195,7 +203,8 @@ static void free_large_item_vec(struct large_item_vec *vec)
 
 static void maybe_insert_large_item(struct large_item_vec *vec,
 				    uint64_t size,
-				    struct object_id *oid)
+				    struct object_id *oid,
+				    const char *name)
 {
 	size_t rest_len;
 	size_t k;
@@ -215,7 +224,14 @@ static void maybe_insert_large_item(struct large_item_vec *vec,
 		if (size < vec->items[k].size)
 			continue;
 
-		/* push items[k..] down one and insert it here */
+		/*
+		 * The last large_item in the vector is about to be
+		 * overwritten by the previous one during the shift.
+		 * Steal its allocated strbuf and reuse it.
+		 */
+		strbuf_release(&vec->items[vec->nr_items - 1].name);
+
+		/* push items[k..] down one and insert data for this item here */
 
 		rest_len = (vec->nr_items - k - 1) * sizeof(struct large_item);
 		if (rest_len)
@@ -224,6 +240,10 @@ static void maybe_insert_large_item(struct large_item_vec *vec,
 		memset(&vec->items[k], 0, sizeof(struct large_item));
 		vec->items[k].size = size;
 		oidcpy(&vec->items[k].oid, oid);
+		strbuf_init(&vec->items[k].name, 0);
+		if (name && *name)
+			strbuf_addstr(&vec->items[k].name, name);
+
 		return;
 	}
 }
@@ -728,7 +748,7 @@ static void survey_report_largest_vec(struct large_item_vec *vec)
 		return;
 
 	table.table_name = vec->dimension_label;
-	strvec_pushl(&table.header, "Size", "OID", NULL);
+	strvec_pushl(&table.header, "Size", "OID", "Name", NULL);
 
 	for (size_t k = 0; k < vec->nr_items; k++) {
 		struct large_item *pk = &vec->items[k];
@@ -736,7 +756,7 @@ static void survey_report_largest_vec(struct large_item_vec *vec)
 			strbuf_reset(&size);
 			strbuf_addf(&size, "%"PRIuMAX, (uintmax_t)pk->size);
 
-			insert_table_rowv(&table, size.buf, oid_to_hex(&pk->oid), NULL);
+			insert_table_rowv(&table, size.buf, oid_to_hex(&pk->oid), pk->name.buf, NULL);
 		}
 	}
 	strbuf_release(&size);
@@ -1197,7 +1217,8 @@ static void increment_object_counts(
 
 static void increment_totals(struct survey_context *ctx,
 			     struct oid_array *oids,
-			     struct survey_report_object_size_summary *summary)
+			     struct survey_report_object_size_summary *summary,
+			     const char *path)
 {
 	for (size_t i = 0; i < oids->nr; i++) {
 		struct object_info oi = OBJECT_INFO_INIT;
@@ -1233,8 +1254,8 @@ static void increment_totals(struct survey_context *ctx,
 			ctx->report.reachable_objects.commits.parent_cnt_pbin[k]++;
 			base = &ctx->report.reachable_objects.commits.base;
 
-			maybe_insert_large_item(ctx->report.reachable_objects.commits.vec_largest_by_nr_parents, k, &commit->object.oid);
-			maybe_insert_large_item(ctx->report.reachable_objects.commits.vec_largest_by_size_bytes, object_length, &commit->object.oid);
+			maybe_insert_large_item(ctx->report.reachable_objects.commits.vec_largest_by_nr_parents, k, &commit->object.oid, NULL);
+			maybe_insert_large_item(ctx->report.reachable_objects.commits.vec_largest_by_size_bytes, object_length, &commit->object.oid, NULL);
 			break;
 		}
 		case OBJ_TREE: {
@@ -1254,8 +1275,8 @@ static void increment_totals(struct survey_context *ctx,
 
 				pst->sum_entries += nr_entries;
 
-				maybe_insert_large_item(pst->vec_largest_by_nr_entries, nr_entries, &tree->object.oid);
-				maybe_insert_large_item(pst->vec_largest_by_size_bytes, object_length, &tree->object.oid);
+				maybe_insert_large_item(pst->vec_largest_by_nr_entries, nr_entries, &tree->object.oid, path);
+				maybe_insert_large_item(pst->vec_largest_by_size_bytes, object_length, &tree->object.oid, path);
 
 				qb = qbin(nr_entries);
 				incr_obj_hist_bin(&pst->entry_qbin[qb], object_length, disk_sizep);
@@ -1266,7 +1287,7 @@ static void increment_totals(struct survey_context *ctx,
 		case OBJ_BLOB:
 			base = &ctx->report.reachable_objects.blobs.base;
 
-			maybe_insert_large_item(ctx->report.reachable_objects.blobs.vec_largest_by_size_bytes, object_length, &oids->oid[i]);
+			maybe_insert_large_item(ctx->report.reachable_objects.blobs.vec_largest_by_size_bytes, object_length, &oids->oid[i], path);
 			break;
 		default:
 			continue;
@@ -1306,7 +1327,7 @@ static void increment_object_totals(struct survey_context *ctx,
 	struct survey_report_object_size_summary *total;
 	struct survey_report_object_size_summary summary = { 0 };
 
-	increment_totals(ctx, oids, &summary);
+	increment_totals(ctx, oids, &summary, path);
 
 	switch (type) {
 	case OBJ_COMMIT:

From 0fb037f2a33964120d5d50632efab0a05e6c5c17 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Wed, 15 May 2024 17:44:41 -0400
Subject: [PATCH 011/207] survey: add commit-oid to large_item detail

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 builtin/survey.c | 35 +++++++++++++++++++++++++++--------
 1 file changed, 27 insertions(+), 8 deletions(-)

diff --git a/builtin/survey.c b/builtin/survey.c
index 9b5c002ca68973..4d38a93eeffdcf 100644
--- a/builtin/survey.c
+++ b/builtin/survey.c
@@ -156,7 +156,21 @@ static void incr_obj_hist_bin(struct obj_hist_bin *pbin,
 struct large_item {
 	uint64_t size;
 	struct object_id oid;
+
+	/*
+	 * For blobs and trees the name field is the pathname of the
+	 * file or directory.  Root trees will have a zero-length
+	 * name.  The name field is not currently used for commits.
+	 */
 	struct strbuf name;
+
+	/*
+	 * For blobs and trees remember the transient commit from
+	 * the treewalk so that we can say that this large item
+	 * first appeared in this commit (relative to the treewalk
+	 * order).
+	 */
+	struct object_id containing_commit_oid;
 };
 
 struct large_item_vec {
@@ -204,7 +218,8 @@ static void free_large_item_vec(struct large_item_vec *vec)
 static void maybe_insert_large_item(struct large_item_vec *vec,
 				    uint64_t size,
 				    struct object_id *oid,
-				    const char *name)
+				    const char *name,
+				    const struct object_id *containing_commit_oid)
 {
 	size_t rest_len;
 	size_t k;
@@ -240,6 +255,7 @@ static void maybe_insert_large_item(struct large_item_vec *vec,
 		memset(&vec->items[k], 0, sizeof(struct large_item));
 		vec->items[k].size = size;
 		oidcpy(&vec->items[k].oid, oid);
+		oidcpy(&vec->items[k].containing_commit_oid, containing_commit_oid ? containing_commit_oid : null_oid());
 		strbuf_init(&vec->items[k].name, 0);
 		if (name && *name)
 			strbuf_addstr(&vec->items[k].name, name);
@@ -748,7 +764,7 @@ static void survey_report_largest_vec(struct large_item_vec *vec)
 		return;
 
 	table.table_name = vec->dimension_label;
-	strvec_pushl(&table.header, "Size", "OID", "Name", NULL);
+	strvec_pushl(&table.header, "Size", "OID", "Name", "Commit", NULL);
 
 	for (size_t k = 0; k < vec->nr_items; k++) {
 		struct large_item *pk = &vec->items[k];
@@ -756,7 +772,10 @@ static void survey_report_largest_vec(struct large_item_vec *vec)
 			strbuf_reset(&size);
 			strbuf_addf(&size, "%"PRIuMAX, (uintmax_t)pk->size);
 
-			insert_table_rowv(&table, size.buf, oid_to_hex(&pk->oid), pk->name.buf, NULL);
+			insert_table_rowv(&table, size.buf, oid_to_hex(&pk->oid), pk->name.buf,
+					  is_null_oid(&pk->containing_commit_oid) ?
+					  "" : oid_to_hex(&pk->containing_commit_oid),
+					  NULL);
 		}
 	}
 	strbuf_release(&size);
@@ -1254,8 +1273,8 @@ static void increment_totals(struct survey_context *ctx,
 			ctx->report.reachable_objects.commits.parent_cnt_pbin[k]++;
 			base = &ctx->report.reachable_objects.commits.base;
 
-			maybe_insert_large_item(ctx->report.reachable_objects.commits.vec_largest_by_nr_parents, k, &commit->object.oid, NULL);
-			maybe_insert_large_item(ctx->report.reachable_objects.commits.vec_largest_by_size_bytes, object_length, &commit->object.oid, NULL);
+			maybe_insert_large_item(ctx->report.reachable_objects.commits.vec_largest_by_nr_parents, k, &commit->object.oid, NULL, &commit->object.oid);
+			maybe_insert_large_item(ctx->report.reachable_objects.commits.vec_largest_by_size_bytes, object_length, &commit->object.oid, NULL, &commit->object.oid);
 			break;
 		}
 		case OBJ_TREE: {
@@ -1275,8 +1294,8 @@ static void increment_totals(struct survey_context *ctx,
 
 				pst->sum_entries += nr_entries;
 
-				maybe_insert_large_item(pst->vec_largest_by_nr_entries, nr_entries, &tree->object.oid, path);
-				maybe_insert_large_item(pst->vec_largest_by_size_bytes, object_length, &tree->object.oid, path);
+				maybe_insert_large_item(pst->vec_largest_by_nr_entries, nr_entries, &tree->object.oid, path, NULL);
+				maybe_insert_large_item(pst->vec_largest_by_size_bytes, object_length, &tree->object.oid, path, NULL);
 
 				qb = qbin(nr_entries);
 				incr_obj_hist_bin(&pst->entry_qbin[qb], object_length, disk_sizep);
@@ -1287,7 +1306,7 @@ static void increment_totals(struct survey_context *ctx,
 		case OBJ_BLOB:
 			base = &ctx->report.reachable_objects.blobs.base;
 
-			maybe_insert_large_item(ctx->report.reachable_objects.blobs.vec_largest_by_size_bytes, object_length, &oids->oid[i], path);
+			maybe_insert_large_item(ctx->report.reachable_objects.blobs.vec_largest_by_size_bytes, object_length, &oids->oid[i], path, NULL);
 			break;
 		default:
 			continue;

From ba10649a97bce7e342966c40b7ad952282ef1b92 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Mon, 20 May 2024 17:23:39 -0400
Subject: [PATCH 012/207] survey: add commit name-rev lookup to each large_item

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 builtin/survey.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 86 insertions(+), 3 deletions(-)

diff --git a/builtin/survey.c b/builtin/survey.c
index 4d38a93eeffdcf..15ee7c2c17d2af 100644
--- a/builtin/survey.c
+++ b/builtin/survey.c
@@ -13,6 +13,7 @@
 #include "ref-filter.h"
 #include "refs.h"
 #include "revision.h"
+#include "run-command.h"
 #include "strbuf.h"
 #include "strvec.h"
 #include "trace2.h"
@@ -171,6 +172,12 @@ struct large_item {
 	 * order).
 	 */
 	struct object_id containing_commit_oid;
+
+	/*
+	 * Lookup `containing_commit_oid` using `git name-rev`.
+	 * Lazy allocate this post-treewalk.
+	 */
+	struct strbuf name_rev;
 };
 
 struct large_item_vec {
@@ -207,8 +214,10 @@ static void free_large_item_vec(struct large_item_vec *vec)
 	if (!vec)
 		return;
 
-	for (size_t k = 0; k < vec->nr_items; k++)
+	for (size_t k = 0; k < vec->nr_items; k++) {
 		strbuf_release(&vec->items[k].name);
+		strbuf_release(&vec->items[k].name_rev);
+	}
 
 	free(vec->dimension_label);
 	free(vec->item_label);
@@ -243,6 +252,9 @@ static void maybe_insert_large_item(struct large_item_vec *vec,
 		 * The last large_item in the vector is about to be
 		 * overwritten by the previous one during the shift.
 		 * Steal its allocated strbuf and reuse it.
+		 *
+		 * We can ignore .name_rev because it will not be
+		 * allocated until after the treewalk.
 		 */
 		strbuf_release(&vec->items[vec->nr_items - 1].name);
 
@@ -764,7 +776,7 @@ static void survey_report_largest_vec(struct large_item_vec *vec)
 		return;
 
 	table.table_name = vec->dimension_label;
-	strvec_pushl(&table.header, "Size", "OID", "Name", "Commit", NULL);
+	strvec_pushl(&table.header, "Size", "OID", "Name", "Commit", "Name-Rev", NULL);
 
 	for (size_t k = 0; k < vec->nr_items; k++) {
 		struct large_item *pk = &vec->items[k];
@@ -775,6 +787,7 @@ static void survey_report_largest_vec(struct large_item_vec *vec)
 			insert_table_rowv(&table, size.buf, oid_to_hex(&pk->oid), pk->name.buf,
 					  is_null_oid(&pk->containing_commit_oid) ?
 					  "" : oid_to_hex(&pk->containing_commit_oid),
+					  pk->name_rev.len ? pk->name_rev.buf : "",
 					  NULL);
 		}
 	}
@@ -1125,6 +1138,73 @@ static void do_load_refs(struct survey_context *ctx,
 	ref_sorting_release(sorting);
 }
 
+/*
+ * Try to run `git name-rev` on each of the containing-commit-oid's
+ * in this large-item-vec to get a pretty name for each OID.  Silently
+ * ignore errors if it fails because this info is nice to have but not
+ * essential.
+ */
+static void large_item_vec_lookup_name_rev(struct survey_context *ctx,
+					   struct large_item_vec *vec)
+{
+	struct child_process cp = CHILD_PROCESS_INIT;
+	struct strbuf in = STRBUF_INIT;
+	struct strbuf out = STRBUF_INIT;
+	const char *line;
+	size_t k;
+
+	if (!vec || !vec->nr_items)
+		return;
+
+	ctx->progress_total += vec->nr_items;
+	display_progress(ctx->progress, ctx->progress_total);
+
+	for (k = 0; k < vec->nr_items; k++)
+		strbuf_addf(&in, "%s\n", oid_to_hex(&vec->items[k].containing_commit_oid));
+
+	cp.git_cmd = 1;
+	strvec_pushl(&cp.args, "name-rev", "--name-only", "--annotate-stdin", NULL);
+	if (pipe_command(&cp, in.buf, in.len, &out, 0, NULL, 0)) {
+		strbuf_release(&in);
+		strbuf_release(&out);
+		return;
+	}
+
+	line = out.buf;
+	k = 0;
+	while (*line) {
+		const char *eol = strchrnul(line, '\n');
+
+		strbuf_init(&vec->items[k].name_rev, 0);
+		strbuf_add(&vec->items[k].name_rev, line, (eol - line));
+
+		line = eol + 1;
+		k++;
+	}
+
+	strbuf_release(&in);
+	strbuf_release(&out);
+}
+
+static void do_lookup_name_rev(struct survey_context *ctx)
+{
+	if (ctx->opts.show_progress) {
+		ctx->progress_total = 0;
+		ctx->progress = start_progress(_("Resolving name-revs..."), 0);
+	}
+
+	large_item_vec_lookup_name_rev(ctx, ctx->report.reachable_objects.commits.vec_largest_by_nr_parents);
+	large_item_vec_lookup_name_rev(ctx, ctx->report.reachable_objects.commits.vec_largest_by_size_bytes);
+
+	large_item_vec_lookup_name_rev(ctx, ctx->report.reachable_objects.trees.vec_largest_by_nr_entries);
+	large_item_vec_lookup_name_rev(ctx, ctx->report.reachable_objects.trees.vec_largest_by_size_bytes);
+
+	large_item_vec_lookup_name_rev(ctx, ctx->report.reachable_objects.blobs.vec_largest_by_size_bytes);
+
+	if (ctx->opts.show_progress)
+		stop_progress(&ctx->progress);
+}
+
 /*
  * The REFS phase:
  *
@@ -1478,7 +1558,10 @@ static void survey_phase_objects(struct survey_context *ctx)
 
 	release_revisions(&revs);
 	trace2_region_leave("survey", "phase/objects", ctx->repo);
-}
+
+	trace2_region_enter("survey", "phase/namerev", the_repository);
+	do_lookup_name_rev(ctx);
+	trace2_region_leave("survey", "phase/namerev", the_repository);}
 
 int cmd_survey(int argc, const char **argv, const char *prefix, struct repository *repo)
 {

From fcd14166f18266b01b06f18f9015bf82a5248fd1 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Tue, 4 Jun 2024 10:37:23 -0400
Subject: [PATCH 013/207] survey: add --no-name-rev option

Computing `git name-rev` on each commit, tree, and blob in each
of the various large_item_vec can be very expensive if there are
too many refs, especially if the user doesn't need the result.
Let's make it optional.

The `--no-name-rev` option skips these lookups; with 5 large_item_vec's,
each defaulting to 10 items, that avoids resolving up to 50 commit OIDs.
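
For instance, a user who does not need the pretty names can disable the
lookups either per invocation or via config (both knobs are added by
this patch):

    git survey --no-name-rev
    git config survey.namerev false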

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 Documentation/config/survey.txt |  4 ++++
 Documentation/git-survey.txt    |  4 ++++
 builtin/survey.c                | 38 +++++++++++++++++++++++----------
 3 files changed, 35 insertions(+), 11 deletions(-)

diff --git a/Documentation/config/survey.txt b/Documentation/config/survey.txt
index fd2d7e153b38ce..f3ae768933fe1b 100644
--- a/Documentation/config/survey.txt
+++ b/Documentation/config/survey.txt
@@ -4,6 +4,10 @@ survey.*::
 	background with these options.
 +
 --
+	survey.namerev::
+		Boolean to show/hide `git name-rev` information for each
+		reported commit and the containing commit of each
+		reported tree and blob.
 	verbose::
 		This boolean value implies the `--[no-]verbose` option.
 	progress::
diff --git a/Documentation/git-survey.txt b/Documentation/git-survey.txt
index d174ffd4164840..dc670508e09e2b 100644
--- a/Documentation/git-survey.txt
+++ b/Documentation/git-survey.txt
@@ -32,6 +32,10 @@ OPTIONS
 --progress::
 	Show progress.  This is automatically enabled when interactive.
 
+--[no-]name-rev::
+	Print `git name-rev` output for each commit, tree, and blob.
+	Defaults to true.
+
 Ref Selection
 ~~~~~~~~~~~~~
 
diff --git a/builtin/survey.c b/builtin/survey.c
index 15ee7c2c17d2af..0d5a02358fbca9 100644
--- a/builtin/survey.c
+++ b/builtin/survey.c
@@ -42,6 +42,7 @@ static struct survey_refs_wanted default_ref_options = {
 struct survey_opts {
 	int verbose;
 	int show_progress;
+	int show_name_rev;
 
 	int show_largest_commits_by_nr_parents;
 	int show_largest_commits_by_size_bytes;
@@ -767,7 +768,7 @@ static void survey_report_commit_parents(struct survey_context *ctx)
 	clear_table(&table);
 }
 
-static void survey_report_largest_vec(struct large_item_vec *vec)
+static void survey_report_largest_vec(struct survey_context *ctx, struct large_item_vec *vec)
 {
 	struct survey_table table = SURVEY_TABLE_INIT;
 	struct strbuf size = STRBUF_INIT;
@@ -776,7 +777,7 @@ static void survey_report_largest_vec(struct large_item_vec *vec)
 		return;
 
 	table.table_name = vec->dimension_label;
-	strvec_pushl(&table.header, "Size", "OID", "Name", "Commit", "Name-Rev", NULL);
+	strvec_pushl(&table.header, "Size", "OID", "Name", "Commit", ctx->opts.show_name_rev ? "Name-Rev" : NULL, NULL);
 
 	for (size_t k = 0; k < vec->nr_items; k++) {
 		struct large_item *pk = &vec->items[k];
@@ -787,7 +788,7 @@ static void survey_report_largest_vec(struct large_item_vec *vec)
 			insert_table_rowv(&table, size.buf, oid_to_hex(&pk->oid), pk->name.buf,
 					  is_null_oid(&pk->containing_commit_oid) ?
 					  "" : oid_to_hex(&pk->containing_commit_oid),
-					  pk->name_rev.len ? pk->name_rev.buf : "",
+					  !ctx->opts.show_name_rev ? NULL : pk->name_rev.len ? pk->name_rev.buf : "",
 					  NULL);
 		}
 	}
@@ -977,11 +978,11 @@ static void survey_report_plaintext(struct survey_context *ctx)
 	survey_report_plaintext_sorted_size(
 		&ctx->report.top_paths_by_inflate[REPORT_TYPE_BLOB]);
 
-	survey_report_largest_vec(ctx->report.reachable_objects.commits.vec_largest_by_nr_parents);
-	survey_report_largest_vec(ctx->report.reachable_objects.commits.vec_largest_by_size_bytes);
-	survey_report_largest_vec(ctx->report.reachable_objects.trees.vec_largest_by_nr_entries);
-	survey_report_largest_vec(ctx->report.reachable_objects.trees.vec_largest_by_size_bytes);
-	survey_report_largest_vec(ctx->report.reachable_objects.blobs.vec_largest_by_size_bytes);
+	survey_report_largest_vec(ctx, ctx->report.reachable_objects.commits.vec_largest_by_nr_parents);
+	survey_report_largest_vec(ctx, ctx->report.reachable_objects.commits.vec_largest_by_size_bytes);
+	survey_report_largest_vec(ctx, ctx->report.reachable_objects.trees.vec_largest_by_nr_entries);
+	survey_report_largest_vec(ctx, ctx->report.reachable_objects.trees.vec_largest_by_size_bytes);
+	survey_report_largest_vec(ctx, ctx->report.reachable_objects.blobs.vec_largest_by_size_bytes);
 }
 
 /*
@@ -1053,6 +1054,10 @@ static int survey_load_config_cb(const char *var, const char *value,
 		ctx->opts.show_progress = git_config_bool(var, value);
 		return 0;
 	}
+	if (!strcmp(var, "survey.namerev")) {
+		ctx->opts.show_name_rev = git_config_bool(var, value);
+		return 0;
+	}
 	if (!strcmp(var, "survey.showcommitparents")) {
 		ctx->opts.show_largest_commits_by_nr_parents = git_config_ulong(var, value, cctx->kvi);
 		return 0;
@@ -1188,6 +1193,13 @@ static void large_item_vec_lookup_name_rev(struct survey_context *ctx,
 
 static void do_lookup_name_rev(struct survey_context *ctx)
 {
+	/*
+	 * `git name-rev` can be very expensive when there are lots of
+	 * refs, so make it optional.
+	 */
+	if (!ctx->opts.show_name_rev)
+		return;
+
 	if (ctx->opts.show_progress) {
 		ctx->progress_total = 0;
 		ctx->progress = start_progress(_("Resolving name-revs..."), 0);
@@ -1559,9 +1571,12 @@ static void survey_phase_objects(struct survey_context *ctx)
 	release_revisions(&revs);
 	trace2_region_leave("survey", "phase/objects", ctx->repo);
 
-	trace2_region_enter("survey", "phase/namerev", the_repository);
-	do_lookup_name_rev(ctx);
-	trace2_region_leave("survey", "phase/namerev", the_repository);}
+	if (ctx->opts.show_name_rev) {
+		trace2_region_enter("survey", "phase/namerev", the_repository);
+		do_lookup_name_rev(ctx);
+		trace2_region_leave("survey", "phase/namerev", the_repository);
+	}
+}
 
 int cmd_survey(int argc, const char **argv, const char *prefix, struct repository *repo)
 {
@@ -1585,6 +1600,7 @@ int cmd_survey(int argc, const char **argv, const char *prefix, struct repositor
 	static struct option survey_options[] = {
 		OPT__VERBOSE(&ctx.opts.verbose, N_("verbose output")),
 		OPT_BOOL(0, "progress", &ctx.opts.show_progress, N_("show progress")),
+		OPT_BOOL(0, "name-rev", &ctx.opts.show_name_rev, N_("run name-rev on each reported commit")),
 		OPT_INTEGER('n', "top", &ctx.opts.top_nr,
 			    N_("number of entries to include in detail tables")),
 

From f9c6badf2c6c36114af4aeda22ca4ef4204f60ff Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Mon, 17 Jun 2024 15:20:05 -0400
Subject: [PATCH 014/207] survey: started TODO list at bottom of source file

---
 builtin/survey.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)

diff --git a/builtin/survey.c b/builtin/survey.c
index 0d5a02358fbca9..ed58f10c4cbea5 100644
--- a/builtin/survey.c
+++ b/builtin/survey.c
@@ -1685,3 +1685,49 @@ int cmd_survey(int argc, const char **argv, const char *prefix, struct repositor
 	clear_survey_context(&ctx);
 	return 0;
 }
+
+/*
+ * NEEDSWORK: The following is a bit of a laundry list of things
+ * that I'd like to add.
+ *
+ * [] Dump stats on all of the packfiles. The number and size of each.
+ * Whether each is in the .git directory or in an alternate.  The state
+ * of the IDX or MIDX files and etc.  Delta chain stats.  All of this
+ * data is relative to the "lived-in" state of the repository.  Stuff
+ * that may change after a GC or repack.
+ *
+ * [] Dump stats on each remote.  When we fetch from a remote the size
+ * of the response is related to the set of haves on the server.  You
+ * can see this in `GIT_TRACE_CURL=1 git fetch`. We get a `ls-refs`
+ * payload that lists all of the branches and tags on the server, so
+ * at a minimum the RefName and SHA for each. But for annotated tags
+ * we also get the peeled SHA.  The size of this overhead on every
+ * fetch is proportional to the size of the `git ls-remote` response
+ * (roughly, although the latter repeats the RefName of the peeled
+ * tag).  If, for example, you have 500K refs on a remote, you're
+ * going to have a long "haves" message, so every fetch will be slow
+ * just because of that overhead (not counting new objects to be
+ * downloaded).
+ *
+ * Note that the local set of tags in "refs/tags/" is a union over all
+ * remotes.  However, since most people only have one remote, we can
+ * probably estimate the overhead value directly from the size of the
+ * set of "refs/tags/" that we visited while building the `ref_info`
+ * and `ref_array` and not need to ask the remote.
+ *
+ * [] Dump info on the complexity of the DAG.  Criss-cross merges.
+ * The number of edges that must be touched to compute merge bases.
+ * Edge length. The number of parallel lanes in the history that must
+ * be navigated to get to the merge base.  What affects the cost of
+ * the Ahead/Behind computation?  How often do criss-crosses occur and
+ * do they cause various operations to slow down?
+ *
+ * [] If there are primary branches (like "main" or "master") are they
+ * always on the left side of merges?  Does the graph have a clean
+ * left edge?  Or are there normal and "backwards" merges?  Do these
+ * cause problems at scale?
+ *
+ * [] If we have a hierarchy of FI/RI branches like "L1", "L2, ...,
+ * can we learn anything about the shape of the repo around these FI
+ * and RI integrations?
+ */

From cce68246edf7a7884a431e5efe6b125f2cdc1cb7 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Fri, 28 Jun 2024 15:22:46 -0400
Subject: [PATCH 015/207] survey: expanded TODO list at the bottom of the
 source file

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 builtin/survey.c | 148 +++++++++++++++++++++++++++++++++++++----------
 1 file changed, 116 insertions(+), 32 deletions(-)

diff --git a/builtin/survey.c b/builtin/survey.c
index ed58f10c4cbea5..96362e9c14677a 100644
--- a/builtin/survey.c
+++ b/builtin/survey.c
@@ -1687,47 +1687,131 @@ int cmd_survey(int argc, const char **argv, const char *prefix, struct repositor
 }
 
 /*
- * NEEDSWORK: The following is a bit of a laundry list of things
- * that I'd like to add.
+ * NEEDSWORK: So far, I only have iteration on the requested set of
+ * refs and treewalk/reachable objects on that set of refs.  The
+ * following is a bit of a laundry list of things that I'd like to
+ * add.
  *
  * [] Dump stats on all of the packfiles. The number and size of each.
- * Whether each is in the .git directory or in an alternate.  The state
- * of the IDX or MIDX files and etc.  Delta chain stats.  All of this
- * data is relative to the "lived-in" state of the repository.  Stuff
- * that may change after a GC or repack.
+ *    Whether each is in the .git directory or in an alternate.  The
+ *    state of the IDX or MIDX files and etc.  Delta chain stats.  All
+ *    of this data is relative to the "lived-in" state of the
+ *    repository.  Stuff that may change after a GC or repack.
+ *
+ * [] Clone and Index stats: partial, shallow, sparse-checkout,
+ *    sparse-index, etc.  Hydration stats.
  *
  * [] Dump stats on each remote.  When we fetch from a remote the size
- * of the response is related to the set of haves on the server.  You
- * can see this in `GIT_TRACE_CURL=1 git fetch`. We get a `ls-refs`
- * payload that lists all of the branches and tags on the server, so
- * at a minimum the RefName and SHA for each. But for annotated tags
- * we also get the peeled SHA.  The size of this overhead on every
- * fetch is proportional to the size of the `git ls-remote` response
- * (roughly, although the latter repeats the RefName of the peeled
- * tag).  If, for example, you have 500K refs on a remote, you're
- * going to have a long "haves" message, so every fetch will be slow
- * just because of that overhead (not counting new objects to be
- * downloaded).
+ *    of the response is related to the set of haves on the server.
+ *    You can see this in `GIT_TRACE_CURL=1 git fetch`. We get a
+ *    `ls-refs` payload that lists all of the branches and tags on the
+ *    server, so at a minimum the RefName and SHA for each. But for
+ *    annotated tags we also get the peeled SHA.  The size of this
+ *    overhead on every fetch is proportional to the size of the `git
+ *    ls-remote` response (roughly, although the latter repeats the
+ *    RefName of the peeled tag).  If, for example, you have 500K refs
+ *    on a remote, you're going to have a long "haves" message, so
+ *    every fetch will be slow just because of that overhead (not
+ *    counting new objects to be downloaded).
  *
- * Note that the local set of tags in "refs/tags/" is a union over all
- * remotes.  However, since most people only have one remote, we can
- * probably estimate the overhead value directly from the size of the
- * set of "refs/tags/" that we visited while building the `ref_info`
- * and `ref_array` and not need to ask the remote.
+ *    Note that the local set of tags in "refs/tags/" is a union over
+ *    all remotes.  However, since most people only have one remote,
+ *    we can probably estimate the overhead value directly from the
+ *    size of the set of "refs/tags/" that we visited while building
+ *    the `ref_info` and `ref_array` and not need to ask the remote.
  *
  * [] Dump info on the complexity of the DAG.  Criss-cross merges.
- * The number of edges that must be touched to compute merge bases.
- * Edge length. The number of parallel lanes in the history that must
- * be navigated to get to the merge base.  What affects the cost of
- * the Ahead/Behind computation?  How often do criss-crosses occur and
- * do they cause various operations to slow down?
+ *    The number of edges that must be touched to compute merge bases.
+ *    Edge length. The number of parallel lanes in the history that
+ *    must be navigated to get to the merge base.  What affects the
+ *    cost of the Ahead/Behind computation?  How often do
+ *    criss-crosses occur and do they cause various operations to slow
+ *    down?
  *
  * [] If there are primary branches (like "main" or "master") are they
- * always on the left side of merges?  Does the graph have a clean
- * left edge?  Or are there normal and "backwards" merges?  Do these
- * cause problems at scale?
+ *    always on the left side of merges?  Does the graph have a clean
+ *    left edge?  Or are there normal and "backwards" merges?  Do
+ *    these cause problems at scale?
  *
  * [] If we have a hierarchy of FI/RI branches like "L1", "L2, ...,
- * can we learn anything about the shape of the repo around these FI
- * and RI integrations?
+ *    can we learn anything about the shape of the repo around these
+ *    FI and RI integrations?
+ *
+ * [] Do we need a no-PII flag to omit pathnames or branch/tag names
+ *    in the various histograms?  (This would turn off --name-rev
+ *    too.)
+ *
+ * [] I have so far avoided adding opinions about individual fields
+ *    (such as the way `git-sizer` prints a row of stars or bangs in
+ *    the last column).
+ *
+ *    I'm wondering if that is a job of this executable or if it
+ *    should be done in a post-processing step using the JSON output.
+ *
+ *    My problem with the `git-sizer` approach is that it doesn't give
+ *    the (casual) user any information on why it has stars or bangs.
+ *    And there isn't a good way to print detailed information in the
+ *    ASCII-art tables that would be easy to understand.
+ *
+ *    [] For example, a large number of refs does not define a cliff.
+ *       Performance will drop off (linearly, quadratically, ... ??).
+ *       The tool should refer them to article(s) talking about the
+ *       different problems that it could cause.  So should `git
+ *       survey` just print the number and (implicitly) refer them to
+ *       the man page (chapter/verse) or to a tool that will interpret
+ *       the number and explain it?
+ *
+ *    [] Alternatively, should `git survey` do that analysis too and
+ *       just print footnotes for each large number?
+ *
+ *    [] The computation of the raw survey JSON data can take HOURS on
+ *       a very large repo (like Windows), so I'm wondering if we
+ *       want to keep the opinion portion separate.
+ *
+ * [] In addition to opinions based on the static data, I would like
+ *    to dump the JSON results (or the Trace2 telemetry) into a DB and
+ *    aggregate it with other users.
+ *
+ *    Granted, they should all see the same DAG and the same set of
+ *    reachable objects, but we could average across all datasets
+ *    generated on a particular date and detect outlier users.
+ *
+ *    [] Maybe someone cloned from the `_full` endpoint rather than
+ *       the limited refs endpoint.
+ *
+ *    [] Maybe that user is having problems with repacking / GC /
+ *       maintenance without knowing it.
+ *
+ * [] I'd also like to use the DB to compare survey datasets over
+ *    time.  How fast is their repository growing and in what ways?
+ *
+ *    [] I'd rather have the delta analysis NOT be inside `git
+ *       survey`, so it makes sense to consider having all of it in a
+ *       post-process step.
+ *
+ * [] Another reason to put the opinion analysis in a post-process
+ *    is that it would be easier to generate plots on the data tables.
+ *    Granted, we can get plots from telemetry, but a stand-alone user
+ *    could run the JSON thru python or jq or something and generate
+ *    something nicer than ASCII-art and it could handle cross-referencing
+ *    and hyperlinking to helpful information on each issue.
+ *
+ * [] I think there are several classes of data that we can report on:
+ *
+ *    [] The "inherit repo properties", such as the shape and size of
+ *       the DAG -- these should be universal in each enlistment.
+ *
+ *    [] The "ODB lived in properties", such as the efficiency
+ *       of the repack and things like partial and shallow clone.
+ *       These will vary, but indicate health of the ODB.
+ *
+ *    [] The "index related properties", such as sparse-checkout,
+ *       sparse-index, cache-tree, untracked-cache, fsmonitor, and
+ *       etc.  These will also vary, but are more like knobs for
+ *       the user to adjust.
+ *
+ *    [] I want to compare these with Matt's "dimensions of scale"
+ *       notes and see if there are other pieces of data that we
+ *       could compute/consider.
+ *
  */

From 7a088c023c4e5bf4460ff23ec86501ef9577e40c Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Mon, 1 Jul 2024 12:07:01 -0400
Subject: [PATCH 016/207] survey: expanded TODO with more notes

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 builtin/survey.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/builtin/survey.c b/builtin/survey.c
index 96362e9c14677a..c063b7bc44f0c3 100644
--- a/builtin/survey.c
+++ b/builtin/survey.c
@@ -1720,6 +1720,16 @@ int cmd_survey(int argc, const char **argv, const char *prefix, struct repositor
  *    size of the set of "refs/tags/" that we visited while building
  *    the `ref_info` and `ref_array` and not need to ask the remote.
  *
+ *    [] Should the "string length of refnames / remote refs", for
+ *       example, be sub-divided by remote so we can project the
+ *       cost of the haves/wants overhead of a fetch?
+ *
+ * [] Can we examine the merge commits and classify them as clean or
+ *    dirty?  (ie. ones with merge conflicts that needed to be
+ *    addressed during the merge itself.)
+ *
+ *    [] Do dirty merges affect performance of later operations?
+ *
  * [] Dump info on the complexity of the DAG.  Criss-cross merges.
  *    The number of edges that must be touched to compute merge bases.
  *    Edge length. The number of parallel lanes in the history that

From 6109cfb6bcbeeb9dcb79c4fc1be97839af77c2b6 Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Wed, 5 Apr 2017 10:58:09 -0600
Subject: [PATCH 017/207] reset --stdin: trim carriage return from the paths

While using the reset --stdin feature on Windows, an added path may have
a \r at the end that wasn't getting removed, so it didn't match the path
in the index and wasn't reset.

Signed-off-by: Kevin Willford <kewillf@microsoft.com>
---
 t/t7108-reset-stdin.sh | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/t/t7108-reset-stdin.sh b/t/t7108-reset-stdin.sh
index b7cbcbf869296c..db5483b8f10052 100755
--- a/t/t7108-reset-stdin.sh
+++ b/t/t7108-reset-stdin.sh
@@ -29,4 +29,13 @@ test_expect_success '--stdin requires --mixed' '
 	git reset --mixed --stdin <list
 '
 
+
+test_expect_success '--stdin trims carriage returns' '
+	test_commit endline &&
+	git rm endline.t &&
+	printf "endline.t\r\n" >list &&
+	git reset --stdin <list &&
+	test endline.t = "$(git ls-files endline.t)"
+'
+
 test_done

From 031198ec35162ee5fbe3087c2c22e17579065e95 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Tue, 23 Apr 2024 12:21:01 +0200
Subject: [PATCH 018/207] Identify microsoft/git via a distinct version suffix

It has been a long-standing practice in Git for Windows to append
`.windows.<n>`, and in microsoft/git to append `.vfs.0.0`. Let's keep
doing that.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 GIT-VERSION-GEN | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index 194ec0f9ad32fe..e42c21abd18769 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -2,6 +2,9 @@
 
 DEF_VER=v2.48.0-rc1
 
+# Identify microsoft/git via a distinct version suffix
+DEF_VER=$DEF_VER.vfs.0.0
+
 LF='
 '
 

From 16d3fb1a7366d22b63ff5b54f0af3890c381c1c2 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johasc@microsoft.com>
Date: Tue, 4 Apr 2017 12:04:11 +0200
Subject: [PATCH 019/207] gvfs: ensure that the version is based on a GVFS tag

Since we really want to be based on a `.vfs.*` tag, let's make sure that
there was a new-enough one, i.e. one that agrees with the first three
version numbers of the recorded default version.

This prevents e.g. v2.22.0.vfs.0.<some-huge-number>.<commit> from being
used when the current release train was not yet tagged.

It is important to get the first three numbers of the version right
because e.g. Scalar makes decisions depending on those (such as assuming
that the `git maintenance` built-in is not available, even though it
actually _is_ available).
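
For illustration (the version numbers below are made up), the new check
compares everything before the `.vfs.` suffix, so a describe result from
an older release train is rejected:

    DEF_VER=v2.48.0.vfs.0.0
    VN=v2.47.0.vfs.0.0-12-g1234567
    test "${VN%%.vfs.*}" != "${DEF_VER%%.vfs.*}"   # true: prefixes differ, so error out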

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 GIT-VERSION-GEN | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index e42c21abd18769..e961f845618414 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -42,10 +42,15 @@ then
 			test -d "${GIT_DIR:-.git}" ||
 			test -f "$SOURCE_DIR"/.git;
 		} &&
-		VN=$(git -C "$SOURCE_DIR" describe --match "v[0-9]*" HEAD 2>/dev/null) &&
+		VN=$(git -C "$SOURCE_DIR" describe --match "v[0-9]*vfs*" HEAD 2>/dev/null) &&
 		case "$VN" in
 		*$LF*) (exit 1) ;;
 		v[0-9]*)
+			if test "${VN%%.vfs.*}" != "${DEF_VER%%.vfs.*}"
+			then
+				echo "Found version $VN, which is not based on $DEF_VER" >&2
+				exit 1
+			fi
 			git -C "$SOURCE_DIR" update-index -q --refresh
 			test -z "$(git -C "$SOURCE_DIR" diff-index --name-only HEAD --)" ||
 			VN="$VN-dirty" ;;

From afa95081d4c71d2e882a4cdb3df8962e28795992 Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Tue, 24 Jan 2017 17:30:59 +0100
Subject: [PATCH 020/207] gvfs: add a GVFS-specific header file

This header file will accumulate GVFS-specific definitions.

Signed-off-by: Kevin Willford <kewillf@microsoft.com>
---
 gvfs.h | 9 +++++++++
 1 file changed, 9 insertions(+)
 create mode 100644 gvfs.h

diff --git a/gvfs.h b/gvfs.h
new file mode 100644
index 00000000000000..b6dbe85eae4071
--- /dev/null
+++ b/gvfs.h
@@ -0,0 +1,9 @@
+#ifndef GVFS_H
+#define GVFS_H
+
+/*
+ * This file is for the specific settings and methods
+ * used for GVFS functionality
+ */
+
+#endif /* GVFS_H */

From 79b92573cc58ccd21a4fdac0e21f706e71c093c2 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johasc@microsoft.com>
Date: Tue, 18 May 2021 22:48:24 +0200
Subject: [PATCH 021/207] git_config_set_multivar_in_file_gently(): add a lock
 timeout

In particular, when multiple processes want to write to the config
simultaneously, it comes in handy to not fail immediately when another
process has locked the config, but to gently try again.

This will help with Scalar's functional test suite which wants to
register multiple repositories for maintenance semi-simultaneously.

As not all code paths calling this function read the config (e.g. `git
config`), we have to read the config setting via
`git_config_get_ulong()`.
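
As a usage sketch, a setup that expects concurrent config writers (such
as the Scalar functional tests mentioned above) can opt in with a
non-zero timeout, while leaving the setting at its default of 0 keeps
the current fail-fast behavior:

    git config --global core.configWriteLockTimeoutMS 150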

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 Documentation/config/core.txt | 9 +++++++++
 config.c                      | 8 +++++++-
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index b633053f799a9c..00056a0411e8ae 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -777,3 +777,12 @@ core.WSLCompat::
 	The default value is false. When set to true, Git will set the mode
 	bits of the file in the way of wsl, so that the executable flag of
 	files can be set or read correctly.
+
+core.configWriteLockTimeoutMS::
+	When processes try to write to the config concurrently, it is likely
+	that one process "wins" and the other process(es) fail to lock the
+	config file. By configuring a timeout larger than zero, Git can be
+	told to try to lock the config again a couple times within the
+	specified timeout. If the timeout is configured to zero (which is the
+	default), Git will fail immediately when the config is already
+	locked.
diff --git a/config.c b/config.c
index 95b7de0522d180..597b964ca8bcc2 100644
--- a/config.c
+++ b/config.c
@@ -3182,6 +3182,7 @@ int repo_config_set_multivar_in_file_gently(struct repository *r,
 					    const char *comment,
 					    unsigned flags)
 {
+	static unsigned long timeout_ms = ULONG_MAX;
 	int fd = -1, in_fd = -1;
 	int ret;
 	struct lock_file lock = LOCK_INIT;
@@ -3202,11 +3203,16 @@ int repo_config_set_multivar_in_file_gently(struct repository *r,
 	if (!config_filename)
 		config_filename = filename_buf = repo_git_path(r, "config");
 
+	if ((long)timeout_ms < 0 &&
+	    git_config_get_ulong("core.configWriteLockTimeoutMS", &timeout_ms))
+		timeout_ms = 0;
+
 	/*
 	 * The lock serves a purpose in addition to locking: the new
 	 * contents of .git/config will be written into it.
 	 */
-	fd = hold_lock_file_for_update(&lock, config_filename, 0);
+	fd = hold_lock_file_for_update_timeout(&lock, config_filename, 0,
+					       timeout_ms);
 	if (fd < 0) {
 		error_errno(_("could not lock config file %s"), config_filename);
 		ret = CONFIG_NO_LOCK;

From 3a6101a8be8ca0f749fcd0016613239364954e2e Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johasc@microsoft.com>
Date: Tue, 18 May 2021 23:22:56 +0200
Subject: [PATCH 022/207] scalar: set the config write-lock timeout to 150ms

By default, Git fails immediately when locking a config file for writing
fails due to an existing lock. With this change, Scalar-registered
repositories will fall back to trying a couple times within a 150ms
timeout.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 scalar.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/scalar.c b/scalar.c
index 22932ad9d6da4e..48ded5cc4902ca 100644
--- a/scalar.c
+++ b/scalar.c
@@ -171,6 +171,7 @@ static int set_recommended_config(int reconfigure)
 		{ "core.safeCRLF", "false" },
 		{ "fetch.showForcedUpdates", "false" },
 		{ "pack.usePathWalk", "true" },
+		{ "core.configWriteLockTimeoutMS", "150" },
 		{ NULL, NULL },
 	};
 	int i;
@@ -212,6 +213,11 @@ static int set_recommended_config(int reconfigure)
 
 static int toggle_maintenance(int enable)
 {
+	unsigned long ul;
+
+	if (git_config_get_ulong("core.configWriteLockTimeoutMS", &ul))
+		git_config_push_parameter("core.configWriteLockTimeoutMS=150");
+
 	return run_git("maintenance",
 		       enable ? "start" : "unregister",
 		       enable ? NULL : "--force",
@@ -221,10 +227,14 @@ static int toggle_maintenance(int enable)
 static int add_or_remove_enlistment(int add)
 {
 	int res;
+	unsigned long ul;
 
 	if (!the_repository->worktree)
 		die(_("Scalar enlistments require a worktree"));
 
+	if (git_config_get_ulong("core.configWriteLockTimeoutMS", &ul))
+		git_config_push_parameter("core.configWriteLockTimeoutMS=150");
+
 	res = run_git("config", "--global", "--get", "--fixed-value",
 		      "scalar.repo", the_repository->worktree, NULL);
 

From eaf0859fbce8d518aab7057f0cc173824c3e84d8 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Wed, 16 Jun 2021 10:01:37 -0400
Subject: [PATCH 023/207] scalar: add docs from microsoft/scalar

These docs have been altered to fit the version implemented in C within
microsoft/git. This means in particular that the advanced.md file no
longer applied at all. Some other areas were removed or significantly
edited.

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 contrib/scalar/docs/faq.md             | 51 ++++++++++++++
 contrib/scalar/docs/getting-started.md | 93 ++++++++++++++++++++++++++
 contrib/scalar/docs/index.md           | 50 ++++++++++++++
 contrib/scalar/docs/philosophy.md      | 66 ++++++++++++++++++
 contrib/scalar/docs/troubleshooting.md | 20 ++++++
 5 files changed, 280 insertions(+)
 create mode 100644 contrib/scalar/docs/faq.md
 create mode 100644 contrib/scalar/docs/getting-started.md
 create mode 100644 contrib/scalar/docs/index.md
 create mode 100644 contrib/scalar/docs/philosophy.md
 create mode 100644 contrib/scalar/docs/troubleshooting.md

diff --git a/contrib/scalar/docs/faq.md b/contrib/scalar/docs/faq.md
new file mode 100644
index 00000000000000..a14f78a996d5d5
--- /dev/null
+++ b/contrib/scalar/docs/faq.md
@@ -0,0 +1,51 @@
+Frequently Asked Questions
+==========================
+
+Using Scalar
+------------
+
+### I don't want a sparse clone, I want every file after I clone!
+
+Run `scalar clone --full-clone <url>` to initialize your repo to include
+every file. You can switch to a sparse-checkout later by running
+`git sparse-checkout init --cone`.
+
+### I already cloned without `--full-clone`. How do I get everything?
+
+Run `git sparse-checkout disable`.
+
+Scalar Design Decisions
+-----------------------
+
+There may be many design decisions within Scalar that are confusing at first
+glance. Some of them may cause friction when you use Scalar with your existing
+repos and existing habits.
+
+> Scalar has the most benefit when users design repositories
+> with efficient patterns.
+
+For example: Scalar uses the sparse-checkout feature to limit the size of the
+working directory within a large monorepo. It is designed to work efficiently
+with monorepos that are highly componentized, allowing most developers to
+need many fewer files in their daily work.
+
+### Why does `scalar clone` create a `<repo>/src` folder?
+
+Scalar uses a file system watcher to keep track of changes under this `src` folder.
+Any activity in this folder is assumed to be important to Git operations. By
+creating the `src` folder, we are making it easy for your build system to
+create output folders outside the `src` directory. We commonly see systems
+create folders for build outputs and package downloads. Scalar itself creates
+these folders during its builds.
+
+Your build system may create build artifacts such as `.obj` or `.lib` files
+next to your source code. These are commonly "hidden" from Git using
+`.gitignore` files. Having such artifacts in your source tree creates
+additional work for Git because it needs to look at these files and match them
+against the `.gitignore` patterns.
+
+By following the `src` pattern Scalar tries to establish and placing your build
+intermediates and outputs parallel with the `src` folder and not inside it,
+you can help optimize Git command performance for developers in the repository
+by limiting the number of files Git needs to consider for many common
+operations.
diff --git a/contrib/scalar/docs/getting-started.md b/contrib/scalar/docs/getting-started.md
new file mode 100644
index 00000000000000..ef7ea07b0f948c
--- /dev/null
+++ b/contrib/scalar/docs/getting-started.md
@@ -0,0 +1,93 @@
+Getting Started
+===============
+
+Registering existing Git repos
+------------------------------
+
+To add a repository to the list of registered repos, run `scalar register [<path>]`.
+If `<path>` is not provided, then the "current repository" is discovered from
+the working directory by scanning the parent paths for a path containing a `.git`
+folder, possibly inside a `src` folder.
+
+To see which repositories are currently tracked by the service, run
+`scalar list`.
+
+Run `scalar unregister [<path>]` to remove the repo from this list.
+
+Creating a new Scalar clone
+---------------------------------------------------
+
+The `clone` verb creates a local enlistment of a remote repository using the
+partial clone feature available e.g. on GitHub.
+
+
+```
+scalar clone [options] <url> [<dir>]
+```
+
+Create a local copy of the repository at `<url>`. If specified, create the `<dir>`
+directory and place the repository there. Otherwise, the last section of the `<url>`
+will be used for `<dir>`.
+
+At the end, the repo is located at `<dir>/src`. By default, the sparse-checkout
+feature is enabled and the only files present are those in the root of your
+Git repository. Use `git sparse-checkout set` to expand the set of directories
+you want to see, or `git sparse-checkout disable` to expand to all files. You
+can explore the subdirectories outside your sparse-checkout specification using
+`git ls-tree HEAD`.
+
+### Sparse Repo Mode
+
+By default, Scalar reduces your working directory to only the files at the
+root of the repository. You need to add the folders you care about to build up
+to your working set.
+
+* `scalar clone <url>`
+  * Please choose the **Clone with HTTPS** option in the `Clone Repository` dialog in Azure Repos, not **Clone with SSH**.
+* `cd <root>\src`
+* At this point, your `src` directory only contains files that appear in your root
+  tree. No folders are populated.
+* Set the directory list for your sparse-checkout using:
+	1. `git sparse-checkout set <dir1> <dir2> ...`
+	2. `git sparse-checkout set --stdin < dir-list.txt`
+* Run git commands as you normally would.
+* To fully populate your working directory, run `git sparse-checkout disable`.
+
+If instead you want to start with all files on-disk, you can clone with the
+`--full-clone` option. To enable sparse-checkout after the fact, run
+`git sparse-checkout init --cone`. This will initialize your sparse-checkout
+patterns to only match the files at root.
+
+If you are unfamiliar with what directories are available in the repository,
+then you can run `git ls-tree -d --name-only HEAD` to discover the directories
+at root, or `git ls-tree -d --name-only HEAD <path>` to discover the directories
+in `<path>`.
+
+### Options
+
+These options allow a user to customize their initial enlistment.
+
+* `--full-clone`: If specified, do not initialize the sparse-checkout feature.
+  All files will be present in your `src` directory. This uses a Git partial
+  clone: blobs are downloaded on demand.
+
+* `--branch=<ref>`: Specify the branch to checkout after clone.
+
+### Advanced Options
+
+The options below are not intended for use by a typical user. These are
+usually used by build machines to create a temporary enlistment that
+operates on a single commit.
+
+* `--single-branch`: Use this option to only download metadata for the branch
+  that will be checked out. This is helpful for build machines that target
+  a remote with many branches. Any `git fetch` commands after the clone will
+  still ask for all branches.
+
+Removing a Scalar Clone
+-----------------------
+
+Since the `scalar clone` command sets up a file-system watcher (when available),
+that watcher could prevent deleting the enlistment. Run `scalar delete <path>`
+from outside of your enlistment to unregister the enlistment from the filesystem
+watcher and delete the enlistment at `<path>`.
diff --git a/contrib/scalar/docs/index.md b/contrib/scalar/docs/index.md
new file mode 100644
index 00000000000000..f9f5ab06e09253
--- /dev/null
+++ b/contrib/scalar/docs/index.md
@@ -0,0 +1,50 @@
+Scalar: Enabling Git at Scale
+=============================
+
+Scalar is a tool that helps Git scale to some of the largest Git repositories.
+It achieves this by enabling some advanced Git features, such as:
+
+* *Partial clone:* reduces time to get a working repository by not
+  downloading all Git objects right away.
+
+* *Background prefetch:* downloads Git object data from all remotes every
+  hour, reducing the amount of time for foreground `git fetch` calls.
+
+* *Sparse-checkout:* limits the size of your working directory.
+
+* *File system monitor:* tracks the recently modified files and eliminates
+  the need for Git to scan the entire worktree.
+
+* *Commit-graph:* accelerates commit walks and reachability calculations,
+   speeding up commands like `git log`.
+
+* *Multi-pack-index:* enables fast object lookups across many pack-files.
+
+* *Incremental repack:* repacks the packed Git data into fewer pack-files
+  without disrupting concurrent commands by using the multi-pack-index.
+
+By running `scalar register` in any Git repo, Scalar will automatically enable
+these features for that repo (except partial clone) and start running suggested
+maintenance in the background using
+[the `git maintenance` feature](https://git-scm.com/docs/git-maintenance).
+
+Repos cloned with the `scalar clone` command use partial clone to significantly
+reduce the amount of data required to get started using a repository. By
+delaying all blob downloads until they are required, Scalar allows you to work
+with very large repositories quickly.
+
+Documentation
+-------------
+
+* [Getting Started](getting-started.md): Get started with Scalar.
+  Includes `scalar register`, `scalar unregister`, `scalar clone`, and
+  `scalar delete`.
+
+* [Troubleshooting](troubleshooting.md):
+  Collect diagnostic information or update custom settings. Includes
+  `scalar diagnose`.
+
+* [The Philosophy of Scalar](philosophy.md): Why does Scalar work the way
+  it does, and how do we make decisions about its future?
+
+* [Frequently Asked Questions](faq.md)
diff --git a/contrib/scalar/docs/philosophy.md b/contrib/scalar/docs/philosophy.md
new file mode 100644
index 00000000000000..51486a75e41f0d
--- /dev/null
+++ b/contrib/scalar/docs/philosophy.md
@@ -0,0 +1,66 @@
+The Philosophy of Scalar
+========================
+
+The team building Scalar has **opinions** about Git performance. Scalar
+takes out the guesswork by automatically configuring your Git repositories
+to take advantage of the latest and greatest features. It is difficult to
+say that these are the absolute best settings for every repository, but
+these settings do work for some of the largest repositories in the world.
+
+Scalar intends to do very little more than the standard Git client. We
+actively implement new features into Git instead of Scalar, then update
+Scalar only to configure those new settings. In particular, we ported
+features like background maintenance to Git to make Scalar simpler and
+make Git more powerful.
+
+Services such as GitHub support partial clone, a standard adopted by the Git
+project to download only part of the Git objects when cloning, and fetching
+further objects on demand. If your hosting service supports partial clone, then
+we absolutely recommend it as a way to greatly speed up your clone and fetch
+times and to reduce how much disk space your Git repository requires. Scalar
+will help with this!
+
+Most of the value of Scalar can be found in the core Git client. However, most
+of the advanced features that really optimize Git's performance are off by
+default for compatibility reasons. To really take advantage of Git's latest and
+greatest features, you need to study the [`git config`
+documentation](https://git-scm.com/docs/git-config) and regularly read [the Git
+release notes](https://github.com/git/git/tree/master/Documentation/RelNotes).
+Even if you do all that work and customize your Git settings on your machines,
+you likely will want to share those settings with other team members. Or, you
+can just use Scalar!
+
+Using `scalar register` on an existing Git repository will give you these
+benefits:
+
+* Additional compression of your `.git/index` file.
+* Hourly background `git fetch` operations, keeping you in-sync with your
+  remotes.
+* Advanced data structures, such as the `commit-graph` and `multi-pack-index`
+  are updated automatically in the background.
+* If using macOS or Windows, then Scalar configures Git's builtin File System
+  Monitor, providing faster commands such as `git status` or `git add`.
+
+Additionally, if you use `scalar clone` to create a new repository, then
+you will automatically get these benefits:
+
+* Use Git's partial clone feature to only download the files you need for
+  your current checkout.
+* Use Git's [sparse-checkout feature][sparse-checkout] to minimize the
+  number of files required in your working directory.
+  [Read more about sparse-checkout here.][sparse-checkout-blog]
+* Create the Git repository inside `<repo-name>/src` to make it easy to
+  place build artifacts outside of the Git repository, such as in
+  `<repo-name>/bin` or `<repo-name>/packages`.
+
+We also admit that these **opinions** can always be improved! If you have
+an idea of how to improve our setup, consider
+[creating an issue](https://github.com/microsoft/scalar/issues/new) or
+contributing a pull request! Some [existing](https://github.com/microsoft/scalar/issues/382)
+[issues](https://github.com/microsoft/scalar/issues/388) have already
+improved our configuration settings and roadmap!
+
+[gvfs-protocol]: https://github.com/microsoft/VFSForGit/blob/HEAD/Protocol.md
+[microsoft-git]: https://github.com/microsoft/git
+[sparse-checkout]: https://git-scm.com/docs/git-sparse-checkout
+[sparse-checkout-blog]: https://github.blog/2020-01-17-bring-your-monorepo-down-to-size-with-sparse-checkout/
diff --git a/contrib/scalar/docs/troubleshooting.md b/contrib/scalar/docs/troubleshooting.md
new file mode 100644
index 00000000000000..8ec56ad437ff09
--- /dev/null
+++ b/contrib/scalar/docs/troubleshooting.md
@@ -0,0 +1,20 @@
+Troubleshooting
+===============
+
+Diagnosing Issues
+-----------------
+
+The `scalar diagnose` command collects logs and config details for the current
+repository. The resulting zip file helps root-cause issues.
+
+When run inside your repository, the command creates a zip file containing several important
+files for that repository. This includes:
+
+* Configuration files from your `.git` folder, such as the `config` file,
+  `index`, `hooks`, and `refs`.
+
+* A summary of your Git object database, including the number of loose objects
+  and the names and sizes of pack-files.
+
+As the `diagnose` command completes, it provides the path of the resulting
+zip file. This zip can be attached to bug reports to make the analysis easier.

From d28ad031fcd7397b6153121d243206f65fdbc177 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Tue, 10 May 2022 13:43:05 +0200
Subject: [PATCH 024/207] scalar (Windows): use forward slashes as directory
 separators

Git traditionally uses forward slashes as directory separators, never backslashes.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 scalar.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/scalar.c b/scalar.c
index 48ded5cc4902ca..48feb74673c869 100644
--- a/scalar.c
+++ b/scalar.c
@@ -46,6 +46,9 @@ static void setup_enlistment_directory(int argc, const char **argv,
 		die(_("need a working directory"));
 
 	strbuf_trim_trailing_dir_sep(&path);
+#ifdef GIT_WINDOWS_NATIVE
+	convert_slashes(path.buf);
+#endif
 
 	/* check if currently in enlistment root with src/ workdir */
 	len = path.len;

From 1f2506f848327300ee9edc81b007f13bdb217fb3 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Thu, 17 Jun 2021 11:40:09 -0400
Subject: [PATCH 025/207] scalar: add retry logic to run_git()

Use a fixed 3 tries total to see how that increases our chances of
success for subcommands such as 'git fetch'.

We special-case the `diagnose` command here: When 672196a3073
(scalar-diagnose: use 'git diagnose --mode=all', 2022-08-12) updated
'scalar diagnose' to run 'git diagnose' as a subprocess, it was passed
through the run_git() caller. We need to avoid repeating the call when
the underlying 'git diagnose' command fails.

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 scalar.c | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/scalar.c b/scalar.c
index 48feb74673c869..5ac58aea875c4f 100644
--- a/scalar.c
+++ b/scalar.c
@@ -75,21 +75,34 @@ static void setup_enlistment_directory(int argc, const char **argv,
 	strbuf_release(&path);
 }
 
+static int git_retries = 3;
+
 LAST_ARG_MUST_BE_NULL
 static int run_git(const char *arg, ...)
 {
-	struct child_process cmd = CHILD_PROCESS_INIT;
 	va_list args;
 	const char *p;
+	struct strvec argv = STRVEC_INIT;
+	int res = 0, attempts;
 
 	va_start(args, arg);
-	strvec_push(&cmd.args, arg);
+	strvec_push(&argv, arg);
 	while ((p = va_arg(args, const char *)))
-		strvec_push(&cmd.args, p);
+		strvec_push(&argv, p);
 	va_end(args);
 
-	cmd.git_cmd = 1;
-	return run_command(&cmd);
+	for (attempts = 0, res = 1;
+	     res && attempts < git_retries;
+	     attempts++) {
+		struct child_process cmd = CHILD_PROCESS_INIT;
+
+		cmd.git_cmd = 1;
+		strvec_pushv(&cmd.args, argv.v);
+		res = run_command(&cmd);
+	}
+
+	strvec_clear(&argv);
+	return res;
 }
 
 struct scalar_config {
@@ -590,6 +603,8 @@ static int cmd_diagnose(int argc, const char **argv)
 	setup_enlistment_directory(argc, argv, usage, options, &diagnostics_root);
 	strbuf_addstr(&diagnostics_root, "/.scalarDiagnostics");
 
+	/* Here, a failure should not repeat itself. */
+	git_retries = 1;
 	res = run_git("diagnose", "--mode=all", "-s", "%Y%m%d_%H%M%S",
 		      "-o", diagnostics_root.buf, NULL);
 

From 50c426092122b28d886fc427241ba13f74737b6b Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Thu, 27 May 2021 07:26:11 +0200
Subject: [PATCH 026/207] scalar: support the `config` command for backwards
 compatibility

The .NET version supported running `scalar config` to reconfigure the
current enlistment, and now the C port does, too.
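
In other words, the following two invocations are now effectively
equivalent:

    scalar config
    scalar reconfigure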

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 scalar.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/scalar.c b/scalar.c
index 5ac58aea875c4f..6ecfcee41b549d 100644
--- a/scalar.c
+++ b/scalar.c
@@ -1016,6 +1016,9 @@ int cmd_main(int argc, const char **argv)
 		argv++;
 		argc--;
 
+		if (!strcmp(argv[0], "config"))
+			argv[0] = "reconfigure";
+
 		for (i = 0; builtins[i].name; i++)
 			if (!strcmp(builtins[i].name, argv[0]))
 				return !!builtins[i].fn(argc, argv);

From 4da05687c9dbb7eb27e3e8207618c7f04ada8c86 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Thu, 23 Sep 2021 09:59:06 -0400
Subject: [PATCH 027/207] sequencer: avoid progress when stderr is redirected

During a run of the Scalar functional tests, we hit a case where the
inexact rename detection of a 'git cherry-pick' command slowed to the
point of writing its delayed progress, failing the test because stderr
differed from the control case. Showing progress like this when stderr
is not a terminal is non-standard for Git, so inject an isatty(2) when
initializing the progress option in sequencer.c.

Unfortunately, there is no '--quiet' option in 'git cherry-pick'
currently wired up. This could be considered in the future, and the
isatty(2) could be moved to that position. This would also be needed for
commands like 'git rebase', so we leave that for another time.

Reported-by: Victoria Dye <vdye@github.com>
Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 sequencer.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sequencer.c b/sequencer.c
index 407ee4e90fea68..6d069a890ed18c 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -770,7 +770,7 @@ static int do_recursive_merge(struct repository *r,
 	o.branch2 = next ? next_label : "(empty tree)";
 	if (is_rebase_i(opts))
 		o.buffer_output = 2;
-	o.show_rename_progress = 1;
+	o.show_rename_progress = isatty(2);
 
 	head_tree = parse_tree_indirect(head);
 	if (!head_tree)

From be3b3acea5befba7270770142b2c1d8423f38624 Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Tue, 24 Jan 2017 17:34:12 +0100
Subject: [PATCH 028/207] gvfs: add the core.gvfs config setting

This does not do anything yet. The next patches will add various values
for that config setting that correspond to the various features
offered/required by GVFS.
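
As a rough sketch of where this is heading (the numeric bit value below
is hypothetical; the real feature bits arrive in later patches), the
setting accepts either a boolean, which turns on every GVFS bit, or an
integer bitmask selecting individual features:

    git config core.gvfs true   # boolean: enable all GVFS behavior
    git config core.gvfs 4      # hypothetical bitmask enabling a single feature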

Signed-off-by: Kevin Willford <kewillf@microsoft.com>

gvfs: refactor loading the core.gvfs config value

This code change makes sure that the config value for core_gvfs
is always loaded before checking it.

Signed-off-by: Kevin Willford <kewillf@microsoft.com>
---
 Documentation/config/core.txt |  3 +++
 Makefile                      |  1 +
 config.c                      |  6 +++++
 environment.c                 |  1 +
 environment.h                 |  1 +
 gvfs.c                        | 45 +++++++++++++++++++++++++++++++++++
 gvfs.h                        |  4 ++++
 meson.build                   |  1 +
 8 files changed, 62 insertions(+)
 create mode 100644 gvfs.c

diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index b633053f799a9c..5483850e38dd26 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -743,6 +743,9 @@ core.multiPackIndex::
 	single index. See linkgit:git-multi-pack-index[1] for more
 	information. Defaults to true.
 
+core.gvfs::
+	Enable the features needed for GVFS.
+
 core.sparseCheckout::
 	Enable "sparse checkout" feature. See linkgit:git-sparse-checkout[1]
 	for more information.
diff --git a/Makefile b/Makefile
index ab42aaf126936e..6331581179c305 100644
--- a/Makefile
+++ b/Makefile
@@ -1038,6 +1038,7 @@ LIB_OBJS += git-zlib.o
 LIB_OBJS += gpg-interface.o
 LIB_OBJS += graph.o
 LIB_OBJS += grep.o
+LIB_OBJS += gvfs.o
 LIB_OBJS += hash-lookup.o
 LIB_OBJS += hashmap.o
 LIB_OBJS += help.o
diff --git a/config.c b/config.c
index 95b7de0522d180..0861ad5425576b 100644
--- a/config.c
+++ b/config.c
@@ -13,6 +13,7 @@
 #include "abspath.h"
 #include "advice.h"
 #include "date.h"
+#include "gvfs.h"
 #include "branch.h"
 #include "config.h"
 #include "parse.h"
@@ -1621,6 +1622,11 @@ int git_default_core_config(const char *var, const char *value,
 		return 0;
 	}
 
+	if (!strcmp(var, "core.gvfs")) {
+		gvfs_load_config_value(value);
+		return 0;
+	}
+
 	if (!strcmp(var, "core.sparsecheckout")) {
 		core_apply_sparse_checkout = git_config_bool(var, value);
 		return 0;
diff --git a/environment.c b/environment.c
index 8389a272700eac..fdc1162679c5d1 100644
--- a/environment.c
+++ b/environment.c
@@ -68,6 +68,7 @@ int grafts_keep_true_parents;
 int core_apply_sparse_checkout;
 int core_sparse_checkout_cone;
 int sparse_expect_files_outside_of_patterns;
+int core_gvfs;
 int merge_log_config = -1;
 int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */
 unsigned long pack_size_limit_cfg;
diff --git a/environment.h b/environment.h
index 2f43340f0b553a..a51a8cf4fb9902 100644
--- a/environment.h
+++ b/environment.h
@@ -170,6 +170,7 @@ extern unsigned long pack_size_limit_cfg;
 extern int max_allowed_tree_depth;
 
 extern int core_preload_index;
+extern int core_gvfs;
 extern int precomposed_unicode;
 extern int protect_hfs;
 extern int protect_ntfs;
diff --git a/gvfs.c b/gvfs.c
new file mode 100644
index 00000000000000..3cdd8a055d6021
--- /dev/null
+++ b/gvfs.c
@@ -0,0 +1,45 @@
+#define USE_THE_REPOSITORY_VARIABLE
+#include "git-compat-util.h"
+#include "environment.h"
+#include "gvfs.h"
+#include "setup.h"
+#include "config.h"
+
+static int gvfs_config_loaded;
+static int core_gvfs_is_bool;
+
+static int early_core_gvfs_config(const char *var, const char *value,
+				  const struct config_context *ctx, void *cb UNUSED)
+{
+	if (!strcmp(var, "core.gvfs"))
+		core_gvfs = git_config_bool_or_int("core.gvfs", value, ctx->kvi,
+						   &core_gvfs_is_bool);
+	return 0;
+}
+
+void gvfs_load_config_value(const char *value)
+{
+	if (gvfs_config_loaded)
+		return;
+
+	if (value) {
+		struct key_value_info default_kvi = KVI_INIT;
+		core_gvfs = git_config_bool_or_int("core.gvfs", value, &default_kvi, &core_gvfs_is_bool);
+	} else if (startup_info->have_repository == 0)
+		read_early_config(the_repository, early_core_gvfs_config, NULL);
+	else
+		repo_config_get_bool_or_int(the_repository, "core.gvfs",
+					    &core_gvfs_is_bool, &core_gvfs);
+
+	/* Turn on all bits if a bool was set in the settings */
+	if (core_gvfs_is_bool && core_gvfs)
+		core_gvfs = -1;
+
+	gvfs_config_loaded = 1;
+}
+
+int gvfs_config_is_set(int mask)
+{
+	gvfs_load_config_value(NULL);
+	return (core_gvfs & mask) == mask;
+}
diff --git a/gvfs.h b/gvfs.h
index b6dbe85eae4071..011185dea93734 100644
--- a/gvfs.h
+++ b/gvfs.h
@@ -1,9 +1,13 @@
 #ifndef GVFS_H
 #define GVFS_H
 
+
 /*
  * This file is for the specific settings and methods
  * used for GVFS functionality
  */
 
+void gvfs_load_config_value(const char *value);
+int gvfs_config_is_set(int mask);
+
 #endif /* GVFS_H */
diff --git a/meson.build b/meson.build
index 38cdec55728f6a..a80323bf7a23d1 100644
--- a/meson.build
+++ b/meson.build
@@ -294,6 +294,7 @@ libgit_sources = [
   'gpg-interface.c',
   'graph.c',
   'grep.c',
+  'gvfs.c',
   'hash-lookup.c',
   'hashmap.c',
   'help.c',

From 6f9fb79bbba559d6976272820632312fd095a746 Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Tue, 24 Jan 2017 17:38:59 +0100
Subject: [PATCH 029/207] gvfs: add the feature to skip writing the index's
 SHA-1

Writing the index's SHA-1 takes a substantial amount of time, and if the
user is reasonably sure that the files' integrity is not compromised, that
time can be saved.

Git no longer verifies the SHA-1 by default, anyway.

Signed-off-by: Kevin Willford <kewillf@microsoft.com>

Update for 2023-02-27: This feature was upstreamed as the index.skipHash
config option. This resulted in some changes to the struct and some of
the setup code. In particular, the config reading was moved to
prepare_repo_settings(), so the core.gvfs bit check was moved there,
too.
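
A quick way to see the effect (a sketch, assuming a SHA-1 repository where
the index trailer is 20 bytes): with bit 1 set, the checksum at the end of
.git/index is left as zero bytes instead of being computed.

    git config core.gvfs 1
    echo data >file && git add file
    tail -c 20 .git/index | od -An -tx1   # all zeros when the hash is skipped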

Signed-off-by: Kevin Willford <kewillf@microsoft.com>
Signed-off-by: Derrick Stolee <derrickstolee@github.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 Documentation/config/core.txt         | 10 +++++++++-
 gvfs.h                                |  6 ++++++
 repo-settings.c                       |  8 ++++++++
 t/meson.build                         |  1 +
 t/t1017-read-tree-skip-sha-on-read.sh | 22 ++++++++++++++++++++++
 5 files changed, 46 insertions(+), 1 deletion(-)
 create mode 100755 t/t1017-read-tree-skip-sha-on-read.sh

diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index 5483850e38dd26..f07cd690c4603f 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -744,7 +744,15 @@ core.multiPackIndex::
 	information. Defaults to true.
 
 core.gvfs::
-	Enable the features needed for GVFS.
+	Enable the features needed for GVFS.  This value can be set to true
+	to indicate all features should be turned on or the bit values listed
+	below can be used to turn on specific features.
++
+--
+	GVFS_SKIP_SHA_ON_INDEX::
+		Bit value 1
+		Disables the calculation of the sha when writing the index
+--
 
 core.sparseCheckout::
 	Enable "sparse checkout" feature. See linkgit:git-sparse-checkout[1]
diff --git a/gvfs.h b/gvfs.h
index 011185dea93734..c75991530fa1fa 100644
--- a/gvfs.h
+++ b/gvfs.h
@@ -7,6 +7,12 @@
  * used for GVFS functionality
  */
 
+
+/*
+ * The list of bits in the core_gvfs setting
+ */
+#define GVFS_SKIP_SHA_ON_INDEX                      (1 << 0)
+
 void gvfs_load_config_value(const char *value);
 int gvfs_config_is_set(int mask);
 
diff --git a/repo-settings.c b/repo-settings.c
index 633b154c20ab57..20f68200fab63b 100644
--- a/repo-settings.c
+++ b/repo-settings.c
@@ -4,6 +4,7 @@
 #include "repository.h"
 #include "midx.h"
 #include "pack-objects.h"
+#include "gvfs.h"
 
 static void repo_cfg_bool(struct repository *r, const char *key, int *dest,
 			  int def)
@@ -78,6 +79,13 @@ void prepare_repo_settings(struct repository *r)
 		      r->settings.pack_use_bitmap_boundary_traversal);
 	repo_cfg_bool(r, "core.usereplacerefs", &r->settings.read_replace_refs, 1);
 
+	/*
+	 * For historical compatibility reasons, enable index.skipHash based
+	 * on a bit in core.gvfs.
+	 */
+	if (gvfs_config_is_set(GVFS_SKIP_SHA_ON_INDEX))
+		r->settings.index_skip_hash = 1;
+
 	/*
 	 * The GIT_TEST_MULTI_PACK_INDEX variable is special in that
 	 * either it *or* the config sets
diff --git a/t/meson.build b/t/meson.build
index fad6a3a7fbeb26..7b0852f87b3c02 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -177,6 +177,7 @@ integration_tests = [
   't1014-read-tree-confusing.sh',
   't1015-read-index-unmerged.sh',
   't1016-compatObjectFormat.sh',
+  't1017-read-tree-skip-sha-on-read.sh',
   't1020-subdirectory.sh',
   't1021-rerere-in-workdir.sh',
   't1022-read-tree-partial-clone.sh',
diff --git a/t/t1017-read-tree-skip-sha-on-read.sh b/t/t1017-read-tree-skip-sha-on-read.sh
new file mode 100755
index 00000000000000..5b76a80a0020dc
--- /dev/null
+++ b/t/t1017-read-tree-skip-sha-on-read.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+test_description='check that read-tree works with core.gvfs config value'
+
+. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-read-tree.sh
+
+test_expect_success setup '
+	echo one >a &&
+	git add a &&
+	git commit -m initial
+'
+test_expect_success 'read-tree without core.gvfs' '
+	read_tree_u_must_succeed -m -u HEAD
+'
+
+test_expect_success 'read-tree with core.gvfs set to 1' '
+	git config core.gvfs 1 &&
+	read_tree_u_must_succeed -m -u HEAD
+'
+
+test_done

From af0d38066f1de9b5f5e99399e900763e71dcb873 Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Tue, 24 Jan 2017 17:54:55 +0100
Subject: [PATCH 030/207] gvfs: add the feature that blobs may be missing

Signed-off-by: Kevin Willford <kewillf@microsoft.com>
---
 Documentation/config/core.txt | 4 ++++
 cache-tree.c                  | 4 +++-
 commit.c                      | 9 +++++++--
 gvfs.h                        | 1 +
 t/t0000-basic.sh              | 5 +++++
 5 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index f07cd690c4603f..37014380c2cf5a 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -752,6 +752,10 @@ core.gvfs::
 	GVFS_SKIP_SHA_ON_INDEX::
 		Bit value 1
 		Disables the calculation of the sha when writing the index
+	GVFS_MISSING_OK::
+		Bit value 4
+		Normally git write-tree ensures that the objects referenced by the
+		directory exist in the object database. This option disables this check.
 --
 
 core.sparseCheckout::
diff --git a/cache-tree.c b/cache-tree.c
index bcbcad3d61a09c..312d266b7aae6a 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -4,6 +4,7 @@
 #include "git-compat-util.h"
 #include "gettext.h"
 #include "hex.h"
+#include "gvfs.h"
 #include "lockfile.h"
 #include "tree.h"
 #include "tree-walk.h"
@@ -261,7 +262,8 @@ static int update_one(struct cache_tree *it,
 		      int flags)
 {
 	struct strbuf buffer;
-	int missing_ok = flags & WRITE_TREE_MISSING_OK;
+	int missing_ok = gvfs_config_is_set(GVFS_MISSING_OK) ?
+		WRITE_TREE_MISSING_OK : (flags & WRITE_TREE_MISSING_OK);
 	int dryrun = flags & WRITE_TREE_DRY_RUN;
 	int repair = flags & WRITE_TREE_REPAIR;
 	int to_invalidate = 0;
diff --git a/commit.c b/commit.c
index a127fe60c5e83c..e5dbffae13a794 100644
--- a/commit.c
+++ b/commit.c
@@ -1,6 +1,7 @@
 #define USE_THE_REPOSITORY_VARIABLE
 
 #include "git-compat-util.h"
+#include "gvfs.h"
 #include "tag.h"
 #include "commit.h"
 #include "commit-graph.h"
@@ -556,13 +557,17 @@ int repo_parse_commit_internal(struct repository *r,
 		.sizep = &size,
 		.contentp = &buffer,
 	};
+	int ret;
 	/*
 	 * Git does not support partial clones that exclude commits, so set
 	 * OBJECT_INFO_SKIP_FETCH_OBJECT to fail fast when an object is missing.
 	 */
 	int flags = OBJECT_INFO_LOOKUP_REPLACE | OBJECT_INFO_SKIP_FETCH_OBJECT |
-		OBJECT_INFO_DIE_IF_CORRUPT;
-	int ret;
+		    OBJECT_INFO_DIE_IF_CORRUPT;
+
+	/* But the GVFS Protocol _does_ support missing commits! */
+	if (gvfs_config_is_set(GVFS_MISSING_OK))
+		flags ^= OBJECT_INFO_SKIP_FETCH_OBJECT;
 
 	if (!item)
 		return -1;
diff --git a/gvfs.h b/gvfs.h
index c75991530fa1fa..7bedfaacf6d684 100644
--- a/gvfs.h
+++ b/gvfs.h
@@ -12,6 +12,7 @@
  * The list of bits in the core_gvfs setting
  */
 #define GVFS_SKIP_SHA_ON_INDEX                      (1 << 0)
+#define GVFS_MISSING_OK                             (1 << 2)
 
 void gvfs_load_config_value(const char *value);
 int gvfs_config_is_set(int mask);
diff --git a/t/t0000-basic.sh b/t/t0000-basic.sh
index 35c5c2b4f9beb4..3f4ec0b183f1f4 100755
--- a/t/t0000-basic.sh
+++ b/t/t0000-basic.sh
@@ -1106,6 +1106,11 @@ test_expect_success 'writing this tree with --missing-ok' '
 	git write-tree --missing-ok
 '
 
+test_expect_success 'writing this tree with missing ok config value' '
+	git config core.gvfs 4 &&
+	git write-tree
+'
+
 
 ################################################################
 test_expect_success 'git read-tree followed by write-tree should be idempotent' '

From 8acc66f7c87845e0d4ad371aa110a8c12b22ee27 Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Wed, 18 May 2016 13:40:39 +0000
Subject: [PATCH 031/207] gvfs: prevent files from being deleted outside the
 sparse checkout

Prevent the sparse checkout from deleting files that were marked with the
skip-worktree bit and are not in the sparse-checkout file.

This is because everything with the skip-worktree bit turned on is being
virtualized and will be removed with the change of HEAD.

There was only one failing test when running with these changes: it
checked that the worktree narrows on checkout, which is expected since we
no longer narrow the worktree.

Update 2022-04-05: temporarily set 'sparse.expectfilesoutsideofpatterns' in
test (until we start disabling the "remove present-despite-SKIP_WORKTREE"
behavior with 'core.virtualfilesystem' in a later commit).

Signed-off-by: Kevin Willford <kewillf@microsoft.com>
---
 Documentation/config/core.txt    |  9 +++++++++
 gvfs.h                           |  1 +
 t/t1090-sparse-checkout-scope.sh | 20 ++++++++++++++++++++
 unpack-trees.c                   | 22 ++++++++++++++++++++++
 4 files changed, 52 insertions(+)

diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index 37014380c2cf5a..c3039ba74d2e7b 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -756,6 +756,15 @@ core.gvfs::
 		Bit value 4
 		Normally git write-tree ensures that the objects referenced by the
 		directory exist in the object database. This option disables this check.
+	GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT::
+		Bit value 8
+		When marking entries to remove from the index and the working
+		directory this option will take into account what the
+		skip-worktree bit was set to so that if the entry has the
+		skip-worktree bit set it will not be removed from the working
+		directory.  This will allow virtualized working directories to
+		detect the change to HEAD and use the new commit tree to show
+		the files that are in the working directory.
 --
 
 core.sparseCheckout::
diff --git a/gvfs.h b/gvfs.h
index 7bedfaacf6d684..44131625828cfa 100644
--- a/gvfs.h
+++ b/gvfs.h
@@ -13,6 +13,7 @@
  */
 #define GVFS_SKIP_SHA_ON_INDEX                      (1 << 0)
 #define GVFS_MISSING_OK                             (1 << 2)
+#define GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT       (1 << 3)
 
 void gvfs_load_config_value(const char *value);
 int gvfs_config_is_set(int mask);
diff --git a/t/t1090-sparse-checkout-scope.sh b/t/t1090-sparse-checkout-scope.sh
index 529844e2862c74..effa20aab7bea7 100755
--- a/t/t1090-sparse-checkout-scope.sh
+++ b/t/t1090-sparse-checkout-scope.sh
@@ -106,6 +106,26 @@ test_expect_success 'in partial clone, sparse checkout only fetches needed blobs
 	test_cmp expect actual
 '
 
+test_expect_success 'checkout does not delete items outside the sparse checkout file' '
+	# The "sparse.expectfilesoutsideofpatterns" config will prevent the
+	# SKIP_WORKTREE flag from being dropped on files present on-disk.
+	test_config sparse.expectfilesoutsideofpatterns true &&
+
+	test_config core.gvfs 8 &&
+	git checkout -b outside &&
+	echo "new file1" >d &&
+	git add --sparse d &&
+	git commit -m "branch initial" &&
+	echo "new file1" >e &&
+	git add --sparse e &&
+	git commit -m "skipped worktree" &&
+	git update-index --skip-worktree e &&
+	echo "/d" >.git/info/sparse-checkout &&
+	git checkout HEAD^ &&
+	test_path_is_file d &&
+	test_path_is_file e
+'
+
 test_expect_success MINGW 'no unnecessary opendir() with fscache' '
 	git clone . fscache-test &&
 	(
diff --git a/unpack-trees.c b/unpack-trees.c
index 32302ccb6556fe..00d5e3b023ce62 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -3,6 +3,7 @@
 
 #include "git-compat-util.h"
 #include "advice.h"
+#include "gvfs.h"
 #include "strvec.h"
 #include "repository.h"
 #include "parse.h"
@@ -2686,6 +2687,27 @@ static int deleted_entry(const struct cache_entry *ce,
 
 	if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
 		return -1;
+
+	/*
+	 * When marking entries to remove from the index and the working
+	 * directory this option will take into account what the
+	 * skip-worktree bit was set to so that if the entry has the
+	 * skip-worktree bit set it will not be removed from the working
+	 * directory.  This will allow virtualized working directories to
+	 * detect the change to HEAD and use the new commit tree to show
+	 * the files that are in the working directory.
+	 *
+	 * old is the cache_entry that will have the skip-worktree bit set
+	 * which will need to be preserved when the CE_REMOVE entry is added
+	 */
+	if (gvfs_config_is_set(GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT) &&
+		old &&
+		old->ce_flags & CE_SKIP_WORKTREE) {
+		add_entry(o, old, CE_REMOVE, 0);
+		invalidate_ce_path(old, o);
+		return 1;
+	}
+
 	add_entry(o, ce, CE_REMOVE, 0);
 	invalidate_ce_path(ce, o);
 	return 1;

From 2a0ce9b2af58c10d030e2e774bdcb7365ba99eee Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Mon, 30 May 2016 10:55:53 -0400
Subject: [PATCH 032/207] gvfs: optionally skip reachability checks/upload pack
 during fetch

While performing a fetch with a virtual file system we know that there
will be missing objects and we don't want to download them just because
of the reachability of the commits.  We also don't want to download a
pack file with commits, trees, and blobs since these will be downloaded
on demand.

This flag will skip the first connectivity check and, by returning zero,
will skip the upload pack. It will also skip the second connectivity
check but continue to update the branches to the latest commit IDs.
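
In a virtualized enlistment the net effect is that a fetch updates refs
without insisting that every reachable object is present locally. Roughly
(the remote and branch names here are illustrative):

    git config core.gvfs 16
    git fetch origin            # no rev-list connectivity walk, no pack transfer
    git rev-parse --verify refs/remotes/origin/main   # ref advanced regardless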

Signed-off-by: Kevin Willford <kewillf@microsoft.com>
---
 Documentation/config/core.txt |  9 +++++++++
 connected.c                   | 19 +++++++++++++++++++
 gvfs.h                        |  1 +
 t/meson.build                 |  1 +
 t/t5584-vfs.sh                | 24 ++++++++++++++++++++++++
 5 files changed, 54 insertions(+)
 create mode 100755 t/t5584-vfs.sh

diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index c3039ba74d2e7b..65dba924e82d5b 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -765,6 +765,15 @@ core.gvfs::
 		directory.  This will allow virtualized working directories to
 		detect the change to HEAD and use the new commit tree to show
 		the files that are in the working directory.
+	GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK::
+		Bit value 16
+		While performing a fetch with a virtual file system we know
+		that there will be missing objects and we don't want to download
+		them just because of the reachability of the commits.  We also
+		don't want to download a pack file with commits, trees, and blobs
+		since these will be downloaded on demand.  This flag will skip the
+		checks on the reachability of objects during a fetch as well as
+		the upload pack so that extraneous objects don't get downloaded.
 --
 
 core.sparseCheckout::
diff --git a/connected.c b/connected.c
index 3099da84f3397f..f1c7c7310508b9 100644
--- a/connected.c
+++ b/connected.c
@@ -3,6 +3,7 @@
 #include "git-compat-util.h"
 #include "gettext.h"
 #include "hex.h"
+#include "gvfs.h"
 #include "object-store-ll.h"
 #include "run-command.h"
 #include "sigchain.h"
@@ -34,6 +35,24 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
 	struct transport *transport;
 	size_t base_len;
 
+	/*
+	 * When running a virtual file system there will be objects that
+	 * are missing locally, and we don't want to download a bunch of
+	 * commits, trees, and blobs just to make sure everything is
+	 * reachable locally, so this option will skip the reachability
+	 * checks below that use rev-list.  This will stop the check
+	 * before uploadpack runs to determine if there is anything to
+	 * fetch.  Returning zero for the first check will also prevent the
+	 * uploadpack from happening.  It will also skip the check after
+	 * the fetch is finished to make sure all the objects were
+	 * downloaded in the pack file.  This will allow the fetch to
+	 * run and get all the latest tip commit ids for all the branches
+	 * in the fetch but not pull down commits, trees, or blobs via
+	 * upload pack.
+	 */
+	if (gvfs_config_is_set(GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK))
+		return 0;
+
 	if (!opt)
 		opt = &defaults;
 	transport = opt->transport;
diff --git a/gvfs.h b/gvfs.h
index 44131625828cfa..e69dd4b2c8f785 100644
--- a/gvfs.h
+++ b/gvfs.h
@@ -14,6 +14,7 @@
 #define GVFS_SKIP_SHA_ON_INDEX                      (1 << 0)
 #define GVFS_MISSING_OK                             (1 << 2)
 #define GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT       (1 << 3)
+#define GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK (1 << 4)
 
 void gvfs_load_config_value(const char *value);
 int gvfs_config_is_set(int mask);
diff --git a/t/meson.build b/t/meson.build
index 7b0852f87b3c02..c6ff9aaf1f0bc3 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -704,6 +704,7 @@ integration_tests = [
   't5581-http-curl-verbose.sh',
   't5582-fetch-negative-refspec.sh',
   't5583-push-branches.sh',
+  't5584-vfs.sh',
   't5600-clone-fail-cleanup.sh',
   't5601-clone.sh',
   't5602-clone-remote-exec.sh',
diff --git a/t/t5584-vfs.sh b/t/t5584-vfs.sh
new file mode 100755
index 00000000000000..8a703cbb640387
--- /dev/null
+++ b/t/t5584-vfs.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+test_description='fetch using the flag to skip reachability and upload pack'
+
+. ./test-lib.sh
+
+
+test_expect_success setup '
+	echo inital >a &&
+	git add a &&
+	git commit -m initial &&
+	git clone . one
+'
+
+test_expect_success "fetch test" '
+	cd one &&
+	git config core.gvfs 16 &&
+	rm -rf .git/objects/* &&
+	git -C .. cat-file commit HEAD | git hash-object -w --stdin -t commit &&
+	git fetch &&
+	test_must_fail git rev-parse --verify HEAD^{tree}
+'
+
+test_done
\ No newline at end of file

From e44a53bd0c479c1844b773e2a14948a51310ef58 Mon Sep 17 00:00:00 2001
From: Ben Peart <Ben.Peart@microsoft.com>
Date: Wed, 15 Jun 2016 14:59:16 +0000
Subject: [PATCH 033/207] gvfs: ensure all filters and EOL conversions are
 blocked

Ensure all filters and EOL conversions are blocked when running under
GVFS so that our projected file sizes will match the actual file size
when it is hydrated on the local machine.
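
The effect is easy to see with any clean/smudge filter; a sketch (the
filter name and command below are made up):

    git config core.gvfs 64
    git config filter.demo.clean "sed -e s/foo/bar/"
    echo "*.txt filter=demo" >.gitattributes
    echo foo >file.txt
    git add file.txt   # dies: Filter "sed -e s/foo/bar/" not supported when running under GVFS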

Signed-off-by: Ben Peart <Ben.Peart@microsoft.com>
---
 Documentation/config/core.txt |  9 +++++++++
 convert.c                     | 22 +++++++++++++++++++++
 gvfs.h                        |  1 +
 t/t0021-conversion.sh         | 37 +++++++++++++++++++++++++++++++++++
 t/t0027-auto-crlf.sh          | 12 ++++++++++++
 5 files changed, 81 insertions(+)

diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index 65dba924e82d5b..89a5889df688dd 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -774,6 +774,15 @@ core.gvfs::
 		since these will be downloaded on demand.  This flag will skip the
 		checks on the reachability of objects during a fetch as well as
 		the upload pack so that extraneous objects don't get downloaded.
+	GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS::
+		Bit value 64
+		With a virtual file system we only know the file size before any
+		CRLF or smudge/clean filters processing is done on the client.
+		To prevent file corruption due to truncation or expansion with
+		garbage at the end, these filters must not run when the file
+		is first accessed and brought down to the client. Git.exe can't
+		currently tell the first access vs subsequent accesses so this
+		flag just blocks them from occurring at all.
 --
 
 core.sparseCheckout::
diff --git a/convert.c b/convert.c
index 9cc0ca20ca0776..22486ab42b8cb7 100644
--- a/convert.c
+++ b/convert.c
@@ -3,6 +3,7 @@
 
 #include "git-compat-util.h"
 #include "advice.h"
+#include "gvfs.h"
 #include "config.h"
 #include "convert.h"
 #include "copy.h"
@@ -563,6 +564,9 @@ static int crlf_to_git(struct index_state *istate,
 	if (!buf)
 		return 1;
 
+	if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+		die("CRLF conversions not supported when running under GVFS");
+
 	/* only grow if not in place */
 	if (strbuf_avail(buf) + buf->len < len)
 		strbuf_grow(buf, len - buf->len);
@@ -602,6 +606,9 @@ static int crlf_to_worktree(const char *src, size_t len, struct strbuf *buf,
 	if (!will_convert_lf_to_crlf(&stats, crlf_action))
 		return 0;
 
+	if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+		die("CRLF conversions not supported when running under GVFS");
+
 	/* are we "faking" in place editing ? */
 	if (src == buf->buf)
 		to_free = strbuf_detach(buf, NULL);
@@ -711,6 +718,9 @@ static int apply_single_file_filter(const char *path, const char *src, size_t le
 	struct async async;
 	struct filter_params params;
 
+	if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+		die("Filter \"%s\" not supported when running under GVFS", cmd);
+
 	memset(&async, 0, sizeof(async));
 	async.proc = filter_buffer_or_fd;
 	async.data = &params;
@@ -1130,6 +1140,9 @@ static int ident_to_git(const char *src, size_t len,
 	if (!buf)
 		return 1;
 
+	if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+		die("ident conversions not supported when running under GVFS");
+
 	/* only grow if not in place */
 	if (strbuf_avail(buf) + buf->len < len)
 		strbuf_grow(buf, len - buf->len);
@@ -1177,6 +1190,9 @@ static int ident_to_worktree(const char *src, size_t len,
 	if (!cnt)
 		return 0;
 
+	if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+		die("ident conversions not supported when running under GVFS");
+
 	/* are we "faking" in place editing ? */
 	if (src == buf->buf)
 		to_free = strbuf_detach(buf, NULL);
@@ -1629,6 +1645,9 @@ static int lf_to_crlf_filter_fn(struct stream_filter *filter,
 	size_t count, o = 0;
 	struct lf_to_crlf_filter *lf_to_crlf = (struct lf_to_crlf_filter *)filter;
 
+	if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+		die("CRLF conversions not supported when running under GVFS");
+
 	/*
 	 * We may be holding onto the CR to see if it is followed by a
 	 * LF, in which case we would need to go to the main loop.
@@ -1873,6 +1892,9 @@ static int ident_filter_fn(struct stream_filter *filter,
 	struct ident_filter *ident = (struct ident_filter *)filter;
 	static const char head[] = "$Id";
 
+	if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+		die("ident conversions not supported when running under GVFS");
+
 	if (!input) {
 		/* drain upon eof */
 		switch (ident->state) {
diff --git a/gvfs.h b/gvfs.h
index e69dd4b2c8f785..7c9367866f502a 100644
--- a/gvfs.h
+++ b/gvfs.h
@@ -15,6 +15,7 @@
 #define GVFS_MISSING_OK                             (1 << 2)
 #define GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT       (1 << 3)
 #define GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK (1 << 4)
+#define GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS      (1 << 6)
 
 void gvfs_load_config_value(const char *value);
 int gvfs_config_is_set(int mask);
diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh
index da22718b08786e..6c4c012f4fe4bf 100755
--- a/t/t0021-conversion.sh
+++ b/t/t0021-conversion.sh
@@ -335,6 +335,43 @@ test_expect_success "filter: smudge empty file" '
 	test_cmp expected filtered-empty-in-repo
 '
 
+test_expect_success "filter: clean filters blocked when under GVFS" '
+	test_config filter.empty-in-repo.clean "cat >/dev/null" &&
+	test_config filter.empty-in-repo.smudge "echo smudged && cat" &&
+	test_config core.gvfs 64 &&
+
+	echo dead data walking >empty-in-repo &&
+	test_must_fail git add empty-in-repo
+'
+
+test_expect_success "filter: smudge filters blocked when under GVFS" '
+	test_config filter.empty-in-repo.clean "cat >/dev/null" &&
+	test_config filter.empty-in-repo.smudge "echo smudged && cat" &&
+	test_config core.gvfs 64 &&
+
+	test_must_fail git checkout
+'
+
+test_expect_success "ident blocked on add when under GVFS" '
+	test_config core.gvfs 64 &&
+	test_config core.autocrlf false &&
+
+	echo "*.i ident" >.gitattributes &&
+	echo "\$Id\$" > ident.i &&
+
+	test_must_fail git add ident.i
+'
+
+test_expect_success "ident blocked when under GVFS" '
+	git add ident.i &&
+
+	git commit -m "added ident.i" &&
+	test_config core.gvfs 64 &&
+	rm ident.i &&
+
+	test_must_fail git checkout -- ident.i
+'
+
 test_expect_success 'disable filter with empty override' '
 	test_config_global filter.disable.smudge false &&
 	test_config_global filter.disable.clean false &&
diff --git a/t/t0027-auto-crlf.sh b/t/t0027-auto-crlf.sh
index 49dbf09da77386..8f42f28f56ab43 100755
--- a/t/t0027-auto-crlf.sh
+++ b/t/t0027-auto-crlf.sh
@@ -343,6 +343,18 @@ checkout_files () {
 	"
 }
 
+test_expect_success 'crlf conversions blocked when under GVFS' '
+	git checkout -b gvfs &&
+	test_commit initial &&
+	rm initial.t &&
+	test_config core.gvfs 64 &&
+	test_config core.autocrlf true &&
+	test_must_fail git read-tree --reset -u HEAD &&
+
+	git config core.autocrlf false &&
+	git read-tree --reset -u HEAD
+'
+
 # Test control characters
 # NUL SOH CR EOF==^Z
 test_expect_success 'ls-files --eol -o Text/Binary' '

From 6e174962ffad5e8206f2b7c5ad94e10cc5531248 Mon Sep 17 00:00:00 2001
From: Ben Peart <Ben.Peart@microsoft.com>
Date: Tue, 10 Jan 2017 18:47:14 +0000
Subject: [PATCH 034/207] gvfs: allow "virtualizing" objects

The idea is to allow blob objects to be missing from the local repository,
and to load them lazily on demand.

After discussing this idea on the mailing list, we will rename the feature
to "lazy clone" and work more on this.
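
A minimal sketch of the hook this patch invokes (one hook invocation per
missing object; a long-running protocol follows in a later patch). The
path to the "host" repository is an assumption:

    #!/bin/sh
    # .git/hooks/read-object: $1 is the hex object ID Git could not find locally.
    # Fetch it from a host repository and store it in the local object database.
    git --git-dir=/path/to/host/.git cat-file blob "$1" |
    git -c core.virtualizeobjects=false hash-object -w --stdin >/dev/null

Enable it with 'git config core.virtualizeobjects true'.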

Signed-off-by: Ben Peart <Ben.Peart@microsoft.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 config.c      |  5 +++++
 connected.c   |  3 +++
 environment.c |  1 +
 environment.h |  1 +
 object-file.c | 23 +++++++++++++++++++++++
 5 files changed, 33 insertions(+)

diff --git a/config.c b/config.c
index 0861ad5425576b..c652d253fed4a0 100644
--- a/config.c
+++ b/config.c
@@ -1657,6 +1657,11 @@ int git_default_core_config(const char *var, const char *value,
 		return 0;
 	}
 
+	if (!strcmp(var, "core.virtualizeobjects")) {
+		core_virtualize_objects = git_config_bool(var, value);
+		return 0;
+	}
+
 	/* Add other config variables here and to Documentation/config.txt. */
 	return platform_core_config(var, value, ctx, cb);
 }
diff --git a/connected.c b/connected.c
index f1c7c7310508b9..1ce9cf9521ed90 100644
--- a/connected.c
+++ b/connected.c
@@ -1,6 +1,7 @@
 #define USE_THE_REPOSITORY_VARIABLE
 
 #include "git-compat-util.h"
+#include "environment.h"
 #include "gettext.h"
 #include "hex.h"
 #include "gvfs.h"
@@ -52,6 +53,8 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
 	 */
 	if (gvfs_config_is_set(GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK))
 		return 0;
+	if (core_virtualize_objects)
+		return 0;
 
 	if (!opt)
 		opt = &defaults;
diff --git a/environment.c b/environment.c
index fdc1162679c5d1..b21c255f07eda4 100644
--- a/environment.c
+++ b/environment.c
@@ -72,6 +72,7 @@ int core_gvfs;
 int merge_log_config = -1;
 int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */
 unsigned long pack_size_limit_cfg;
+int core_virtualize_objects;
 int max_allowed_tree_depth =
 #ifdef _MSC_VER
 	/*
diff --git a/environment.h b/environment.h
index a51a8cf4fb9902..081c6088fcef2c 100644
--- a/environment.h
+++ b/environment.h
@@ -225,5 +225,6 @@ extern const char *comment_line_str;
 extern char *comment_line_str_to_free;
 extern int auto_comment_line_char;
 
+extern int core_virtualize_objects;
 # endif /* USE_THE_REPOSITORY_VARIABLE */
 #endif /* ENVIRONMENT_H */
diff --git a/object-file.c b/object-file.c
index f94254a0202c66..f72e7cc4f8767c 100644
--- a/object-file.c
+++ b/object-file.c
@@ -41,6 +41,8 @@
 #include "fsck.h"
 #include "loose.h"
 #include "object-file-convert.h"
+#include "trace.h"
+#include "hook.h"
 
 /* The maximum size for an object header. */
 #define MAX_HEADER_LEN 32
@@ -1616,6 +1618,20 @@ void disable_obj_read_lock(void)
 	pthread_mutex_destroy(&obj_read_mutex);
 }
 
+static int run_read_object_hook(struct repository *r, const struct object_id *oid)
+{
+	struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+	int ret;
+	uint64_t start;
+
+	start = getnanotime();
+	strvec_push(&opt.args, oid_to_hex(oid));
+	ret = run_hooks_opt(r, "read-object", &opt);
+	trace_performance_since(start, "run_read_object_hook");
+
+	return ret;
+}
+
 int fetch_if_missing = 1;
 
 static int do_oid_object_info_extended(struct repository *r,
@@ -1628,6 +1644,7 @@ static int do_oid_object_info_extended(struct repository *r,
 	int rtype;
 	const struct object_id *real = oid;
 	int already_retried = 0;
+	int tried_hook = 0;
 
 
 	if (flags & OBJECT_INFO_LOOKUP_REPLACE)
@@ -1639,6 +1656,7 @@ static int do_oid_object_info_extended(struct repository *r,
 	if (!oi)
 		oi = &blank_oi;
 
+retry:
 	co = find_cached_object(real);
 	if (co) {
 		if (oi->typep)
@@ -1670,6 +1688,11 @@ static int do_oid_object_info_extended(struct repository *r,
 			reprepare_packed_git(r);
 			if (find_pack_entry(r, real, &e))
 				break;
+			if (core_virtualize_objects && !tried_hook) {
+				tried_hook = 1;
+				if (!run_read_object_hook(r, oid))
+					goto retry;
+			}
 		}
 
 		/*

From 933af84a7341d764f1e58f052d7877572f71d7df Mon Sep 17 00:00:00 2001
From: Ben Peart <Ben.Peart@microsoft.com>
Date: Wed, 15 Mar 2017 18:43:05 +0000
Subject: [PATCH 035/207] Hydrate missing loose objects in check_and_freshen()

Hydrate missing loose objects in check_and_freshen() when running
virtualized. Add test cases to verify that the read-object hook works
when running virtualized.

This hook is called in check_and_freshen() rather than
check_and_freshen_local() to make the hook work also with alternates.
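
To try this out locally (a sketch; paths are assumptions), install the
sample implementation added below as the hook and watch the pkt-line
exchange with GIT_TRACE_PACKET:

    git init guest-repo && cd guest-repo
    mkdir -p .git/hooks
    cp /path/to/git/contrib/long-running-read-object/example.pl .git/hooks/read-object
    chmod +x .git/hooks/read-object   # adjust $DIR inside the script first
    git config core.virtualizeobjects true
    GIT_TRACE_PACKET=1 git cat-file blob <missing-blob-oid>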

Helped-by: Kevin Willford <kewillf@microsoft.com>
Signed-off-by: Ben Peart <Ben.Peart@microsoft.com>
---
 .../technical/read-object-protocol.txt        | 102 +++++++++++++
 contrib/long-running-read-object/example.pl   | 114 ++++++++++++++
 object-file.c                                 | 142 ++++++++++++++++--
 t/meson.build                                 |   1 +
 t/t0410/read-object                           | 114 ++++++++++++++
 t/t0499-read-object.sh                        |  30 ++++
 6 files changed, 487 insertions(+), 16 deletions(-)
 create mode 100644 Documentation/technical/read-object-protocol.txt
 create mode 100644 contrib/long-running-read-object/example.pl
 create mode 100755 t/t0410/read-object
 create mode 100755 t/t0499-read-object.sh

diff --git a/Documentation/technical/read-object-protocol.txt b/Documentation/technical/read-object-protocol.txt
new file mode 100644
index 00000000000000..a893b46e7c28a9
--- /dev/null
+++ b/Documentation/technical/read-object-protocol.txt
@@ -0,0 +1,102 @@
+Read Object Process
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The read-object process enables Git to read all missing blobs with a
+single process invocation for the entire life of a single Git command.
+This is achieved by using a packet format (pkt-line, see technical/
+protocol-common.txt) based protocol over standard input and standard
+output as follows. All packets, except for the "*CONTENT" packets and
+the "0000" flush packet, are considered text and therefore are
+terminated by a LF.
+
+Git starts the process when it encounters the first missing object that
+needs to be retrieved. After the process is started, Git sends a welcome
+message ("git-read-object-client"), a list of supported protocol version
+numbers, and a flush packet. Git expects to read a welcome response
+message ("git-read-object-server"), exactly one protocol version number
+from the previously sent list, and a flush packet. All further
+communication will be based on the selected version.
+
+The remaining protocol description below documents "version=1". Please
+note that "version=42" in the example below does not exist and is only
+there to illustrate how the protocol would look with more than one
+version.
+
+After the version negotiation Git sends a list of all capabilities that
+it supports and a flush packet. Git expects to read a list of desired
+capabilities, which must be a subset of the supported capabilities list,
+and a flush packet as response:
+------------------------
+packet: git> git-read-object-client
+packet: git> version=1
+packet: git> version=42
+packet: git> 0000
+packet: git< git-read-object-server
+packet: git< version=1
+packet: git< 0000
+packet: git> capability=get
+packet: git> capability=have
+packet: git> capability=put
+packet: git> capability=not-yet-invented
+packet: git> 0000
+packet: git< capability=get
+packet: git< 0000
+------------------------
+The only supported capability in version 1 is "get".
+
+Afterwards Git sends a list of "key=value" pairs terminated with a flush
+packet. The list will contain at least the command (based on the
+supported capabilities) and the sha1 of the object to retrieve. Please
+note that the process must not send any response before it has received the
+final flush packet.
+
+When the process receives the "get" command, it should make the requested
+object available in the git object store and then return success. Git will
+then check the object store again and this time find it and proceed.
+------------------------
+packet: git> command=get
+packet: git> sha1=0a214a649e1b3d5011e14a3dc227753f2bd2be05
+packet: git> 0000
+------------------------
+
+The process is expected to respond with a list of "key=value" pairs
+terminated with a flush packet. If the process does not experience
+problems then the list must contain a "success" status.
+------------------------
+packet: git< status=success
+packet: git< 0000
+------------------------
+
+In case the process cannot or does not want to process the content, it
+is expected to respond with an "error" status.
+------------------------
+packet: git< status=error
+packet: git< 0000
+------------------------
+
+In case the process cannot or does not want to process the content as
+well as any future content for the lifetime of the Git process, then it
+is expected to respond with an "abort" status at any point in the
+protocol.
+------------------------
+packet: git< status=abort
+packet: git< 0000
+------------------------
+
+Git neither stops nor restarts the process in case the "error"/"abort"
+status is set.
+
+If the process dies during the communication or does not adhere to the
+protocol then Git will stop the process and restart it with the next
+object that needs to be processed.
+
+After the read-object process has processed an object it is expected to
+wait for the next "key=value" list containing a command. Git will close
+the command pipe on exit. The process is expected to detect EOF and exit
+gracefully on its own. Git will wait until the process has stopped.
+
+A long running read-object process demo implementation can be found in
+`contrib/long-running-read-object/example.pl` located in the Git core
+repository. If you develop your own long running process then the
+`GIT_TRACE_PACKET` environment variables can be very helpful for
+debugging (see linkgit:git[1]).
diff --git a/contrib/long-running-read-object/example.pl b/contrib/long-running-read-object/example.pl
new file mode 100644
index 00000000000000..b8f37f836a813c
--- /dev/null
+++ b/contrib/long-running-read-object/example.pl
@@ -0,0 +1,114 @@
+#!/usr/bin/perl
+#
+# Example implementation for the Git read-object protocol version 1
+# See Documentation/technical/read-object-protocol.txt
+#
+# Allows you to test the ability for blobs to be pulled from a host git repo
+# "on demand."  Called when git needs a blob it couldn't find locally due to
+# a lazy clone that only cloned the commits and trees.
+#
+# A lazy clone can be simulated via the following commands from the host repo
+# you wish to create a lazy clone of:
+#
+# cd /host_repo
+# git rev-parse HEAD
+# git init /guest_repo
+# git cat-file --batch-check --batch-all-objects | grep -v 'blob' |
+#	cut -d' ' -f1 | git pack-objects /guest_repo/.git/objects/pack/noblobs
+# cd /guest_repo
+# git config core.virtualizeobjects true
+# git reset --hard <sha from rev-parse call above>
+#
+# Please note, this sample is a minimal skeleton. No proper error handling
+# was implemented.
+#
+
+use strict;
+use warnings;
+
+#
+# Point $DIR to the folder where your host git repo is located so we can pull
+# missing objects from it
+#
+my $DIR = "/host_repo/.git/";
+
+sub packet_bin_read {
+	my $buffer;
+	my $bytes_read = read STDIN, $buffer, 4;
+	if ( $bytes_read == 0 ) {
+
+		# EOF - Git stopped talking to us!
+		exit();
+	}
+	elsif ( $bytes_read != 4 ) {
+		die "invalid packet: '$buffer'";
+	}
+	my $pkt_size = hex($buffer);
+	if ( $pkt_size == 0 ) {
+		return ( 1, "" );
+	}
+	elsif ( $pkt_size > 4 ) {
+		my $content_size = $pkt_size - 4;
+		$bytes_read = read STDIN, $buffer, $content_size;
+		if ( $bytes_read != $content_size ) {
+			die "invalid packet ($content_size bytes expected; $bytes_read bytes read)";
+		}
+		return ( 0, $buffer );
+	}
+	else {
+		die "invalid packet size: $pkt_size";
+	}
+}
+
+sub packet_txt_read {
+	my ( $res, $buf ) = packet_bin_read();
+	unless ( $buf =~ s/\n$// ) {
+		die "A non-binary line MUST be terminated by an LF.";
+	}
+	return ( $res, $buf );
+}
+
+sub packet_bin_write {
+	my $buf = shift;
+	print STDOUT sprintf( "%04x", length($buf) + 4 );
+	print STDOUT $buf;
+	STDOUT->flush();
+}
+
+sub packet_txt_write {
+	packet_bin_write( $_[0] . "\n" );
+}
+
+sub packet_flush {
+	print STDOUT sprintf( "%04x", 0 );
+	STDOUT->flush();
+}
+
+( packet_txt_read() eq ( 0, "git-read-object-client" ) ) || die "bad initialize";
+( packet_txt_read() eq ( 0, "version=1" ) )				 || die "bad version";
+( packet_bin_read() eq ( 1, "" ) )                       || die "bad version end";
+
+packet_txt_write("git-read-object-server");
+packet_txt_write("version=1");
+packet_flush();
+
+( packet_txt_read() eq ( 0, "capability=get" ) )    || die "bad capability";
+( packet_bin_read() eq ( 1, "" ) )                  || die "bad capability end";
+
+packet_txt_write("capability=get");
+packet_flush();
+
+while (1) {
+	my ($command) = packet_txt_read() =~ /^command=([^=]+)$/;
+
+	if ( $command eq "get" ) {
+		my ($sha1) = packet_txt_read() =~ /^sha1=([0-9a-f]{40})$/;
+		packet_bin_read();
+
+		system ('git --git-dir="' . $DIR . '" cat-file blob ' . $sha1 . ' | git -c core.virtualizeobjects=false hash-object -w --stdin >/dev/null 2>&1');
+		packet_txt_write(($?) ? "status=error" : "status=success");
+		packet_flush();
+	} else {
+		die "bad command '$command'";
+	}
+}
diff --git a/object-file.c b/object-file.c
index f72e7cc4f8767c..909bc4beff78b9 100644
--- a/object-file.c
+++ b/object-file.c
@@ -43,6 +43,9 @@
 #include "object-file-convert.h"
 #include "trace.h"
 #include "hook.h"
+#include "sigchain.h"
+#include "sub-process.h"
+#include "pkt-line.h"
 
 /* The maximum size for an object header. */
 #define MAX_HEADER_LEN 32
@@ -1023,6 +1026,116 @@ int has_alt_odb(struct repository *r)
 	return !!r->objects->odb->next;
 }
 
+#define CAP_GET    (1u<<0)
+
+static int subprocess_map_initialized;
+static struct hashmap subprocess_map;
+
+struct read_object_process {
+	struct subprocess_entry subprocess;
+	unsigned int supported_capabilities;
+};
+
+static int start_read_object_fn(struct subprocess_entry *subprocess)
+{
+	struct read_object_process *entry = (struct read_object_process *)subprocess;
+	static int versions[] = {1, 0};
+	static struct subprocess_capability capabilities[] = {
+		{ "get", CAP_GET },
+		{ NULL, 0 }
+	};
+
+	return subprocess_handshake(subprocess, "git-read-object", versions,
+				    NULL, capabilities,
+				    &entry->supported_capabilities);
+}
+
+static int read_object_process(const struct object_id *oid)
+{
+	int err;
+	struct read_object_process *entry;
+	struct child_process *process;
+	struct strbuf status = STRBUF_INIT;
+	const char *cmd = find_hook(the_repository, "read-object");
+	uint64_t start;
+
+	start = getnanotime();
+
+	if (!subprocess_map_initialized) {
+		subprocess_map_initialized = 1;
+		hashmap_init(&subprocess_map, (hashmap_cmp_fn)cmd2process_cmp,
+			     NULL, 0);
+		entry = NULL;
+	} else {
+		entry = (struct read_object_process *) subprocess_find_entry(&subprocess_map, cmd);
+	}
+
+	if (!entry) {
+		entry = xmalloc(sizeof(*entry));
+		entry->supported_capabilities = 0;
+
+		if (subprocess_start(&subprocess_map, &entry->subprocess, cmd,
+				     start_read_object_fn)) {
+			free(entry);
+			return -1;
+		}
+	}
+	process = &entry->subprocess.process;
+
+	if (!(CAP_GET & entry->supported_capabilities))
+		return -1;
+
+	sigchain_push(SIGPIPE, SIG_IGN);
+
+	err = packet_write_fmt_gently(process->in, "command=get\n");
+	if (err)
+		goto done;
+
+	err = packet_write_fmt_gently(process->in, "sha1=%s\n", oid_to_hex(oid));
+	if (err)
+		goto done;
+
+	err = packet_flush_gently(process->in);
+	if (err)
+		goto done;
+
+	err = subprocess_read_status(process->out, &status);
+	err = err ? err : strcmp(status.buf, "success");
+
+done:
+	sigchain_pop(SIGPIPE);
+
+	if (err || errno == EPIPE) {
+		err = err ? err : errno;
+		if (!strcmp(status.buf, "error")) {
+			/* The process signaled a problem with the file. */
+		}
+		else if (!strcmp(status.buf, "abort")) {
+			/*
+			 * The process signaled a permanent problem. Don't try to read
+			 * objects with the same command for the lifetime of the current
+			 * Git process.
+			 */
+			entry->supported_capabilities &= ~CAP_GET;
+		}
+		else {
+			/*
+			 * Something went wrong with the read-object process.
+			 * Force shutdown and restart if needed.
+			 */
+			error("external process '%s' failed", cmd);
+			subprocess_stop(&subprocess_map,
+					(struct subprocess_entry *)entry);
+			free(entry);
+		}
+	}
+
+	trace_performance_since(start, "read_object_process");
+
+	strbuf_release(&status);
+	return err;
+}
+
 /* Returns 1 if we have successfully freshened the file, 0 otherwise. */
 static int freshen_file(const char *fn)
 {
@@ -1073,8 +1186,19 @@ static int check_and_freshen_nonlocal(const struct object_id *oid, int freshen)
 
 static int check_and_freshen(const struct object_id *oid, int freshen)
 {
-	return check_and_freshen_local(oid, freshen) ||
+	int ret;
+	int tried_hook = 0;
+
+retry:
+	ret = check_and_freshen_local(oid, freshen) ||
 	       check_and_freshen_nonlocal(oid, freshen);
+	if (!ret && core_virtualize_objects && !tried_hook) {
+		tried_hook = 1;
+		if (!read_object_process(oid))
+			goto retry;
+	}
+
+	return ret;
 }
 
 int has_loose_object_nonlocal(const struct object_id *oid)
@@ -1618,20 +1742,6 @@ void disable_obj_read_lock(void)
 	pthread_mutex_destroy(&obj_read_mutex);
 }
 
-static int run_read_object_hook(struct repository *r, const struct object_id *oid)
-{
-	struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
-	int ret;
-	uint64_t start;
-
-	start = getnanotime();
-	strvec_push(&opt.args, oid_to_hex(oid));
-	ret = run_hooks_opt(r, "read-object", &opt);
-	trace_performance_since(start, "run_read_object_hook");
-
-	return ret;
-}
-
 int fetch_if_missing = 1;
 
 static int do_oid_object_info_extended(struct repository *r,
@@ -1690,7 +1800,7 @@ static int do_oid_object_info_extended(struct repository *r,
 				break;
 			if (core_virtualize_objects && !tried_hook) {
 				tried_hook = 1;
-				if (!run_read_object_hook(r, oid))
+				if (!read_object_process(oid))
 					goto retry;
 			}
 		}
diff --git a/t/meson.build b/t/meson.build
index c6ff9aaf1f0bc3..335486962b5e77 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -152,6 +152,7 @@ integration_tests = [
   't0410-partial-clone.sh',
   't0411-clone-from-partial.sh',
   't0450-txt-doc-vs-help.sh',
+  't0499-read-object.sh',
   't0500-progress-display.sh',
   't0600-reffiles-backend.sh',
   't0601-reffiles-pack-refs.sh',
diff --git a/t/t0410/read-object b/t/t0410/read-object
new file mode 100755
index 00000000000000..2b8feacc78577f
--- /dev/null
+++ b/t/t0410/read-object
@@ -0,0 +1,114 @@
+#!/usr/bin/perl
+#
+# Example implementation for the Git read-object protocol version 1
+# See Documentation/technical/read-object-protocol.txt
+#
+# Allows you to test the ability for blobs to be pulled from a host git repo
+# "on demand."  Called when git needs a blob it couldn't find locally due to
+# a lazy clone that only cloned the commits and trees.
+#
+# A lazy clone can be simulated via the following commands from the host repo
+# you wish to create a lazy clone of:
+#
+# cd /host_repo
+# git rev-parse HEAD
+# git init /guest_repo
+# git cat-file --batch-check --batch-all-objects | grep -v 'blob' |
+#	cut -d' ' -f1 | git pack-objects /guest_repo/.git/objects/pack/noblobs
+# cd /guest_repo
+# git config core.virtualizeobjects true
+# git reset --hard <sha from rev-parse call above>
+#
+# Please note, this sample is a minimal skeleton. No proper error handling
+# was implemented.
+#
+
+use strict;
+use warnings;
+
+#
+# Point $DIR to the folder where your host git repo is located so we can pull
+# missing objects from it
+#
+my $DIR = "../.git/";
+
+sub packet_bin_read {
+	my $buffer;
+	my $bytes_read = read STDIN, $buffer, 4;
+	if ( $bytes_read == 0 ) {
+
+		# EOF - Git stopped talking to us!
+		exit();
+	}
+	elsif ( $bytes_read != 4 ) {
+		die "invalid packet: '$buffer'";
+	}
+	my $pkt_size = hex($buffer);
+	if ( $pkt_size == 0 ) {
+		return ( 1, "" );
+	}
+	elsif ( $pkt_size > 4 ) {
+		my $content_size = $pkt_size - 4;
+		$bytes_read = read STDIN, $buffer, $content_size;
+		if ( $bytes_read != $content_size ) {
+			die "invalid packet ($content_size bytes expected; $bytes_read bytes read)";
+		}
+		return ( 0, $buffer );
+	}
+	else {
+		die "invalid packet size: $pkt_size";
+	}
+}
+
+sub packet_txt_read {
+	my ( $res, $buf ) = packet_bin_read();
+	unless ( $buf =~ s/\n$// ) {
+		die "A non-binary line MUST be terminated by an LF.";
+	}
+	return ( $res, $buf );
+}
+
+sub packet_bin_write {
+	my $buf = shift;
+	print STDOUT sprintf( "%04x", length($buf) + 4 );
+	print STDOUT $buf;
+	STDOUT->flush();
+}
+
+sub packet_txt_write {
+	packet_bin_write( $_[0] . "\n" );
+}
+
+sub packet_flush {
+	print STDOUT sprintf( "%04x", 0 );
+	STDOUT->flush();
+}
+
+( packet_txt_read() eq ( 0, "git-read-object-client" ) ) || die "bad initialize";
+( packet_txt_read() eq ( 0, "version=1" ) )				 || die "bad version";
+( packet_bin_read() eq ( 1, "" ) )                       || die "bad version end";
+
+packet_txt_write("git-read-object-server");
+packet_txt_write("version=1");
+packet_flush();
+
+( packet_txt_read() eq ( 0, "capability=get" ) )    || die "bad capability";
+( packet_bin_read() eq ( 1, "" ) )                  || die "bad capability end";
+
+packet_txt_write("capability=get");
+packet_flush();
+
+while (1) {
+	my ($command) = packet_txt_read() =~ /^command=([^=]+)$/;
+
+	if ( $command eq "get" ) {
+		my ($sha1) = packet_txt_read() =~ /^sha1=([0-9a-f]{40,64})$/;
+		packet_bin_read();
+
+		system ('git --git-dir="' . $DIR . '" cat-file blob ' . $sha1 . ' | git -c core.virtualizeobjects=false hash-object -w --stdin >/dev/null 2>&1');
+		packet_txt_write(($?) ? "status=error" : "status=success");
+		packet_flush();
+	} else {
+		die "bad command '$command'";
+	}
+}
diff --git a/t/t0499-read-object.sh b/t/t0499-read-object.sh
new file mode 100755
index 00000000000000..2e208bdb46add5
--- /dev/null
+++ b/t/t0499-read-object.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+test_description='tests for long running read-object process'
+
+. ./test-lib.sh
+
+test_expect_success 'setup host repo with a root commit' '
+	test_commit zero &&
+	hash1=$(git ls-tree HEAD | grep zero.t | cut -f1 | cut -d\  -f3)
+'
+
+test_expect_success 'blobs can be retrieved from the host repo' '
+	git init guest-repo &&
+	(cd guest-repo &&
+	 mkdir -p .git/hooks &&
+	 sed "1s|/usr/bin/perl|$PERL_PATH|" \
+	   <$TEST_DIRECTORY/t0410/read-object \
+	   >.git/hooks/read-object &&
+	 chmod +x .git/hooks/read-object &&
+	 git config core.virtualizeobjects true &&
+	 git cat-file blob "$hash1")
+'
+
+test_expect_success 'invalid blobs generate errors' '
+	(cd guest-repo &&
+	 test_must_fail git cat-file blob "invalid")
+'
+
+
+test_done

From 00a11f2fa74d74a741e9c830458699b01e1e84e2 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johasc@microsoft.com>
Date: Fri, 8 Sep 2017 11:32:43 +0200
Subject: [PATCH 036/207] sha1_file: when writing objects, skip the
 read_object_hook

If we are going to write an object, there is no use in calling the
read-object hook to get an object from a potentially remote source.
We would rather just write out the object and avoid the potential
round trip for an object that doesn't exist.

This change adds a flag to the check_and_freshen() and
freshen_loose_object() functions' signatures so that the hook
is bypassed when the functions are called before writing loose
objects. The check for a local object is still performed so we
don't overwrite something that has already been written to one
of the objects directories.

Based on a patch by Kevin Willford.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 object-file.c          | 19 +++++++++++--------
 t/t0410/read-object    |  4 ++++
 t/t0499-read-object.sh |  7 +++++++
 3 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/object-file.c b/object-file.c
index 909bc4beff78b9..d4e6a9005ecd6c 100644
--- a/object-file.c
+++ b/object-file.c
@@ -1184,7 +1184,8 @@ static int check_and_freshen_nonlocal(const struct object_id *oid, int freshen)
 	return 0;
 }
 
-static int check_and_freshen(const struct object_id *oid, int freshen)
+static int check_and_freshen(const struct object_id *oid, int freshen,
+			     int skip_virtualized_objects)
 {
 	int ret;
 	int tried_hook = 0;
@@ -1192,7 +1193,8 @@ static int check_and_freshen(const struct object_id *oid, int freshen)
 retry:
 	ret = check_and_freshen_local(oid, freshen) ||
 	       check_and_freshen_nonlocal(oid, freshen);
-	if (!ret && core_virtualize_objects && !tried_hook) {
+	if (!ret && core_virtualize_objects && !skip_virtualized_objects &&
+	    !tried_hook) {
 		tried_hook = 1;
 		if (!read_object_process(oid))
 			goto retry;
@@ -1208,7 +1210,7 @@ int has_loose_object_nonlocal(const struct object_id *oid)
 
 int has_loose_object(const struct object_id *oid)
 {
-	return check_and_freshen(oid, 0);
+	return check_and_freshen(oid, 0, 0);
 }
 
 static void mmap_limit_check(size_t length)
@@ -2462,9 +2464,10 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
 					  FOF_SKIP_COLLISION_CHECK);
 }
 
-static int freshen_loose_object(const struct object_id *oid)
+static int freshen_loose_object(const struct object_id *oid,
+				int skip_virtualized_objects)
 {
-	return check_and_freshen(oid, 1);
+	return check_and_freshen(oid, 1, skip_virtualized_objects);
 }
 
 static int freshen_packed_object(const struct object_id *oid)
@@ -2560,7 +2563,7 @@ int stream_loose_object(struct input_stream *in_stream, size_t len,
 		die(_("deflateEnd on stream object failed (%d)"), ret);
 	close_loose_object(fd, tmp_file.buf);
 
-	if (freshen_packed_object(oid) || freshen_loose_object(oid)) {
+	if (freshen_packed_object(oid) || freshen_loose_object(oid, 1)) {
 		unlink_or_warn(tmp_file.buf);
 		goto cleanup;
 	}
@@ -2622,7 +2625,7 @@ int write_object_file_flags(const void *buf, size_t len,
 	 * it out into .git/objects/??/?{38} file.
 	 */
 	write_object_file_prepare(algo, buf, len, type, oid, hdr, &hdrlen);
-	if (freshen_packed_object(oid) || freshen_loose_object(oid))
+	if (freshen_packed_object(oid) || freshen_loose_object(oid, 1))
 		return 0;
 	if (write_loose_object(oid, hdr, hdrlen, buf, len, 0, flags))
 		return -1;
@@ -2666,7 +2669,7 @@ int write_object_file_literally(const void *buf, size_t len,
 
 	if (!(flags & HASH_WRITE_OBJECT))
 		goto cleanup;
-	if (freshen_packed_object(oid) || freshen_loose_object(oid))
+	if (freshen_packed_object(oid) || freshen_loose_object(oid, 1))
 		goto cleanup;
 	status = write_loose_object(oid, header, hdrlen, buf, len, 0, 0);
 	if (compat_type != -1)
diff --git a/t/t0410/read-object b/t/t0410/read-object
index 2b8feacc78577f..02c799837f4057 100755
--- a/t/t0410/read-object
+++ b/t/t0410/read-object
@@ -108,6 +108,10 @@ while (1) {
 		system ('git --git-dir="' . $DIR . '" cat-file blob ' . $sha1 . ' | git -c core.virtualizeobjects=false hash-object -w --stdin >/dev/null 2>&1');
 		packet_txt_write(($?) ? "status=error" : "status=success");
 		packet_flush();
+
+		open my $log, '>>.git/read-object-hook.log';
+		print $log "Read object $sha1, exit code $?\n";
+		close $log;
 	} else {
 		die "bad command '$command'";
 	}
diff --git a/t/t0499-read-object.sh b/t/t0499-read-object.sh
index 2e208bdb46add5..0cee1963cf091e 100755
--- a/t/t0499-read-object.sh
+++ b/t/t0499-read-object.sh
@@ -26,5 +26,12 @@ test_expect_success 'invalid blobs generate errors' '
 	 test_must_fail git cat-file blob "invalid")
 '
 
+test_expect_success 'read-object-hook is bypassed when writing objects' '
+	(cd guest-repo &&
+	 echo hello >hello.txt &&
+	 git add hello.txt &&
+	 hash="$(git rev-parse --verify :hello.txt)" &&
+	 ! grep "$hash" .git/read-object-hook.log)
+'
 
 test_done

From d16546c77ef21a176352ef34c3c8b180477d4678 Mon Sep 17 00:00:00 2001
From: Ben Peart <Ben.Peart@microsoft.com>
Date: Tue, 24 May 2016 00:32:38 +0000
Subject: [PATCH 037/207] gvfs: add global command pre and post hook procs

This adds a hard-coded call to GVFS.hooks.exe before and after each Git
command runs.

To make sure that this is only called on repositories cloned with GVFS, we
test for the tell-tale .gvfs.

2021-10-30: Recent movement of find_hook() to hook.c required moving these
changes out of run-command.c to hook.c.
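
For illustration, a minimal sketch of what such hooks could look like
(the hook names and arguments match the wiring below; the log path is
made up for the example):

    #!/bin/sh
    # .git/hooks/pre-command: receives the git command line being run
    echo "pre: $*" >>/tmp/git-commands.log

    #!/bin/sh
    # .git/hooks/post-command: same arguments plus --exit_code=<n>
    echo "post: $*" >>/tmp/git-commands.log

If the pre-command hook exits non-zero, the command is aborted with
"pre-command hook aborted command".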

Signed-off-by: Ben Peart <Ben.Peart@microsoft.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 git.c                        | 84 ++++++++++++++++++++++++++++++++++--
 hook.c                       | 57 +++++++++++++++++++++++-
 t/meson.build                |  2 +
 t/t0400-pre-command-hook.sh  | 34 +++++++++++++++
 t/t0401-post-command-hook.sh | 32 ++++++++++++++
 5 files changed, 205 insertions(+), 4 deletions(-)
 create mode 100755 t/t0400-pre-command-hook.sh
 create mode 100755 t/t0401-post-command-hook.sh

diff --git a/git.c b/git.c
index 71f4a9c37236ab..f356473cc2872d 100644
--- a/git.c
+++ b/git.c
@@ -17,6 +17,8 @@
 #include "shallow.h"
 #include "trace.h"
 #include "trace2.h"
+#include "dir.h"
+#include "hook.h"
 
 #define RUN_SETUP		(1<<0)
 #define RUN_SETUP_GENTLY	(1<<1)
@@ -437,6 +439,67 @@ static int handle_alias(struct strvec *args)
 	return ret;
 }
 
+/* Runs pre/post-command hook */
+static struct strvec sargv = STRVEC_INIT;
+static int run_post_hook = 0;
+static int exit_code = -1;
+
+static int run_pre_command_hook(struct repository *r, const char **argv)
+{
+	char *lock;
+	int ret = 0;
+	struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+
+	/*
+	 * Ensure the global pre/post command hook is only called for
+	 * the outer command and not when git is called recursively
+	 * or spawns multiple commands (like with the alias command)
+	 */
+	lock = getenv("COMMAND_HOOK_LOCK");
+	if (lock && !strcmp(lock, "true"))
+		return 0;
+	setenv("COMMAND_HOOK_LOCK", "true", 1);
+
+	/* call the hook proc */
+	strvec_pushv(&sargv, argv);
+	strvec_pushv(&opt.args, sargv.v);
+	ret = run_hooks_opt(r, "pre-command", &opt);
+
+	if (!ret)
+		run_post_hook = 1;
+	return ret;
+}
+
+static int run_post_command_hook(struct repository *r)
+{
+	char *lock;
+	int ret = 0;
+	struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+
+	/*
+	 * Only run post_command if pre_command succeeded in this process
+	 */
+	if (!run_post_hook)
+		return 0;
+	lock = getenv("COMMAND_HOOK_LOCK");
+	if (!lock || strcmp(lock, "true"))
+		return 0;
+
+	strvec_pushv(&opt.args, sargv.v);
+	strvec_pushf(&opt.args, "--exit_code=%u", exit_code);
+	ret = run_hooks_opt(r, "post-command", &opt);
+
+	run_post_hook = 0;
+	strvec_clear(&sargv);
+	setenv("COMMAND_HOOK_LOCK", "false", 1);
+	return ret;
+}
+
+static void post_command_hook_atexit(void)
+{
+	run_post_command_hook(the_repository);
+}
+
 static int run_builtin(struct cmd_struct *p, int argc, const char **argv, struct repository *repo)
 {
 	int status, help;
@@ -473,16 +536,21 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv, struct
 	if (!help && p->option & NEED_WORK_TREE)
 		setup_work_tree();
 
+	if (run_pre_command_hook(the_repository, argv))
+		die("pre-command hook aborted command");
+
 	trace_argv_printf(argv, "trace: built-in: git");
 	trace2_cmd_name(p->cmd);
 
 	validate_cache_entries(repo->index);
-	status = p->fn(argc, argv, prefix, no_repo ? NULL : repo);
+	exit_code = status = p->fn(argc, argv, prefix, no_repo ? NULL : repo);
 	validate_cache_entries(repo->index);
 
 	if (status)
 		return status;
 
+	run_post_command_hook(the_repository);
+
 	/* Somebody closed stdout? */
 	if (fstat(fileno(stdout), &st))
 		return 0;
@@ -770,13 +838,16 @@ static void execv_dashed_external(const char **argv)
 	 */
 	trace_argv_printf(cmd.args.v, "trace: exec:");
 
+	if (run_pre_command_hook(the_repository, cmd.args.v))
+		die("pre-command hook aborted command");
+
 	/*
 	 * If we fail because the command is not found, it is
 	 * OK to return. Otherwise, we just pass along the status code,
 	 * or our usual generic code if we were not even able to exec
 	 * the program.
 	 */
-	status = run_command(&cmd);
+	exit_code = status = run_command(&cmd);
 
 	/*
 	 * If the child process ran and we are now going to exit, emit a
@@ -787,6 +858,8 @@ static void execv_dashed_external(const char **argv)
 		exit(status);
 	else if (errno != ENOENT)
 		exit(128);
+
+	run_post_command_hook(the_repository);
 }
 
 static int run_argv(struct strvec *args)
@@ -894,6 +967,7 @@ int cmd_main(int argc, const char **argv)
 	}
 
 	trace_command_performance(argv);
+	atexit(post_command_hook_atexit);
 
 	/*
 	 * "git-xxxx" is the same as "git xxxx", but we obviously:
@@ -921,10 +995,14 @@ int cmd_main(int argc, const char **argv)
 	if (!argc) {
 		/* The user didn't specify a command; give them help */
 		commit_pager_choice();
+		if (run_pre_command_hook(the_repository, argv))
+			die("pre-command hook aborted command");
 		printf(_("usage: %s\n\n"), git_usage_string);
 		list_common_cmds_help();
 		printf("\n%s\n", _(git_more_info_string));
-		exit(1);
+		exit_code = 1;
+		run_post_command_hook(the_repository);
+		exit(exit_code);
 	}
 
 	if (!strcmp("--version", argv[0]) || !strcmp("-v", argv[0]))
diff --git a/hook.c b/hook.c
index 9ddbdee06d5774..1cc10db67707ef 100644
--- a/hook.c
+++ b/hook.c
@@ -1,5 +1,8 @@
+#define USE_THE_REPOSITORY_VARIABLE
+
 #include "git-compat-util.h"
 #include "abspath.h"
+#include "environment.h"
 #include "advice.h"
 #include "gettext.h"
 #include "hook.h"
@@ -10,6 +13,54 @@
 #include "environment.h"
 #include "setup.h"
 
+static int early_hooks_path_config(const char *var, const char *value,
+				   const struct config_context *ctx UNUSED, void *cb)
+{
+	if (!strcmp(var, "core.hookspath"))
+		return git_config_pathname((char **)cb, var, value);
+
+	return 0;
+}
+
+/* Discover the hook before setup_git_directory() was called */
+static const char *hook_path_early(const char *name, struct strbuf *result)
+{
+	static struct strbuf hooks_dir = STRBUF_INIT;
+	static int initialized;
+
+	if (initialized < 0)
+		return NULL;
+
+	if (!initialized) {
+		struct strbuf gitdir = STRBUF_INIT, commondir = STRBUF_INIT;
+		char *early_hooks_dir = NULL;
+
+		if (discover_git_directory(&commondir, &gitdir) < 0) {
+			strbuf_release(&gitdir);
+			strbuf_release(&commondir);
+			initialized = -1;
+			return NULL;
+		}
+
+		read_early_config(the_repository, early_hooks_path_config, &early_hooks_dir);
+		if (!early_hooks_dir)
+			strbuf_addf(&hooks_dir, "%s/hooks/", commondir.buf);
+		else {
+			strbuf_add_absolute_path(&hooks_dir, early_hooks_dir);
+			free(early_hooks_dir);
+			strbuf_addch(&hooks_dir, '/');
+		}
+
+		strbuf_release(&gitdir);
+		strbuf_release(&commondir);
+
+		initialized = 1;
+	}
+
+	strbuf_addf(result, "%s%s", hooks_dir.buf, name);
+	return result->buf;
+}
+
 const char *find_hook(struct repository *r, const char *name)
 {
 	static struct strbuf path = STRBUF_INIT;
@@ -17,7 +68,11 @@ const char *find_hook(struct repository *r, const char *name)
 	int found_hook;
 
 	strbuf_reset(&path);
-	strbuf_repo_git_path(&path, r, "hooks/%s", name);
+	if (have_git_dir())
+		strbuf_repo_git_path(&path, r, "hooks/%s", name);
+	else if (!hook_path_early(name, &path))
+		return NULL;
+
 	found_hook = access(path.buf, X_OK) >= 0;
 #ifdef STRIP_EXTENSION
 	if (!found_hook) {
diff --git a/t/meson.build b/t/meson.build
index 335486962b5e77..887f260cbb087c 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -149,6 +149,8 @@ integration_tests = [
   't0301-credential-cache.sh',
   't0302-credential-store.sh',
   't0303-credential-external.sh',
+  't0400-pre-command-hook.sh',
+  't0401-post-command-hook.sh',
   't0410-partial-clone.sh',
   't0411-clone-from-partial.sh',
   't0450-txt-doc-vs-help.sh',
diff --git a/t/t0400-pre-command-hook.sh b/t/t0400-pre-command-hook.sh
new file mode 100755
index 00000000000000..4f4f610b52b0a0
--- /dev/null
+++ b/t/t0400-pre-command-hook.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+test_description='pre-command hook'
+
+. ./test-lib.sh
+
+test_expect_success 'with no hook' '
+	echo "first" > file &&
+	git add file &&
+	git commit -m "first"
+'
+
+test_expect_success 'with succeeding hook' '
+	mkdir -p .git/hooks &&
+	write_script .git/hooks/pre-command <<-EOF &&
+	echo "\$*" >\$(git rev-parse --git-dir)/pre-command.out
+	EOF
+	echo "second" >> file &&
+	git add file &&
+	test "add file" = "$(cat .git/pre-command.out)" &&
+	echo Hello | git hash-object --stdin &&
+	test "hash-object --stdin" = "$(cat .git/pre-command.out)"
+'
+
+test_expect_success 'with failing hook' '
+	write_script .git/hooks/pre-command <<-EOF &&
+	exit 1
+	EOF
+	echo "third" >> file &&
+	test_must_fail git add file &&
+	test_path_is_missing "$(cat .git/pre-command.out)"
+'
+
+test_done
diff --git a/t/t0401-post-command-hook.sh b/t/t0401-post-command-hook.sh
new file mode 100755
index 00000000000000..64646f7ad03b57
--- /dev/null
+++ b/t/t0401-post-command-hook.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+test_description='post-command hook'
+
+. ./test-lib.sh
+
+test_expect_success 'with no hook' '
+	echo "first" > file &&
+	git add file &&
+	git commit -m "first"
+'
+
+test_expect_success 'with succeeding hook' '
+	mkdir -p .git/hooks &&
+	write_script .git/hooks/post-command <<-EOF &&
+	echo "\$*" >\$(git rev-parse --git-dir)/post-command.out
+	EOF
+	echo "second" >> file &&
+	git add file &&
+	test "add file --exit_code=0" = "$(cat .git/post-command.out)"
+'
+
+test_expect_success 'with failing pre-command hook' '
+	write_script .git/hooks/pre-command <<-EOF &&
+	exit 1
+	EOF
+	echo "third" >> file &&
+	test_must_fail git add file &&
+	test_path_is_missing "$(cat .git/post-command.out)"
+'
+
+test_done

From 89ea17ffef725682f62be4ba7e3d6d06d70940a1 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johasc@microsoft.com>
Date: Thu, 16 Mar 2017 21:07:54 +0100
Subject: [PATCH 038/207] t0400: verify that the hook is called correctly from
 a subdirectory

Suggested by Ben Peart.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 t/t0400-pre-command-hook.sh | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/t/t0400-pre-command-hook.sh b/t/t0400-pre-command-hook.sh
index 4f4f610b52b0a0..83c453c9643eae 100755
--- a/t/t0400-pre-command-hook.sh
+++ b/t/t0400-pre-command-hook.sh
@@ -31,4 +31,27 @@ test_expect_success 'with failing hook' '
 	test_path_is_missing "$(cat .git/pre-command.out)"
 '
 
+test_expect_success 'in a subdirectory' '
+	echo touch i-was-here | write_script .git/hooks/pre-command &&
+	mkdir sub &&
+	(
+		cd sub &&
+		git version
+	) &&
+	test_path_is_file sub/i-was-here
+'
+
+test_expect_success 'in a subdirectory, using an alias' '
+	git reset --hard &&
+	echo "echo \"\$@; \$(pwd)\" >>log" |
+	write_script .git/hooks/pre-command &&
+	mkdir -p sub &&
+	(
+		cd sub &&
+		git -c alias.v="version" v
+	) &&
+	test_path_is_missing log &&
+	test_line_count = 2 sub/log
+'
+
 test_done

From 9d4b2f7adfa757824bd0131b8dcb13e94b48160c Mon Sep 17 00:00:00 2001
From: Alejandro Pauly <alpauly@microsoft.com>
Date: Mon, 10 Apr 2017 13:26:14 -0400
Subject: [PATCH 039/207] Pass PID of git process to hooks.

Signed-off-by: Alejandro Pauly <alpauly@microsoft.com>
---
 git.c                        | 1 +
 t/t0400-pre-command-hook.sh  | 3 ++-
 t/t0401-post-command-hook.sh | 3 ++-
 3 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/git.c b/git.c
index f356473cc2872d..879da47101e58f 100644
--- a/git.c
+++ b/git.c
@@ -462,6 +462,7 @@ static int run_pre_command_hook(struct repository *r, const char **argv)
 
 	/* call the hook proc */
 	strvec_pushv(&sargv, argv);
+	strvec_pushf(&sargv, "--git-pid=%"PRIuMAX, (uintmax_t)getpid());
 	strvec_pushv(&opt.args, sargv.v);
 	ret = run_hooks_opt(r, "pre-command", &opt);
 
diff --git a/t/t0400-pre-command-hook.sh b/t/t0400-pre-command-hook.sh
index 83c453c9643eae..f04a55a695bc97 100755
--- a/t/t0400-pre-command-hook.sh
+++ b/t/t0400-pre-command-hook.sh
@@ -13,7 +13,8 @@ test_expect_success 'with no hook' '
 test_expect_success 'with succeeding hook' '
 	mkdir -p .git/hooks &&
 	write_script .git/hooks/pre-command <<-EOF &&
-	echo "\$*" >\$(git rev-parse --git-dir)/pre-command.out
+	echo "\$*" | sed "s/ --git-pid=[0-9]*//" \
+		>\$(git rev-parse --git-dir)/pre-command.out
 	EOF
 	echo "second" >> file &&
 	git add file &&
diff --git a/t/t0401-post-command-hook.sh b/t/t0401-post-command-hook.sh
index 64646f7ad03b57..fcbfc4a0c79c1e 100755
--- a/t/t0401-post-command-hook.sh
+++ b/t/t0401-post-command-hook.sh
@@ -13,7 +13,8 @@ test_expect_success 'with no hook' '
 test_expect_success 'with succeeding hook' '
 	mkdir -p .git/hooks &&
 	write_script .git/hooks/post-command <<-EOF &&
-	echo "\$*" >\$(git rev-parse --git-dir)/post-command.out
+	echo "\$*" | sed "s/ --git-pid=[0-9]*//" \
+		>\$(git rev-parse --git-dir)/post-command.out
 	EOF
 	echo "second" >> file &&
 	git add file &&

From f3e6f6d881616fcc428be8a58b1cff196fa5964d Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johasc@microsoft.com>
Date: Tue, 8 Aug 2017 00:27:50 +0200
Subject: [PATCH 040/207] pre-command: always respect core.hooksPath

We need to respect that config setting even if we already know that we
have a repository, but have not yet read the config.

The regression test was written by Alejandro Pauly.

2021-10-30: Recent movement of find_hook() into hook.c required moving this
change from run-command.c.
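
For example, a hooks directory configured outside the repository should
now be honored for these hooks as well (a sketch; the path is made up):

    git config --global core.hooksPath ~/git-hooks
    # ~/git-hooks/pre-command now runs even though the repository's
    # own config has not been read yet when the hook fires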

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 hook.c                      | 13 +++++++++++--
 t/t0400-pre-command-hook.sh | 11 +++++++++++
 2 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/hook.c b/hook.c
index 1cc10db67707ef..305c48d463d62c 100644
--- a/hook.c
+++ b/hook.c
@@ -68,9 +68,18 @@ const char *find_hook(struct repository *r, const char *name)
 	int found_hook;
 
 	strbuf_reset(&path);
-	if (have_git_dir())
+	if (have_git_dir()) {
+		static int forced_config;
+
+		if (!forced_config) {
+			if (!git_hooks_path)
+				git_config_get_pathname("core.hookspath",
+							&git_hooks_path);
+			forced_config = 1;
+		}
+
 		strbuf_repo_git_path(&path, r, "hooks/%s", name);
-	else if (!hook_path_early(name, &path))
+	} else if (!hook_path_early(name, &path))
 		return NULL;
 
 	found_hook = access(path.buf, X_OK) >= 0;
diff --git a/t/t0400-pre-command-hook.sh b/t/t0400-pre-command-hook.sh
index f04a55a695bc97..f2a9115e299385 100755
--- a/t/t0400-pre-command-hook.sh
+++ b/t/t0400-pre-command-hook.sh
@@ -55,4 +55,15 @@ test_expect_success 'in a subdirectory, using an alias' '
 	test_line_count = 2 sub/log
 '
 
+test_expect_success 'with core.hooksPath' '
+	mkdir -p .git/alternateHooks &&
+	write_script .git/alternateHooks/pre-command <<-EOF &&
+	echo "alternate" >\$(git rev-parse --git-dir)/pre-command.out
+	EOF
+	write_script .git/hooks/pre-command <<-EOF &&
+	echo "original"	>\$(git rev-parse --git-dir)/pre-command.out
+	EOF
+	git -c core.hooksPath=.git/alternateHooks status &&
+	test "alternate" = "$(cat .git/pre-command.out)"
+'
 test_done

From ace01dde119a78c5684a3bc8e44e19cd614b3975 Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Wed, 22 Feb 2017 12:50:43 -0700
Subject: [PATCH 041/207] sparse-checkout: update files with a modify/delete
 conflict

When using the sparse-checkout feature, the file might not be on disk
because the skip-worktree bit is on.

Signed-off-by: Kevin Willford <kewillf@microsoft.com>
---
 merge-recursive.c                |  2 +-
 t/meson.build                    |  1 +
 t/t7616-merge-sparse-checkout.sh | 31 +++++++++++++++++++++++++++++++
 3 files changed, 33 insertions(+), 1 deletion(-)
 create mode 100755 t/t7616-merge-sparse-checkout.sh

diff --git a/merge-recursive.c b/merge-recursive.c
index 39b3c479c69af7..4533127e972647 100644
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -1594,7 +1594,7 @@ static int handle_change_delete(struct merge_options *opt,
 		 * path.  We could call update_file_flags() with update_cache=0
 		 * and update_wd=0, but that's a no-op.
 		 */
-		if (change_branch != opt->branch1 || alt_path)
+		if (change_branch != opt->branch1 || alt_path || !file_exists(update_path))
 			ret = update_file(opt, 0, changed, update_path);
 	}
 	free(alt_path);
diff --git a/t/meson.build b/t/meson.build
index 887f260cbb087c..be5247fb3ffabb 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -937,6 +937,7 @@ integration_tests = [
   't7612-merge-verify-signatures.sh',
   't7614-merge-signoff.sh',
   't7615-diff-algo-with-mergy-operations.sh',
+  't7616-merge-sparse-checkout.sh',
   't7700-repack.sh',
   't7701-repack-unpack-unreachable.sh',
   't7702-repack-cyclic-alternate.sh',
diff --git a/t/t7616-merge-sparse-checkout.sh b/t/t7616-merge-sparse-checkout.sh
new file mode 100755
index 00000000000000..5ce12431f62ad1
--- /dev/null
+++ b/t/t7616-merge-sparse-checkout.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+test_description='merge can handle sparse-checkout'
+
+. ./test-lib.sh
+
+# merges with conflicts
+
+test_expect_success 'setup' '
+	git branch -M main &&
+	test_commit a &&
+	test_commit file &&
+	git checkout -b delete-file &&
+	git rm file.t &&
+	test_tick &&
+	git commit -m "remove file" &&
+	git checkout main &&
+	test_commit modify file.t changed
+'
+
+test_expect_success 'merge conflict deleted file and modified' '
+	echo "/a.t" >.git/info/sparse-checkout &&
+	test_config core.sparsecheckout true &&
+	git checkout -f &&
+	test_path_is_missing file.t &&
+	test_must_fail git merge delete-file &&
+	test_path_is_file file.t &&
+	test "changed" = "$(cat file.t)"
+'
+
+test_done

From 26ed740eb6d01610e647123689a089dffab25166 Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Wed, 1 Mar 2017 15:17:12 -0800
Subject: [PATCH 042/207] sparse-checkout: avoid writing entries with the
 skip-worktree bit

When using the sparse-checkout feature, git should not write to the
working directory for files with the skip-worktree bit on.  With the
skip-worktree bit on, the file may or may not be in the working
directory, and if it is not, we don't want or need to create it by
calling checkout_entry.

There are two callers of checkout_target, and both check that the file
does not exist before calling it: load_current calls lstat right before
calling checkout_target, and check_preimage only runs checkout_target
if stat_ret is less than zero.  It sets stat_ret to zero, and only if
!stat->cached will it lstat the file and set stat_ret to something
other than zero.

This patch checks whether the skip-worktree bit is on in
checkout_target and simply returns, so that the entry does not end up
in the working directory.  This keeps apply from creating a file in the
working directory, then updating the index without keeping the working
directory in sync with the changes made to the index.
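
A sketch of the scenario this guards against (paths are illustrative):

    git update-index --skip-worktree dir/file.txt   # file not on disk
    git apply --index fix.patch                     # patch touches dir/file.txt
    # before: checkout_target recreated dir/file.txt from the index, and
    #         apply then updated the index, leaving the on-disk copy stale
    # after:  the entry is not written to the working directory at all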

Signed-off-by: Kevin Willford <kewillf@microsoft.com>
---
 apply.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/apply.c b/apply.c
index 3b4cb3e042112a..df1ae11f44be54 100644
--- a/apply.c
+++ b/apply.c
@@ -3370,6 +3370,24 @@ static int checkout_target(struct index_state *istate,
 {
 	struct checkout costate = CHECKOUT_INIT;
 
+	/*
+	 * Do not check out the entry if the skip-worktree bit is set
+	 *
+	 * Both callers of this method (check_preimage and load_current)
+	 * check for the existence of the file before calling this
+	 * method so we know that the file doesn't exist at this point
+	 * and we don't need to perform that check again here.
+	 * We just need to check the skip-worktree and return.
+	 *
+	 * This is to prevent git from creating a file in the
+	 * working directory that has the skip-worktree bit on,
+	 * then updating the index from the patch and not keeping
+	 * the working directory version up to date with what it
+	 * changed the index version to be.
+	 */
+	if (ce_skip_worktree(ce))
+		return 0;
+
 	costate.refresh_cache = 1;
 	costate.istate = istate;
 	if (checkout_entry(ce, &costate, NULL, NULL) ||

From 7a6f583e8779df32be774a2f35342034db6aaa95 Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Wed, 5 Apr 2017 10:55:32 -0600
Subject: [PATCH 043/207] Do not remove files outside the sparse-checkout

Signed-off-by: Kevin Willford <kewillf@microsoft.com>
---
 unpack-trees.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/unpack-trees.c b/unpack-trees.c
index 00d5e3b023ce62..fc5191988bb2e5 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -573,7 +573,9 @@ static int apply_sparse_checkout(struct index_state *istate,
 			ce->ce_flags &= ~CE_SKIP_WORKTREE;
 			return -1;
 		}
-		ce->ce_flags |= CE_WT_REMOVE;
+		if (!gvfs_config_is_set(GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT))
+			ce->ce_flags |= CE_WT_REMOVE;
+
 		ce->ce_flags &= ~CE_UPDATE;
 	}
 	if (was_skip_worktree && !ce_skip_worktree(ce)) {

From f117f501a640954939782efd700593c34be72dea Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Fri, 16 Nov 2018 11:28:59 -0700
Subject: [PATCH 044/207] send-pack: do not check for sha1 file when
 GVFS_MISSING_OK set

---
 send-pack.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/send-pack.c b/send-pack.c
index 0d39ee7859d067..cdb6dc11d1ea18 100644
--- a/send-pack.c
+++ b/send-pack.c
@@ -6,6 +6,7 @@
 #include "date.h"
 #include "gettext.h"
 #include "hex.h"
+#include "gvfs.h"
 #include "object-store-ll.h"
 #include "pkt-line.h"
 #include "sideband.h"
@@ -46,7 +47,7 @@ int option_parse_push_signed(const struct option *opt,
 
 static void feed_object(const struct object_id *oid, FILE *fh, int negative)
 {
-	if (negative &&
+	if (negative && !gvfs_config_is_set(GVFS_MISSING_OK) &&
 	    !repo_has_object_file_with_flags(the_repository, oid,
 					     OBJECT_INFO_SKIP_FETCH_OBJECT |
 					     OBJECT_INFO_QUICK))

From 719b9eab5441a35efd4ed5da48be17b445e97dc9 Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Mon, 3 Jul 2017 13:39:45 -0600
Subject: [PATCH 045/207] cache-tree: remove use of strbuf_addf in update_one

String formatting can be a performance issue when there are
hundreds of thousands of trees.

Stop using strbuf_addf and instead add the strings or characters
individually.

There are a limited number of modes, so add a switch for the known
ones and a default case for anything that is not a mode git knows
about.

In one scenario involving a huge worktree, this reduces the
time required for a `git checkout <branch>` from 44 seconds
to 38 seconds, i.e. it is a non-negligible performance
improvement.

Signed-off-by: Kevin Willford <kewillf@microsoft.com>
---
 cache-tree.c | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/cache-tree.c b/cache-tree.c
index 312d266b7aae6a..0ad97555013f5b 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -432,7 +432,29 @@ static int update_one(struct cache_tree *it,
 			continue;
 
 		strbuf_grow(&buffer, entlen + 100);
-		strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
+
+		switch (mode) {
+		case 0100644:
+			strbuf_add(&buffer, "100644 ", 7);
+			break;
+		case 0100664:
+			strbuf_add(&buffer, "100664 ", 7);
+			break;
+		case 0100755:
+			strbuf_add(&buffer, "100755 ", 7);
+			break;
+		case 0120000:
+			strbuf_add(&buffer, "120000 ", 7);
+			break;
+		case 0160000:
+			strbuf_add(&buffer, "160000 ", 7);
+			break;
+		default:
+			strbuf_addf(&buffer, "%o ", mode);
+			break;
+		}
+		strbuf_add(&buffer, path + baselen, entlen);
+		strbuf_addch(&buffer, '\0');
 		strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);
 
 #if DEBUG_CACHE_TREE

From ebba63fce69a564b29144baf7ef315c1a1eb27c5 Mon Sep 17 00:00:00 2001
From: Ben Peart <benpeart@microsoft.com>
Date: Thu, 6 Dec 2018 11:09:19 -0500
Subject: [PATCH 046/207] gvfs: block unsupported commands when running in a
 GVFS repo

The following commands and options are not currently supported when working
in a GVFS repo.  Add code to detect and block these commands from executing;
the resulting behavior is sketched after the list.

1) fsck
2) gc
3) prune
4) repack
5) submodule
6) update-index --split-index
7) update-index --index-version (other than 4)
8) update-index --[no-]skip-worktree
9) worktree
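
With the GVFS_BLOCK_COMMANDS bit set in core.gvfs, a blocked command
dies before doing any work.  A sketch of the expected behavior (the
message text comes from the die() call added in git.c):

    $ git -c core.gvfs=true repack
    fatal: 'git repack' is not supported on a GVFS repo

    $ git -c core.gvfs=true status    # unaffected commands still run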

Signed-off-by: Ben Peart <benpeart@microsoft.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 builtin/gc.c                     |  4 ++++
 builtin/update-index.c           | 10 ++++++++
 git.c                            | 15 ++++++++----
 gvfs.h                           |  1 +
 t/meson.build                    |  1 +
 t/t0402-block-command-on-gvfs.sh | 39 ++++++++++++++++++++++++++++++++
 6 files changed, 65 insertions(+), 5 deletions(-)
 create mode 100755 t/t0402-block-command-on-gvfs.sh

diff --git a/builtin/gc.c b/builtin/gc.c
index a9b1c36de27da2..e60dc90a96b4de 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -18,6 +18,7 @@
 #include "date.h"
 #include "environment.h"
 #include "hex.h"
+#include "gvfs.h"
 #include "config.h"
 #include "tempfile.h"
 #include "lockfile.h"
@@ -749,6 +750,9 @@ struct repository *repo UNUSED)
 	if (quiet)
 		strvec_push(&repack, "-q");
 
+	if ((!opts.auto_flag || (opts.auto_flag && cfg.gc_auto_threshold > 0)) && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+		die(_("'git gc' is not supported on a GVFS repo"));
+
 	if (opts.auto_flag) {
 		if (cfg.detach_auto && opts.detach < 0)
 			opts.detach = 1;
diff --git a/builtin/update-index.c b/builtin/update-index.c
index 74bbad9f87d86d..04b2dbe6ec6046 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -8,6 +8,7 @@
 #define DISABLE_SIGN_COMPARE_WARNINGS
 
 #include "builtin.h"
+#include "gvfs.h"
 #include "bulk-checkin.h"
 #include "config.h"
 #include "environment.h"
@@ -1115,7 +1116,13 @@ int cmd_update_index(int argc,
 	argc = parse_options_end(&ctx);
 
 	getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
+	if (mark_skip_worktree_only && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+		die(_("modifying the skip worktree bit is not supported on a GVFS repo"));
+
 	if (preferred_index_format) {
+		if (preferred_index_format != 4 && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+			die(_("changing the index version is not supported on a GVFS repo"));
+
 		if (preferred_index_format < 0) {
 			printf(_("%d\n"), the_repository->index->version);
 		} else if (preferred_index_format < INDEX_FORMAT_LB ||
@@ -1161,6 +1168,9 @@ int cmd_update_index(int argc,
 	end_odb_transaction();
 
 	if (split_index > 0) {
+		if (gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+			die(_("split index is not supported on a GVFS repo"));
+
 		if (repo_config_get_split_index(the_repository) == 0)
 			warning(_("core.splitIndex is set to false; "
 				  "remove or change it, if you really want to "
diff --git a/git.c b/git.c
index 879da47101e58f..3cf3d2a4a18fa4 100644
--- a/git.c
+++ b/git.c
@@ -1,6 +1,7 @@
 #define USE_THE_REPOSITORY_VARIABLE
 
 #include "builtin.h"
+#include "gvfs.h"
 #include "config.h"
 #include "environment.h"
 #include "exec-cmd.h"
@@ -30,6 +31,7 @@
 #define NEED_WORK_TREE		(1<<3)
 #define DELAY_PAGER_CONFIG	(1<<4)
 #define NO_PARSEOPT		(1<<5) /* parse-options is not used */
+#define BLOCK_ON_GVFS_REPO	(1<<6) /* command not allowed in GVFS repos */
 
 struct cmd_struct {
 	const char *cmd;
@@ -537,6 +539,9 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv, struct
 	if (!help && p->option & NEED_WORK_TREE)
 		setup_work_tree();
 
+	if (!help && p->option & BLOCK_ON_GVFS_REPO && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+		die("'git %s' is not supported on a GVFS repo", p->cmd);
+
 	if (run_pre_command_hook(the_repository, argv))
 		die("pre-command hook aborted command");
 
@@ -620,7 +625,7 @@ static struct cmd_struct commands[] = {
 	{ "for-each-ref", cmd_for_each_ref, RUN_SETUP },
 	{ "for-each-repo", cmd_for_each_repo, RUN_SETUP_GENTLY },
 	{ "format-patch", cmd_format_patch, RUN_SETUP },
-	{ "fsck", cmd_fsck, RUN_SETUP },
+	{ "fsck", cmd_fsck, RUN_SETUP | BLOCK_ON_GVFS_REPO},
 	{ "fsck-objects", cmd_fsck, RUN_SETUP },
 	{ "fsmonitor--daemon", cmd_fsmonitor__daemon, RUN_SETUP },
 	{ "gc", cmd_gc, RUN_SETUP },
@@ -661,7 +666,7 @@ static struct cmd_struct commands[] = {
 	{ "pack-refs", cmd_pack_refs, RUN_SETUP },
 	{ "patch-id", cmd_patch_id, RUN_SETUP_GENTLY | NO_PARSEOPT },
 	{ "pickaxe", cmd_blame, RUN_SETUP },
-	{ "prune", cmd_prune, RUN_SETUP },
+	{ "prune", cmd_prune, RUN_SETUP | BLOCK_ON_GVFS_REPO},
 	{ "prune-packed", cmd_prune_packed, RUN_SETUP },
 	{ "pull", cmd_pull, RUN_SETUP | NEED_WORK_TREE },
 	{ "push", cmd_push, RUN_SETUP },
@@ -674,7 +679,7 @@ static struct cmd_struct commands[] = {
 	{ "remote", cmd_remote, RUN_SETUP },
 	{ "remote-ext", cmd_remote_ext, NO_PARSEOPT },
 	{ "remote-fd", cmd_remote_fd, NO_PARSEOPT },
-	{ "repack", cmd_repack, RUN_SETUP },
+	{ "repack", cmd_repack, RUN_SETUP | BLOCK_ON_GVFS_REPO },
 	{ "replace", cmd_replace, RUN_SETUP },
 	{ "replay", cmd_replay, RUN_SETUP },
 	{ "rerere", cmd_rerere, RUN_SETUP },
@@ -695,7 +700,7 @@ static struct cmd_struct commands[] = {
 	{ "stash", cmd_stash, RUN_SETUP | NEED_WORK_TREE },
 	{ "status", cmd_status, RUN_SETUP | NEED_WORK_TREE },
 	{ "stripspace", cmd_stripspace },
-	{ "submodule--helper", cmd_submodule__helper, RUN_SETUP },
+	{ "submodule--helper", cmd_submodule__helper, RUN_SETUP | BLOCK_ON_GVFS_REPO },
 	{ "survey", cmd_survey, RUN_SETUP },
 	{ "switch", cmd_switch, RUN_SETUP | NEED_WORK_TREE },
 	{ "symbolic-ref", cmd_symbolic_ref, RUN_SETUP },
@@ -714,7 +719,7 @@ static struct cmd_struct commands[] = {
 	{ "verify-tag", cmd_verify_tag, RUN_SETUP },
 	{ "version", cmd_version },
 	{ "whatchanged", cmd_whatchanged, RUN_SETUP },
-	{ "worktree", cmd_worktree, RUN_SETUP },
+	{ "worktree", cmd_worktree, RUN_SETUP | BLOCK_ON_GVFS_REPO },
 	{ "write-tree", cmd_write_tree, RUN_SETUP },
 };
 
diff --git a/gvfs.h b/gvfs.h
index 7c9367866f502a..e193502151467a 100644
--- a/gvfs.h
+++ b/gvfs.h
@@ -12,6 +12,7 @@
  * The list of bits in the core_gvfs setting
  */
 #define GVFS_SKIP_SHA_ON_INDEX                      (1 << 0)
+#define GVFS_BLOCK_COMMANDS                         (1 << 1)
 #define GVFS_MISSING_OK                             (1 << 2)
 #define GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT       (1 << 3)
 #define GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK (1 << 4)
diff --git a/t/meson.build b/t/meson.build
index be5247fb3ffabb..6c97f29312cdfe 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -151,6 +151,7 @@ integration_tests = [
   't0303-credential-external.sh',
   't0400-pre-command-hook.sh',
   't0401-post-command-hook.sh',
+  't0402-block-command-on-gvfs.sh',
   't0410-partial-clone.sh',
   't0411-clone-from-partial.sh',
   't0450-txt-doc-vs-help.sh',
diff --git a/t/t0402-block-command-on-gvfs.sh b/t/t0402-block-command-on-gvfs.sh
new file mode 100755
index 00000000000000..3ec7620ce6194d
--- /dev/null
+++ b/t/t0402-block-command-on-gvfs.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+test_description='block commands in GVFS repo'
+
+. ./test-lib.sh
+
+not_with_gvfs () {
+	command=$1 &&
+	shift &&
+	test_expect_success "test $command $*" "
+		test_config alias.g4rbled $command &&
+		test_config core.gvfs true &&
+		test_must_fail git $command $* &&
+		test_must_fail git g4rbled $* &&
+		test_unconfig core.gvfs &&
+		test_must_fail git -c core.gvfs=true $command $* &&
+		test_must_fail git -c core.gvfs=true g4rbled $*
+	"
+}
+
+not_with_gvfs fsck
+not_with_gvfs gc
+not_with_gvfs gc --auto
+not_with_gvfs prune
+not_with_gvfs repack
+not_with_gvfs submodule status
+not_with_gvfs update-index --index-version 2
+not_with_gvfs update-index --skip-worktree
+not_with_gvfs update-index --no-skip-worktree
+not_with_gvfs update-index --split-index
+not_with_gvfs worktree list
+
+test_expect_success 'test gc --auto succeeds when disabled via config' '
+	test_config core.gvfs true &&
+	test_config gc.auto 0 &&
+	git gc --auto
+'
+
+test_done

From 50b45b608cf6ac6df76e1519700d7cd2a796f18f Mon Sep 17 00:00:00 2001
From: Derrick Stolee <derrickstolee@github.com>
Date: Fri, 30 Sep 2022 12:59:40 -0400
Subject: [PATCH 047/207] worktree: allow in Scalar repositories

The 'git worktree' command was marked as BLOCK_ON_GVFS_REPO because it
does not interact well with the virtual filesystem of VFS for Git. When
a Scalar clone uses the GVFS protocol, it enables the
GVFS_BLOCK_COMMANDS flag, since commands like 'git gc' do not work well
with the GVFS protocol.

However, 'git worktree' works just fine with the GVFS protocol since it
isn't doing anything special. It copies the sparse-checkout from the
current worktree, so it does not have performance issues.

This is a highly requested option.

The solution is to stop using the BLOCK_ON_GVFS_REPO option and instead
add a special-case check in cmd_worktree() specifically for a particular
bit of the 'core_gvfs' global variable (loaded by very early config
reading) that corresponds to the virtual filesystem. The bit that most
closely resembled this behavior was non-obviously named, but does
provide a signal that we are in a Scalar clone and not a VFS for Git
clone. The error message is copied from git.c, so it will have the same
output as before if a user runs this in a VFS for Git clone.
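
A rough sketch of the resulting behavior, using the bit values from
gvfs.h (2 = GVFS_BLOCK_COMMANDS, 8 = GVFS_USE_VIRTUAL_FILESYSTEM):

    # Scalar clone using the GVFS protocol: gc stays blocked, but
    # worktree is now allowed
    git -c core.gvfs=2 worktree list

    # VFS for Git clone: the virtual filesystem bit keeps it blocked
    git -c core.gvfs=8 worktree list
    # fatal: 'git worktree' is not supported on a GVFS repo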

Signed-off-by: Derrick Stolee <derrickstolee@github.com>
---
 builtin/worktree.c |  8 ++++++++
 git.c              |  2 +-
 gvfs.h             | 11 +++++++++++
 3 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/builtin/worktree.c b/builtin/worktree.c
index c043d4d523f578..900e7101dbe43f 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -4,6 +4,7 @@
 #include "builtin.h"
 #include "abspath.h"
 #include "advice.h"
+#include "gvfs.h"
 #include "checkout.h"
 #include "config.h"
 #include "copy.h"
@@ -1429,6 +1430,13 @@ int cmd_worktree(int ac,
 
 	git_config(git_worktree_config, NULL);
 
+	/*
+	 * git-worktree is special-cased to work in Scalar repositories
+	 * even when they use the GVFS Protocol.
+	 */
+	if (core_gvfs & GVFS_USE_VIRTUAL_FILESYSTEM)
+		die("'git %s' is not supported on a GVFS repo", "worktree");
+
 	if (!prefix)
 		prefix = "";
 
diff --git a/git.c b/git.c
index 3cf3d2a4a18fa4..7e244b82f450eb 100644
--- a/git.c
+++ b/git.c
@@ -719,7 +719,7 @@ static struct cmd_struct commands[] = {
 	{ "verify-tag", cmd_verify_tag, RUN_SETUP },
 	{ "version", cmd_version },
 	{ "whatchanged", cmd_whatchanged, RUN_SETUP },
-	{ "worktree", cmd_worktree, RUN_SETUP | BLOCK_ON_GVFS_REPO },
+	{ "worktree", cmd_worktree, RUN_SETUP },
 	{ "write-tree", cmd_write_tree, RUN_SETUP },
 };
 
diff --git a/gvfs.h b/gvfs.h
index e193502151467a..a8e58a6ebc88b8 100644
--- a/gvfs.h
+++ b/gvfs.h
@@ -14,7 +14,18 @@
 #define GVFS_SKIP_SHA_ON_INDEX                      (1 << 0)
 #define GVFS_BLOCK_COMMANDS                         (1 << 1)
 #define GVFS_MISSING_OK                             (1 << 2)
+
+/*
+ * This behavior of not deleting outside of the sparse-checkout
+ * is specific to the virtual filesystem support. It is only
+ * enabled by VFS for Git, and so can be used as an indicator
+ * that we are in a virtualized filesystem environment and not
+ * in a Scalar environment. This bit has two names to reflect
+ * that.
+ */
 #define GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT       (1 << 3)
+#define GVFS_USE_VIRTUAL_FILESYSTEM                 (1 << 3)
+
 #define GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK (1 << 4)
 #define GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS      (1 << 6)
 

From e4cd5d2c68724b36e2b92baaff63244b4a831fe7 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Wed, 15 Apr 2020 16:19:31 +0000
Subject: [PATCH 048/207] gvfs: allow overriding core.gvfs

We found a user who had set "core.gvfs = false" in their global
config. This should not have been necessary, but it also should not
have caused a problem. However, it did.

The reason is that gvfs_load_config_value() is called from config.c
when reading config key/value pairs from all the config files. The
local config should override the global config, and this is done by
config.c reading the global config first then reading the local
config. However, our logic only allowed writing the core_gvfs
variable once.

Put the guards against multiple assignments of core_gvfs into
gvfs_config_is_set() instead, because that will fix the problem
_and_ keep multiple calls to gvfs_config_is_set() from slowing things down.
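
For illustration, the precedence we want (64 is the
GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS bit exercised by the test below):

    git config --global core.gvfs false
    git config --local  core.gvfs 64
    # the local value must win, so smudge filters remain blocked here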

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 gvfs.c                | 10 ++++------
 t/t0021-conversion.sh |  4 ++++
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/gvfs.c b/gvfs.c
index 3cdd8a055d6021..11635237893968 100644
--- a/gvfs.c
+++ b/gvfs.c
@@ -19,9 +19,6 @@ static int early_core_gvfs_config(const char *var, const char *value,
 
 void gvfs_load_config_value(const char *value)
 {
-	if (gvfs_config_loaded)
-		return;
-
 	if (value) {
 		struct key_value_info default_kvi = KVI_INIT;
 		core_gvfs = git_config_bool_or_int("core.gvfs", value, &default_kvi, &core_gvfs_is_bool);
@@ -34,12 +31,13 @@ void gvfs_load_config_value(const char *value)
 	/* Turn on all bits if a bool was set in the settings */
 	if (core_gvfs_is_bool && core_gvfs)
 		core_gvfs = -1;
-
-	gvfs_config_loaded = 1;
 }
 
 int gvfs_config_is_set(int mask)
 {
-	gvfs_load_config_value(NULL);
+	if (!gvfs_config_loaded)
+		gvfs_load_config_value(NULL);
+
+	gvfs_config_loaded = 1;
 	return (core_gvfs & mask) == mask;
 }
diff --git a/t/t0021-conversion.sh b/t/t0021-conversion.sh
index 6c4c012f4fe4bf..0114d07f2db614 100755
--- a/t/t0021-conversion.sh
+++ b/t/t0021-conversion.sh
@@ -349,6 +349,10 @@ test_expect_success "filter: smudge filters blocked when under GVFS" '
 	test_config filter.empty-in-repo.smudge "echo smudged && cat" &&
 	test_config core.gvfs 64 &&
 
+	test_must_fail git checkout &&
+
+	# ensure the local core.gvfs setting overwrites the global setting
+	git config --global core.gvfs false &&
 	test_must_fail git checkout
 '
 

From 0029f6e464a4b2cc7775230d91e9e8e3bad67c1c Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Fri, 27 Jul 2018 12:00:44 -0600
Subject: [PATCH 049/207] BRANCHES.md: Add explanation of branches and using
 forks

---
 BRANCHES.md | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)
 create mode 100644 BRANCHES.md

diff --git a/BRANCHES.md b/BRANCHES.md
new file mode 100644
index 00000000000000..364158375e7d55
--- /dev/null
+++ b/BRANCHES.md
@@ -0,0 +1,59 @@
+Branches used in this repo
+==========================
+
+This document explains the branching structure that we are using in the VFSForGit repository as well as the forking strategy that we have adopted for contributing.
+
+Repo Branches
+-------------
+
+1. `vfs-#`
+
+    These branches are used to track the specific versions that match Git for Windows with the VFSForGit-specific patches on top.  When a new version of Git for Windows is released, the VFSForGit patches will be rebased on that Windows version and a new vfs-# branch created to create pull requests against.
+
+    #### Examples
+
+    ```
+    vfs-2.27.0
+    vfs-2.30.0
+    ```
+
+    The versions of git for VFSForGit are based on the Git for Windows versions.  v2.20.0.vfs.1 corresponds to v2.20.0.windows.1 with the VFSForGit-specific patches applied to that Windows version.
+
+2. `vfs-#-exp`
+
+   These branches are for releasing experimental features to early adopters. They
+   should contain everything within the corresponding `vfs-#` branch; if the base
+   branch updates, then merge into the `vfs-#-exp` branch as well.
+
+Tags
+----
+
+We are using annotated tags to build the version number for git.  The build will look back through the commit history to find the first tag matching `v[0-9]*vfs*` and build the git version number using that tag.
+
+Full releases are of the form `v2.XX.Y.vfs.Z.W` where `v2.XX.Y` comes from the
+upstream version and `Z.W` are custom updates within our fork. Specifically,
+the `.Z` value represents the "compatibility level" with VFS for Git. Only
+increase this version when making a breaking change with a released version
+of VFS for Git. The `.W` version is used for minor updates between major
+versions.
+
+Experimental releases are of the form `v2.XX.Y.vfs.Z.W.exp`. The `.exp`
+suffix indicates that experimental features are available. The rest of the
+version string comes from the full release tag. These versions will only
+be made available as pre-releases on the releases page, never a full release.
+
+Forking
+-------
+
+A personal fork of this repository and a branch in that repository should be used for development.
+
+These branches should be based on the latest vfs-# branch.  If there are work in progress pull requests that you have based on a previous version branch when a new version branch is created, you will need to move your patches to the new branch to get them in that latest version.
+
+#### Example
+
+```
+git clone <personal fork repo URL>
+git remote add ms https://github.com/Microsoft/git.git
+git checkout -b my-changes ms/vfs-2.20.0 --no-track
+git push -fu origin HEAD
+```

From 839a737af2d5faf3281cd31015d929513e518a1b Mon Sep 17 00:00:00 2001
From: Ben Peart <benpeart@microsoft.com>
Date: Thu, 11 Jan 2018 16:25:08 -0500
Subject: [PATCH 050/207] Add virtual file system settings and hook proc

On index load, clear/set the skip worktree bits based on the virtual
file system data. Use virtual file system data to update skip-worktree
bit in unpack-trees. Use virtual file system data to exclude files and
folders not explicitly requested.

Update 2022-04-05: disable the "present-despite-SKIP_WORKTREE" file removal
behavior when 'core.virtualfilesystem' is enabled.
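
For illustration, a minimal sketch of such a hook (installed at whatever
path core.virtualFilesystem names; the projected paths are made up):

    #!/bin/sh
    # $1 is the hook interface version (currently 1)
    test "$1" = 1 || exit 1
    # NUL-separated list of files/directories git should track
    printf 'dir1/\0'
    printf 'dir2/file1.txt\0'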

Signed-off-by: Ben Peart <benpeart@microsoft.com>
---
 Documentation/config/core.txt    |   8 +
 Documentation/githooks.txt       |  20 ++
 Makefile                         |   1 +
 config.c                         |  30 ++-
 config.h                         |   2 +
 dir.c                            |  36 ++-
 environment.c                    |   1 +
 environment.h                    |   1 +
 meson.build                      |   1 +
 read-cache.c                     |   2 +
 sparse-index.c                   |   5 +-
 t/meson.build                    |   1 +
 t/t1090-sparse-checkout-scope.sh |   4 +-
 t/t1093-virtualfilesystem.sh     | 369 +++++++++++++++++++++++++++++++
 unpack-trees.c                   |  14 +-
 virtualfilesystem.c              | 312 ++++++++++++++++++++++++++
 virtualfilesystem.h              |  25 +++
 wt-status.c                      |   2 +
 18 files changed, 826 insertions(+), 8 deletions(-)
 create mode 100755 t/t1093-virtualfilesystem.sh
 create mode 100644 virtualfilesystem.c
 create mode 100644 virtualfilesystem.h

diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index 89a5889df688dd..6f408dd89398d4 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -111,6 +111,14 @@ Version 2 uses an opaque string so that the monitor can return
 something that can be used to determine what files have changed
 without race conditions.
 
+core.virtualFilesystem::
+	If set, the value of this variable is used as a command which
+	will identify all files and directories that are present in
+	the working directory.  Git will only track and update files
+	listed in the virtual file system.  Using the virtual file system
+	will supersede the sparse-checkout settings which will be ignored.
+	See the "virtual file system" section of linkgit:githooks[5].
+
 core.trustctime::
 	If false, the ctime differences between the index and the
 	working tree are ignored; useful when the inode change time
diff --git a/Documentation/githooks.txt b/Documentation/githooks.txt
index 0397dec64d7315..86c78b3b4825e2 100644
--- a/Documentation/githooks.txt
+++ b/Documentation/githooks.txt
@@ -758,6 +758,26 @@ and "0" meaning they were not.
 Only one parameter should be set to "1" when the hook runs.  The hook
 running passing "1", "1" should not be possible.
 
+virtualFilesystem
+~~~~~~~~~~~~~~~~~~
+
+"Virtual File System" allows populating the working directory sparsely.
+The projection data is typically automatically generated by an external
+process.  Git will limit what files it checks for changes as well as which
+directories are checked for untracked files based on the path names given.
+Git will also only update those files listed in the projection.
+
+The hook is invoked when the configuration option core.virtualFilesystem
+is set.  It takes one argument, a version (currently 1).
+
+The hook should output to stdout the list of all files in the working
+directory that git should track.  The paths are relative to the root
+of the working directory and are separated by a single NUL.  Full paths
+('dir1/a.txt') as well as directories are supported (ie 'dir1/').
+
+The exit status determines whether git will use the data from the
+hook.  On error, git will abort the command with an error message.
+
 SEE ALSO
 --------
 linkgit:git-hook[1]
diff --git a/Makefile b/Makefile
index 6331581179c305..7074643902c95b 100644
--- a/Makefile
+++ b/Makefile
@@ -1197,6 +1197,7 @@ LIB_OBJS += utf8.o
 LIB_OBJS += varint.o
 LIB_OBJS += version.o
 LIB_OBJS += versioncmp.o
+LIB_OBJS += virtualfilesystem.o
 LIB_OBJS += walker.o
 LIB_OBJS += wildmatch.o
 LIB_OBJS += worktree.o
diff --git a/config.c b/config.c
index c652d253fed4a0..eca6dd024a355d 100644
--- a/config.c
+++ b/config.c
@@ -1628,7 +1628,11 @@ int git_default_core_config(const char *var, const char *value,
 	}
 
 	if (!strcmp(var, "core.sparsecheckout")) {
-		core_apply_sparse_checkout = git_config_bool(var, value);
+		/* virtual file system relies on the sparse checkout logic so force it on */
+		if (core_virtualfilesystem)
+			core_apply_sparse_checkout = 1;
+		else
+			core_apply_sparse_checkout = git_config_bool(var, value);
 		return 0;
 	}
 
@@ -2719,6 +2723,30 @@ int repo_config_get_max_percent_split_change(struct repository *r)
 	return -1; /* default value */
 }
 
+int repo_config_get_virtualfilesystem(struct repository *r)
+{
+	/* Run only once. */
+	static int virtual_filesystem_result = -1;
+	if (virtual_filesystem_result >= 0)
+		return virtual_filesystem_result;
+
+	if (repo_config_get_pathname(r, "core.virtualfilesystem", &core_virtualfilesystem))
+		core_virtualfilesystem = xstrdup_or_null(getenv("GIT_VIRTUALFILESYSTEM_TEST"));
+
+	if (core_virtualfilesystem && !*core_virtualfilesystem)
+		FREE_AND_NULL(core_virtualfilesystem);
+
+	/* virtual file system relies on the sparse checkout logic so force it on */
+	if (core_virtualfilesystem) {
+		core_apply_sparse_checkout = 1;
+		virtual_filesystem_result = 1;
+		return 1;
+	}
+
+	virtual_filesystem_result = 0;
+	return 0;
+}
+
 int repo_config_get_index_threads(struct repository *r, int *dest)
 {
 	int is_bool, val;
diff --git a/config.h b/config.h
index e4199bbdc07685..b640ded10f822f 100644
--- a/config.h
+++ b/config.h
@@ -684,6 +684,8 @@ int repo_config_get_index_threads(struct repository *r, int *dest);
 int repo_config_get_split_index(struct repository *r);
 int repo_config_get_max_percent_split_change(struct repository *r);
 
+int repo_config_get_virtualfilesystem(struct repository *r);
+
 /* This dies if the configured or default date is in the future */
 int repo_config_get_expiry(struct repository *r, const char *key, char **output);
 
diff --git a/dir.c b/dir.c
index 1ed84b7a2f67ac..04b37090f9a19e 100644
--- a/dir.c
+++ b/dir.c
@@ -11,6 +11,7 @@
 
 #include "git-compat-util.h"
 #include "abspath.h"
+#include "virtualfilesystem.h"
 #include "config.h"
 #include "convert.h"
 #include "dir.h"
@@ -1480,6 +1481,19 @@ enum pattern_match_result path_matches_pattern_list(
 	int result = NOT_MATCHED;
 	size_t slash_pos;
 
+	if (core_virtualfilesystem) {
+		/*
+		* The virtual file system data is used to prevent git from traversing
+		* any part of the tree that is not in the virtual file system.  Return
+		* 1 to exclude the entry if it is not found in the virtual file system,
+		* else fall through to the regular excludes logic as it may further exclude.
+		*/
+		if (*dtype == DT_UNKNOWN)
+			*dtype = resolve_dtype(DT_UNKNOWN, istate, pathname, pathlen);
+		if (is_excluded_from_virtualfilesystem(pathname, pathlen, *dtype) > 0)
+			return 1;
+	}
+
 	if (!pl->use_cone_patterns) {
 		pattern = last_matching_pattern_from_list(pathname, pathlen, basename,
 							dtype, pl, istate);
@@ -1824,8 +1838,22 @@ struct path_pattern *last_matching_pattern(struct dir_struct *dir,
 int is_excluded(struct dir_struct *dir, struct index_state *istate,
 		const char *pathname, int *dtype_p)
 {
-	struct path_pattern *pattern =
-		last_matching_pattern(dir, istate, pathname, dtype_p);
+	struct path_pattern *pattern;
+
+	if (core_virtualfilesystem) {
+		/*
+		* The virtual file system data is used to prevent git from traversing
+		* any part of the tree that is not in the virtual file system.  Return
+		* 1 to exclude the entry if it is not found in the virtual file system,
+		* else fall through to the regular excludes logic as it may further exclude.
+		*/
+		if (*dtype_p == DT_UNKNOWN)
+			*dtype_p = resolve_dtype(DT_UNKNOWN, istate, pathname, strlen(pathname));
+		if (is_excluded_from_virtualfilesystem(pathname, strlen(pathname), *dtype_p) > 0)
+			return 1;
+	}
+
+	pattern = last_matching_pattern(dir, istate, pathname, dtype_p);
 	if (pattern)
 		return pattern->flags & PATTERN_FLAG_NEGATIVE ? 0 : 1;
 	return 0;
@@ -2443,6 +2471,8 @@ static enum path_treatment treat_path(struct dir_struct *dir,
 						ignore_case);
 	if (dtype != DT_DIR && has_path_in_index)
 		return path_none;
+	if (is_excluded_from_virtualfilesystem(path->buf, path->len, dtype) > 0)
+		return path_excluded;
 
 	/*
 	 * When we are looking at a directory P in the working tree,
@@ -2647,6 +2677,8 @@ static void add_path_to_appropriate_result_list(struct dir_struct *dir,
 	/* add the path to the appropriate result list */
 	switch (state) {
 	case path_excluded:
+		if (is_excluded_from_virtualfilesystem(path->buf, path->len, DT_DIR) > 0)
+			break;
 		if (dir->flags & DIR_SHOW_IGNORED)
 			dir_add_name(dir, istate, path->buf, path->len);
 		else if ((dir->flags & DIR_SHOW_IGNORED_TOO) ||
diff --git a/environment.c b/environment.c
index b21c255f07eda4..84bbc2a7172109 100644
--- a/environment.c
+++ b/environment.c
@@ -69,6 +69,7 @@ int core_apply_sparse_checkout;
 int core_sparse_checkout_cone;
 int sparse_expect_files_outside_of_patterns;
 int core_gvfs;
+char *core_virtualfilesystem;
 int merge_log_config = -1;
 int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */
 unsigned long pack_size_limit_cfg;
diff --git a/environment.h b/environment.h
index 081c6088fcef2c..6245b95b6873f8 100644
--- a/environment.h
+++ b/environment.h
@@ -170,6 +170,7 @@ extern unsigned long pack_size_limit_cfg;
 extern int max_allowed_tree_depth;
 
 extern int core_preload_index;
+extern char *core_virtualfilesystem;
 extern int core_gvfs;
 extern int precomposed_unicode;
 extern int protect_hfs;
diff --git a/meson.build b/meson.build
index a80323bf7a23d1..2143945ad03bb2 100644
--- a/meson.build
+++ b/meson.build
@@ -465,6 +465,7 @@ libgit_sources = [
   'utf8.c',
   'varint.c',
   'versioncmp.c',
+  'virtualfilesystem.c',
   'walker.c',
   'wildmatch.c',
   'worktree.c',
diff --git a/read-cache.c b/read-cache.c
index d459903d4ca968..2d4ce41a5f1ac3 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -9,6 +9,7 @@
 
 #include "git-compat-util.h"
 #include "bulk-checkin.h"
+#include "virtualfilesystem.h"
 #include "config.h"
 #include "date.h"
 #include "diff.h"
@@ -1980,6 +1981,7 @@ static void post_read_index_from(struct index_state *istate)
 	tweak_untracked_cache(istate);
 	tweak_split_index(istate);
 	tweak_fsmonitor(istate);
+	apply_virtualfilesystem(istate);
 }
 
 static size_t estimate_cache_size_from_compressed(unsigned int entries)
diff --git a/sparse-index.c b/sparse-index.c
index 0717f6260fc845..82fcf36169a9de 100644
--- a/sparse-index.c
+++ b/sparse-index.c
@@ -270,7 +270,7 @@ static int add_path_to_index(const struct object_id *oid,
 	size_t len = base->len;
 
 	if (S_ISDIR(mode)) {
-		int dtype;
+		int dtype = DT_DIR;
 		size_t baselen = base->len;
 		if (!ctx->pl)
 			return READ_TREE_RECURSIVE;
@@ -394,7 +394,7 @@ void expand_index(struct index_state *istate, struct pattern_list *pl)
 		struct cache_entry *ce = istate->cache[i];
 		struct tree *tree;
 		struct pathspec ps;
-		int dtype;
+		int dtype = DT_UNKNOWN;
 
 		if (!S_ISSPARSEDIR(ce->ce_mode)) {
 			set_index_entry(full, full->cache_nr++, ce);
@@ -670,6 +670,7 @@ static void clear_skip_worktree_from_present_files_full(struct index_state *ista
 void clear_skip_worktree_from_present_files(struct index_state *istate)
 {
 	if (!core_apply_sparse_checkout ||
+	    core_virtualfilesystem ||
 	    sparse_expect_files_outside_of_patterns)
 		return;
 
diff --git a/t/meson.build b/t/meson.build
index 6c97f29312cdfe..1d341c406e00ac 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -191,6 +191,7 @@ integration_tests = [
   't1090-sparse-checkout-scope.sh',
   't1091-sparse-checkout-builtin.sh',
   't1092-sparse-checkout-compatibility.sh',
+  't1093-virtualfilesystem.sh',
   't1100-commit-tree-options.sh',
   't1300-config.sh',
   't1301-shared-repo.sh',
diff --git a/t/t1090-sparse-checkout-scope.sh b/t/t1090-sparse-checkout-scope.sh
index effa20aab7bea7..02b393e36a7d28 100755
--- a/t/t1090-sparse-checkout-scope.sh
+++ b/t/t1090-sparse-checkout-scope.sh
@@ -107,9 +107,9 @@ test_expect_success 'in partial clone, sparse checkout only fetches needed blobs
 '
 
 test_expect_success 'checkout does not delete items outside the sparse checkout file' '
-	# The "sparse.expectfilesoutsideofpatterns" config will prevent the
+	# The "core.virtualfilesystem" config will prevent the
 	# SKIP_WORKTREE flag from being dropped on files present on-disk.
-	test_config sparse.expectfilesoutsideofpatterns true &&
+	test_config core.virtualfilesystem true &&
 
 	test_config core.gvfs 8 &&
 	git checkout -b outside &&
diff --git a/t/t1093-virtualfilesystem.sh b/t/t1093-virtualfilesystem.sh
new file mode 100755
index 00000000000000..bd0c9f72ba3c4a
--- /dev/null
+++ b/t/t1093-virtualfilesystem.sh
@@ -0,0 +1,369 @@
+#!/bin/sh
+
+test_description='virtual file system tests'
+
+. ./test-lib.sh
+
+clean_repo () {
+	rm .git/index &&
+	git -c core.virtualfilesystem= reset --hard HEAD &&
+	git -c core.virtualfilesystem= clean -fd &&
+	touch untracked.txt &&
+	touch dir1/untracked.txt &&
+	touch dir2/untracked.txt
+}
+
+test_expect_success 'setup' '
+	git branch -M main &&
+	mkdir -p .git/hooks/ &&
+	cat > .gitignore <<-\EOF &&
+		.gitignore
+		expect*
+		actual*
+	EOF
+	mkdir -p dir1 &&
+	touch dir1/file1.txt &&
+	touch dir1/file2.txt &&
+	mkdir -p dir2 &&
+	touch dir2/file1.txt &&
+	touch dir2/file2.txt &&
+	git add . &&
+	git commit -m "initial" &&
+	git config --local core.virtualfilesystem .git/hooks/virtualfilesystem
+'
+
+test_expect_success 'test hook parameters and version' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		if test "$#" -ne 1
+		then
+			echo "$0: Exactly 1 argument expected" >&2
+			exit 2
+		fi
+
+		if test "$1" != 1
+		then
+			echo "$0: Unsupported hook version." >&2
+			exit 1
+		fi
+	EOF
+	git status &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		exit 3
+	EOF
+	test_must_fail git status
+'
+
+test_expect_success 'verify status is clean' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir2/file1.txt\0"
+	EOF
+	rm -f .git/index &&
+	git checkout -f &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir2/file1.txt\0"
+		printf "dir1/file1.txt\0"
+		printf "dir1/file2.txt\0"
+	EOF
+	git status > actual &&
+	cat > expected <<-\EOF &&
+		On branch main
+		nothing to commit, working tree clean
+	EOF
+	test_cmp expected actual
+'
+
+test_expect_success 'verify skip-worktree bit is set for absolute path' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/file1.txt\0"
+	EOF
+	git ls-files -v > actual &&
+	cat > expected <<-\EOF &&
+		H dir1/file1.txt
+		S dir1/file2.txt
+		S dir2/file1.txt
+		S dir2/file2.txt
+	EOF
+	test_cmp expected actual
+'
+
+test_expect_success 'verify skip-worktree bit is cleared for absolute path' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/file2.txt\0"
+	EOF
+	git ls-files -v > actual &&
+	cat > expected <<-\EOF &&
+		S dir1/file1.txt
+		H dir1/file2.txt
+		S dir2/file1.txt
+		S dir2/file2.txt
+	EOF
+	test_cmp expected actual
+'
+
+test_expect_success 'verify folder wild cards' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/\0"
+	EOF
+	git ls-files -v > actual &&
+	cat > expected <<-\EOF &&
+		H dir1/file1.txt
+		H dir1/file2.txt
+		S dir2/file1.txt
+		S dir2/file2.txt
+	EOF
+	test_cmp expected actual
+'
+
+test_expect_success 'verify folders not included are ignored' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/file1.txt\0"
+		printf "dir1/file2.txt\0"
+	EOF
+	mkdir -p dir1/dir2 &&
+	touch dir1/a &&
+	touch dir1/b &&
+	touch dir1/dir2/a &&
+	touch dir1/dir2/b &&
+	git add . &&
+	git ls-files -v > actual &&
+	cat > expected <<-\EOF &&
+		H dir1/file1.txt
+		H dir1/file2.txt
+		S dir2/file1.txt
+		S dir2/file2.txt
+	EOF
+	test_cmp expected actual
+'
+
+test_expect_success 'verify including one file doesnt include the rest' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/file1.txt\0"
+		printf "dir1/file2.txt\0"
+		printf "dir1/dir2/a\0"
+	EOF
+	mkdir -p dir1/dir2 &&
+	touch dir1/a &&
+	touch dir1/b &&
+	touch dir1/dir2/a &&
+	touch dir1/dir2/b &&
+	git add . &&
+	git ls-files -v > actual &&
+	cat > expected <<-\EOF &&
+		H dir1/dir2/a
+		H dir1/file1.txt
+		H dir1/file2.txt
+		S dir2/file1.txt
+		S dir2/file2.txt
+	EOF
+	test_cmp expected actual
+'
+
+test_expect_success 'verify files not listed are ignored by git clean -f -x' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "untracked.txt\0"
+		printf "dir1/\0"
+	EOF
+	mkdir -p dir3 &&
+	touch dir3/untracked.txt &&
+	git clean -f -x &&
+	test ! -f untracked.txt &&
+	test -d dir1 &&
+	test -f dir1/file1.txt &&
+	test -f dir1/file2.txt &&
+	test ! -f dir1/untracked.txt &&
+	test -f dir2/file1.txt &&
+	test -f dir2/file2.txt &&
+	test -f dir2/untracked.txt &&
+	test -d dir3 &&
+	test -f dir3/untracked.txt
+'
+
+test_expect_success 'verify files not listed are ignored by git clean -f -d -x' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "untracked.txt\0"
+		printf "dir1/\0"
+		printf "dir3/\0"
+	EOF
+	mkdir -p dir3 &&
+	touch dir3/untracked.txt &&
+	git clean -f -d -x &&
+	test ! -f untracked.txt &&
+	test -d dir1 &&
+	test -f dir1/file1.txt &&
+	test -f dir1/file2.txt &&
+	test ! -f dir1/untracked.txt &&
+	test -f dir2/file1.txt &&
+	test -f dir2/file2.txt &&
+	test -f dir2/untracked.txt &&
+	test ! -d dir3 &&
+	test ! -f dir3/untracked.txt
+'
+
+test_expect_success 'verify folder entries include all files' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/\0"
+	EOF
+	mkdir -p dir1/dir2 &&
+	touch dir1/a &&
+	touch dir1/b &&
+	touch dir1/dir2/a &&
+	touch dir1/dir2/b &&
+	git status -su > actual &&
+	cat > expected <<-\EOF &&
+		?? dir1/a
+		?? dir1/b
+		?? dir1/untracked.txt
+	EOF
+	test_cmp expected actual
+'
+
+test_expect_success 'verify case insensitivity of virtual file system entries' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/a\0"
+		printf "Dir1/Dir2/a\0"
+		printf "DIR2/\0"
+	EOF
+	mkdir -p dir1/dir2 &&
+	touch dir1/a &&
+	touch dir1/b &&
+	touch dir1/dir2/a &&
+	touch dir1/dir2/b &&
+	git -c core.ignorecase=false status -su > actual &&
+	cat > expected <<-\EOF &&
+		?? dir1/a
+	EOF
+	test_cmp expected actual &&
+	git -c core.ignorecase=true status -su > actual &&
+	cat > expected <<-\EOF &&
+		?? dir1/a
+		?? dir1/dir2/a
+		?? dir2/untracked.txt
+	EOF
+	test_cmp expected actual
+'
+
+test_expect_success 'on file created' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/file3.txt\0"
+	EOF
+	touch dir1/file3.txt &&
+	git add . &&
+	git ls-files -v > actual &&
+	cat > expected <<-\EOF &&
+		S dir1/file1.txt
+		S dir1/file2.txt
+		H dir1/file3.txt
+		S dir2/file1.txt
+		S dir2/file2.txt
+	EOF
+	test_cmp expected actual
+'
+
+test_expect_success 'on file renamed' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/file1.txt\0"
+		printf "dir1/file3.txt\0"
+	EOF
+	mv dir1/file1.txt dir1/file3.txt &&
+	git status -su > actual &&
+	cat > expected <<-\EOF &&
+		 D dir1/file1.txt
+		?? dir1/file3.txt
+	EOF
+	test_cmp expected actual
+'
+
+test_expect_success 'on file deleted' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/file1.txt\0"
+	EOF
+	rm dir1/file1.txt &&
+	git status -su > actual &&
+	cat > expected <<-\EOF &&
+		 D dir1/file1.txt
+	EOF
+	test_cmp expected actual
+'
+
+test_expect_success 'on file overwritten' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/file1.txt\0"
+	EOF
+	echo "overwritten" > dir1/file1.txt &&
+	git status -su > actual &&
+	cat > expected <<-\EOF &&
+		 M dir1/file1.txt
+	EOF
+	test_cmp expected actual
+'
+
+test_expect_success 'on folder created' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/dir1/\0"
+	EOF
+	mkdir -p dir1/dir1 &&
+	git status -su > actual &&
+	cat > expected <<-\EOF &&
+	EOF
+	test_cmp expected actual &&
+	git clean -fd &&
+	test ! -d "dir1/dir1"
+'
+
+test_expect_success 'on folder renamed' '
+	clean_repo &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir3/\0"
+		printf "dir1/file1.txt\0"
+		printf "dir1/file2.txt\0"
+		printf "dir3/file1.txt\0"
+		printf "dir3/file2.txt\0"
+	EOF
+	mv dir1 dir3 &&
+	git status -su > actual &&
+	cat > expected <<-\EOF &&
+		 D dir1/file1.txt
+		 D dir1/file2.txt
+		?? dir3/file1.txt
+		?? dir3/file2.txt
+		?? dir3/untracked.txt
+	EOF
+	test_cmp expected actual
+'
+
+test_expect_success 'folder with same prefix as file' '
+	clean_repo &&
+	touch dir1.sln &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/\0"
+		printf "dir1.sln\0"
+	EOF
+	git add dir1.sln &&
+	git ls-files -v > actual &&
+	cat > expected <<-\EOF &&
+		H dir1.sln
+		H dir1/file1.txt
+		H dir1/file2.txt
+		S dir2/file1.txt
+		S dir2/file2.txt
+	EOF
+	test_cmp expected actual
+'
+
+test_done
diff --git a/unpack-trees.c b/unpack-trees.c
index fc5191988bb2e5..9fa2260bc74ef8 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -4,6 +4,7 @@
 #include "git-compat-util.h"
 #include "advice.h"
 #include "gvfs.h"
+#include "virtualfilesystem.h"
 #include "strvec.h"
 #include "repository.h"
 #include "parse.h"
@@ -1706,6 +1707,14 @@ static int clear_ce_flags_1(struct index_state *istate,
 			continue;
 		}
 
+		/* if it's not in the virtual file system, exit early */
+		if (core_virtualfilesystem) {
+			if (is_included_in_virtualfilesystem(ce->name, ce->ce_namelen) > 0)
+				ce->ce_flags &= ~clear_mask;
+			cache++;
+			continue;
+		}
+
 		if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
 			break;
 
@@ -1932,7 +1941,10 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
 	if (!o->skip_sparse_checkout) {
 		memset(&pl, 0, sizeof(pl));
 		free_pattern_list = 1;
-		populate_from_existing_patterns(o, &pl);
+		if (core_virtualfilesystem)
+			o->internal.pl = &pl;
+		else
+			populate_from_existing_patterns(o, &pl);
 	}
 
 	index_state_init(&o->internal.result, o->src_index->repo);
diff --git a/virtualfilesystem.c b/virtualfilesystem.c
new file mode 100644
index 00000000000000..cb040dc97c24f7
--- /dev/null
+++ b/virtualfilesystem.c
@@ -0,0 +1,312 @@
+#define USE_THE_REPOSITORY_VARIABLE
+
+#include "git-compat-util.h"
+#include "environment.h"
+#include "gettext.h"
+#include "config.h"
+#include "dir.h"
+#include "hashmap.h"
+#include "run-command.h"
+#include "name-hash.h"
+#include "read-cache-ll.h"
+#include "virtualfilesystem.h"
+
+#define HOOK_INTERFACE_VERSION	(1)
+
+static struct strbuf virtual_filesystem_data = STRBUF_INIT;
+static struct hashmap virtual_filesystem_hashmap;
+static struct hashmap parent_directory_hashmap;
+
+struct virtualfilesystem {
+	struct hashmap_entry ent; /* must be the first member! */
+	const char *pattern;
+	int patternlen;
+};
+
+static unsigned int(*vfshash)(const void *buf, size_t len);
+static int(*vfscmp)(const char *a, const char *b, size_t len);
+
+static int vfs_hashmap_cmp(const void *cmp_data UNUSED,
+			   const struct hashmap_entry *he1,
+			   const struct hashmap_entry *he2,
+			   const void *key UNUSED)
+{
+	const struct virtualfilesystem *vfs1 =
+		container_of(he1, const struct virtualfilesystem, ent);
+	const struct virtualfilesystem *vfs2 =
+		container_of(he2, const struct virtualfilesystem, ent);
+
+	return vfscmp(vfs1->pattern, vfs2->pattern, vfs1->patternlen);
+}
+
+static void get_virtual_filesystem_data(struct repository *r, struct strbuf *vfs_data)
+{
+	struct child_process cp = CHILD_PROCESS_INIT;
+	int err;
+
+	strbuf_init(vfs_data, 0);
+
+	strvec_push(&cp.args, core_virtualfilesystem);
+	strvec_pushf(&cp.args, "%d", HOOK_INTERFACE_VERSION);
+	cp.use_shell = 1;
+	cp.dir = repo_get_work_tree(r);
+
+	err = capture_command(&cp, vfs_data, 1024);
+	if (err)
+		die("unable to load virtual file system");
+}
+
+static int check_includes_hashmap(struct hashmap *map, const char *pattern, int patternlen)
+{
+	struct strbuf sb = STRBUF_INIT;
+	struct virtualfilesystem vfs;
+	char *slash;
+
+	/* Check straight mapping */
+	strbuf_reset(&sb);
+	strbuf_add(&sb, pattern, patternlen);
+	vfs.pattern = sb.buf;
+	vfs.patternlen = sb.len;
+	hashmap_entry_init(&vfs.ent, vfshash(vfs.pattern, vfs.patternlen));
+	if (hashmap_get_entry(map, &vfs, ent, NULL)) {
+		strbuf_release(&sb);
+		return 1;
+	}
+
+	/*
+	 * Check to see if it matches a directory or any path
+	 * underneath it.  In other words, 'a/b/foo.txt' will match
+	 * '/', 'a/', and 'a/b/'.
+	 */
+	slash = strchr(sb.buf, '/');
+	while (slash) {
+		vfs.pattern = sb.buf;
+		vfs.patternlen = slash - sb.buf + 1;
+		hashmap_entry_init(&vfs.ent, vfshash(vfs.pattern, vfs.patternlen));
+		if (hashmap_get_entry(map, &vfs, ent, NULL)) {
+			strbuf_release(&sb);
+			return 1;
+		}
+		slash = strchr(slash + 1, '/');
+	}
+
+	strbuf_release(&sb);
+	return 0;
+}
+
+static void includes_hashmap_add(struct hashmap *map, const char *pattern, const int patternlen)
+{
+	struct virtualfilesystem *vfs;
+
+	vfs = xmalloc(sizeof(struct virtualfilesystem));
+	vfs->pattern = pattern;
+	vfs->patternlen = patternlen;
+	hashmap_entry_init(&vfs->ent, vfshash(vfs->pattern, vfs->patternlen));
+	hashmap_add(map, &vfs->ent);
+}
+
+static void initialize_includes_hashmap(struct hashmap *map, struct strbuf *vfs_data)
+{
+	char *buf, *entry;
+	size_t len, i;
+
+	/*
+	 * Build a hashmap of the virtual file system data we can use to look
+	 * for cache entry matches quickly
+	 */
+	vfshash = ignore_case ? memihash : memhash;
+	vfscmp = ignore_case ? strncasecmp : strncmp;
+	hashmap_init(map, vfs_hashmap_cmp, NULL, 0);
+
+	entry = buf = vfs_data->buf;
+	len = vfs_data->len;
+	for (i = 0; i < len; i++) {
+		if (buf[i] == '\0') {
+			includes_hashmap_add(map, entry, buf + i - entry);
+			entry = buf + i + 1;
+		}
+	}
+}
+
+/*
+ * Return 1 if the requested item is found in the virtual file system,
+ * 0 for not found and -1 for undecided.
+ */
+int is_included_in_virtualfilesystem(const char *pathname, int pathlen)
+{
+	if (!core_virtualfilesystem)
+		return -1;
+
+	if (!virtual_filesystem_hashmap.tablesize && virtual_filesystem_data.len)
+		initialize_includes_hashmap(&virtual_filesystem_hashmap, &virtual_filesystem_data);
+	if (!virtual_filesystem_hashmap.tablesize)
+		return -1;
+
+	return check_includes_hashmap(&virtual_filesystem_hashmap, pathname, pathlen);
+}
+
+static void parent_directory_hashmap_add(struct hashmap *map, const char *pattern, const int patternlen)
+{
+	char *slash;
+	struct virtualfilesystem *vfs;
+
+	/*
+	 * Add any directories leading up to the file as the excludes logic
+	 * needs to match directories leading up to the files as well. Detect
+	 * and prevent unnecessary duplicate entries which will be common.
+	 */
+	if (patternlen > 1) {
+		slash = strchr(pattern + 1, '/');
+		while (slash) {
+			vfs = xmalloc(sizeof(struct virtualfilesystem));
+			vfs->pattern = pattern;
+			vfs->patternlen = slash - pattern + 1;
+			hashmap_entry_init(&vfs->ent, vfshash(vfs->pattern, vfs->patternlen));
+			if (hashmap_get_entry(map, vfs, ent, NULL))
+				free(vfs);
+			else
+				hashmap_add(map, &vfs->ent);
+			slash = strchr(slash + 1, '/');
+		}
+	}
+}
+
+static void initialize_parent_directory_hashmap(struct hashmap *map, struct strbuf *vfs_data)
+{
+	char *buf, *entry;
+	size_t len, i;
+
+	/*
+	 * Build a hashmap of the parent directories contained in the virtual
+	 * file system data we can use to look for matches quickly
+	 */
+	vfshash = ignore_case ? memihash : memhash;
+	vfscmp = ignore_case ? strncasecmp : strncmp;
+	hashmap_init(map, vfs_hashmap_cmp, NULL, 0);
+
+	entry = buf = vfs_data->buf;
+	len = vfs_data->len;
+	for (i = 0; i < len; i++) {
+		if (buf[i] == '\0') {
+			parent_directory_hashmap_add(map, entry, buf + i - entry);
+			entry = buf + i + 1;
+		}
+	}
+}
+
+static int check_directory_hashmap(struct hashmap *map, const char *pathname, int pathlen)
+{
+	struct strbuf sb = STRBUF_INIT;
+	struct virtualfilesystem vfs;
+
+	/* Check for directory */
+	strbuf_reset(&sb);
+	strbuf_add(&sb, pathname, pathlen);
+	strbuf_addch(&sb, '/');
+	vfs.pattern = sb.buf;
+	vfs.patternlen = sb.len;
+	hashmap_entry_init(&vfs.ent, vfshash(vfs.pattern, vfs.patternlen));
+	if (hashmap_get_entry(map, &vfs, ent, NULL)) {
+		strbuf_release(&sb);
+		return 0;
+	}
+
+	strbuf_release(&sb);
+	return 1;
+}
+
+/*
+ * Return 1 for exclude, 0 for include and -1 for undecided.
+ */
+int is_excluded_from_virtualfilesystem(const char *pathname, int pathlen, int dtype)
+{
+	if (!core_virtualfilesystem)
+		return -1;
+
+	if (dtype != DT_REG && dtype != DT_DIR && dtype != DT_LNK)
+		die(_("is_excluded_from_virtualfilesystem passed unhandled dtype"));
+
+	if (dtype == DT_REG || dtype == DT_LNK) {
+		int ret = is_included_in_virtualfilesystem(pathname, pathlen);
+		if (ret > 0)
+			return 0;
+		if (ret == 0)
+			return 1;
+		return ret;
+	}
+
+	if (dtype == DT_DIR) {
+		if (!parent_directory_hashmap.tablesize && virtual_filesystem_data.len)
+			initialize_parent_directory_hashmap(&parent_directory_hashmap, &virtual_filesystem_data);
+		if (!parent_directory_hashmap.tablesize)
+			return -1;
+
+		return check_directory_hashmap(&parent_directory_hashmap, pathname, pathlen);
+	}
+
+	return -1;
+}
+
+/*
+ * Update the CE_SKIP_WORKTREE bits based on the virtual file system.
+ */
+void apply_virtualfilesystem(struct index_state *istate)
+{
+	char *buf, *entry;
+	size_t i;
+
+	if (!repo_config_get_virtualfilesystem(istate->repo))
+		return;
+
+	if (!virtual_filesystem_data.len)
+		get_virtual_filesystem_data(istate->repo, &virtual_filesystem_data);
+
+	/* set CE_SKIP_WORKTREE bit on all entries */
+	for (i = 0; i < istate->cache_nr; i++)
+		istate->cache[i]->ce_flags |= CE_SKIP_WORKTREE;
+
+	/* clear CE_SKIP_WORKTREE bit for everything in the virtual file system */
+	entry = buf = virtual_filesystem_data.buf;
+	for (i = 0; i < virtual_filesystem_data.len; i++) {
+		if (buf[i] == '\0') {
+			ssize_t pos, len;
+
+			len = buf + i - entry;
+
+			/* look for a directory wild card (ie "dir1/") */
+			if (buf[i - 1] == '/') {
+				if (ignore_case)
+					adjust_dirname_case(istate, entry);
+				pos = index_name_pos(istate, entry, len);
+				if (pos < 0) {
+					pos = -pos - 1;
+					while ((size_t)pos < istate->cache_nr && !fspathncmp(istate->cache[pos]->name, entry, len)) {
+						istate->cache[pos]->ce_flags &= ~CE_SKIP_WORKTREE;
+						pos++;
+					}
+				}
+			} else {
+				if (ignore_case) {
+					struct cache_entry *ce = index_file_exists(istate, entry, len, ignore_case);
+					if (ce)
+						ce->ce_flags &= ~CE_SKIP_WORKTREE;
+				} else {
+					int pos = index_name_pos(istate, entry, len);
+					if (pos >= 0)
+						istate->cache[pos]->ce_flags &= ~CE_SKIP_WORKTREE;
+				}
+			}
+
+			entry += len + 1;
+		}
+	}
+}
+
+/*
+ * Free the virtual file system data structures.
+ */
+void free_virtualfilesystem(void) {
+	hashmap_clear_and_free(&virtual_filesystem_hashmap, struct virtualfilesystem, ent);
+	hashmap_clear_and_free(&parent_directory_hashmap, struct virtualfilesystem, ent);
+	strbuf_release(&virtual_filesystem_data);
+}
diff --git a/virtualfilesystem.h b/virtualfilesystem.h
new file mode 100644
index 00000000000000..5e8c5b096df09a
--- /dev/null
+++ b/virtualfilesystem.h
@@ -0,0 +1,25 @@
+#ifndef VIRTUALFILESYSTEM_H
+#define VIRTUALFILESYSTEM_H
+
+/*
+ * Update the CE_SKIP_WORKTREE bits based on the virtual file system.
+ */
+void apply_virtualfilesystem(struct index_state *istate);
+
+/*
+ * Return 1 if the requested item is found in the virtual file system,
+ * 0 for not found and -1 for undecided.
+ */
+int is_included_in_virtualfilesystem(const char *pathname, int pathlen);
+
+/*
+ * Return 1 for exclude, 0 for include and -1 for undecided.
+ */
+int is_excluded_from_virtualfilesystem(const char *pathname, int pathlen, int dtype);
+
+/*
+ * Free the virtual file system data structures.
+ */
+void free_virtualfilesystem(void);
+
+#endif
diff --git a/wt-status.c b/wt-status.c
index 9609ba260fe804..e97b7db9db2abc 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -1612,6 +1612,8 @@ static void show_sparse_checkout_in_use(struct wt_status *s,
 {
 	if (s->state.sparse_checkout_percentage == SPARSE_CHECKOUT_DISABLED)
 		return;
+	if (core_virtualfilesystem)
+		return;
 
 	if (s->state.sparse_checkout_percentage == SPARSE_CHECKOUT_SPARSE_INDEX)
 		status_printf_ln(s, color, _("You are in a sparse checkout."));

From 747db264c844f32d9d0e35dd60037d6ed03b2532 Mon Sep 17 00:00:00 2001
From: Ben Peart <benpeart@microsoft.com>
Date: Wed, 1 Aug 2018 13:26:22 -0400
Subject: [PATCH 051/207] virtualfilesystem: don't run the virtual file system
 hook if the index has been redirected

Fixes #13

Some git commands spawn helpers and redirect the index to a different
location.  These include "difftool -d" and the sequencer
(e.g. `git rebase -i`, `git cherry-pick` and `git revert`) and others.
In those instances we don't want to update their temporary index with
our virtualization data.
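
To illustrate, a rough sketch of the two situations (not part of the
change; the redirected index path and the use of GIT_INDEX_FILE are just
one way such a redirection can happen):

	# normal invocation: the index is .git/index, so the hook runs
	git status
	# a helper working on a redirected, temporary index: the hook is skipped
	GIT_INDEX_FILE=/tmp/helper.index git status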

Helped-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Ben Peart <Ben.Peart@microsoft.com>
---
 config.c | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

diff --git a/config.c b/config.c
index eca6dd024a355d..0d66812d3f3c98 100644
--- a/config.c
+++ b/config.c
@@ -2736,11 +2736,25 @@ int repo_config_get_virtualfilesystem(struct repository *r)
 	if (core_virtualfilesystem && !*core_virtualfilesystem)
 		FREE_AND_NULL(core_virtualfilesystem);
 
-	/* virtual file system relies on the sparse checkout logic so force it on */
 	if (core_virtualfilesystem) {
-		core_apply_sparse_checkout = 1;
-		virtual_filesystem_result = 1;
-		return 1;
+		/*
+		 * Some git commands spawn helpers and redirect the index to a different
+		 * location.  These include "difftool -d" and the sequencer
+		 * (i.e. `git rebase -i`, `git cherry-pick` and `git revert`) and others.
+		 * In those instances we don't want to update their temporary index with
+		 * our virtualization data.
+		 */
+		char *default_index_file = xstrfmt("%s/%s", the_repository->gitdir, "index");
+		int should_run_hook = !strcmp(default_index_file, the_repository->index_file);
+
+		free(default_index_file);
+		if (should_run_hook) {
+			/* virtual file system relies on the sparse checkout logic so force it on */
+			core_apply_sparse_checkout = 1;
+			virtual_filesystem_result = 1;
+			return 1;
+		}
+		FREE_AND_NULL(core_virtualfilesystem);
 	}
 
 	virtual_filesystem_result = 0;

From 98fabc5a295969af1dacf8070aef5d6068c68941 Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Tue, 9 Oct 2018 10:19:14 -0600
Subject: [PATCH 052/207] virtualfilesystem: check if directory is included

Add a check to see if a directory is included in the virtualfilesystem
before checking the directory hashmap.  This allows a directory entry
like foo/ to find all untracked files in subdirectories.
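
In the style of the t1093 tests, a minimal sketch of what this enables
(directory and file names are illustrative; write_script is a test-lib
helper):

	write_script .git/hooks/virtualfilesystem <<-\EOF &&
		printf "foo/\0"
	EOF
	mkdir -p foo/bar &&
	touch foo/bar/untracked.txt &&
	git status --short	# expect foo/bar/untracked.txt to be reported
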
---
 t/t1093-virtualfilesystem.sh | 2 ++
 virtualfilesystem.c          | 4 ++++
 2 files changed, 6 insertions(+)

diff --git a/t/t1093-virtualfilesystem.sh b/t/t1093-virtualfilesystem.sh
index bd0c9f72ba3c4a..8ba9a2a75e093a 100755
--- a/t/t1093-virtualfilesystem.sh
+++ b/t/t1093-virtualfilesystem.sh
@@ -222,6 +222,8 @@ test_expect_success 'verify folder entries include all files' '
 	cat > expected <<-\EOF &&
 		?? dir1/a
 		?? dir1/b
+		?? dir1/dir2/a
+		?? dir1/dir2/b
 		?? dir1/untracked.txt
 	EOF
 	test_cmp expected actual
diff --git a/virtualfilesystem.c b/virtualfilesystem.c
index cb040dc97c24f7..3f00800d86ef47 100644
--- a/virtualfilesystem.c
+++ b/virtualfilesystem.c
@@ -236,6 +236,10 @@ int is_excluded_from_virtualfilesystem(const char *pathname, int pathlen, int dt
 	}
 
 	if (dtype == DT_DIR) {
+		int ret = is_included_in_virtualfilesystem(pathname, pathlen);
+		if (ret > 0)
+			return 0;
+
 		if (!parent_directory_hashmap.tablesize && virtual_filesystem_data.len)
 			initialize_parent_directory_hashmap(&parent_directory_hashmap, &virtual_filesystem_data);
 		if (!parent_directory_hashmap.tablesize)

From 31d0210095d39d57599070dce9d7df07c988040f Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johasc@microsoft.com>
Date: Tue, 28 May 2019 21:48:08 +0200
Subject: [PATCH 053/207] backwards-compatibility: support the
 post-indexchanged hook

When our patches to support that hook were upstreamed, the hook's name
elicited some reviewer suggestions, and it was renamed to
`post-index-change`. These patches (with the new name) made it into
v2.22.0.

However, VFSforGit users may very well have checkouts with that hook
installed under the original name.

To support this, let's introduce a hack: when we fail to find the
`post-index-change` hook, look a bit more closely and allow any
`post-indexchanged` hook to run instead (if it exists).
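
Roughly what the new test below exercises (a sketch using the test-lib
helpers):

	write_script .git/hooks/post-indexchanged <<-\EOF &&
		: >marker
	EOF
	test-tool chmtime -60 .git/index &&	# make the index look stale
	git status &&
	test_path_is_file marker	# the hook under its old name was run
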
---
 hook.c                            | 14 +++++++++++++-
 t/t7113-post-index-change-hook.sh | 30 ++++++++++++++++++++++++++++++
 2 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/hook.c b/hook.c
index 305c48d463d62c..e2beb2b16d76bd 100644
--- a/hook.c
+++ b/hook.c
@@ -194,7 +194,7 @@ int run_hooks_opt(struct repository *r, const char *hook_name,
 		.hook_name = hook_name,
 		.options = options,
 	};
-	const char *const hook_path = find_hook(r, hook_name);
+	const char *hook_path = find_hook(r, hook_name);
 	int ret = 0;
 	const struct run_process_parallel_opts opts = {
 		.tr2_category = "hook",
@@ -210,6 +210,18 @@ int run_hooks_opt(struct repository *r, const char *hook_name,
 		.data = &cb_data,
 	};
 
+	/*
+	 * Backwards compatibility hack in VFS for Git: when originally
+	 * introduced (and used!), it was called `post-indexchanged`, but this
+	 * name was changed during the review on the Git mailing list.
+	 *
+	 * Therefore, when the `post-index-change` hook is not found, let's
+	 * look for a hook with the old name (which would be found in case of
+	 * already-existing checkouts).
+	 */
+	if (!hook_path && !strcmp(hook_name, "post-index-change"))
+		hook_path = find_hook(r, "post-indexchanged");
+
 	if (!options)
 		BUG("a struct run_hooks_opt must be provided to run_hooks");
 
diff --git a/t/t7113-post-index-change-hook.sh b/t/t7113-post-index-change-hook.sh
index c10d94fe3d3b01..22b81a67cc32b6 100755
--- a/t/t7113-post-index-change-hook.sh
+++ b/t/t7113-post-index-change-hook.sh
@@ -15,6 +15,36 @@ test_expect_success 'setup' '
 	git commit -m "initial"
 '
 
+test_expect_success 'post-indexchanged' '
+	mkdir -p .git/hooks &&
+	test_when_finished "rm -f .git/hooks/post-indexchanged marker" &&
+	write_script .git/hooks/post-indexchanged <<-\EOF &&
+	: >marker
+	EOF
+
+	: make sure -changed is called if -change does not exist &&
+	test_when_finished "echo testing >dir1/file2.txt && git status" &&
+	echo changed >dir1/file2.txt &&
+	: force index to be dirty &&
+	test-tool chmtime -60 .git/index &&
+	git status &&
+	test_path_is_file marker &&
+
+	test_when_finished "rm -f .git/hooks/post-index-change marker2" &&
+	write_script .git/hooks/post-index-change <<-\EOF &&
+	: >marker2
+	EOF
+
+	: make sure -changed is not called if -change exists &&
+	rm -f marker marker2 &&
+	echo testing >dir1/file2.txt &&
+	: force index to be dirty &&
+	test-tool chmtime -60 .git/index &&
+	git status &&
+	test_path_is_missing marker &&
+	test_path_is_file marker2
+'
+
 test_expect_success 'test status, add, commit, others trigger hook without flags set' '
 	test_hook post-index-change <<-\EOF &&
 		if test "$1" -eq 1; then

From e326cdd153288bda85d57570ff5d13804d18f9fb Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Fri, 18 Jun 2021 14:45:20 +0200
Subject: [PATCH 054/207] gvfs: verify that the built-in FSMonitor is disabled

When using a virtual file system layer, the FSMonitor does not make
sense.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 t/t1093-virtualfilesystem.sh | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/t/t1093-virtualfilesystem.sh b/t/t1093-virtualfilesystem.sh
index 8ba9a2a75e093a..cad13d680cb199 100755
--- a/t/t1093-virtualfilesystem.sh
+++ b/t/t1093-virtualfilesystem.sh
@@ -368,4 +368,15 @@ test_expect_success 'folder with same prefix as file' '
 	test_cmp expected actual
 '
 
+test_expect_success MINGW,FSMONITOR_DAEMON 'virtualfilesystem hook disables built-in FSMonitor' '
+	clean_repo &&
+	test_config core.usebuiltinfsmonitor true &&
+	write_script .git/hooks/virtualfilesystem <<-\EOF &&
+		printf "dir1/\0"
+	EOF
+	git config core.virtualfilesystem .git/hooks/virtualfilesystem &&
+	git status &&
+	test_must_fail git fsmonitor--daemon status
+'
+
 test_done

From ae8b8edb4e1c632874462fba79e6dc8fa9f73650 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Fri, 21 Jun 2024 17:18:59 -0400
Subject: [PATCH 055/207] wt-status: add trace2 data for sparse-checkout
 percentage

When sparse-checkout is enabled, add the sparse-checkout percentage to
the Trace2 data stream.  This number was already computed and printed
on the console in the "You are in a sparse checkout..." message.  It
would be helpful to log it too for performance monitoring.
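
One way to observe the new datum (the trace output path is arbitrary):

	GIT_TRACE2_EVENT=/tmp/status-trace.json git status
	grep sparse-checkout/percentage /tmp/status-trace.json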

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 wt-status.c | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/wt-status.c b/wt-status.c
index e97b7db9db2abc..076ccb1181ea5a 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -2570,6 +2570,36 @@ void wt_status_print(struct wt_status *s)
 			   s->untracked.nr);
 	trace2_data_intmax("status", s->repo, "count/ignored", s->ignored.nr);
 
+	switch (s->state.sparse_checkout_percentage) {
+	case SPARSE_CHECKOUT_DISABLED:
+		break;
+	case SPARSE_CHECKOUT_SPARSE_INDEX:
+		/*
+		 * Log just the observed size of the sparse-index.
+		 *
+		 * When sparse-index is enabled we can have
+		 * sparse-directory entries in addition to individual
+		 * sparse-file entries, so we don't know the complete
+		 * size of the index.  And we do not want to force
+		 * expand it just to emit some telemetry data.  So we
+		 * cannot report a percentage for the space savings.
+		 *
+		 * It is possible that if the telemetry data is
+		 * aggregated, someone will have a good estimate for
+		 * the size of a fully populated index and can compute
+		 * a percentage after the fact.
+		 */
+		trace2_data_intmax("status", s->repo,
+				   "sparse-index/size",
+				   s->repo->index->cache_nr);
+		break;
+	default:
+		trace2_data_intmax("status", s->repo,
+				   "sparse-checkout/percentage",
+				   s->state.sparse_checkout_percentage);
+		break;
+	}
+
 	trace2_region_enter("status", "print", s->repo);
 
 	switch (s->status_format) {

From 627045e9c76c42d8ed50f7a6d1f5746f6d1ed392 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Mon, 24 Jun 2024 11:24:20 -0400
Subject: [PATCH 056/207] wt-status: add VFS hydration percentage to normal
 `git status` output

Add VFS checkout hydration percentage information to the default `git
status` output.  When VFS is enabled, users will now see a "You are in
a partially-hydrated checkout with <percentage> of tracked files
present." message.

Upstream `git status` normally prints "You are in a sparse checkout
with <percentage> of tracked files present."  This message was hidden
in `microsoft/git` when `core_virtualfilesystem` is set (because GVFS
users are always (and secretly) in a sparse checkout) and it was
thought that it would annoy users.

However, we now believe that it may be helpful for users to always see
the percentage and know when they are over-hydrated, since
over-hydration can occur by accident and may greatly impact their Git
performance.  Knowing this value may help with GVFS support.
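
With this change, a partially-hydrated checkout reports something like
the following (percentage taken from the adjusted t1093 expectation):

	$ git status
	On branch main
	You are in a partially-hydrated checkout with 75% of tracked files present.

	nothing to commit, working tree clean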

Helped-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 t/t1093-virtualfilesystem.sh |  2 ++
 wt-status.c                  | 13 +++++++++----
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/t/t1093-virtualfilesystem.sh b/t/t1093-virtualfilesystem.sh
index cad13d680cb199..7786735dffec06 100755
--- a/t/t1093-virtualfilesystem.sh
+++ b/t/t1093-virtualfilesystem.sh
@@ -69,6 +69,8 @@ test_expect_success 'verify status is clean' '
 	git status > actual &&
 	cat > expected <<-\EOF &&
 		On branch main
+		You are in a partially-hydrated checkout with 75% of tracked files present.
+
 		nothing to commit, working tree clean
 	EOF
 	test_cmp expected actual
diff --git a/wt-status.c b/wt-status.c
index 076ccb1181ea5a..9eef96e85bf7a3 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -1612,10 +1612,15 @@ static void show_sparse_checkout_in_use(struct wt_status *s,
 {
 	if (s->state.sparse_checkout_percentage == SPARSE_CHECKOUT_DISABLED)
 		return;
-	if (core_virtualfilesystem)
-		return;
-
-	if (s->state.sparse_checkout_percentage == SPARSE_CHECKOUT_SPARSE_INDEX)
+	if (core_virtualfilesystem) {
+		if (s->state.sparse_checkout_percentage == SPARSE_CHECKOUT_SPARSE_INDEX)
+			status_printf_ln(s, color,
+					 _("You are in a partially-hydrated checkout with a sparse index."));
+		else
+			status_printf_ln(s, color,
+					 _("You are in a partially-hydrated checkout with %d%% of tracked files present."),
+					 s->state.sparse_checkout_percentage);
+	} else if (s->state.sparse_checkout_percentage == SPARSE_CHECKOUT_SPARSE_INDEX)
 		status_printf_ln(s, color, _("You are in a sparse checkout."));
 	else
 		status_printf_ln(s, color,

From 213f2954bb03d9e5631b508ecc78c330d7bfcdd8 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 22 Aug 2017 11:54:23 -0400
Subject: [PATCH 057/207] status: add status serialization mechanism

Teach STATUS to optionally serialize the results of a
status computation to a file.

Teach STATUS to optionally read an existing serialization
file and simply print the results, rather than actually
scanning.

This is intended for immediate status results on extremely
large repos and assumes the use of a service/daemon to
maintain a fresh current status snapshot.
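
A sketch of the intended flow (the cache path is arbitrary; a stale or
mismatched cache makes `--deserialize` fall back to a normal scan):

	# a background service refreshes the cache periodically:
	git status --untracked-files=complete --ignored=matching --serialize >.git/status.cache
	# interactive callers print the cached result instead of scanning:
	git status --deserialize=.git/status.cache
	# or implicitly, via config:
	git config status.deserializePath .git/status.cache
	git status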

2021-10-30: packet_read() changed its prototype in ec9a37d (pkt-line.[ch]:
remove unused packet_read_line_buf(), 2021-10-14).

2021-10-30: sscanf() now does an extra check that "%d" goes into an "int"
and complains about "uint32_t". Replacing with "%u" fixes the compile-time
error.

2021-10-30: string_list_init() was removed by abf897b (string-list.[ch]:
remove string_list_init() compatibility function, 2021-09-28), so we need to
initialize manually.

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 Documentation/config/status.txt               |   6 +
 Documentation/git-status.txt                  |  33 +
 .../technical/status-serialization-format.txt | 107 +++
 Makefile                                      |   2 +
 builtin/commit.c                              | 123 +++-
 contrib/completion/git-completion.bash        |   2 +-
 meson.build                                   |   2 +
 pkt-line.c                                    |   2 +-
 pkt-line.h                                    |   1 +
 t/meson.build                                 |   2 +
 t/t7522-serialized-status.sh                  | 141 ++++
 t/t7523-status-complete-untracked.sh          |  39 ++
 wt-status-deserialize.c                       | 622 ++++++++++++++++++
 wt-status-serialize.c                         | 219 ++++++
 wt-status.c                                   |   6 +
 wt-status.h                                   |  53 +-
 16 files changed, 1356 insertions(+), 4 deletions(-)
 create mode 100644 Documentation/technical/status-serialization-format.txt
 create mode 100755 t/t7522-serialized-status.sh
 create mode 100755 t/t7523-status-complete-untracked.sh
 create mode 100644 wt-status-deserialize.c
 create mode 100644 wt-status-serialize.c

diff --git a/Documentation/config/status.txt b/Documentation/config/status.txt
index 8caf90f51c19a3..7302b066644e73 100644
--- a/Documentation/config/status.txt
+++ b/Documentation/config/status.txt
@@ -77,3 +77,9 @@ status.submoduleSummary::
 	the --ignore-submodules=dirty command-line option or the 'git
 	submodule summary' command, which shows a similar output but does
 	not honor these settings.
+
+status.deserializePath::
+	EXPERIMENTAL, Pathname to a file containing cached status results
+	generated by `--serialize`.  This will be overridden by
+	`--deserialize=<path>` on the command line.  If the cache file is
+	invalid or stale, git will fall back and compute status normally.
diff --git a/Documentation/git-status.txt b/Documentation/git-status.txt
index 9a376886a5867a..fedf86d32718eb 100644
--- a/Documentation/git-status.txt
+++ b/Documentation/git-status.txt
@@ -151,6 +151,19 @@ ignored, then the directory is not shown, but all contents are shown.
 	threshold.
 	See also linkgit:git-diff[1] `--find-renames`.
 
+--serialize[=<version>]::
+	(EXPERIMENTAL) Serialize raw status results to stdout in a
+	format suitable for use by `--deserialize`.  Valid values for
+	`<version>` are "1" and "v1".
+
+--deserialize[=<path>]::
+	(EXPERIMENTAL) Deserialize raw status results from a file or
+	stdin rather than scanning the worktree.  If `<path>` is omitted
+	and `status.deserializePath` is unset, input is read from stdin.
+--no-deserialize::
+	(EXPERIMENTAL) Disable implicit deserialization of status results
+	from the value of `status.deserializePath`.
+
 <pathspec>...::
 	See the 'pathspec' entry in linkgit:gitglossary[7].
 
@@ -424,6 +437,26 @@ quoted as explained for the configuration variable `core.quotePath`
 (see linkgit:git-config[1]).
 
 
+SERIALIZATION and DESERIALIZATION (EXPERIMENTAL)
+------------------------------------------------
+
+The `--serialize` option allows git to cache the result of a
+possibly time-consuming status scan to a binary file.  A local
+service/daemon watching file system events could use this to
+periodically pre-compute a fresh status result.
+
+Interactive users could then use `--deserialize` to simply
+(and immediately) print the last-known-good result without
+waiting for the status scan.
+
+The binary serialization file format includes some worktree state
+information allowing `--deserialize` to reject the cached data
+and force a normal status scan if, for example, the commit, branch,
+or status modes/options change.  The format cannot, however, indicate
+when the cached data is otherwise stale -- that coordination belongs
+to the task driving the serializations.
+
+
 CONFIGURATION
 -------------
 
diff --git a/Documentation/technical/status-serialization-format.txt b/Documentation/technical/status-serialization-format.txt
new file mode 100644
index 00000000000000..475ae814495581
--- /dev/null
+++ b/Documentation/technical/status-serialization-format.txt
@@ -0,0 +1,107 @@
+Git status serialization format
+===============================
+
+Git status serialization enables git to dump the results of a status scan
+to a binary file.  This file can then be loaded by later status invocations
+to print the cached status results.
+
+The file contains the essential fields from:
+() the index
+() the "struct wt_status" for the overall results
+() the contents of "struct wt_status_change_data" for tracked changed files
+() the list of untracked and ignored files
+
+Version 1 Format:
+=================
+
+The V1 file begins with a required header section followed by optional
+sections for each type of item (changed, untracked, ignored).  Individual
+item sections are only present if necessary.  Each item section begins
+with an item-type header with the number of items in the section.
+
+Each "line" in the format is encoded using pkt-line with a final LF.
+Flush packets are used to terminate sections.
+
+-----------------
+PKT-LINE("version" SP "1")
+<v1-header-section>
+[<v1-changed-item-section>]
+[<v1-untracked-item-section>]
+[<v1-ignored-item-section>]
+-----------------
+
+
+V1 Header
+---------
+
+The v1-header-section fields are taken directly from "struct wt_status".
+Each field is printed on a separate pkt-line.  Lines for NULL string
+values are omitted.  All integers are printed with "%d".  OIDs are
+printed in hex.
+
+v1-header-section    = <v1-index-headers>
+		       <v1-wt-status-headers>
+		       PKT-LINE(<flush>)
+
+v1-index-headers     = PKT-LINE("index_mtime" SP <sec> SP <nsec> LF)
+
+v1-wt-status-headers = PKT-LINE("is_initial" SP <integer> LF)
+		       [ PKT-LINE("branch" SP <branch-name> LF) ]
+		       [ PKT-LINE("reference" SP <reference-name> LF) ]
+		       PKT-LINE("show_ignored_files" SP <integer> LF)
+		       PKT-LINE("show_untracked_files" SP <integer> LF)
+		       PKT-LINE("show_ignored_directory" SP <integer> LF)
+		       [ PKT-LINE("ignore_submodule_arg" SP <string> LF) ]
+		       PKT-LINE("detect_rename" SP <integer> LF)
+		       PKT-LINE("rename_score" SP <integer> LF)
+		       PKT-LINE("rename_limit" SP <integer> LF)
+		       PKT-LINE("detect_break" SP <integer> LF)
+		       PKT-LINE("sha1_commit" SP <oid> LF)
+		       PKT-LINE("committable" SP <integer> LF)
+		       PKT-LINE("workdir_dirty" SP <integer> LF)
+
+
+V1 Changed Items
+----------------
+
+The v1-changed-item-section lists all of the changed items with one
+item per pkt-line.  Each pkt-line contains:  a binary block of data
+from "struct wt_status_serialize_data_fixed" in a fixed header where
+integers are in network byte order and OIDs are in raw (non-hex) form.
+This is followed by one or two raw pathnames (not c-quoted) with NUL
+terminators (both NULs are always present even if there is no rename).
+
+v1-changed-item-section = PKT-LINE("changed" SP <count> LF)
+			  [ PKT-LINE(<changed_item> LF) ]+
+			  PKT-LINE(<flush>)
+
+changed_item = <byte[4] worktree_status>
+	       <byte[4] index_status>
+	       <byte[4] stagemask>
+	       <byte[4] score>
+	       <byte[4] mode_head>
+	       <byte[4] mode_index>
+	       <byte[4] mode_worktree>
+	       <byte[4] dirty_submodule>
+	       <byte[4] new_submodule_commits>
+	       <byte[20] oid_head>
+	       <byte[20] oid_index>
+	       <byte[*] path>
+	       NUL
+	       [ <byte[*] src_path> ]
+	       NUL
+
+
+V1 Untracked and Ignored Items
+------------------------------
+
+These sections are simple lists of pathnames.  They ARE NOT
+c-quoted.
+
+v1-untracked-item-section = PKT-LINE("untracked" SP <count> LF)
+			    [ PKT-LINE(<pathname> LF) ]+
+			    PKT-LINE(<flush>)
+
+v1-ignored-item-section = PKT-LINE("ignored" SP <count> LF)
+			  [ PKT-LINE(<pathname> LF) ]+
+			  PKT-LINE(<flush>)
diff --git a/Makefile b/Makefile
index 7074643902c95b..801e4111f18b9f 100644
--- a/Makefile
+++ b/Makefile
@@ -1205,6 +1205,8 @@ LIB_OBJS += wrapper.o
 LIB_OBJS += write-or-die.o
 LIB_OBJS += ws.o
 LIB_OBJS += wt-status.o
+LIB_OBJS += wt-status-deserialize.o
+LIB_OBJS += wt-status-serialize.o
 LIB_OBJS += xdiff-interface.o
 
 BUILTIN_OBJS += builtin/add.o
diff --git a/builtin/commit.c b/builtin/commit.c
index f4f87d01d5a90b..a031af40e80e1c 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -164,6 +164,70 @@ static int opt_parse_porcelain(const struct option *opt, const char *arg, int un
 	return 0;
 }
 
+static int do_serialize = 0;
+static int do_implicit_deserialize = 0;
+static int do_explicit_deserialize = 0;
+static char *deserialize_path = NULL;
+
+/*
+ * --serialize | --serialize=1 | --serialize=v1
+ *
+ * Request that we serialize our output rather than printing in
+ * any of the established formats.  Optionally specify serialization
+ * version.
+ */
+static int opt_parse_serialize(const struct option *opt, const char *arg, int unset)
+{
+	enum wt_status_format *value = (enum wt_status_format *)opt->value;
+	if (unset || !arg)
+		*value = STATUS_FORMAT_SERIALIZE_V1;
+	else if (!strcmp(arg, "v1") || !strcmp(arg, "1"))
+		*value = STATUS_FORMAT_SERIALIZE_V1;
+	else
+		die("unsupported serialize version '%s'", arg);
+
+	if (do_explicit_deserialize)
+		die("cannot mix --serialize and --deserialize");
+	do_implicit_deserialize = 0;
+
+	do_serialize = 1;
+	return 0;
+}
+
+/*
+ * --deserialize | --deserialize=<path> |
+ * --no-deserialize
+ *
+ * Request that we deserialize status data from some existing resource
+ * rather than performing a status scan.
+ *
+ * The input source can come from stdin or a path given here -- or be
+ * inherited from the config settings.
+ */
+static int opt_parse_deserialize(const struct option *opt UNUSED, const char *arg, int unset)
+{
+	if (unset) {
+		do_implicit_deserialize = 0;
+		do_explicit_deserialize = 0;
+	} else {
+		if (do_serialize)
+			die("cannot mix --serialize and --deserialize");
+		if (arg) {
+			/* override config or stdin */
+			free(deserialize_path);
+			deserialize_path = xstrdup(arg);
+		}
+		if (deserialize_path && *deserialize_path
+		    && (access(deserialize_path, R_OK) != 0))
+			die("cannot find serialization file '%s'",
+			    deserialize_path);
+
+		do_explicit_deserialize = 1;
+	}
+
+	return 0;
+}
+
 static int opt_parse_m(const struct option *opt, const char *arg, int unset)
 {
 	struct strbuf *buf = opt->value;
@@ -1186,6 +1250,8 @@ static enum untracked_status_type parse_untracked_setting_name(const char *u)
 		return SHOW_NORMAL_UNTRACKED_FILES;
 	else if (!strcmp(u, "all"))
 		return SHOW_ALL_UNTRACKED_FILES;
+	else if (!strcmp(u,"complete"))
+		return SHOW_COMPLETE_UNTRACKED_FILES;
 	else
 		return SHOW_UNTRACKED_FILES_ERROR;
 }
@@ -1481,6 +1547,19 @@ static int git_status_config(const char *k, const char *v,
 		s->relative_paths = git_config_bool(k, v);
 		return 0;
 	}
+	if (!strcmp(k, "status.deserializepath")) {
+		/*
+		 * Automatically assume deserialization if this is
+		 * set in the config and the file exists.  Do not
+		 * complain if the file does not exist, because we
+		 * silently fall back to normal mode.
+		 */
+		if (v && *v && access(v, R_OK) == 0) {
+			do_implicit_deserialize = 1;
+			deserialize_path = xstrdup(v);
+		}
+		return 0;
+	}
 	if (!strcmp(k, "status.showuntrackedfiles")) {
 		enum untracked_status_type u;
 
@@ -1520,7 +1599,8 @@ struct repository *repo UNUSED)
 	static const char *rename_score_arg = (const char *)-1;
 	static struct wt_status s;
 	unsigned int progress_flag = 0;
-	int fd;
+	int try_deserialize;
+	int fd = -1;
 	struct object_id oid;
 	static struct option builtin_status_options[] = {
 		OPT__VERBOSE(&verbose, N_("be verbose")),
@@ -1535,6 +1615,12 @@ struct repository *repo UNUSED)
 		OPT_CALLBACK_F(0, "porcelain", &status_format,
 		  N_("version"), N_("machine-readable output"),
 		  PARSE_OPT_OPTARG, opt_parse_porcelain),
+		{ OPTION_CALLBACK, 0, "serialize", &status_format,
+		  N_("version"), N_("serialize raw status data to stdout"),
+		  PARSE_OPT_OPTARG | PARSE_OPT_NONEG, opt_parse_serialize },
+		{ OPTION_CALLBACK, 0, "deserialize", NULL,
+		  N_("path"), N_("deserialize raw status data from file"),
+		  PARSE_OPT_OPTARG, opt_parse_deserialize },
 		OPT_SET_INT(0, "long", &status_format,
 			    N_("show status in long format (default)"),
 			    STATUS_FORMAT_LONG),
@@ -1579,10 +1665,26 @@ struct repository *repo UNUSED)
 	    s.show_untracked_files == SHOW_NO_UNTRACKED_FILES)
 		die(_("Unsupported combination of ignored and untracked-files arguments"));
 
+	if (s.show_untracked_files == SHOW_COMPLETE_UNTRACKED_FILES &&
+	    s.show_ignored_mode == SHOW_NO_IGNORED)
+		die(_("Complete Untracked only supported with ignored files"));
+
 	parse_pathspec(&s.pathspec, 0,
 		       PATHSPEC_PREFER_FULL,
 		       prefix, argv);
 
+	/*
+	 * If we want to try to deserialize status data from a cache file,
+	 * we need to re-order the initialization code.  The problem is that
+	 * this makes for a very nasty diff and causes merge conflicts as we
+	 * carry it forward.  And it is easy to mess up the merge, so we
+	 * duplicate some code here to hopefully reduce conflicts.
+	 */
+	try_deserialize = (!do_serialize &&
+			   (do_implicit_deserialize || do_explicit_deserialize));
+	if (try_deserialize)
+		goto skip_init;
+
 	enable_fscache(0);
 	if (status_format != STATUS_FORMAT_PORCELAIN &&
 	    status_format != STATUS_FORMAT_PORCELAIN_V2)
@@ -1597,6 +1699,7 @@ struct repository *repo UNUSED)
 	else
 		fd = -1;
 
+skip_init:
 	s.is_initial = repo_get_oid(the_repository, s.reference, &oid) ? 1 : 0;
 	if (!s.is_initial)
 		oidcpy(&s.oid_commit, &oid);
@@ -1613,6 +1716,24 @@ struct repository *repo UNUSED)
 			s.rename_score = parse_rename_score(&rename_score_arg);
 	}
 
+	if (try_deserialize) {
+		if (s.relative_paths)
+			s.prefix = prefix;
+
+		if (wt_status_deserialize(&s, deserialize_path) == DESERIALIZE_OK)
+			return 0;
+
+		/* deserialize failed, so force the initialization we skipped above. */
+		enable_fscache(1);
+		repo_read_index_preload(the_repository, &s.pathspec, 0);
+		refresh_index(the_repository->index, REFRESH_QUIET|REFRESH_UNMERGED, &s.pathspec, NULL, NULL);
+
+		if (use_optional_locks())
+			fd = repo_hold_locked_index(the_repository, &index_lock, 0);
+		else
+			fd = -1;
+	}
+
 	wt_status_collect(&s);
 
 	if (0 <= fd)
diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash
index b3b6aa3bae2919..8cb868301057fe 100644
--- a/contrib/completion/git-completion.bash
+++ b/contrib/completion/git-completion.bash
@@ -1802,7 +1802,7 @@ _git_clone ()
 	esac
 }
 
-__git_untracked_file_modes="all no normal"
+__git_untracked_file_modes="all no normal complete"
 
 __git_trailer_tokens ()
 {
diff --git a/meson.build b/meson.build
index 2143945ad03bb2..4e7a219b6517c6 100644
--- a/meson.build
+++ b/meson.build
@@ -473,6 +473,8 @@ libgit_sources = [
   'write-or-die.c',
   'ws.c',
   'wt-status.c',
+  'wt-status-deserialize.c',
+  'wt-status-serialize.c',
   'xdiff-interface.c',
   'xdiff/xdiffi.c',
   'xdiff/xemit.c',
diff --git a/pkt-line.c b/pkt-line.c
index a5bcbc96fb340f..ae54a960a6320e 100644
--- a/pkt-line.c
+++ b/pkt-line.c
@@ -230,7 +230,7 @@ static int do_packet_write(const int fd_out, const char *buf, size_t size,
 	return 0;
 }
 
-static int packet_write_gently(const int fd_out, const char *buf, size_t size)
+int packet_write_gently(const int fd_out, const char *buf, size_t size)
 {
 	struct strbuf err = STRBUF_INIT;
 	if (do_packet_write(fd_out, buf, size, &err)) {
diff --git a/pkt-line.h b/pkt-line.h
index 3b33cc64f34dcc..10fd9a812e1935 100644
--- a/pkt-line.h
+++ b/pkt-line.h
@@ -29,6 +29,7 @@ void packet_write(int fd_out, const char *buf, size_t size);
 void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
 int packet_flush_gently(int fd);
 int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
+int packet_write_gently(const int fd_out, const char *buf, size_t size);
 int write_packetized_from_fd_no_flush(int fd_in, int fd_out);
 int write_packetized_from_buf_no_flush_count(const char *src_in, size_t len,
 					     int fd_out, int *packet_counter);
diff --git a/t/meson.build b/t/meson.build
index 1d341c406e00ac..014fd228740e88 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -919,6 +919,8 @@ integration_tests = [
   't7519-status-fsmonitor.sh',
   't7520-ignored-hook-warning.sh',
   't7521-ignored-mode.sh',
+  't7522-serialized-status.sh',
+  't7523-status-complete-untracked.sh',
   't7524-commit-summary.sh',
   't7525-status-rename.sh',
   't7526-commit-pathspec-file.sh',
diff --git a/t/t7522-serialized-status.sh b/t/t7522-serialized-status.sh
new file mode 100755
index 00000000000000..283a98bdf750e6
--- /dev/null
+++ b/t/t7522-serialized-status.sh
@@ -0,0 +1,141 @@
+#!/bin/sh
+
+test_description='git serialized status tests'
+
+. ./test-lib.sh
+
+# This file includes tests for serializing / deserializing
+# status data. These tests cover two basic features:
+#
+# [1] Because users can request different types of untracked-file
+#     and ignored file reporting, the cache data generated by
+#     serialize must use either the same untracked and ignored
+#     parameters as the later deserialize invocation; otherwise,
+#     the deserialize invocation must disregard the cached data
+#     and run a full scan itself.
+#
+#     To increase the number of cases where the cached status can
+#     be used, we have added a "--untracked-file=complete" option
+#     that reports a superset or union of the results from the
+#     "-u normal" and "-u all".  We combine this with a filter in
+#     deserialize to filter the results.
+#
+#     Ignored file reporting is simpler in that it is all or
+#     nothing; there are no subsets.
+#
+#     The tests here (in addition to confirming that a cache
+#     file can be generated and used by a subsequent status
+#     command) need to test this untracked-file filtering.
+#
+# [2] ensuring the status calls are using data from the status
+#     cache as expected.  This includes verifying cached data
+#     is used when appropriate as well as falling back to
+#     performing a new status scan when the data in the cache
+#     is insufficient/known stale.
+
+test_expect_success 'setup' '
+	git branch -M main &&
+	cat >.gitignore <<-\EOF &&
+	*.ign
+	ignored_dir/
+	EOF
+
+	mkdir tracked ignored_dir &&
+	touch tracked_1.txt tracked/tracked_1.txt &&
+	git add . &&
+	test_tick &&
+	git commit -m"Adding original file." &&
+	mkdir untracked &&
+	touch ignored.ign ignored_dir/ignored_2.txt \
+	      untracked_1.txt untracked/untracked_2.txt untracked/untracked_3.txt
+'
+
+test_expect_success 'verify untracked-files=complete with no conversion' '
+	test_when_finished "rm serialized_status.dat new_change.txt output" &&
+	cat >expect <<-\EOF &&
+	? expect
+	? serialized_status.dat
+	? untracked/
+	? untracked/untracked_2.txt
+	? untracked/untracked_3.txt
+	? untracked_1.txt
+	! ignored.ign
+	! ignored_dir/
+	EOF
+
+	git status --untracked-files=complete --ignored=matching --serialize >serialized_status.dat &&
+	touch new_change.txt &&
+
+	git status --porcelain=v2 --untracked-files=complete --ignored=matching --deserialize=serialized_status.dat >output &&
+	test_cmp expect output
+'
+
+test_expect_success 'verify untracked-files=complete to untracked-files=normal conversion' '
+	test_when_finished "rm serialized_status.dat new_change.txt output" &&
+	cat >expect <<-\EOF &&
+	? expect
+	? serialized_status.dat
+	? untracked/
+	? untracked_1.txt
+	EOF
+
+	git status --untracked-files=complete --ignored=matching --serialize >serialized_status.dat &&
+	touch new_change.txt &&
+
+	git status --porcelain=v2 --deserialize=serialized_status.dat >output &&
+	test_cmp expect output
+'
+
+test_expect_success 'verify untracked-files=complete to untracked-files=all conversion' '
+	test_when_finished "rm serialized_status.dat new_change.txt output" &&
+	cat >expect <<-\EOF &&
+	? expect
+	? serialized_status.dat
+	? untracked/untracked_2.txt
+	? untracked/untracked_3.txt
+	? untracked_1.txt
+	! ignored.ign
+	! ignored_dir/
+	EOF
+
+	git status --untracked-files=complete --ignored=matching --serialize >serialized_status.dat &&
+	touch new_change.txt &&
+
+	git status --porcelain=v2 --untracked-files=all --ignored=matching --deserialize=serialized_status.dat >output &&
+	test_cmp expect output
+'
+
+test_expect_success 'verify serialized status with non-convertible ignore mode does new scan' '
+	test_when_finished "rm serialized_status.dat new_change.txt output" &&
+	cat >expect <<-\EOF &&
+	? expect
+	? new_change.txt
+	? output
+	? serialized_status.dat
+	? untracked/
+	? untracked_1.txt
+	! ignored.ign
+	! ignored_dir/
+	EOF
+
+	git status --untracked-files=complete --ignored=matching --serialize >serialized_status.dat &&
+	touch new_change.txt &&
+
+	git status --porcelain=v2 --ignored --deserialize=serialized_status.dat >output &&
+	test_cmp expect output
+'
+
+test_expect_success 'verify serialized status handles path scopes' '
+	test_when_finished "rm serialized_status.dat new_change.txt output" &&
+	cat >expect <<-\EOF &&
+	? untracked/
+	EOF
+
+	git status --untracked-files=complete --ignored=matching --serialize >serialized_status.dat &&
+	touch new_change.txt &&
+
+	git status --porcelain=v2 --deserialize=serialized_status.dat untracked >output &&
+	test_cmp expect output
+'
+
+test_done
diff --git a/t/t7523-status-complete-untracked.sh b/t/t7523-status-complete-untracked.sh
new file mode 100755
index 00000000000000..f79611fc024f48
--- /dev/null
+++ b/t/t7523-status-complete-untracked.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+test_description='git status untracked complete tests'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+	cat >.gitignore <<-\EOF &&
+	*.ign
+	ignored_dir/
+	EOF
+
+	mkdir tracked ignored_dir &&
+	touch tracked_1.txt tracked/tracked_1.txt &&
+	git add . &&
+	test_tick &&
+	git commit -m"Adding original file." &&
+	mkdir untracked &&
+	touch ignored.ign ignored_dir/ignored_2.txt \
+	      untracked_1.txt untracked/untracked_2.txt untracked/untracked_3.txt
+'
+
+test_expect_success 'verify untracked-files=complete' '
+	cat >expect <<-\EOF &&
+	? expect
+	? output
+	? untracked/
+	? untracked/untracked_2.txt
+	? untracked/untracked_3.txt
+	? untracked_1.txt
+	! ignored.ign
+	! ignored_dir/
+	EOF
+
+	git status --porcelain=v2 --untracked-files=complete --ignored >output &&
+	test_cmp expect output
+'
+
+test_done
diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c
new file mode 100644
index 00000000000000..90174bb3d1fc51
--- /dev/null
+++ b/wt-status-deserialize.c
@@ -0,0 +1,622 @@
+#define USE_THE_REPOSITORY_VARIABLE
+#include "git-compat-util.h"
+#include "environment.h"
+#include "hex.h"
+#include "hash.h"
+#include "wt-status.h"
+#include "pkt-line.h"
+#include "trace.h"
+#include "statinfo.h"
+#include "hex.h"
+
+static struct trace_key trace_deserialize = TRACE_KEY_INIT(DESERIALIZE);
+
+enum deserialize_parse_strategy {
+	DESERIALIZE_STRATEGY_AS_IS,
+	DESERIALIZE_STRATEGY_SKIP,
+	DESERIALIZE_STRATEGY_NORMAL,
+	DESERIALIZE_STRATEGY_ALL
+};
+
+static int check_path_contains(const char *out, int out_len, const char *in, int in_len)
+{
+	return (out_len > 0 &&
+		out_len < in_len &&
+		(out[out_len - 1] == '/') &&
+		!memcmp(out, in, out_len));
+}
+
+static const char *my_packet_read_line(int fd, int *line_len)
+{
+	static char buf[LARGE_PACKET_MAX];
+
+	*line_len = packet_read(fd, buf, sizeof(buf),
+				PACKET_READ_CHOMP_NEWLINE |
+				PACKET_READ_GENTLE_ON_EOF);
+	return (*line_len > 0) ? buf : NULL;
+}
+
+/*
+ * mtime_reported contains the mtime of the index when the
+ * serialization snapshot was computed.
+ *
+ * mtime_observed_on_disk contains the mtime of the index now.
+ *
+ * If these 2 times are different, then the .git/index has
+ * changed since the serialization cache was created and we
+ * must reject the cache because anything could have changed.
+ *
+ * If they are the same, we continue trying to use the cache.
+ */
+static int my_validate_index(const char *path, const struct cache_time *mtime_reported)
+{
+	struct stat st;
+	struct cache_time mtime_observed_on_disk;
+
+	if (lstat(path, &st)) {
+		trace_printf_key(&trace_deserialize, "could not stat index");
+		return DESERIALIZE_ERR;
+	}
+	mtime_observed_on_disk.sec = st.st_mtime;
+	mtime_observed_on_disk.nsec = ST_MTIME_NSEC(st);
+	if ((mtime_observed_on_disk.sec != mtime_reported->sec) ||
+	    (mtime_observed_on_disk.nsec != mtime_reported->nsec)) {
+		trace_printf_key(&trace_deserialize, "index mtime changed [des %d.%d][obs %d.%d]",
+			     mtime_reported->sec, mtime_reported->nsec,
+			     mtime_observed_on_disk.sec, mtime_observed_on_disk.nsec);
+		return DESERIALIZE_ERR;
+	}
+
+	return DESERIALIZE_OK;
+}
+
+static int wt_deserialize_v1_header(struct wt_status *s, int fd)
+{
+	struct cache_time index_mtime;
+	int line_len, nr_fields;
+	const char *line;
+	const char *arg;
+
+	/*
+	 * parse header lines up to the first flush packet.
+	 */
+	while ((line = my_packet_read_line(fd, &line_len))) {
+
+		if (skip_prefix(line, "index_mtime ", &arg)) {
+			nr_fields = sscanf(arg, "%u %u",
+					   &index_mtime.sec,
+					   &index_mtime.nsec);
+			if (nr_fields != 2) {
+				trace_printf_key(&trace_deserialize, "invalid index_mtime (%d) '%s'",
+					     nr_fields, line);
+				return DESERIALIZE_ERR;
+			}
+			continue;
+		}
+
+		if (skip_prefix(line, "is_initial ", &arg)) {
+			s->is_initial = (int)strtol(arg, NULL, 10);
+			continue;
+		}
+		if (skip_prefix(line, "branch ", &arg)) {
+			s->branch = xstrdup(arg);
+			continue;
+		}
+		if (skip_prefix(line, "reference ", &arg)) {
+			s->reference = xstrdup(arg);
+			continue;
+		}
+		/* pathspec */
+		/* verbose */
+		/* amend */
+		if (skip_prefix(line, "whence ", &arg)) {
+			s->whence = (int)strtol(arg, NULL, 10);
+			continue;
+		}
+		/* nowarn */
+		/* use_color */
+		/* no_gettext */
+		/* display_comment_prefix */
+		/* relative_paths */
+		/* submodule_summary */
+		if (skip_prefix(line, "show_ignored_mode ", &arg)) {
+			s->show_ignored_mode = (int)strtol(arg, NULL, 10);
+			continue;
+		}
+		if (skip_prefix(line, "show_untracked_files ", &arg)) {
+			s->show_untracked_files = (int)strtol(arg, NULL, 10);
+			continue;
+		}
+		if (skip_prefix(line, "ignore_submodule_arg ", &arg)) {
+			s->ignore_submodule_arg = xstrdup(arg);
+			continue;
+		}
+		/* color_palette */
+		/* colopts */
+		/* null_termination */
+		/* commit_template */
+		/* show_branch */
+		/* show_stash */
+		if (skip_prefix(line, "hints ", &arg)) {
+			s->hints = (int)strtol(arg, NULL, 10);
+			continue;
+		}
+		if (skip_prefix(line, "detect_rename ", &arg)) {
+			s->detect_rename = (int)strtol(arg, NULL, 10);
+			continue;
+		}
+		if (skip_prefix(line, "rename_score ", &arg)) {
+			s->rename_score = (int)strtol(arg, NULL, 10);
+			continue;
+		}
+		if (skip_prefix(line, "rename_limit ", &arg)) {
+			s->rename_limit = (int)strtol(arg, NULL, 10);
+			continue;
+		}
+		/* status_format */
+		if (skip_prefix(line, "sha1_commit ", &arg)) {
+			if (get_oid_hex(arg, &s->oid_commit)) {
+				trace_printf_key(&trace_deserialize, "invalid sha1_commit");
+				return DESERIALIZE_ERR;
+			}
+			continue;
+		}
+		if (skip_prefix(line, "committable ", &arg)) {
+			s->committable = (int)strtol(arg, NULL, 10);
+			continue;
+		}
+		if (skip_prefix(line, "workdir_dirty ", &arg)) {
+			s->workdir_dirty = (int)strtol(arg, NULL, 10);
+			continue;
+		}
+		/* prefix */
+
+		trace_printf_key(&trace_deserialize, "unexpected line '%s'", line);
+		return DESERIALIZE_ERR;
+	}
+
+	return my_validate_index(s->index_file, &index_mtime);
+}
+
+/*
+ * Build a string-list of (count) <changed-item> lines from the input.
+ */
+static int wt_deserialize_v1_changed_items(struct wt_status *s, int fd, int count UNUSED)
+{
+	struct wt_status_serialize_data *sd;
+	char *p;
+	int line_len;
+	const char *line;
+	struct string_list_item *item;
+
+	memset(&s->change, 0, sizeof(s->change));
+	s->change.strdup_strings = 1;
+
+	/*
+	 * <wt_status_change_data_fields>+
+	 * <flush>
+	 *
+	 * <fixed_part><path> NUL [<rename_source>] NUL
+	 */
+	while ((line = my_packet_read_line(fd, &line_len))) {
+		struct wt_status_change_data *d = xcalloc(1, sizeof(*d));
+		sd = (struct wt_status_serialize_data *)line;
+
+		d->worktree_status = ntohl(sd->fixed.worktree_status);
+		d->index_status = ntohl(sd->fixed.index_status);
+		d->stagemask = ntohl(sd->fixed.stagemask);
+		d->rename_status = ntohl(sd->fixed.rename_status);
+		d->rename_score = ntohl(sd->fixed.rename_score);
+		d->mode_head = ntohl(sd->fixed.mode_head);
+		d->mode_index = ntohl(sd->fixed.mode_index);
+		d->mode_worktree = ntohl(sd->fixed.mode_worktree);
+		d->dirty_submodule = ntohl(sd->fixed.dirty_submodule);
+		d->new_submodule_commits = ntohl(sd->fixed.new_submodule_commits);
+		oidcpy(&d->oid_head, &sd->fixed.oid_head);
+		oidcpy(&d->oid_index, &sd->fixed.oid_index);
+
+		p = sd->variant;
+		item = string_list_append(&s->change, p);
+		p += strlen(p) + 1;
+		if (*p)
+			d->rename_source = xstrdup(p);
+		item->util = d;
+
+		trace_printf_key(
+			&trace_deserialize,
+			"change: %d %d %d %d %d %o %o %o %d %d %s %s '%s' '%s'",
+			d->worktree_status,
+			d->index_status,
+			d->stagemask,
+			d->rename_status,
+			d->rename_score,
+			d->mode_head,
+			d->mode_index,
+			d->mode_worktree,
+			d->dirty_submodule,
+			d->new_submodule_commits,
+			oid_to_hex(&d->oid_head),
+			oid_to_hex(&d->oid_index),
+			item->string,
+			(d->rename_source ? d->rename_source : ""));
+	}
+
+	return DESERIALIZE_OK;
+}
+
+static int wt_deserialize_v1_untracked_items(struct wt_status *s,
+					     int fd,
+					     int count UNUSED,
+					     enum deserialize_parse_strategy strategy)
+{
+	int line_len;
+	const char *line;
+	char *out = NULL;
+	int out_len = 0;
+
+	memset(&s->untracked, 0, sizeof(s->untracked));
+	s->untracked.strdup_strings = 1;
+
+	/*
+	 * <pathname>+
+	 * <flush>
+	 */
+	while ((line = my_packet_read_line(fd, &line_len))) {
+		if (strategy == DESERIALIZE_STRATEGY_AS_IS)
+			string_list_append(&s->untracked, line);
+		if (strategy == DESERIALIZE_STRATEGY_SKIP)
+			continue;
+		if (strategy == DESERIALIZE_STRATEGY_NORMAL) {
+
+			/* Only add "normal" entries to list */
+			if (out &&
+				check_path_contains(out, out_len, line, line_len)) {
+				continue;
+			}
+			else {
+				out = string_list_append(&s->untracked, line)->string;
+				out_len = line_len;
+			}
+		}
+		if (strategy == DESERIALIZE_STRATEGY_ALL) {
+			/* Only add "all" entries to list */
+			if (line[line_len - 1] != '/')
+				string_list_append(&s->untracked, line);
+		}
+	}
+
+	return DESERIALIZE_OK;
+}
+
+static int wt_deserialize_v1_ignored_items(struct wt_status *s,
+					   int fd,
+					   int count UNUSED,
+					   enum deserialize_parse_strategy strategy)
+{
+	int line_len;
+	const char *line;
+
+	memset(&s->ignored, 0, sizeof(s->ignored));
+	s->ignored.strdup_strings = 1;
+
+	/*
+	 * <pathname>+
+	 * <flush>
+	 */
+	while ((line = my_packet_read_line(fd, &line_len))) {
+		if (strategy == DESERIALIZE_STRATEGY_AS_IS)
+			string_list_append(&s->ignored, line);
+		else
+			continue;
+	}
+
+	return DESERIALIZE_OK;
+}
+
+static int validate_untracked_files_arg(enum untracked_status_type cmd,
+					enum untracked_status_type des,
+					enum deserialize_parse_strategy *strategy)
+{
+	*strategy = DESERIALIZE_STRATEGY_AS_IS;
+
+	if (cmd == des) {
+		*strategy = DESERIALIZE_STRATEGY_AS_IS;
+	} else if (cmd == SHOW_NO_UNTRACKED_FILES) {
+		*strategy = DESERIALIZE_STRATEGY_SKIP;
+	} else if (des == SHOW_COMPLETE_UNTRACKED_FILES) {
+		if (cmd == SHOW_ALL_UNTRACKED_FILES)
+			*strategy = DESERIALIZE_STRATEGY_ALL;
+		else if (cmd == SHOW_NORMAL_UNTRACKED_FILES)
+			*strategy = DESERIALIZE_STRATEGY_NORMAL;
+	} else {
+		return DESERIALIZE_ERR;
+	}
+
+	return DESERIALIZE_OK;
+}
+
+static int validate_ignored_files_arg(enum show_ignored_type cmd,
+				      enum show_ignored_type des,
+				      enum deserialize_parse_strategy *strategy)
+{
+	*strategy = DESERIALIZE_STRATEGY_AS_IS;
+
+	if (cmd == SHOW_NO_IGNORED) {
+		*strategy = DESERIALIZE_STRATEGY_SKIP;
+	}
+	else if (cmd != des) {
+		return DESERIALIZE_ERR;
+	}
+
+	return DESERIALIZE_OK;
+}
+
+static int wt_deserialize_v1(const struct wt_status *cmd_s, struct wt_status *s, int fd)
+{
+	int line_len;
+	const char *line;
+	const char *arg;
+	int nr_changed = 0;
+	int nr_untracked = 0;
+	int nr_ignored = 0;
+
+	enum deserialize_parse_strategy ignored_strategy = DESERIALIZE_STRATEGY_AS_IS, untracked_strategy = DESERIALIZE_STRATEGY_AS_IS;
+
+	if (wt_deserialize_v1_header(s, fd) == DESERIALIZE_ERR)
+		return DESERIALIZE_ERR;
+
+	/*
+	 * We now have the header parsed.  Look at the command args
+	 * (as passed in) and see how to parse the serialized data.
+	 */
+	if (validate_untracked_files_arg(cmd_s->show_untracked_files, s->show_untracked_files, &untracked_strategy)) {
+		trace_printf_key(&trace_deserialize, "reject: show_untracked_file: command: %d, serialized : %d",
+				cmd_s->show_untracked_files,
+				s->show_untracked_files);
+		return DESERIALIZE_ERR;
+	}
+
+	if (validate_ignored_files_arg(cmd_s->show_ignored_mode, s->show_ignored_mode, &ignored_strategy)) {
+		trace_printf_key(&trace_deserialize, "reject: show_ignored_mode: command: %d, serialized: %d",
+				cmd_s->show_ignored_mode,
+				s->show_ignored_mode);
+		return DESERIALIZE_ERR;
+	}
+
+	/*
+	 * [<changed-header> [<changed-item>+] <flush>]
+	 * [<untracked-header> [<untracked-item>+] <flush>]
+	 * [<ignored-header> [<ignored-item>+] <flush>]
+	 */
+	while ((line = my_packet_read_line(fd, &line_len))) {
+		if (skip_prefix(line, "changed ", &arg)) {
+			nr_changed = (int)strtol(arg, NULL, 10);
+			if (wt_deserialize_v1_changed_items(s, fd, nr_changed)
+			    == DESERIALIZE_ERR)
+				return DESERIALIZE_ERR;
+			continue;
+		}
+		if (skip_prefix(line, "untracked ", &arg)) {
+			nr_untracked = (int)strtol(arg, NULL, 10);
+			if (wt_deserialize_v1_untracked_items(s, fd, nr_untracked, untracked_strategy)
+			    == DESERIALIZE_ERR)
+				return DESERIALIZE_ERR;
+			continue;
+		}
+		if (skip_prefix(line, "ignored ", &arg)) {
+			nr_ignored = (int)strtol(arg, NULL, 10);
+			if (wt_deserialize_v1_ignored_items(s, fd, nr_ignored, ignored_strategy)
+			    == DESERIALIZE_ERR)
+				return DESERIALIZE_ERR;
+			continue;
+		}
+		trace_printf_key(&trace_deserialize, "unexpected line '%s'", line);
+		return DESERIALIZE_ERR;
+	}
+
+	return DESERIALIZE_OK;
+}
+
+static int wt_deserialize_parse(const struct wt_status *cmd_s, struct wt_status *s, int fd)
+{
+	int line_len;
+	const char *line;
+	const char *arg;
+
+	if ((line = my_packet_read_line(fd, &line_len)) &&
+	    (skip_prefix(line, "version ", &arg))) {
+		int version = (int)strtol(arg, NULL, 10);
+		if (version == 1)
+			return wt_deserialize_v1(cmd_s, s, fd);
+	}
+	trace_printf_key(&trace_deserialize, "missing/unsupported version");
+	return DESERIALIZE_ERR;
+}
+
+static inline int my_strcmp_null(const char *a, const char *b)
+{
+	const char *alt_a = (a) ? a : "";
+	const char *alt_b = (b) ? b : "";
+
+	return strcmp(alt_a, alt_b);
+}
+
+static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *des_s, int fd)
+{
+	/*
+	 * Check the path spec on the current command
+	 */
+	if (cmd_s->pathspec.nr > 1) {
+		trace_printf_key(&trace_deserialize, "reject: multiple pathspecs");
+		return DESERIALIZE_ERR;
+	}
+
+	/*
+	 * If we have a pathspec, but it matches the root (i.e. no filtering)
+	 * then this is OK.
+	 */
+	if (cmd_s->pathspec.nr == 1 &&
+		my_strcmp_null(cmd_s->pathspec.items[0].match, "")) {
+		trace_printf_key(&trace_deserialize, "reject: pathspec");
+		return DESERIALIZE_ERR;
+	}
+
+	/*
+	 * Copy over some required fields from the current command.
+	 */
+	des_s->repo = cmd_s->repo;
+	des_s->index_file = cmd_s->index_file;
+
+	/*
+	 * Deserialize cached status
+	 */
+	if (wt_deserialize_parse(cmd_s, des_s, fd) == DESERIALIZE_ERR)
+		return DESERIALIZE_ERR;
+
+	/*
+	 * Compare fields in cmd_s with those observed in des_s and
+	 * complain if they are incompatible (such as different "-u"
+	 * or "--ignored" settings).
+	 */
+	if (cmd_s->is_initial != des_s->is_initial) {
+		trace_printf_key(&trace_deserialize, "reject: is_initial");
+		return DESERIALIZE_ERR;
+	}
+	if (my_strcmp_null(cmd_s->branch, des_s->branch)) {
+		trace_printf_key(&trace_deserialize, "reject: branch");
+		return DESERIALIZE_ERR;
+	}
+	if (my_strcmp_null(cmd_s->reference, des_s->reference)) {
+		trace_printf_key(&trace_deserialize, "reject: reference");
+		return DESERIALIZE_ERR;
+	}
+	/* verbose */
+	/* amend */
+	if (cmd_s->whence != des_s->whence) {
+		trace_printf_key(&trace_deserialize, "reject: whence");
+		return DESERIALIZE_ERR;
+	}
+	/* nowarn */
+	/* use_color */
+	/* no_gettext */
+	/* display_comment_prefix */
+	/* relative_paths */
+	/* submodule_summary */
+
+	/* show_ignored_files - already validated */
+	/* show_untracked_files - already validated */
+
+	/*
+	 * Submodules are not supported by status serialization.
+	 * The status will not be serialized if it contains submodules,
+	 * and so this check is not needed.
+	 *
+	 * if (my_strcmp_null(cmd_s->ignore_submodule_arg, des_s->ignore_submodule_arg)) {
+	 *	trace_printf_key(&trace_deserialize, "reject: ignore_submodule_arg");
+	 * 	return DESERIALIZE_ERR;
+	 * }
+	 */
+
+	/* color_palette */
+	/* colopts */
+	/* null_termination */
+	/* commit_template */
+	/* show_branch */
+	/* show_stash */
+	/* hints */
+	if (cmd_s->detect_rename != des_s->detect_rename) {
+		trace_printf_key(&trace_deserialize, "reject: detect_rename");
+		return DESERIALIZE_ERR;
+	}
+	if (cmd_s->rename_score != des_s->rename_score) {
+		trace_printf_key(&trace_deserialize, "reject: rename_score");
+		return DESERIALIZE_ERR;
+	}
+	if (cmd_s->rename_limit != des_s->rename_limit) {
+		trace_printf_key(&trace_deserialize, "reject: rename_limit");
+		return DESERIALIZE_ERR;
+	}
+	/* status_format */
+	if (!oideq(&cmd_s->oid_commit, &des_s->oid_commit)) {
+		trace_printf_key(&trace_deserialize, "reject: sha1_commit");
+		return DESERIALIZE_ERR;
+	}
+
+	/*
+	 * Copy over display-related fields from the current command.
+	 */
+	des_s->verbose = cmd_s->verbose;
+	/* amend */
+	/* whence */
+	des_s->nowarn = cmd_s->nowarn;
+	des_s->use_color = cmd_s->use_color;
+	des_s->no_gettext = cmd_s->no_gettext;
+	des_s->display_comment_prefix = cmd_s->display_comment_prefix;
+	des_s->relative_paths = cmd_s->relative_paths;
+	des_s->submodule_summary = cmd_s->submodule_summary;
+	memcpy(des_s->color_palette, cmd_s->color_palette,
+	       sizeof(char)*WT_STATUS_MAXSLOT*COLOR_MAXLEN);
+	des_s->colopts = cmd_s->colopts;
+	des_s->null_termination = cmd_s->null_termination;
+	/* commit_template */
+	des_s->show_branch = cmd_s->show_branch;
+	des_s->show_stash = cmd_s->show_stash;
+	/* hints */
+	des_s->status_format = cmd_s->status_format;
+	des_s->fp = cmd_s->fp;
+	if (cmd_s->prefix && *cmd_s->prefix)
+		des_s->prefix = xstrdup(cmd_s->prefix);
+
+	return DESERIALIZE_OK;
+}
+
+
+/*
+ * Read raw serialized status data from the given file
+ *
+ * Verify that the args specified in the current command
+ * are compatible with the deserialized data (such as "-uno").
+ *
+ * Copy display-related fields from the current command
+ * into the deserialized data (so that the user can request
+ * long or short as they please).
+ */
+int wt_status_deserialize(const struct wt_status *cmd_s,
+			  const char *path)
+{
+	struct wt_status des_s;
+	int result;
+	struct string_list_item *change;
+
+	if (path && *path && strcmp(path, "0")) {
+		int fd = xopen(path, O_RDONLY);
+		if (fd == -1) {
+			trace_printf_key(&trace_deserialize, "could not read '%s'", path);
+			return DESERIALIZE_ERR;
+		}
+		trace_printf_key(&trace_deserialize, "reading serialization file '%s'", path);
+		result = wt_deserialize_fd(cmd_s, &des_s, fd);
+		close(fd);
+	} else {
+		trace_printf_key(&trace_deserialize, "reading stdin");
+		result = wt_deserialize_fd(cmd_s, &des_s, 0);
+	}
+
+	if (result == DESERIALIZE_OK) {
+		wt_status_get_state(cmd_s->repo, &des_s.state, des_s.branch &&
+				    !strcmp(des_s.branch, "HEAD"));
+		wt_status_print(&des_s);
+	}
+
+	free(des_s.branch);
+	free((char *)des_s.reference);
+	for_each_string_list_item(change, &des_s.change) {
+		struct wt_status_change_data *d = change->util;
+		if (d)
+			free(d->rename_source);
+	}
+	string_list_clear(&des_s.change, 1);
+	string_list_clear(&des_s.ignored, 1);
+	string_list_clear(&des_s.untracked, 1);
+	return result;
+}
diff --git a/wt-status-serialize.c b/wt-status-serialize.c
new file mode 100644
index 00000000000000..a0dcf8e65ed44e
--- /dev/null
+++ b/wt-status-serialize.c
@@ -0,0 +1,219 @@
+#include "git-compat-util.h"
+#include "hex.h"
+#include "repository.h"
+#include "wt-status.h"
+#include "pkt-line.h"
+#include "trace.h"
+#include "read-cache-ll.h"
+
+static struct trace_key trace_serialize = TRACE_KEY_INIT(SERIALIZE);
+
+/*
+ * Write V1 header fields.
+ */
+static void wt_serialize_v1_header(struct wt_status *s, int fd)
+{
+	/*
+	 * Write select fields from the current index to help
+	 * the deserializer recognize a stale data set.
+	 */
+	packet_write_fmt(fd, "index_mtime %d %d\n",
+			 s->repo->index->timestamp.sec,
+			 s->repo->index->timestamp.nsec);
+
+	/*
+	 * Write data from wt_status to qualify this status report.
+	 * That is, if this run specified "-uno", the consumer of
+	 * our serialization should know that.
+	 */
+	packet_write_fmt(fd, "is_initial %d\n", s->is_initial);
+	if (s->branch)
+		packet_write_fmt(fd, "branch %s\n", s->branch);
+	if (s->reference)
+		packet_write_fmt(fd, "reference %s\n", s->reference);
+	/* pathspec */
+	/* verbose */
+	/* amend */
+	packet_write_fmt(fd, "whence %d\n", s->whence);
+	/* nowarn */
+	/* use_color */
+	/* no_gettext */
+	/* display_comment_prefix */
+	/* relative_paths */
+	/* submodule_summary */
+	packet_write_fmt(fd, "show_ignored_mode %d\n", s->show_ignored_mode);
+	packet_write_fmt(fd, "show_untracked_files %d\n", s->show_untracked_files);
+	if (s->ignore_submodule_arg)
+		packet_write_fmt(fd, "ignore_submodule_arg %s\n", s->ignore_submodule_arg);
+	/* color_palette */
+	/* colopts */
+	/* null_termination */
+	/* commit_template */
+	/* show_branch */
+	/* show_stash */
+	packet_write_fmt(fd, "hints %d\n", s->hints);
+	packet_write_fmt(fd, "detect_rename %d\n", s->detect_rename);
+	packet_write_fmt(fd, "rename_score %d\n", s->rename_score);
+	packet_write_fmt(fd, "rename_limit %d\n", s->rename_limit);
+	/* status_format */
+	packet_write_fmt(fd, "sha1_commit %s\n", oid_to_hex(&s->oid_commit));
+	packet_write_fmt(fd, "committable %d\n", s->committable);
+	packet_write_fmt(fd, "workdir_dirty %d\n", s->workdir_dirty);
+	/* prefix */
+	packet_flush(fd);
+}
+
+/*
+ * Print changed/unmerged items.
+ * We write raw (not c-quoted) pathname(s).  The rename_source is only
+ * set when status computed a rename/copy.
+ *
+ * We ALWAYS write a final LF to the packet-line (for debugging)
+ * even though Linux pathnames allow LFs.
+ */
+static inline void wt_serialize_v1_changed(struct wt_status *s UNUSED, int fd,
+					   struct string_list_item *item)
+{
+	struct wt_status_change_data *d = item->util;
+	struct wt_status_serialize_data sd;
+	char *begin;
+	char *end;
+	char *p;
+	int len_path, len_rename_source;
+
+	trace_printf_key(&trace_serialize,
+		"change: %d %d %d %d %d %o %o %o %d %d %s %s '%s' '%s'",
+		d->worktree_status,
+		d->index_status,
+		d->stagemask,
+		d->rename_status,
+		d->rename_score,
+		d->mode_head,
+		d->mode_index,
+		d->mode_worktree,
+		d->dirty_submodule,
+		d->new_submodule_commits,
+		oid_to_hex(&d->oid_head),
+		oid_to_hex(&d->oid_index),
+		item->string,
+		(d->rename_source ? d->rename_source : ""));
+
+	sd.fixed.worktree_status       = htonl(d->worktree_status);
+	sd.fixed.index_status          = htonl(d->index_status);
+	sd.fixed.stagemask             = htonl(d->stagemask);
+	sd.fixed.rename_status         = htonl(d->rename_status);
+	sd.fixed.rename_score          = htonl(d->rename_score);
+	sd.fixed.mode_head             = htonl(d->mode_head);
+	sd.fixed.mode_index            = htonl(d->mode_index);
+	sd.fixed.mode_worktree         = htonl(d->mode_worktree);
+	sd.fixed.dirty_submodule       = htonl(d->dirty_submodule);
+	sd.fixed.new_submodule_commits = htonl(d->new_submodule_commits);
+	oidcpy(&sd.fixed.oid_head,  &d->oid_head);
+	oidcpy(&sd.fixed.oid_index, &d->oid_index);
+
+	begin = (char *)&sd;
+	end = begin + sizeof(sd);
+
+	p = sd.variant;
+
+	/*
+	 * Write <path> NUL [<rename_source>] NUL LF at the end of the buffer.
+	 */
+	len_path = strlen(item->string);
+	len_rename_source = d->rename_source ? strlen(d->rename_source) : 0;
+
+	/*
+	 * This is a bit of a hack, but I don't want to split the
+	 * status detail record across multiple pkt-lines.
+	 */
+	if (p + len_path + 1 + len_rename_source + 1 + 1 >= end)
+		BUG("path to long to serialize '%s'", item->string);
+
+	memcpy(p, item->string, len_path);
+	p += len_path;
+	*p++ = '\0';
+
+	if (len_rename_source) {
+		memcpy(p, d->rename_source, len_rename_source);
+		p += len_rename_source;
+	}
+	*p++ = '\0';
+	*p++ = '\n';
+
+	if (packet_write_gently(fd, begin, (p - begin)))
+		BUG("cannot serialize '%s'", item->string);
+}
+
+/*
+ * Write raw (not c-quoted) pathname for an untracked item.
+ * We ALWAYS write a final LF to the packet-line (for debugging)
+ * even though Linux pathnames allow LFs.  That is, deserialization
+ * should use the packet-line length and omit the final LF.
+ */
+static inline void wt_serialize_v1_untracked(struct wt_status *s UNUSED, int fd,
+					     struct string_list_item *item)
+{
+	packet_write_fmt(fd, "%s\n", item->string);
+}
+
+/*
+ * Write raw (not c-quoted) pathname for an ignored item.
+ * We ALWAYS write a final LF to the packet-line (for debugging)
+ * even though Linux pathnames allow LFs.
+ */
+static inline void wt_serialize_v1_ignored(struct wt_status *s UNUSED, int fd,
+					   struct string_list_item *item)
+{
+	packet_write_fmt(fd, "%s\n", item->string);
+}
+
+/*
+ * Serialize the list of changes to stdout.  The goal of this
+ * is to just serialize the key fields in wt_status so that a
+ * later command can rebuild it and do the printing.
+ *
+ * We DO NOT include the contents of wt_status_state NOR
+ * current branch info.  This info easily gets stale and
+ * is relatively quick for the status consumer to compute
+ * as necessary.
+ */
+void wt_status_serialize_v1(struct wt_status *s)
+{
+	int fd = 1; /* we always write to stdout */
+	struct string_list_item *iter;
+	size_t k;
+
+	/*
+	 * version header must be first line.
+	 */
+	packet_write_fmt(fd, "version 1\n");
+
+	wt_serialize_v1_header(s, fd);
+
+	if (s->change.nr > 0) {
+		packet_write_fmt(fd, "changed %"PRIuMAX"\n", (uintmax_t)s->change.nr);
+		for (k = 0; k < s->change.nr; k++) {
+			iter = &(s->change.items[k]);
+			wt_serialize_v1_changed(s, fd, iter);
+		}
+		packet_flush(fd);
+	}
+
+	if (s->untracked.nr > 0) {
+		packet_write_fmt(fd, "untracked %"PRIuMAX"\n", (uintmax_t)s->untracked.nr);
+		for (k = 0; k < s->untracked.nr; k++) {
+			iter = &(s->untracked.items[k]);
+			wt_serialize_v1_untracked(s, fd, iter);
+		}
+		packet_flush(fd);
+	}
+
+	if (s->ignored.nr > 0) {
+		packet_write_fmt(fd, "ignored %"PRIuMAX"\n", (uintmax_t)s->ignored.nr);
+		for (k = 0; k < s->ignored.nr; k++) {
+			iter = &(s->ignored.items[k]);
+			wt_serialize_v1_ignored(s, fd, iter);
+		}
+		packet_flush(fd);
+	}
+}
diff --git a/wt-status.c b/wt-status.c
index 9eef96e85bf7a3..4da08956cbaf9d 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -792,6 +792,9 @@ static void wt_status_collect_untracked(struct wt_status *s)
 	if (s->show_untracked_files != SHOW_ALL_UNTRACKED_FILES)
 		dir.flags |=
 			DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES;
+	if (s->show_untracked_files == SHOW_COMPLETE_UNTRACKED_FILES)
+		dir.flags |= DIR_KEEP_UNTRACKED_CONTENTS;
+
 	if (s->show_ignored_mode) {
 		dir.flags |= DIR_SHOW_IGNORED_TOO;
 
@@ -2624,6 +2627,9 @@ void wt_status_print(struct wt_status *s)
 	case STATUS_FORMAT_LONG:
 		wt_longstatus_print(s);
 		break;
+	case STATUS_FORMAT_SERIALIZE_V1:
+		wt_status_serialize_v1(s);
+		break;
 	}
 
 	trace2_region_leave("status", "print", s->repo);
diff --git a/wt-status.h b/wt-status.h
index 4e377ce62b8b28..afeaf5753a0494 100644
--- a/wt-status.h
+++ b/wt-status.h
@@ -4,6 +4,7 @@
 #include "string-list.h"
 #include "color.h"
 #include "pathspec.h"
+#include "pkt-line.h"
 #include "remote.h"
 
 struct repository;
@@ -26,7 +27,8 @@ enum untracked_status_type {
 	SHOW_UNTRACKED_FILES_ERROR = -1,
 	SHOW_NO_UNTRACKED_FILES = 0,
 	SHOW_NORMAL_UNTRACKED_FILES,
-	SHOW_ALL_UNTRACKED_FILES
+	SHOW_ALL_UNTRACKED_FILES,
+	SHOW_COMPLETE_UNTRACKED_FILES,
 };
 
 enum show_ignored_type {
@@ -74,6 +76,7 @@ enum wt_status_format {
 	STATUS_FORMAT_SHORT,
 	STATUS_FORMAT_PORCELAIN,
 	STATUS_FORMAT_PORCELAIN_V2,
+	STATUS_FORMAT_SERIALIZE_V1,
 
 	STATUS_FORMAT_UNSPECIFIED
 };
@@ -185,4 +188,52 @@ int require_clean_work_tree(struct repository *repo,
 			    int ignore_submodules,
 			    int gently);
 
+#define DESERIALIZE_OK  0
+#define DESERIALIZE_ERR 1
+
+struct wt_status_serialize_data_fixed
+{
+	uint32_t worktree_status;
+	uint32_t index_status;
+	uint32_t stagemask;
+	uint32_t rename_status;
+	uint32_t rename_score;
+	uint32_t mode_head;
+	uint32_t mode_index;
+	uint32_t mode_worktree;
+	uint32_t dirty_submodule;
+	uint32_t new_submodule_commits;
+	struct object_id oid_head;
+	struct object_id oid_index;
+};
+
+/*
+ * Consume the maximum amount of data possible in a
+ * packet-line record.  This is overkill because we
+ * have at most 2 relative pathnames, but it means we
+ * don't need to allocate a variable-length structure.
+ */
+struct wt_status_serialize_data
+{
+	struct wt_status_serialize_data_fixed fixed;
+	char variant[LARGE_PACKET_DATA_MAX
+		     - sizeof(struct wt_status_serialize_data_fixed)];
+};
+
+/*
+ * Serialize computed status scan results using "version 1" format
+ * to the given file.
+ */
+void wt_status_serialize_v1(struct wt_status *s);
+
+/*
+ * Deserialize existing status results from the given file and
+ * populate a (new) "struct wt_status".  Use the contents of "cmd_s"
+ * (computed from the command line arguments) to verify that the
+ * cached data is compatible and overlay various display-related
+ * fields.
+ */
+int wt_status_deserialize(const struct wt_status *cmd_s,
+			  const char *path);
+
 #endif /* STATUS_H */

From 0221115cdc4b2a98d84f0729721fc9794d779c08 Mon Sep 17 00:00:00 2001
From: Jameson Miller <jamill@microsoft.com>
Date: Wed, 10 Jan 2018 11:56:26 -0500
Subject: [PATCH 058/207] Teach ahead-behind and serialized status to play
 nicely together
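
For illustration, the interaction this enables looks roughly like the
following (the cache file name is made up; the options come from this
series and from the upstream ahead-behind support):

    # capture a status snapshot for a branch that tracks an upstream
    git status --serialize >status-cache.dat

    # replay it later; the ahead-behind setting of the *current*
    # invocation is applied when printing the cached branch data
    git -c status.aheadBehind=false status --porcelain=v2 --branch \
        --ahead-behind --deserialize=status-cache.dat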

---
 t/t7522-serialized-status.sh | 34 +++++++++++++++++++++++++++++++++-
 wt-status-deserialize.c      |  3 ++-
 wt-status-serialize.c        |  1 +
 3 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/t/t7522-serialized-status.sh b/t/t7522-serialized-status.sh
index 283a98bdf750e6..0f5a33e2a23442 100755
--- a/t/t7522-serialized-status.sh
+++ b/t/t7522-serialized-status.sh
@@ -47,7 +47,13 @@ test_expect_success 'setup' '
 	git commit -m"Adding original file." &&
 	mkdir untracked &&
 	touch ignored.ign ignored_dir/ignored_2.txt \
-	      untracked_1.txt untracked/untracked_2.txt untracked/untracked_3.txt
+	      untracked_1.txt untracked/untracked_2.txt untracked/untracked_3.txt &&
+
+	test_oid_cache <<-EOF
+	branch_oid sha1:68d4a437ea4c2de65800f48c053d4d543b55c410
+
+	branch_oid sha256:6b95e4b1ea911dad213f2020840f5e92d3066cf9e38cf35f79412ec58d409ce4
+	EOF
 '
 
 test_expect_success 'verify untracked-files=complete with no conversion' '
@@ -138,4 +144,30 @@ test_expect_success 'verify serialized status handles path scopes' '
 	test_cmp expect output
 '
 
+test_expect_success 'verify no-ahead-behind and serialized status integration' '
+	test_when_finished "rm serialized_status.dat new_change.txt output" &&
+	cat >expect <<-EOF &&
+	# branch.oid $(test_oid branch_oid)
+	# branch.head alt_branch
+	# branch.upstream main
+	# branch.ab +1 -0
+	? expect
+	? serialized_status.dat
+	? untracked/
+	? untracked_1.txt
+	EOF
+
+	git checkout -b alt_branch main --track >/dev/null &&
+	touch alt_branch_changes.txt &&
+	git add alt_branch_changes.txt &&
+	test_tick &&
+	git commit -m"New commit on alt branch"  &&
+
+	git status --untracked-files=complete --ignored=matching --serialize >serialized_status.dat &&
+	touch new_change.txt &&
+
+	git -c status.aheadBehind=false status --porcelain=v2 --branch --ahead-behind --deserialize=serialized_status.dat >output &&
+	test_cmp expect output
+'
+
 test_done
diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c
index 90174bb3d1fc51..52e3958bcf2eba 100644
--- a/wt-status-deserialize.c
+++ b/wt-status-deserialize.c
@@ -524,6 +524,7 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de
 	/* show_branch */
 	/* show_stash */
 	/* hints */
+	/* ahead_behind_flags */
 	if (cmd_s->detect_rename != des_s->detect_rename) {
 		trace_printf_key(&trace_deserialize, "reject: detect_rename");
 		return DESERIALIZE_ERR;
@@ -562,6 +563,7 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de
 	des_s->show_branch = cmd_s->show_branch;
 	des_s->show_stash = cmd_s->show_stash;
 	/* hints */
+	des_s->ahead_behind_flags = cmd_s->ahead_behind_flags;
 	des_s->status_format = cmd_s->status_format;
 	des_s->fp = cmd_s->fp;
 	if (cmd_s->prefix && *cmd_s->prefix)
@@ -570,7 +572,6 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de
 	return DESERIALIZE_OK;
 }
 
-
 /*
  * Read raw serialized status data from the given file
  *
diff --git a/wt-status-serialize.c b/wt-status-serialize.c
index a0dcf8e65ed44e..b7102ce80baf50 100644
--- a/wt-status-serialize.c
+++ b/wt-status-serialize.c
@@ -52,6 +52,7 @@ static void wt_serialize_v1_header(struct wt_status *s, int fd)
 	/* show_branch */
 	/* show_stash */
 	packet_write_fmt(fd, "hints %d\n", s->hints);
+	/* ahead_behind_flags */
 	packet_write_fmt(fd, "detect_rename %d\n", s->detect_rename);
 	packet_write_fmt(fd, "rename_score %d\n", s->rename_score);
 	packet_write_fmt(fd, "rename_limit %d\n", s->rename_limit);

From 760d51327e3ac5b45109a3da5bd3b8c65f71a9d6 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Fri, 2 Feb 2018 14:17:05 -0500
Subject: [PATCH 059/207] status: serialize to path

Teach status serialization to take an optional pathname on
the command line to direct that cache data be written there
rather than to stdout.  When used this way, normal status
results will still be written to stdout.

When no path is given, only binary serialization data is
written to stdout.

Usage:
    git status --serialize[=<path>]
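
For example (the cache file name is illustrative):

    # no path: binary serialization data goes to stdout and the
    # normal status output is suppressed
    git status --serialize >status-cache.dat

    # with a path: the cache is written to the file *and* normal
    # status output (here porcelain V2) still goes to stdout
    git status --porcelain=v2 --serialize=status-cache.dat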

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 Documentation/git-status.txt | 10 ++++++----
 builtin/commit.c             | 36 +++++++++++++++++++++++++++---------
 t/t7522-serialized-status.sh | 35 +++++++++++++++++++++++++++++++++++
 wt-status-serialize.c        |  5 ++---
 wt-status.c                  |  2 +-
 wt-status.h                  |  2 +-
 6 files changed, 72 insertions(+), 18 deletions(-)

diff --git a/Documentation/git-status.txt b/Documentation/git-status.txt
index fedf86d32718eb..62254617dd7fc1 100644
--- a/Documentation/git-status.txt
+++ b/Documentation/git-status.txt
@@ -151,10 +151,12 @@ ignored, then the directory is not shown, but all contents are shown.
 	threshold.
 	See also linkgit:git-diff[1] `--find-renames`.
 
---serialize[=<version>]::
-	(EXPERIMENTAL) Serialize raw status results to stdout in a
-	format suitable for use by `--deserialize`.  Valid values for
-	`<version>` are "1" and "v1".
+--serialize[=<path>]::
+	(EXPERIMENTAL) Serialize raw status results to a file or stdout
+	in a format suitable for use by `--deserialize`.  If a path is
+	given, serialization data will be written to that path *and* normal
+	status output will be written to stdout.  If the path is omitted,
+	only binary serialization data will be written to stdout.
 
 --deserialize[=<path>]::
 	(EXPERIMENTAL) Deserialize raw status results from a file or
diff --git a/builtin/commit.c b/builtin/commit.c
index a031af40e80e1c..6644a882f53cb2 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -165,26 +165,34 @@ static int opt_parse_porcelain(const struct option *opt, const char *arg, int un
 }
 
 static int do_serialize = 0;
+static char *serialize_path = NULL;
+
 static int do_implicit_deserialize = 0;
 static int do_explicit_deserialize = 0;
 static char *deserialize_path = NULL;
 
 /*
- * --serialize | --serialize=1 | --serialize=v1
+ * --serialize | --serialize=<path>
+ *
+ * Request that we serialize status output rather than, or in addition
+ * to, printing in any of the established formats.
+ *
+ * Without a path, we write binary serialization data to stdout (and omit
+ * the normal status output).
  *
- * Request that we serialize our output rather than printing in
- * any of the established formats.  Optionally specify serialization
- * version.
+ * With a path, we write binary serialization data to the <path> and then
+ * write normal status output.
  */
 static int opt_parse_serialize(const struct option *opt, const char *arg, int unset)
 {
 	enum wt_status_format *value = (enum wt_status_format *)opt->value;
 	if (unset || !arg)
 		*value = STATUS_FORMAT_SERIALIZE_V1;
-	else if (!strcmp(arg, "v1") || !strcmp(arg, "1"))
-		*value = STATUS_FORMAT_SERIALIZE_V1;
-	else
-		die("unsupported serialize version '%s'", arg);
+
+	if (arg) {
+		free(serialize_path);
+		serialize_path = xstrdup(arg);
+	}
 
 	if (do_explicit_deserialize)
 		die("cannot mix --serialize and --deserialize");
@@ -1616,7 +1624,7 @@ struct repository *repo UNUSED)
 		  N_("version"), N_("machine-readable output"),
 		  PARSE_OPT_OPTARG, opt_parse_porcelain),
 		{ OPTION_CALLBACK, 0, "serialize", &status_format,
-		  N_("version"), N_("serialize raw status data to stdout"),
+		  N_("path"), N_("serialize raw status data to path or stdout"),
 		  PARSE_OPT_OPTARG | PARSE_OPT_NONEG, opt_parse_serialize },
 		{ OPTION_CALLBACK, 0, "deserialize", NULL,
 		  N_("path"), N_("deserialize raw status data from file"),
@@ -1742,6 +1750,16 @@ struct repository *repo UNUSED)
 	if (s.relative_paths)
 		s.prefix = prefix;
 
+	if (serialize_path) {
+		int fd_serialize = xopen(serialize_path,
+					 O_WRONLY | O_CREAT | O_TRUNC, 0666);
+		if (fd_serialize < 0)
+			die_errno(_("could not serialize to '%s'"),
+				  serialize_path);
+		wt_status_serialize_v1(fd_serialize, &s);
+		close(fd_serialize);
+	}
+
 	wt_status_print(&s);
 	wt_status_collect_free_buffers(&s);
 
diff --git a/t/t7522-serialized-status.sh b/t/t7522-serialized-status.sh
index 0f5a33e2a23442..2a81b4e625ee59 100755
--- a/t/t7522-serialized-status.sh
+++ b/t/t7522-serialized-status.sh
@@ -170,4 +170,39 @@ test_expect_success 'verify no-ahead-behind and serialized status integration' '
 	test_cmp expect output
 '
 
+test_expect_success 'verify new --serialize=path mode' '
+	#test_when_finished "rm serialized_status.dat expect new_change.txt output.1 output.2" &&
+	cat >expect <<-\EOF &&
+	? expect
+	? output.1
+	? untracked/
+	? untracked_1.txt
+	EOF
+
+	git checkout -b serialize_path_branch main --track >/dev/null &&
+	touch alt_branch_changes.txt &&
+	git add alt_branch_changes.txt &&
+	test_tick &&
+	git commit -m"New commit on serialize_path_branch"  &&
+
+	git status --porcelain=v2 --serialize=serialized_status.dat >output.1 &&
+	touch new_change.txt &&
+
+	git status --porcelain=v2 --deserialize=serialized_status.dat >output.2 &&
+	test_cmp expect output.1 &&
+	test_cmp expect output.2
+'
+
+test_expect_success 'renames' '
+	git init -b main rename_test &&
+	echo OLDNAME >rename_test/OLDNAME &&
+	git -C rename_test add OLDNAME &&
+	git -C rename_test commit -m OLDNAME &&
+	git -C rename_test mv OLDNAME NEWNAME &&
+	git -C rename_test status --serialize=renamed.dat >output.1 &&
+	echo DIRT >rename_test/DIRT &&
+	git -C rename_test status --deserialize=renamed.dat >output.2 &&
+	test_cmp output.1 output.2
+'
+
 test_done
diff --git a/wt-status-serialize.c b/wt-status-serialize.c
index b7102ce80baf50..179e76f8081fdf 100644
--- a/wt-status-serialize.c
+++ b/wt-status-serialize.c
@@ -169,7 +169,7 @@ static inline void wt_serialize_v1_ignored(struct wt_status *s UNUSED, int fd,
 }
 
 /*
- * Serialize the list of changes to stdout.  The goal of this
+ * Serialize the list of changes to the given file.  The goal of this
  * is to just serialize the key fields in wt_status so that a
  * later command can rebuild it and do the printing.
  *
@@ -178,9 +178,8 @@ static inline void wt_serialize_v1_ignored(struct wt_status *s UNUSED, int fd,
  * is relatively quick for the status consumer to compute
  * as necessary.
  */
-void wt_status_serialize_v1(struct wt_status *s)
+void wt_status_serialize_v1(int fd, struct wt_status *s)
 {
-	int fd = 1; /* we always write to stdout */
 	struct string_list_item *iter;
 	size_t k;
 
diff --git a/wt-status.c b/wt-status.c
index 4da08956cbaf9d..8959216fc6d58f 100644
--- a/wt-status.c
+++ b/wt-status.c
@@ -2628,7 +2628,7 @@ void wt_status_print(struct wt_status *s)
 		wt_longstatus_print(s);
 		break;
 	case STATUS_FORMAT_SERIALIZE_V1:
-		wt_status_serialize_v1(s);
+		wt_status_serialize_v1(1, s);
 		break;
 	}
 
diff --git a/wt-status.h b/wt-status.h
index afeaf5753a0494..314185cc1ae8f3 100644
--- a/wt-status.h
+++ b/wt-status.h
@@ -224,7 +224,7 @@ struct wt_status_serialize_data
  * Serialize computed status scan results using "version 1" format
  * to the given file.
  */
-void wt_status_serialize_v1(struct wt_status *s);
+void wt_status_serialize_v1(int fd, struct wt_status *s);
 
 /*
  * Deserialize existing status results from the given file and

From 2acf45f3f8aa00b344a104d1627ee823cedc93c2 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Wed, 7 Feb 2018 10:59:03 -0500
Subject: [PATCH 060/207] status: reject deserialize in V2 and conflicts

Teach the status deserialize code to reject the status cache when
printing in porcelain V2 and there are unresolved conflicts in the
cache file.  A follow-on task might extend the cache format to
include this additional data.

See code for longer explanation.
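
An illustrative sequence, assuming a repository with an unresolved
merge conflict (as in the accompanying test):

    git status --serialize >status-cache.dat

    # long/short formats can still be answered from the cache
    git status --short --deserialize=status-cache.dat

    # porcelain V2 rejects the cache and falls back to a fresh scan,
    # because the cache lacks the per-stage mode/OID details
    git status --porcelain=v2 --deserialize=status-cache.dat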

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 t/t7522-serialized-status.sh | 90 +++++++++++++++++++++++++++++++++++-
 wt-status-deserialize.c      | 28 ++++++++++-
 2 files changed, 115 insertions(+), 3 deletions(-)

diff --git a/t/t7522-serialized-status.sh b/t/t7522-serialized-status.sh
index 2a81b4e625ee59..361afca94835e0 100755
--- a/t/t7522-serialized-status.sh
+++ b/t/t7522-serialized-status.sh
@@ -51,8 +51,14 @@ test_expect_success 'setup' '
 
 	test_oid_cache <<-EOF
 	branch_oid sha1:68d4a437ea4c2de65800f48c053d4d543b55c410
+	x_base sha1:587be6b4c3f93f93c489c0111bba5596147a26cb
+	x_ours sha1:b68025345d5301abad4d9ec9166f455243a0d746
+	x_theirs sha1:975fbec8256d3e8a3797e7a3611380f27c49f4ac
 
 	branch_oid sha256:6b95e4b1ea911dad213f2020840f5e92d3066cf9e38cf35f79412ec58d409ce4
+	x_base sha256:14f5162e2fe3d240d0d37aaab0f90e4af9a7cfa79639f3bab005b5bfb4174d9f
+	x_ours sha256:3a404ba030a4afa912155c476a48a253d4b3a43d0098431b6d6ca6e554bd78fb
+	x_theirs sha256:44dc634218adec09e34f37839b3840bad8c6103693e9216626b32d00e093fa35
 	EOF
 '
 
@@ -171,7 +177,7 @@ test_expect_success 'verify no-ahead-behind and serialized status integration' '
 '
 
 test_expect_success 'verify new --serialize=path mode' '
-	#test_when_finished "rm serialized_status.dat expect new_change.txt output.1 output.2" &&
+	test_when_finished "rm serialized_status.dat expect new_change.txt output.1 output.2" &&
 	cat >expect <<-\EOF &&
 	? expect
 	? output.1
@@ -193,6 +199,88 @@ test_expect_success 'verify new --serialize=path mode' '
 	test_cmp expect output.2
 '
 
+test_expect_success 'merge conflicts' '
+
+	# create a merge conflict.
+
+	git init -b main conflicts &&
+	echo x >conflicts/x.txt &&
+	git -C conflicts add x.txt &&
+	git -C conflicts commit -m x &&
+	git -C conflicts branch a &&
+	git -C conflicts branch b &&
+	git -C conflicts checkout a &&
+	echo y >conflicts/x.txt &&
+	git -C conflicts add x.txt &&
+	git -C conflicts commit -m a &&
+	git -C conflicts checkout b &&
+	echo z >conflicts/x.txt &&
+	git -C conflicts add x.txt &&
+	git -C conflicts commit -m b &&
+	test_must_fail git -C conflicts merge --no-commit a &&
+
+	# verify that regular status correctly identifies it
+	# in each format.
+
+	cat >expect.v2 <<EOF &&
+u UU N... 100644 100644 100644 100644 $(test_oid x_base) $(test_oid x_ours) $(test_oid x_theirs) x.txt
+EOF
+	git -C conflicts status --porcelain=v2 >observed.v2 &&
+	test_cmp expect.v2 observed.v2 &&
+
+	cat >expect.long <<EOF &&
+On branch b
+You have unmerged paths.
+  (fix conflicts and run "git commit")
+  (use "git merge --abort" to abort the merge)
+
+Unmerged paths:
+  (use "git add <file>..." to mark resolution)
+	both modified:   x.txt
+
+no changes added to commit (use "git add" and/or "git commit -a")
+EOF
+	git -C conflicts status --long >observed.long &&
+	test_cmp expect.long observed.long &&
+
+	cat >expect.short <<EOF &&
+UU x.txt
+EOF
+	git -C conflicts status --short >observed.short &&
+	test_cmp expect.short observed.short &&
+
+	# save status data in serialized cache.
+
+	git -C conflicts status --serialize >serialized &&
+
+	# make some dirt in the worktree so we can tell whether subsequent
+	# status commands used the cached data or did a fresh status.
+
+	echo dirt >conflicts/dirt.txt &&
+
+	# run status using the cached data.
+
+	git -C conflicts status --long --deserialize=../serialized >observed.long &&
+	test_cmp expect.long observed.long &&
+
+	git -C conflicts status --short --deserialize=../serialized >observed.short &&
+	test_cmp expect.short observed.short &&
+
+	# currently, the cached data does not have enough information about
+	# merge conflicts for porcelain V2 format.  (And V2 format looks at
+	# the index to get that data, but the whole point of the serialization
+	# is to avoid reading the index unnecessarily.)  So V2 always rejects
+	# the cached data when there is an unresolved conflict.
+
+	cat >expect.v2.dirty <<EOF &&
+u UU N... 100644 100644 100644 100644 $(test_oid x_base) $(test_oid x_ours) $(test_oid x_theirs) x.txt
+? dirt.txt
+EOF
+	git -C conflicts status --porcelain=v2 --deserialize=../serialized >observed.v2 &&
+	test_cmp expect.v2.dirty observed.v2
+
+'
+
 test_expect_success 'renames' '
 	git init -b main rename_test &&
 	echo OLDNAME >rename_test/OLDNAME &&
diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c
index 52e3958bcf2eba..293b9a71ecb99d 100644
--- a/wt-status-deserialize.c
+++ b/wt-status-deserialize.c
@@ -181,7 +181,8 @@ static int wt_deserialize_v1_header(struct wt_status *s, int fd)
 /*
  * Build a string-list of (count) <changed-item> lines from the input.
  */
-static int wt_deserialize_v1_changed_items(struct wt_status *s, int fd, int count UNUSED)
+static int wt_deserialize_v1_changed_items(const struct wt_status *cmd_s,
+					   struct wt_status *s, int fd, int count UNUSED)
 {
 	struct wt_status_serialize_data *sd;
 	char *p;
@@ -239,6 +240,29 @@ static int wt_deserialize_v1_changed_items(struct wt_status *s, int fd, int coun
 			oid_to_hex(&d->oid_index),
 			item->string,
 			(d->rename_source ? d->rename_source : ""));
+
+		if (d->stagemask &&
+		    cmd_s->status_format == STATUS_FORMAT_PORCELAIN_V2) {
+			/*
+			 * We have an unresolved conflict and the user wants
+			 * to see porcelain V2 output.  The cached status data
+			 * does not contain enough information for V2 (because
+			 * the main status computation does not capture it).
+			 * We only get a single change record for the file with
+			 * a single SHA -- we don't get the stage [123] mode
+			 * and SHA data.  The V2 detail-line print code looks
+			 * up this information directly from the index.  The
+			 * whole point of this serialization cache is to avoid
+			 * reading the index, so the V2 print code gets zeros.
+			 * So we reject the status cache and let the fallback
+			 * code run.
+			 */
+			trace_printf_key(
+				&trace_deserialize,
+				"reject: V2 format and umerged file: %s",
+				item->string);
+			return DESERIALIZE_ERR;
+		}
 	}
 
 	return DESERIALIZE_OK;
@@ -391,7 +415,7 @@ static int wt_deserialize_v1(const struct wt_status *cmd_s, struct wt_status *s,
 	while ((line = my_packet_read_line(fd, &line_len))) {
 		if (skip_prefix(line, "changed ", &arg)) {
 			nr_changed = (int)strtol(arg, NULL, 10);
-			if (wt_deserialize_v1_changed_items(s, fd, nr_changed)
+			if (wt_deserialize_v1_changed_items(cmd_s, s, fd, nr_changed)
 			    == DESERIALIZE_ERR)
 				return DESERIALIZE_ERR;
 			continue;

From 42e8e53670748df5bba88a72a573e85d6b3db6f7 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Fri, 20 Jul 2018 12:08:50 -0400
Subject: [PATCH 061/207] serialize-status: serialize global and repo-local
 exclude file metadata

Changes to the global or repo-local excludes files can change the
results returned by "git status" for untracked files.  Therefore,
it is important that the exclude-file values used during serialization
are still current at the time of deserialization.

Teach "git status --serialize" to report metadata on the user's global
exclude file (which defaults to "$XDG_CONFIG_HOME/git/ignore") and for the
repo-local excludes file (which is in ".git/info/exclude").  Serialize
will record the pathnames and mtimes for these files in the serialization
header (next to the mtime data for the .git/index file).

Teach "git status --deserialize" to validate this new metadata.  If either
exclude file has changed since the serialization-cache-file was written,
then deserialize will reject the cache file and force a full/normal status
run.
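
For example (the cache file name and ignore pattern are illustrative):

    git status --serialize=status-cache.dat >/dev/null

    # any change to the user-global or repo-local excludes file...
    echo "*.tmp" >>.git/info/exclude

    # ...causes deserialize to reject the cache and recompute status
    GIT_TRACE_DESERIALIZE=1 git status --deserialize=status-cache.dat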

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 wt-status-deserialize.c |  85 ++++++++++++++++++++++++++++
 wt-status-serialize.c   | 122 ++++++++++++++++++++++++++++++++++++++++
 wt-status.h             |   8 +++
 3 files changed, 215 insertions(+)

diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c
index 293b9a71ecb99d..d9716a8531cb6a 100644
--- a/wt-status-deserialize.c
+++ b/wt-status-deserialize.c
@@ -8,6 +8,7 @@
 #include "trace.h"
 #include "statinfo.h"
 #include "hex.h"
+#include "path.h"
 
 static struct trace_key trace_deserialize = TRACE_KEY_INIT(DESERIALIZE);
 
@@ -70,12 +71,69 @@ static int my_validate_index(const char *path, const struct cache_time *mtime_re
 	return DESERIALIZE_OK;
 }
 
+/*
+ * Use the given key and exclude pathname to compute a serialization header
+ * reflecting the current contents on disk.  See if that matches the value
+ * computed for this key when the cache was written.  Reject the cache if
+ * anything has changed.
+ */
+static int my_validate_excludes(const char *path, const char *key, const char *line)
+{
+	struct strbuf sb = STRBUF_INIT;
+	int r;
+
+	wt_serialize_compute_exclude_header(&sb, key, path);
+
+	r = (strcmp(line, sb.buf) ? DESERIALIZE_ERR : DESERIALIZE_OK);
+
+	if (r == DESERIALIZE_ERR)
+		trace_printf_key(&trace_deserialize,
+				 "%s changed [cached '%s'][observed '%s']",
+				 key, line, sb.buf);
+
+	strbuf_release(&sb);
+	return r;
+}
+
+static int my_parse_core_excludes(const char *line)
+{
+	/*
+	 * In dir.c:setup_standard_excludes() they use either the value of
+	 * the "core.excludefile" variable (stored in the global "excludes_file"
+	 * variable) -or- the default value "$XDG_HOME/git/ignore".  This is done
+	 * during wt_status_collect_untracked() which we are hoping to not call.
+	 *
+	 * Fake the setup here.
+	 */
+
+	if (excludes_file) {
+		return my_validate_excludes(excludes_file, "core_excludes", line);
+	} else {
+		char *path = xdg_config_home("ignore");
+		int r = my_validate_excludes(path, "core_excludes", line);
+		free(path);
+		return r;
+	}
+}
+
+static int my_parse_repo_excludes(const char *line)
+{
+	char *path = git_pathdup("info/exclude");
+	int r = my_validate_excludes(path, "repo_excludes", line);
+	free(path);
+
+	return r;
+}
+
 static int wt_deserialize_v1_header(struct wt_status *s, int fd)
 {
 	struct cache_time index_mtime;
 	int line_len, nr_fields;
 	const char *line;
 	const char *arg;
+	int have_required_index_mtime = 0;
+	int have_required_core_excludes = 0;
+	int have_required_repo_excludes = 0;
 
 	/*
 	 * parse header lines up to the first flush packet.
@@ -91,6 +149,20 @@ static int wt_deserialize_v1_header(struct wt_status *s, int fd)
 					     nr_fields, line);
 				return DESERIALIZE_ERR;
 			}
+			have_required_index_mtime = 1;
+			continue;
+		}
+
+		if (skip_prefix(line, "core_excludes ", &arg)) {
+			if (my_parse_core_excludes(line) != DESERIALIZE_OK)
+				return DESERIALIZE_ERR;
+			have_required_core_excludes = 1;
+			continue;
+		}
+		if (skip_prefix(line, "repo_excludes ", &arg)) {
+			if (my_parse_repo_excludes(line) != DESERIALIZE_OK)
+				return DESERIALIZE_ERR;
+			have_required_repo_excludes = 1;
 			continue;
 		}
 
@@ -175,6 +247,19 @@ static int wt_deserialize_v1_header(struct wt_status *s, int fd)
 		return DESERIALIZE_ERR;
 	}
 
+	if (!have_required_index_mtime) {
+		trace_printf_key(&trace_deserialize, "missing '%s'", "index_mtime");
+		return DESERIALIZE_ERR;
+	}
+	if (!have_required_core_excludes) {
+		trace_printf_key(&trace_deserialize, "missing '%s'", "core_excludes");
+		return DESERIALIZE_ERR;
+	}
+	if (!have_required_repo_excludes) {
+		trace_printf_key(&trace_deserialize, "missing '%s'", "repo_excludes");
+		return DESERIALIZE_ERR;
+	}
+
 	return my_validate_index(s->index_file, &index_mtime);
 }
 
diff --git a/wt-status-serialize.c b/wt-status-serialize.c
index 179e76f8081fdf..6ab43b8127d917 100644
--- a/wt-status-serialize.c
+++ b/wt-status-serialize.c
@@ -1,13 +1,133 @@
+#define USE_THE_REPOSITORY_VARIABLE
+
 #include "git-compat-util.h"
+#include "environment.h"
 #include "hex.h"
 #include "repository.h"
 #include "wt-status.h"
 #include "pkt-line.h"
 #include "trace.h"
 #include "read-cache-ll.h"
+#include "path.h"
 
 static struct trace_key trace_serialize = TRACE_KEY_INIT(SERIALIZE);
 
+/*
+ * Compute header record for exclude file using format:
+ *      <key> SP <status_char> SP <variant> LF
+ */
+void wt_serialize_compute_exclude_header(struct strbuf *sb,
+					 const char *key,
+					 const char *path)
+{
+	struct stat st;
+	struct stat_data sd;
+
+	memset(&sd, 0, sizeof(sd));
+
+	strbuf_setlen(sb, 0);
+
+	if (!path || !*path) {
+		strbuf_addf(sb, "%s U (unset)", key);
+	} else if (lstat(path, &st) == -1) {
+		if (is_missing_file_error(errno))
+			strbuf_addf(sb, "%s E (not-found) %s", key, path);
+		else
+			strbuf_addf(sb, "%s E (other) %s", key, path);
+	} else {
+		fill_stat_data(&sd, &st);
+		strbuf_addf(sb, "%s F %d %d %s",
+			    key, sd.sd_mtime.sec, sd.sd_mtime.nsec, path);
+	}
+}
+
+static void append_exclude_info(int fd, const char *path, const char *key)
+{
+	struct strbuf sb = STRBUF_INIT;
+
+	wt_serialize_compute_exclude_header(&sb, key, path);
+
+	packet_write_fmt(fd, "%s\n", sb.buf);
+
+	strbuf_release(&sb);
+}
+
+static void append_core_excludes_file_info(int fd)
+{
+	/*
+	 * Write pathname and mtime of the core/global excludes file to
+	 * the status cache header.  Since a change in the global excludes
+	 * may change the results reported by status, the deserialize
+	 * code should be able to reject the status cache if the excludes
+	 * file has changed since the cache was written.
+	 *
+	 * The "core.excludefile" setting defaults to $XDG_HOME/git/ignore
+	 * and uses a global variable which should have been set during
+	 * wt_status_collect_untracked().
+	 *
+	 * See dir.c:setup_standard_excludes()
+	 */
+	append_exclude_info(fd, excludes_file, "core_excludes");
+}
+
+static void append_repo_excludes_file_info(int fd)
+{
+	/*
+	 * Likewise, there is a per-repo excludes file in .git/info/exclude
+	 * that can change the results reported by status.  And the deserialize
+	 * code needs to be able to reject the status cache if this file
+	 * changes.
+	 *
+	 * See dir.c:setup_standard_excludes() and git_path_info_excludes().
+	 * We replicate the pathname construction here because of the static
+	 * variables/functions used in dir.c.
+	 */
+	char *path = git_pathdup("info/exclude");
+
+	append_exclude_info(fd, path, "repo_excludes");
+
+	free(path);
+}
+
+/*
+ * WARNING: The status cache attempts to preserve the essential in-memory
+ * status data after a status scan into a "serialization" (aka "status cache")
+ * file.  It allows later "git status --deserialize=<foo>" instances to
+ * just print the cached status results without scanning the workdir (and
+ * without reading the index).
+ *
+ * The status cache file is valid as long as:
+ * [1] the set of functional command line options is the same (think "-u").
+ * [2] repo-local and user-global configuration settings are compatible.
+ * [3] nothing in the workdir has changed.
+ *
+ * We rely on:
+ * [1.a] We remember the relevant (functional, non-display) command line
+ *       arguments in the status cache header.
+ * [2.a] We use the mtime of the .git/index to detect staging changes.
+ * [2.b] We use the mtimes of the excludes files to detect changes that
+ *      might affect untracked file reporting.
+ *
+ * But we need external help to verify [3].
+ * [] This includes changes to tracked files.
+ * [] This includes changes to tracked .gitignore files that might change
+ *    untracked file reporting.
+ * [] This includes the creation of new, untracked per-directory .gitignore
+ *    files that might change untracked file reporting.
+ *
+ * [3.a] On GVFS repos, we rely on the GVFS service (mount) daemon to
+ *      watch the filesystem and invalidate (delete) the status cache
+ *      when anything changes inside the workdir.
+ *
+ * [3.b] TODO This problem is not solved for non-GVFS repos.
+ *       [] It is possible that the untracked-cache index extension
+ *          could help with this but that requires status to read the
+ *          index to load the extension.
+ *       [] It is possible that the new fsmonitor facility could also
+ *          provide this information, but that too requires reading the
+ *          index.
+ */
+
 /*
  * Write V1 header fields.
  */
@@ -20,6 +140,8 @@ static void wt_serialize_v1_header(struct wt_status *s, int fd)
 	packet_write_fmt(fd, "index_mtime %d %d\n",
 			 s->repo->index->timestamp.sec,
 			 s->repo->index->timestamp.nsec);
+	append_core_excludes_file_info(fd);
+	append_repo_excludes_file_info(fd);
 
 	/*
 	 * Write data from wt_status to qualify this status report.
diff --git a/wt-status.h b/wt-status.h
index 314185cc1ae8f3..9728117f0e9217 100644
--- a/wt-status.h
+++ b/wt-status.h
@@ -236,4 +236,12 @@ void wt_status_serialize_v1(int fd, struct wt_status *s);
 int wt_status_deserialize(const struct wt_status *cmd_s,
 			  const char *path);
 
+/*
+ * A helper routine for serialize and deserialize to compute
+ * metadata for the user-global and repo-local excludes files.
+ */
+void wt_serialize_compute_exclude_header(struct strbuf *sb,
+					 const char *key,
+					 const char *path);
+
 #endif /* STATUS_H */

From b2024bb906d226d331df857ea2da980cf250952b Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Wed, 25 Jul 2018 14:49:37 -0400
Subject: [PATCH 062/207] status: deserialization wait

Teach `git status --deserialize` to either wait indefinitely
or immediately fail if the status serialization cache file
is stale.
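
Sketch of the new knobs (the values shown are examples):

    # fail immediately if the cache is stale (useful for testing)
    git status --deserialize=status-cache.dat --deserialize-wait=fail

    # or configure a default: fail, block, no, or a timeout given
    # in tenths of a second
    git config status.deserializeWait 10
    git status --deserialize=status-cache.dat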

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 Documentation/config/status.txt |  16 +++++
 builtin/commit.c                |  59 +++++++++++++++-
 t/t7522-serialized-status.sh    |  52 ++++++++++++++
 wt-status-deserialize.c         | 119 +++++++++++++++++++++++++++++---
 wt-status.h                     |  12 +++-
 5 files changed, 245 insertions(+), 13 deletions(-)

diff --git a/Documentation/config/status.txt b/Documentation/config/status.txt
index 7302b066644e73..4d863fdaaec2eb 100644
--- a/Documentation/config/status.txt
+++ b/Documentation/config/status.txt
@@ -83,3 +83,19 @@ status.deserializePath::
 	generated by `--serialize`.  This will be overridden by
 	`--deserialize=<path>` on the command line.  If the cache file is
 	invalid or stale, git will fall-back and compute status normally.
+
+status.deserializeWait::
+	EXPERIMENTAL.  Specifies what `git status --deserialize` should do
+	if the serialization cache file is stale and whether it should
+	fall back and compute status normally.  This will be overridden by
+	`--deserialize-wait=<value>` on the command line.
++
+--
+* `fail` - cause git to exit with an error when the status cache file
+is stale; this is intended for testing and debugging.
+* `block` - cause git to spin and periodically retry the cache file
+every 100 ms; this is intended to help coordinate with another git
+instance concurrently computing the cache file.
+* `no` - immediately fall back if the cache file is stale.  This is the default.
+* `<timeout>` - time (in tenths of a second) to spin and retry.
+--
diff --git a/builtin/commit.c b/builtin/commit.c
index 6644a882f53cb2..f1afb753ab0878 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -171,6 +171,9 @@ static int do_implicit_deserialize = 0;
 static int do_explicit_deserialize = 0;
 static char *deserialize_path = NULL;
 
+static enum wt_status_deserialize_wait implicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+static enum wt_status_deserialize_wait explicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+
 /*
  * --serialize | --serialize=<path>
  *
@@ -236,6 +239,40 @@ static int opt_parse_deserialize(const struct option *opt UNUSED, const char *ar
 	return 0;
 }
 
+static enum wt_status_deserialize_wait parse_dw(const char *arg)
+{
+	int tenths;
+
+	if (!strcmp(arg, "fail"))
+		return DESERIALIZE_WAIT__FAIL;
+	else if (!strcmp(arg, "block"))
+		return DESERIALIZE_WAIT__BLOCK;
+	else if (!strcmp(arg, "no"))
+		return DESERIALIZE_WAIT__NO;
+
+	/*
+	 * Otherwise, assume it is a timeout in tenths of a second.
+	 * If it contains a bogus value, atol() will return zero
+	 * which is OK.
+	 */
+	tenths = atol(arg);
+	if (tenths < 0)
+		tenths = DESERIALIZE_WAIT__NO;
+	return tenths;
+}
+
+static int opt_parse_deserialize_wait(const struct option *opt UNUSED,
+				      const char *arg,
+				      int unset)
+{
+	if (unset)
+		explicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+	else
+		explicit_deserialize_wait = parse_dw(arg);
+
+	return 0;
+}
+
 static int opt_parse_m(const struct option *opt, const char *arg, int unset)
 {
 	struct strbuf *buf = opt->value;
@@ -1568,6 +1605,13 @@ static int git_status_config(const char *k, const char *v,
 		}
 		return 0;
 	}
+	if (!strcmp(k, "status.deserializewait")) {
+		if (!v || !*v)
+			implicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+		else
+			implicit_deserialize_wait = parse_dw(v);
+		return 0;
+	}
 	if (!strcmp(k, "status.showuntrackedfiles")) {
 		enum untracked_status_type u;
 
@@ -1629,6 +1673,9 @@ struct repository *repo UNUSED)
 		{ OPTION_CALLBACK, 0, "deserialize", NULL,
 		  N_("path"), N_("deserialize raw status data from file"),
 		  PARSE_OPT_OPTARG, opt_parse_deserialize },
+		{ OPTION_CALLBACK, 0, "deserialize-wait", NULL,
+		  N_("fail|block|no"), N_("how to wait if status cache file is invalid"),
+		  PARSE_OPT_OPTARG, opt_parse_deserialize_wait },
 		OPT_SET_INT(0, "long", &status_format,
 			    N_("show status in long format (default)"),
 			    STATUS_FORMAT_LONG),
@@ -1725,11 +1772,21 @@ struct repository *repo UNUSED)
 	}
 
 	if (try_deserialize) {
+		int result;
+		enum wt_status_deserialize_wait dw = implicit_deserialize_wait;
+		if (explicit_deserialize_wait != DESERIALIZE_WAIT__UNSET)
+			dw = explicit_deserialize_wait;
+		if (dw == DESERIALIZE_WAIT__UNSET)
+			dw = DESERIALIZE_WAIT__NO;
+
 		if (s.relative_paths)
 			s.prefix = prefix;
 
-		if (wt_status_deserialize(&s, deserialize_path) == DESERIALIZE_OK)
+		result = wt_status_deserialize(&s, deserialize_path, dw);
+		if (result == DESERIALIZE_OK)
 			return 0;
+		if (dw == DESERIALIZE_WAIT__FAIL)
+			die(_("Rejected status serialization cache"));
 
 		/* deserialize failed, so force the initialization we skipped above. */
 		enable_fscache(1);
diff --git a/t/t7522-serialized-status.sh b/t/t7522-serialized-status.sh
index 361afca94835e0..edf15d7af45489 100755
--- a/t/t7522-serialized-status.sh
+++ b/t/t7522-serialized-status.sh
@@ -199,6 +199,58 @@ test_expect_success 'verify new --serialize=path mode' '
 	test_cmp expect output.2
 '
 
+test_expect_success 'try deserialize-wait feature' '
+	test_when_finished "rm -f serialized_status.dat dirt expect.* output.* trace.*" &&
+
+	git status --serialize=serialized_status.dat >output.1 &&
+
+	# make status cache stale by updating the mtime on the index.  confirm that
+	# deserialize fails when requested.
+	sleep 1 &&
+	touch .git/index &&
+	test_must_fail git status --deserialize=serialized_status.dat --deserialize-wait=fail &&
+	test_must_fail git -c status.deserializeWait=fail status --deserialize=serialized_status.dat &&
+
+	cat >expect.1 <<-\EOF &&
+	? expect.1
+	? output.1
+	? serialized_status.dat
+	? untracked/
+	? untracked_1.txt
+	EOF
+
+	# refresh the status cache.
+	git status --porcelain=v2 --serialize=serialized_status.dat >output.1 &&
+	test_cmp expect.1 output.1 &&
+
+	# create some dirt. confirm deserialize used the existing status cache.
+	echo x >dirt &&
+	git status --porcelain=v2 --deserialize=serialized_status.dat >output.2 &&
+	test_cmp output.1 output.2 &&
+
+	# make the cache stale and try the timeout feature and wait up to
+	# 2 tenths of a second.  confirm deserialize timed out and rejected
+	# the status cache and did a normal scan.
+
+	cat >expect.2 <<-\EOF &&
+	? dirt
+	? expect.1
+	? expect.2
+	? output.1
+	? output.2
+	? serialized_status.dat
+	? trace.2
+	? untracked/
+	? untracked_1.txt
+	EOF
+
+	sleep 1 &&
+	touch .git/index &&
+	GIT_TRACE_DESERIALIZE=1 git status --porcelain=v2 --deserialize=serialized_status.dat --deserialize-wait=2 >output.2 2>trace.2 &&
+	test_cmp expect.2 output.2 &&
+	grep "wait polled=2 result=1" trace.2 >trace.2g
+'
+
 test_expect_success 'merge conflicts' '
 
 	# create a merge conflict.
diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c
index d9716a8531cb6a..1c6c728f77f0fc 100644
--- a/wt-status-deserialize.c
+++ b/wt-status-deserialize.c
@@ -62,7 +62,8 @@ static int my_validate_index(const char *path, const struct cache_time *mtime_re
 	mtime_observed_on_disk.nsec = ST_MTIME_NSEC(st);
 	if ((mtime_observed_on_disk.sec != mtime_reported->sec) ||
 	    (mtime_observed_on_disk.nsec != mtime_reported->nsec)) {
-		trace_printf_key(&trace_deserialize, "index mtime changed [des %d.%d][obs %d.%d]",
+		trace_printf_key(&trace_deserialize,
+				 "index mtime changed [des %d %d][obs %d %d]",
 			     mtime_reported->sec, mtime_reported->nsec,
 			     mtime_observed_on_disk.sec, mtime_observed_on_disk.nsec);
 		return DESERIALIZE_ERR;
@@ -552,6 +553,8 @@ static inline int my_strcmp_null(const char *a, const char *b)
 
 static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *des_s, int fd)
 {
+	memset(des_s, 0, sizeof(*des_s));
+
 	/*
 	 * Check the path spec on the current command
 	 */
@@ -681,8 +684,101 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de
 	return DESERIALIZE_OK;
 }
 
+static struct cache_time deserialize_prev_mtime = { 0, 0 };
+
+static int try_deserialize_read_from_file_1(const struct wt_status *cmd_s,
+					    const char *path,
+					    struct wt_status *des_s)
+{
+	struct stat st;
+	int result;
+	int fd;
+
+	/*
+	 * If we are spinning waiting for the status cache to become
+	 * valid, skip re-reading it if the mtime has not changed
+	 * since the last time we read it.
+	 */
+	if (lstat(path, &st)) {
+		trace_printf_key(&trace_deserialize,
+				 "could not lstat '%s'", path);
+		return DESERIALIZE_ERR;
+	}
+	if ((uint32_t)st.st_mtime == deserialize_prev_mtime.sec &&
+	    ST_MTIME_NSEC(st) == deserialize_prev_mtime.nsec) {
+		trace_printf_key(&trace_deserialize,
+				 "mtime has not changed '%s'", path);
+		return DESERIALIZE_ERR;
+	}
+
+	fd = xopen(path, O_RDONLY);
+	if (fd == -1) {
+		trace_printf_key(&trace_deserialize,
+				 "could not read '%s'", path);
+		return DESERIALIZE_ERR;
+	}
+
+	deserialize_prev_mtime.sec = st.st_mtime;
+	deserialize_prev_mtime.nsec = ST_MTIME_NSEC(st);
+
+	trace_printf_key(&trace_deserialize,
+			 "reading serialization file (%d %d) '%s'",
+			 deserialize_prev_mtime.sec,
+			 deserialize_prev_mtime.nsec,
+			 path);
+
+	result = wt_deserialize_fd(cmd_s, des_s, fd);
+	close(fd);
+
+	return result;
+}
+
+static int try_deserialize_read_from_file(const struct wt_status *cmd_s,
+					  const char *path,
+					  enum wt_status_deserialize_wait dw,
+					  struct wt_status *des_s)
+{
+	int k, limit;
+	int result = DESERIALIZE_ERR;
+
+	/*
+	 * For "fail" or "no", try exactly once to read the status cache.
+	 * Return an error if the file is stale.
+	 */
+	if (dw == DESERIALIZE_WAIT__FAIL || dw == DESERIALIZE_WAIT__NO)
+		return try_deserialize_read_from_file_1(cmd_s, path, des_s);
+
+	/*
+	 * Wait for the status cache file to refresh.  Wait duration can
+	 * be in tenths of a second or unlimited.  Poll every 100ms.
+	 */
+	if (dw == DESERIALIZE_WAIT__BLOCK) {
+		/*
+		 * Convert "unlimited" to 1 day.
+		 */
+		limit = 10 * 60 * 60 * 24;
+	} else {
+		/* spin for dw tenths of a second */
+		limit = dw;
+	}
+	for (k = 0; k < limit; k++) {
+		result = try_deserialize_read_from_file_1(
+			cmd_s, path, des_s);
+
+		if (result == DESERIALIZE_OK)
+			break;
+
+		sleep_millisec(100);
+	}
+
+	trace_printf_key(&trace_deserialize,
+			 "wait polled=%d result=%d '%s'",
+			 k, result, path);
+	return result;
+}
+
 /*
- * Read raw serialized status data from the given file
+ * Read raw serialized status data from the given file (or STDIN).
  *
  * Verify that the args specified in the current command
  * are compatible with the deserialized data (such as "-uno").
@@ -690,25 +786,26 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de
  * Copy display-related fields from the current command
  * into the deserialized data (so that the user can request
  * long or short as they please).
+ *
+ * Print status report using cached data.
  */
 int wt_status_deserialize(const struct wt_status *cmd_s,
-			  const char *path)
+			  const char *path,
+			  enum wt_status_deserialize_wait dw)
 {
 	struct wt_status des_s;
 	int result;
 	struct string_list_item *change;
 
 	if (path && *path && strcmp(path, "0")) {
-		int fd = xopen(path, O_RDONLY);
-		if (fd == -1) {
-			trace_printf_key(&trace_deserialize, "could not read '%s'", path);
-			return DESERIALIZE_ERR;
-		}
-		trace_printf_key(&trace_deserialize, "reading serialization file '%s'", path);
-		result = wt_deserialize_fd(cmd_s, &des_s, fd);
-		close(fd);
+		result = try_deserialize_read_from_file(cmd_s, path, dw, &des_s);
 	} else {
 		trace_printf_key(&trace_deserialize, "reading stdin");
+
+		/*
+		 * Read status cache data from stdin.  Ignore the deserialize-wait
+		 * term, since we cannot read stdin multiple times.
+		 */
 		result = wt_deserialize_fd(cmd_s, &des_s, 0);
 	}
 
diff --git a/wt-status.h b/wt-status.h
index 9728117f0e9217..b6cf4531fe56a4 100644
--- a/wt-status.h
+++ b/wt-status.h
@@ -220,6 +220,15 @@ struct wt_status_serialize_data
 		     - sizeof(struct wt_status_serialize_data_fixed)];
 };
 
+enum wt_status_deserialize_wait
+{
+	DESERIALIZE_WAIT__UNSET = -3,
+	DESERIALIZE_WAIT__FAIL = -2, /* return error, do not fallback */
+	DESERIALIZE_WAIT__BLOCK = -1, /* unlimited timeout */
+	DESERIALIZE_WAIT__NO = 0, /* immediately fallback */
+	/* any positive value is a timeout in tenths of a second */
+};
+
 /*
  * Serialize computed status scan results using "version 1" format
  * to the given file.
@@ -234,7 +243,8 @@ void wt_status_serialize_v1(int fd, struct wt_status *s);
  * fields.
  */
 int wt_status_deserialize(const struct wt_status *cmd_s,
-			  const char *path);
+			  const char *path,
+			  enum wt_status_deserialize_wait dw);
 
 /*
  * A helper routine for serialize and deserialize to compute

From e9a6fe0f6ed308281e367d6da3b0c8f7d7c80e97 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johasc@microsoft.com>
Date: Tue, 21 May 2019 23:14:48 +0200
Subject: [PATCH 063/207] merge-recursive: avoid confusing logic in was_dirty()

It took this developer more than a moment to verify that was_dirty()
really returns 0 (i.e. "false") if the file was not even tracked. In
other words, the `dirty` variable that was initialized to 1 (i.e.
"true") and then negated to be returned was not helping readability.

The same holds for the final return: rather than assigning the return
value to `dirty` and then *immediately* returning it, we can simplify
this to a single statement.
---
 merge-recursive.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/merge-recursive.c b/merge-recursive.c
index 4533127e972647..3d79512f25aa47 100644
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -875,15 +875,13 @@ static int would_lose_untracked(struct merge_options *opt, const char *path)
 static int was_dirty(struct merge_options *opt, const char *path)
 {
 	struct cache_entry *ce;
-	int dirty = 1;
 
 	if (opt->priv->call_depth || !was_tracked(opt, path))
-		return !dirty;
+		return 0;
 
 	ce = index_file_exists(opt->priv->unpack_opts.src_index,
 			       path, strlen(path), ignore_case);
-	dirty = verify_uptodate(ce, &opt->priv->unpack_opts) != 0;
-	return dirty;
+	return verify_uptodate(ce, &opt->priv->unpack_opts) != 0;
 }
 
 static int make_room_for_path(struct merge_options *opt, const char *path)

From cd466515b51817fea38bb21da17a0da8dae17cdf Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johasc@microsoft.com>
Date: Tue, 21 May 2019 23:17:46 +0200
Subject: [PATCH 064/207] merge-recursive: add some defensive coding to
 was_dirty()

It took this developer quite a good while to understand why the current
code cannot get a `NULL` returned by `index_file_exists()`. To
un-confuse readers (and future-proof the code), let's just be safe and
check before we dereference the returned pointer.
---
 merge-recursive.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/merge-recursive.c b/merge-recursive.c
index 3d79512f25aa47..2c8b948f066a5b 100644
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -881,7 +881,7 @@ static int was_dirty(struct merge_options *opt, const char *path)
 
 	ce = index_file_exists(opt->priv->unpack_opts.src_index,
 			       path, strlen(path), ignore_case);
-	return verify_uptodate(ce, &opt->priv->unpack_opts) != 0;
+	return !ce || verify_uptodate(ce, &opt->priv->unpack_opts) != 0;
 }
 
 static int make_room_for_path(struct merge_options *opt, const char *path)

From f326c70bda754ea4b154ce7f7442a94b88b589a7 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johasc@microsoft.com>
Date: Tue, 21 May 2019 23:20:16 +0200
Subject: [PATCH 065/207] merge-recursive: teach was_dirty() about the
 virtualfilesystem

The idea of the virtual file system really is to tell Git to avoid
accessing certain paths. This fixes the case where a given path is not
yet included in the virtual file system and we are about to write a
conflicted version of it.
---
 merge-recursive.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/merge-recursive.c b/merge-recursive.c
index 2c8b948f066a5b..ed87ce52b95cd1 100644
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -9,6 +9,7 @@
 
 #include "git-compat-util.h"
 #include "merge-recursive.h"
+#include "virtualfilesystem.h"
 
 #include "alloc.h"
 #include "cache-tree.h"
@@ -876,7 +877,8 @@ static int was_dirty(struct merge_options *opt, const char *path)
 {
 	struct cache_entry *ce;
 
-	if (opt->priv->call_depth || !was_tracked(opt, path))
+	if (opt->priv->call_depth || !was_tracked(opt, path) ||
+	    is_excluded_from_virtualfilesystem(path, strlen(path), DT_REG) == 1)
 		return 0;
 
 	ce = index_file_exists(opt->priv->unpack_opts.src_index,

From 2d8e853c4163e2eb007dd12f6d61892f0718c144 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 25 Jun 2019 16:38:50 -0400
Subject: [PATCH 066/207] status: deserialize with -uno does not print correct
 hint

With the "--untracked-files=complete" option status computes a
superset of the untracked files.  We use this when writing the
status cache.  If subsequent deserialize commands ask for either
the complete set or one of the "no", "normal", or "all" subsets,
it can still use the cache file because of filtering in the
deserialize parser.

When running status with the "-uno" option, the long format
status would print a "(use -u to show untracked files)" hint.

When deserializing with the "-uno" option and using a cache computed
with "-ucomplete", the "nothing to commit, working tree clean" message
would be printed instead of the hint.

It was easy to miss because the correct hint message was printed
if the cache was rejected for any reason (and status did the full
fallback).

The "struct wt_status des" structure was initialized with the
content of the status cache (and thus defaulted to "complete").
This change sets "des.show_untracked_files" to the requested
subset from the command-line or config.  This allows the long
format to print the hint.
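
A small sketch of the scenario (mirroring the test added below; the
cache file path is illustrative):

    # write the cache with the "complete" untracked superset
    git status --untracked-files=complete --ignored=matching \
        --serialize=../status.cache

    # read it back with -uno; the "(use -u ...)" hint should now appear
    git status --deserialize=../status.cache -uno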

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 t/t7522-serialized-status.sh | 55 ++++++++++++++++++++++++++++++++++++
 wt-status-deserialize.c      | 16 +++++++----
 2 files changed, 65 insertions(+), 6 deletions(-)

diff --git a/t/t7522-serialized-status.sh b/t/t7522-serialized-status.sh
index edf15d7af45489..b52a9b7fa2f520 100755
--- a/t/t7522-serialized-status.sh
+++ b/t/t7522-serialized-status.sh
@@ -345,4 +345,59 @@ test_expect_success 'renames' '
 	test_cmp output.1 output.2
 '
 
+test_expect_success 'hint message when cached with u=complete' '
+	git init -b main hint &&
+	echo xxx >hint/xxx &&
+	git -C hint add xxx &&
+	git -C hint commit -m xxx &&
+
+	cat >expect.clean <<EOF &&
+On branch main
+nothing to commit, working tree clean
+EOF
+
+	cat >expect.use_u <<EOF &&
+On branch main
+nothing to commit (use -u to show untracked files)
+EOF
+
+	# Capture long format output from "no", "normal", and "all"
+	# (without using status cache) and verify it matches expected
+	# output.
+
+	git -C hint status --untracked-files=normal >hint.output_normal &&
+	test_cmp expect.clean hint.output_normal &&
+
+	git -C hint status --untracked-files=all >hint.output_all &&
+	test_cmp expect.clean hint.output_all &&
+
+	git -C hint status --untracked-files=no >hint.output_no &&
+	test_cmp expect.use_u hint.output_no &&
+
+	# Create long format output for "complete" and create status cache.
+
+	git -C hint status --untracked-files=complete --ignored=matching --serialize=../hint.dat >hint.output_complete &&
+	test_cmp expect.clean hint.output_complete &&
+
+	# Capture long format output using the status cache and verify
+	# that the output matches the non-cached version.  There are 2
+	# ways to specify untracked-files, so do them both.
+
+	git -C hint status --deserialize=../hint.dat -unormal >hint.d1_normal &&
+	test_cmp expect.clean hint.d1_normal &&
+	git -C hint -c status.showuntrackedfiles=normal status --deserialize=../hint.dat >hint.d2_normal &&
+	test_cmp expect.clean hint.d2_normal &&
+
+	git -C hint status --deserialize=../hint.dat -uall >hint.d1_all &&
+	test_cmp expect.clean hint.d1_all &&
+	git -C hint -c status.showuntrackedfiles=all status --deserialize=../hint.dat >hint.d2_all &&
+	test_cmp expect.clean hint.d2_all &&
+
+	git -C hint status --deserialize=../hint.dat -uno >hint.d1_no &&
+	test_cmp expect.use_u hint.d1_no &&
+	git -C hint -c status.showuntrackedfiles=no status --deserialize=../hint.dat >hint.d2_no &&
+	test_cmp expect.use_u hint.d2_no
+
+'
+
 test_done
diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c
index 1c6c728f77f0fc..072408c0b7ab75 100644
--- a/wt-status-deserialize.c
+++ b/wt-status-deserialize.c
@@ -424,20 +424,24 @@ static int wt_deserialize_v1_ignored_items(struct wt_status *s,
 }
 
 static int validate_untracked_files_arg(enum untracked_status_type cmd,
-					enum untracked_status_type des,
+					enum untracked_status_type *des,
 					enum deserialize_parse_strategy *strategy)
 {
 	*strategy = DESERIALIZE_STRATEGY_AS_IS;
 
-	if (cmd == des) {
+	if (cmd == *des) {
 		*strategy = DESERIALIZE_STRATEGY_AS_IS;
 	} else if (cmd == SHOW_NO_UNTRACKED_FILES) {
 		*strategy = DESERIALIZE_STRATEGY_SKIP;
-	} else if (des == SHOW_COMPLETE_UNTRACKED_FILES) {
-		if (cmd == SHOW_ALL_UNTRACKED_FILES)
+		*des = cmd;
+	} else if (*des == SHOW_COMPLETE_UNTRACKED_FILES) {
+		if (cmd == SHOW_ALL_UNTRACKED_FILES) {
 			*strategy = DESERIALIZE_STRATEGY_ALL;
-		else if (cmd == SHOW_NORMAL_UNTRACKED_FILES)
+			*des = cmd;
+		} else if (cmd == SHOW_NORMAL_UNTRACKED_FILES) {
 			*strategy = DESERIALIZE_STRATEGY_NORMAL;
+			*des = cmd;
+		}
 	} else {
 		return DESERIALIZE_ERR;
 	}
@@ -479,7 +483,7 @@ static int wt_deserialize_v1(const struct wt_status *cmd_s, struct wt_status *s,
 	 * We now have the header parsed. Look at the command args (as passed in), and see how to parse
 	 * the serialized data
 	*/
-	if (validate_untracked_files_arg(cmd_s->show_untracked_files, s->show_untracked_files, &untracked_strategy)) {
+	if (validate_untracked_files_arg(cmd_s->show_untracked_files, &s->show_untracked_files, &untracked_strategy)) {
 		trace_printf_key(&trace_deserialize, "reject: show_untracked_file: command: %d, serialized : %d",
 				cmd_s->show_untracked_files,
 				s->show_untracked_files);

From c9cee1c164d2fb5c0cf42995217e1e16d2bf3b18 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Wed, 26 Sep 2018 12:29:26 -0400
Subject: [PATCH 067/207] gvfs:trace2:data: add trace2 tracing around
 read_object_process

Add trace2 region around read_object_process to collect
time spent waiting for missing objects to be dynamically
fetched.

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 object-file.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/object-file.c b/object-file.c
index d4e6a9005ecd6c..adb3cafa425992 100644
--- a/object-file.c
+++ b/object-file.c
@@ -42,6 +42,7 @@
 #include "loose.h"
 #include "object-file-convert.h"
 #include "trace.h"
+#include "trace2.h"
 #include "hook.h"
 #include "sigchain.h"
 #include "sub-process.h"
@@ -1061,6 +1062,8 @@ static int read_object_process(const struct object_id *oid)
 
 	start = getnanotime();
 
+	trace2_region_enter("subprocess", "read_object", the_repository);
+
 	if (!subprocess_map_initialized) {
 		subprocess_map_initialized = 1;
 		hashmap_init(&subprocess_map, (hashmap_cmp_fn)cmd2process_cmp,
@@ -1077,13 +1080,16 @@ static int read_object_process(const struct object_id *oid)
 		if (subprocess_start(&subprocess_map, &entry->subprocess, cmd,
 				     start_read_object_fn)) {
 			free(entry);
-			return -1;
+			err = -1;
+			goto leave_region;
 		}
 	}
 	process = &entry->subprocess.process;
 
-	if (!(CAP_GET & entry->supported_capabilities))
-		return -1;
+	if (!(CAP_GET & entry->supported_capabilities)) {
+		err = -1;
+		goto leave_region;
+	}
 
 	sigchain_push(SIGPIPE, SIG_IGN);
 
@@ -1132,6 +1138,10 @@ static int read_object_process(const struct object_id *oid)
 
 	trace_performance_since(start, "read_object_process");
 
+leave_region:
+	trace2_region_leave_printf("subprocess", "read_object", the_repository,
+				   "result %d", err);
+
 	strbuf_release(&status);
 	return err;
 }

From 4b24b8246fa8e89e55c8db09e8344095cbaa5bd6 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Wed, 26 Sep 2018 11:21:22 -0400
Subject: [PATCH 068/207] gvfs:trace2:data: status deserialization information

Add trace2 region and data events describing attempts to deserialize
status data using a status cache.

A category:status, label:deserialize region is pushed around the
deserialize code.

Deserialization results when reading from a file are:
    category:status, path   = <path>
    category:status, polled = <number_of_attempts>
    category:status, result = "ok" | "reject"

When reading from STDIN, they are:
    category:status, path   = "STDIN"
    category:status, result = "ok" | "reject"

Status will fall back and run a normal status scan when a "reject"
is reported (unless "--deserialize-wait=fail").

If "ok" is reported, status was able to use the status cache and
avoid scanning the workdir.

Additionally, a cmd_mode is emitted for each step: collection,
deserialization, and serialization.  For example, if deserialization
is attempted and fails and status falls back to actually computing
the status, a cmd_mode message containing "deserialize" is issued
and then a cmd_mode for "collect" is issued.

Also, if deserialization fails, a data message containing the
rejection reason is emitted.
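
As a rough sketch, the new events can be inspected with trace2 enabled
(the trace output and cache file paths below are illustrative):

    GIT_TRACE2_PERF=/tmp/status.perf \
        git status --deserialize=../status.cache
    grep "deserialize" /tmp/status.perf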

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 builtin/commit.c        | 19 +++++++++++-
 wt-status-deserialize.c | 67 ++++++++++++++++++++++++++++++++++++++---
 wt-status.h             |  2 ++
 3 files changed, 83 insertions(+), 5 deletions(-)

diff --git a/builtin/commit.c b/builtin/commit.c
index 9826e5c26dc122..da296192b46f95 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -168,6 +168,7 @@ static int opt_parse_porcelain(const struct option *opt, const char *arg, int un
 static int do_serialize = 0;
 static char *serialize_path = NULL;
 
+static int reject_implicit = 0;
 static int do_implicit_deserialize = 0;
 static int do_explicit_deserialize = 0;
 static char *deserialize_path = NULL;
@@ -231,7 +232,7 @@ static int opt_parse_deserialize(const struct option *opt UNUSED, const char *ar
 		}
 		if (!deserialize_path || !*deserialize_path)
 			do_explicit_deserialize = 1; /* read stdin */
-		else if (access(deserialize_path, R_OK) == 0)
+		else if (wt_status_deserialize_access(deserialize_path, R_OK) == 0)
 			do_explicit_deserialize = 1; /* can read from this file */
 		else {
 			/*
@@ -1609,6 +1610,8 @@ static int git_status_config(const char *k, const char *v,
 		if (v && *v && access(v, R_OK) == 0) {
 			do_implicit_deserialize = 1;
 			deserialize_path = xstrdup(v);
+		} else {
+			reject_implicit = 1;
 		}
 		return 0;
 	}
@@ -1762,6 +1765,17 @@ struct repository *repo UNUSED)
 
 	if (try_deserialize)
 		goto skip_init;
+	/*
+	 * If we implicitly received a status cache pathname from the config
+	 * and the file does not exist, we silently reject it and do the normal
+	 * status "collect".  Fake up some trace2 messages to reflect this and
+	 * help post-processors recognize that this case is different.
+	 */
+	if (!do_serialize && reject_implicit) {
+		trace2_cmd_mode("implicit-deserialize");
+		trace2_data_string("status", the_repository, "deserialize/reject",
+				   "status-cache/access");
+	}
 
 	enable_fscache(0);
 	if (status_format != STATUS_FORMAT_PORCELAIN &&
@@ -1805,6 +1819,7 @@ struct repository *repo UNUSED)
 		if (s.relative_paths)
 			s.prefix = prefix;
 
+		trace2_cmd_mode("deserialize");
 		result = wt_status_deserialize(&s, deserialize_path, dw);
 		if (result == DESERIALIZE_OK)
 			return 0;
@@ -1822,6 +1837,7 @@ struct repository *repo UNUSED)
 			fd = -1;
 	}
 
+	trace2_cmd_mode("collect");
 	wt_status_collect(&s);
 
 	if (0 <= fd)
@@ -1836,6 +1852,7 @@ struct repository *repo UNUSED)
 		if (fd_serialize < 0)
 			die_errno(_("could not serialize to '%s'"),
 				  serialize_path);
+		trace2_cmd_mode("serialize");
 		wt_status_serialize_v1(fd_serialize, &s);
 		close(fd_serialize);
 	}
diff --git a/wt-status-deserialize.c b/wt-status-deserialize.c
index 072408c0b7ab75..ed8886f623907b 100644
--- a/wt-status-deserialize.c
+++ b/wt-status-deserialize.c
@@ -9,6 +9,23 @@
 #include "statinfo.h"
 #include "hex.h"
 #include "path.h"
+#include "trace2.h"
+
+static void set_deserialize_reject_reason(const char *reason)
+{
+	trace2_data_string("status", the_repository, "deserialize/reject",
+			   reason);
+}
+
+int wt_status_deserialize_access(const char *path, int mode)
+{
+	int a = access(path, mode);
+
+	if (a != 0)
+		set_deserialize_reject_reason("status-cache/access");
+
+	return a;
+}
 
 static struct trace_key trace_deserialize = TRACE_KEY_INIT(DESERIALIZE);
 
@@ -55,6 +72,7 @@ static int my_validate_index(const char *path, const struct cache_time *mtime_re
 	struct cache_time mtime_observed_on_disk;
 
 	if (lstat(path, &st)) {
+		set_deserialize_reject_reason("index/not-found");
 		trace_printf_key(&trace_deserialize, "could not stat index");
 		return DESERIALIZE_ERR;
 	}
@@ -62,6 +80,7 @@ static int my_validate_index(const char *path, const struct cache_time *mtime_re
 	mtime_observed_on_disk.nsec = ST_MTIME_NSEC(st);
 	if ((mtime_observed_on_disk.sec != mtime_reported->sec) ||
 	    (mtime_observed_on_disk.nsec != mtime_reported->nsec)) {
+		set_deserialize_reject_reason("index/mtime-changed");
 		trace_printf_key(&trace_deserialize,
 				 "index mtime changed [des %d %d][obs %d %d]",
 			     mtime_reported->sec, mtime_reported->nsec,
@@ -87,10 +106,12 @@ static int my_validate_excludes(const char *path, const char *key, const char *l
 
 	r = (strcmp(line, sb.buf) ? DESERIALIZE_ERR : DESERIALIZE_OK);
 
-	if (r == DESERIALIZE_ERR)
+	if (r == DESERIALIZE_ERR) {
+		set_deserialize_reject_reason("excludes/changed");
 		trace_printf_key(&trace_deserialize,
 				 "%s changed [cached '%s'][observed '%s']",
 				 key, line, sb.buf);
+	}
 
 	strbuf_release(&sb);
 	return r;
@@ -146,6 +167,7 @@ static int wt_deserialize_v1_header(struct wt_status *s, int fd)
 					   &index_mtime.sec,
 					   &index_mtime.nsec);
 			if (nr_fields != 2) {
+				set_deserialize_reject_reason("v1-header/invalid-index-mtime");
 				trace_printf_key(&trace_deserialize, "invalid index_mtime (%d) '%s'",
 					     nr_fields, line);
 				return DESERIALIZE_ERR;
@@ -229,6 +251,7 @@ static int wt_deserialize_v1_header(struct wt_status *s, int fd)
 		/* status_format */
 		if (skip_prefix(line, "sha1_commit ", &arg)) {
 			if (get_oid_hex(arg, &s->oid_commit)) {
+				set_deserialize_reject_reason("v1-header/invalid-commit-sha");
 				trace_printf_key(&trace_deserialize, "invalid sha1_commit");
 				return DESERIALIZE_ERR;
 			}
@@ -244,19 +267,23 @@ static int wt_deserialize_v1_header(struct wt_status *s, int fd)
 		}
 		/* prefix */
 
+		set_deserialize_reject_reason("v1-header/unexpected-line");
 		trace_printf_key(&trace_deserialize, "unexpected line '%s'", line);
 		return DESERIALIZE_ERR;
 	}
 
 	if (!have_required_index_mtime) {
+		set_deserialize_reject_reason("v1-header/missing-index-mtime");
 		trace_printf_key(&trace_deserialize, "missing '%s'", "index_mtime");
 		return DESERIALIZE_ERR;
 	}
 	if (!have_required_core_excludes) {
+		set_deserialize_reject_reason("v1-header/missing-core-excludes");
 		trace_printf_key(&trace_deserialize, "missing '%s'", "core_excludes");
 		return DESERIALIZE_ERR;
 	}
 	if (!have_required_repo_excludes) {
+		set_deserialize_reject_reason("v1-header/missing-repo-excludes");
 		trace_printf_key(&trace_deserialize, "missing '%s'", "repo_excludes");
 		return DESERIALIZE_ERR;
 	}
@@ -343,6 +370,7 @@ static int wt_deserialize_v1_changed_items(const struct wt_status *cmd_s,
 			 * So we reject the status cache and let the fallback
 			 * code run.
 			 */
+			set_deserialize_reject_reason("v1-data/unmerged");
 			trace_printf_key(
 				&trace_deserialize,
 				"reject: V2 format and umerged file: %s",
@@ -484,6 +512,7 @@ static int wt_deserialize_v1(const struct wt_status *cmd_s, struct wt_status *s,
 	 * the serialized data
 	*/
 	if (validate_untracked_files_arg(cmd_s->show_untracked_files, &s->show_untracked_files, &untracked_strategy)) {
+		set_deserialize_reject_reason("args/untracked-files");
 		trace_printf_key(&trace_deserialize, "reject: show_untracked_file: command: %d, serialized : %d",
 				cmd_s->show_untracked_files,
 				s->show_untracked_files);
@@ -491,6 +520,7 @@ static int wt_deserialize_v1(const struct wt_status *cmd_s, struct wt_status *s,
 	}
 
 	if (validate_ignored_files_arg(cmd_s->show_ignored_mode, s->show_ignored_mode, &ignored_strategy)) {
+		set_deserialize_reject_reason("args/ignored-mode");
 		trace_printf_key(&trace_deserialize, "reject: show_ignored_mode: command: %d, serialized: %d",
 				cmd_s->show_ignored_mode,
 				s->show_ignored_mode);
@@ -524,6 +554,7 @@ static int wt_deserialize_v1(const struct wt_status *cmd_s, struct wt_status *s,
 				return DESERIALIZE_ERR;
 			continue;
 		}
+		set_deserialize_reject_reason("v1-data/unexpected-line");
 		trace_printf_key(&trace_deserialize, "unexpected line '%s'", line);
 		return DESERIALIZE_ERR;
 	}
@@ -543,6 +574,7 @@ static int wt_deserialize_parse(const struct wt_status *cmd_s, struct wt_status
 		if (version == 1)
 			return wt_deserialize_v1(cmd_s, s, fd);
 	}
+	set_deserialize_reject_reason("status-cache/unsupported-version");
 	trace_printf_key(&trace_deserialize, "missing/unsupported version");
 	return DESERIALIZE_ERR;
 }
@@ -563,6 +595,7 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de
 	 * Check the path spec on the current command
 	 */
 	if (cmd_s->pathspec.nr > 1) {
+		set_deserialize_reject_reason("args/multiple-pathspecs");
 		trace_printf_key(&trace_deserialize, "reject: multiple pathspecs");
 		return DESERIALIZE_ERR;
 	}
@@ -573,6 +606,7 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de
 	 */
 	if (cmd_s->pathspec.nr == 1 &&
 		my_strcmp_null(cmd_s->pathspec.items[0].match, "")) {
+		set_deserialize_reject_reason("args/root-pathspec");
 		trace_printf_key(&trace_deserialize, "reject: pathspec");
 		return DESERIALIZE_ERR;
 	}
@@ -595,20 +629,24 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de
 	 * or "--ignored" settings).
 	 */
 	if (cmd_s->is_initial != des_s->is_initial) {
+		set_deserialize_reject_reason("args/is-initial-changed");
 		trace_printf_key(&trace_deserialize, "reject: is_initial");
 		return DESERIALIZE_ERR;
 	}
 	if (my_strcmp_null(cmd_s->branch, des_s->branch)) {
+		set_deserialize_reject_reason("args/branch-changed");
 		trace_printf_key(&trace_deserialize, "reject: branch");
 		return DESERIALIZE_ERR;
 	}
 	if (my_strcmp_null(cmd_s->reference, des_s->reference)) {
+		set_deserialize_reject_reason("args/reference-changed");
 		trace_printf_key(&trace_deserialize, "reject: reference");
 		return DESERIALIZE_ERR;
 	}
 	/* verbose */
 	/* amend */
 	if (cmd_s->whence != des_s->whence) {
+		set_deserialize_reject_reason("args/whence-changed");
 		trace_printf_key(&trace_deserialize, "reject: whence");
 		return DESERIALIZE_ERR;
 	}
@@ -642,19 +680,23 @@ static int wt_deserialize_fd(const struct wt_status *cmd_s, struct wt_status *de
 	/* hints */
 	/* ahead_behind_flags */
 	if (cmd_s->detect_rename != des_s->detect_rename) {
+		set_deserialize_reject_reason("args/detect-rename-changed");
 		trace_printf_key(&trace_deserialize, "reject: detect_rename");
 		return DESERIALIZE_ERR;
 	}
 	if (cmd_s->rename_score != des_s->rename_score) {
+		set_deserialize_reject_reason("args/rename-score-changed");
 		trace_printf_key(&trace_deserialize, "reject: rename_score");
 		return DESERIALIZE_ERR;
 	}
 	if (cmd_s->rename_limit != des_s->rename_limit) {
+		set_deserialize_reject_reason("args/rename-limit-changed");
 		trace_printf_key(&trace_deserialize, "reject: rename_limit");
 		return DESERIALIZE_ERR;
 	}
 	/* status_format */
 	if (!oideq(&cmd_s->oid_commit, &des_s->oid_commit)) {
+		set_deserialize_reject_reason("args/commit-changed");
 		trace_printf_key(&trace_deserialize, "reject: sha1_commit");
 		return DESERIALIZE_ERR;
 	}
@@ -742,15 +784,18 @@ static int try_deserialize_read_from_file(const struct wt_status *cmd_s,
 					  enum wt_status_deserialize_wait dw,
 					  struct wt_status *des_s)
 {
-	int k, limit;
+	int k = 0;
+	int limit;
 	int result = DESERIALIZE_ERR;
 
 	/*
 	 * For "fail" or "no", try exactly once to read the status cache.
 	 * Return an error if the file is stale.
 	 */
-	if (dw == DESERIALIZE_WAIT__FAIL || dw == DESERIALIZE_WAIT__NO)
-		return try_deserialize_read_from_file_1(cmd_s, path, des_s);
+	if (dw == DESERIALIZE_WAIT__FAIL || dw == DESERIALIZE_WAIT__NO) {
+		result = try_deserialize_read_from_file_1(cmd_s, path, des_s);
+		goto done;
+	}
 
 	/*
 	 * Wait for the status cache file to refresh.  Wait duration can
@@ -775,6 +820,12 @@ static int try_deserialize_read_from_file(const struct wt_status *cmd_s,
 		sleep_millisec(100);
 	}
 
+done:
+	trace2_data_string("status", the_repository, "deserialize/path", path);
+	trace2_data_intmax("status", the_repository, "deserialize/polled", k);
+	trace2_data_string("status", the_repository, "deserialize/result",
+			   ((result == DESERIALIZE_OK) ? "ok" : "reject"));
+
 	trace_printf_key(&trace_deserialize,
 			 "wait polled=%d result=%d '%s'",
 			 k, result, path);
@@ -801,6 +852,8 @@ int wt_status_deserialize(const struct wt_status *cmd_s,
 	int result;
 	struct string_list_item *change;
 
+	trace2_region_enter("status", "deserialize", the_repository);
+
 	if (path && *path && strcmp(path, "0")) {
 		result = try_deserialize_read_from_file(cmd_s, path, dw, &des_s);
 	} else {
@@ -811,8 +864,14 @@ int wt_status_deserialize(const struct wt_status *cmd_s,
 		 * term, since we cannot read stdin multiple times.
 		 */
 		result = wt_deserialize_fd(cmd_s, &des_s, 0);
+
+		trace2_data_string("status", the_repository, "deserialize/path", "STDIN");
+		trace2_data_string("status", the_repository, "deserialize/result",
+				   ((result == DESERIALIZE_OK) ? "ok" : "reject"));
 	}
 
+	trace2_region_leave("status", "deserialize", the_repository);
+
 	if (result == DESERIALIZE_OK) {
 		wt_status_get_state(cmd_s->repo, &des_s.state, des_s.branch &&
 				    !strcmp(des_s.branch, "HEAD"));
diff --git a/wt-status.h b/wt-status.h
index b6cf4531fe56a4..03c25186491990 100644
--- a/wt-status.h
+++ b/wt-status.h
@@ -246,6 +246,8 @@ int wt_status_deserialize(const struct wt_status *cmd_s,
 			  const char *path,
 			  enum wt_status_deserialize_wait dw);
 
+int wt_status_deserialize_access(const char *path, int mode);
+
 /*
  * A helper routine for serialize and deserialize to compute
  * metadata for the user-global and repo-local excludes files.

From 05bee9e1e1eaa43d4f8f3e101db5e862f47a3fc5 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Mon, 7 Jan 2019 12:45:48 -0500
Subject: [PATCH 069/207] gvfs:trace2:data: status serialization

Add trace information around status serialization.

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 wt-status-serialize.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/wt-status-serialize.c b/wt-status-serialize.c
index 6ab43b8127d917..f9ee02a2a7c6ff 100644
--- a/wt-status-serialize.c
+++ b/wt-status-serialize.c
@@ -9,6 +9,7 @@
 #include "trace.h"
 #include "read-cache-ll.h"
 #include "path.h"
+#include "trace2.h"
 
 static struct trace_key trace_serialize = TRACE_KEY_INIT(SERIALIZE);
 
@@ -305,6 +306,8 @@ void wt_status_serialize_v1(int fd, struct wt_status *s)
 	struct string_list_item *iter;
 	size_t k;
 
+	trace2_region_enter("status", "serialize", the_repository);
+
 	/*
 	 * version header must be first line.
 	 */
@@ -338,4 +341,6 @@ void wt_status_serialize_v1(int fd, struct wt_status *s)
 		}
 		packet_flush(fd);
 	}
+
+	trace2_region_leave("status", "serialize", the_repository);
 }

From d03100d07ade40bebb9ccbbb29c685b9a4885232 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Mon, 19 Nov 2018 16:26:37 -0500
Subject: [PATCH 070/207] gvfs:trace2:data: add vfs stats

Report virtual filesystem summary data.

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 virtualfilesystem.c | 38 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 36 insertions(+), 2 deletions(-)

diff --git a/virtualfilesystem.c b/virtualfilesystem.c
index 3f00800d86ef47..aff9cfd3eb8086 100644
--- a/virtualfilesystem.c
+++ b/virtualfilesystem.c
@@ -3,6 +3,7 @@
 #include "git-compat-util.h"
 #include "environment.h"
 #include "gettext.h"
+#include "trace2.h"
 #include "config.h"
 #include "dir.h"
 #include "hashmap.h"
@@ -258,6 +259,11 @@ void apply_virtualfilesystem(struct index_state *istate)
 {
 	char *buf, *entry;
 	size_t i;
+	int nr_unknown = 0;
+	int nr_vfs_dirs = 0;
+	int nr_vfs_rows = 0;
+	int nr_bulk_skip = 0;
+	int nr_explicit_skip = 0;
 
 	if (!repo_config_get_virtualfilesystem(istate->repo))
 		return;
@@ -275,16 +281,21 @@ void apply_virtualfilesystem(struct index_state *istate)
 		if (buf[i] == '\0') {
 			ssize_t pos, len;
 
+			nr_vfs_rows++;
+
 			len = buf + i - entry;
 
 			/* look for a directory wild card (ie "dir1/") */
 			if (buf[i - 1] == '/') {
+				nr_vfs_dirs++;
 				if (ignore_case)
 					adjust_dirname_case(istate, entry);
 				pos = index_name_pos(istate, entry, len);
 				if (pos < 0) {
 					pos = -pos - 1;
 					while ((size_t)pos < istate->cache_nr && !fspathncmp(istate->cache[pos]->name, entry, len)) {
+						if (istate->cache[pos]->ce_flags & CE_SKIP_WORKTREE)
+							nr_bulk_skip++;
 						istate->cache[pos]->ce_flags &= ~CE_SKIP_WORKTREE;
 						pos++;
 					}
@@ -292,18 +303,41 @@ void apply_virtualfilesystem(struct index_state *istate)
 			} else {
 				if (ignore_case) {
 					struct cache_entry *ce = index_file_exists(istate, entry, len, ignore_case);
-					if (ce)
+					if (ce) {
+						if (ce->ce_flags & CE_SKIP_WORKTREE)
+							nr_explicit_skip++;
 						ce->ce_flags &= ~CE_SKIP_WORKTREE;
+					}
+					else {
+						nr_unknown++;
+					}
 				} else {
 					int pos = index_name_pos(istate, entry, len);
-					if (pos >= 0)
+					if (pos >= 0) {
+						if (istate->cache[pos]->ce_flags & CE_SKIP_WORKTREE)
+							nr_explicit_skip++;
 						istate->cache[pos]->ce_flags &= ~CE_SKIP_WORKTREE;
+					}
+					else {
+						nr_unknown++;
+					}
 				}
 			}
 
 			entry += len + 1;
 		}
 	}
+
+	if (nr_vfs_rows > 0) {
+		trace2_data_intmax("vfs", the_repository, "apply/tracked", nr_bulk_skip + nr_explicit_skip);
+
+		trace2_data_intmax("vfs", the_repository, "apply/vfs_rows", nr_vfs_rows);
+		trace2_data_intmax("vfs", the_repository, "apply/vfs_dirs", nr_vfs_dirs);
+
+		trace2_data_intmax("vfs", the_repository, "apply/nr_unknown", nr_unknown);
+		trace2_data_intmax("vfs", the_repository, "apply/nr_bulk_skip", nr_bulk_skip);
+		trace2_data_intmax("vfs", the_repository, "apply/nr_explicit_skip", nr_explicit_skip);
+	}
 }
 
 /*

From b31168502626aa3b6e0b5f1ea81228323833537a Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Mon, 15 Apr 2019 13:39:43 -0700
Subject: [PATCH 071/207] trace2: refactor setting process starting time

Create trace2_initialize_clock() and call it from main() to capture
the process start time in isolation, before other sub-systems are
ready.

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
---
 compat/mingw.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/compat/mingw.c b/compat/mingw.c
index b1bd925c701ed0..8470936af099b7 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -4238,6 +4238,8 @@ int wmain(int argc, const wchar_t **wargv)
 
 	SetConsoleCtrlHandler(handle_ctrl_c, TRUE);
 
+	trace2_initialize_clock();
+
 	maybe_redirect_std_handles();
 	adjust_symlink_flags();
 	fsync_object_files = 1;

From acc3200ff589e549cb083f482d57f272fb40c8a4 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 30 Apr 2019 14:12:51 -0400
Subject: [PATCH 072/207] trace2:gvfs:experiment: clear_ce_flags_1

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 unpack-trees.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/unpack-trees.c b/unpack-trees.c
index 9fa2260bc74ef8..0486b2285075c9 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -1791,6 +1791,7 @@ static int clear_ce_flags(struct index_state *istate,
 	xsnprintf(label, sizeof(label), "clear_ce_flags(0x%08lx,0x%08lx)",
 		  (unsigned long)select_mask, (unsigned long)clear_mask);
 	trace2_region_enter("unpack_trees", label, the_repository);
+
 	rval = clear_ce_flags_1(istate,
 				istate->cache,
 				istate->cache_nr,

From a38eaecb7a96cf8f45bb8d61188ba4950943ba50 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 30 Apr 2019 16:02:39 -0400
Subject: [PATCH 073/207] trace2:gvfs:experiment: report_tracking

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 builtin/checkout.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/builtin/checkout.c b/builtin/checkout.c
index 0ff27deffeee97..7fbf5a0dfe2e5f 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -1049,8 +1049,11 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
 	strbuf_release(&msg);
 	if (!opts->quiet &&
 	    !opts->force_detach &&
-	    (new_branch_info->path || !strcmp(new_branch_info->name, "HEAD")))
+	    (new_branch_info->path || !strcmp(new_branch_info->name, "HEAD"))) {
+		trace2_region_enter("exp", "report_tracking", the_repository);
 		report_tracking(new_branch_info);
+		trace2_region_leave("exp", "report_tracking", the_repository);
+	}
 }
 
 static int add_pending_uninteresting_ref(const char *refname, const char *referent UNUSED,

From 19a8a316106c26c1144a9b10da248333a870a291 Mon Sep 17 00:00:00 2001
From: Kevin Willford <Kevin.Willford@microsoft.com>
Date: Thu, 21 Nov 2019 12:01:04 -0700
Subject: [PATCH 074/207] fsmonitor: check CE_FSMONITOR_VALID in ce_uptodate

When using fsmonitor, the CE_FSMONITOR_VALID flag should be checked when
determining whether an entry has been updated. If the flag is set, the
entry should be considered up to date, just as if CE_UPTODATE were set.

In order to trust the CE_FSMONITOR_VALID flag, the fsmonitor data needs to
be refreshed when the fsmonitor bitmap is applied to the index in
tweak_fsmonitor. Since the fsmonitor data is kept up to date for every
command, some tests needed to be updated to take that into account.

istate->untracked->use_fsmonitor used to be set in tweak_fsmonitor when
the fsmonitor bitmap data was loaded; it is now set in refresh_fsmonitor,
since that is called from tweak_fsmonitor. refresh_fsmonitor will only be
called once, and any other callers should set the flag when refreshing
the fsmonitor data so that the code checking untracked files can rely on
the fsmonitor data.

When writing the index, fsmonitor_last_update is used to determine if
the fsmonitor bitmap should be created and the extension data written to
the index. When running through unpack-trees this is not copied to the
result index. As a result, the next git command has to do all the work of
lstating every file to determine what is clean, because every entry in
the index is treated as dirty when no fsmonitor data was saved in the
index extension.

Copying the fsmonitor_last_update to the result index will cause the
extension data for fsmonitor to be in the index for the next git command
to use.

Signed-off-by: Kevin Willford <Kevin.Willford@microsoft.com>
---
 read-cache-ll.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/read-cache-ll.h b/read-cache-ll.h
index 71b49d9af48a9d..84092540a7830b 100644
--- a/read-cache-ll.h
+++ b/read-cache-ll.h
@@ -118,7 +118,7 @@ static inline unsigned create_ce_flags(unsigned stage)
 #define ce_namelen(ce) ((ce)->ce_namelen)
 #define ce_size(ce) cache_entry_size(ce_namelen(ce))
 #define ce_stage(ce) ((CE_STAGEMASK & (ce)->ce_flags) >> CE_STAGESHIFT)
-#define ce_uptodate(ce) ((ce)->ce_flags & CE_UPTODATE)
+#define ce_uptodate(ce) (((ce)->ce_flags & CE_UPTODATE) || ((ce)->ce_flags & CE_FSMONITOR_VALID))
 #define ce_skip_worktree(ce) ((ce)->ce_flags & CE_SKIP_WORKTREE)
 #define ce_mark_uptodate(ce) ((ce)->ce_flags |= CE_UPTODATE)
 #define ce_intent_to_add(ce) ((ce)->ce_flags & CE_INTENT_TO_ADD)

From 95e2952f5a7fca4bda56e74feac4f73a8b4cedfa Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Fri, 14 Jun 2019 12:38:31 -0400
Subject: [PATCH 075/207] trace2:gvfs:experiment: read_cache: annotate thread
 usage in read-cache

Add trace2_thread_start() and trace2_thread_exit() events to the worker
threads used to read the index.  This gives per-thread perf data.

These workers were introduced in:
abb4bb83845 read-cache: load cache extensions on a worker thread
77ff1127a4c read-cache: load cache entries on worker threads

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 read-cache.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/read-cache.c b/read-cache.c
index 2d4ce41a5f1ac3..60f9f32b1fd7b0 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -2054,6 +2054,17 @@ static void *load_index_extensions(void *_data)
 	return NULL;
 }
 
+static void *load_index_extensions_threadproc(void *_data)
+{
+	void *result;
+
+	trace2_thread_start("load_index_extensions");
+	result = load_index_extensions(_data);
+	trace2_thread_exit();
+
+	return result;
+}
+
 /*
  * A helper function that will load the specified range of cache entries
  * from the memory mapped file and add them to the given index.
@@ -2130,12 +2141,17 @@ static void *load_cache_entries_thread(void *_data)
 	struct load_cache_entries_thread_data *p = _data;
 	int i;
 
+	trace2_thread_start("load_cache_entries");
+
 	/* iterate across all ieot blocks assigned to this thread */
 	for (i = p->ieot_start; i < p->ieot_start + p->ieot_blocks; i++) {
 		p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool,
 			p->offset, p->ieot->entries[i].nr, p->mmap, p->ieot->entries[i].offset, NULL);
 		p->offset += p->ieot->entries[i].nr;
 	}
+
+	trace2_thread_exit();
+
 	return NULL;
 }
 
@@ -2305,7 +2321,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
 			int err;
 
 			p.src_offset = extension_offset;
-			err = pthread_create(&p.pthread, NULL, load_index_extensions, &p);
+			err = pthread_create(&p.pthread, NULL, load_index_extensions_threadproc, &p);
 			if (err)
 				die(_("unable to create load_index_extensions thread: %s"), strerror(err));
 

From 4781219cc3aa41ca61d5a6a313912df0ba6a9bef Mon Sep 17 00:00:00 2001
From: Kevin Willford <Kevin.Willford@microsoft.com>
Date: Thu, 21 Nov 2019 09:24:36 -0700
Subject: [PATCH 076/207] fsmonitor: add script for debugging and update script
 for tests

The fsmonitor script that can be used for running all the git tests
using watchman was causing some of the tests to fail because it wrote
to stderr and created some files for debugging purposes.

Add a new script for debugging and modify the existing script to remove
the code that caused tests to fail.
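
One way to try the debug script locally (a sketch; the hook name follows
the comment in the script itself, and the debug variant additionally
writes .git/watchman-query.json, .git/watchman-response.json and
.git/watchman-output.out for inspection):

    cp t/t7519/fsmonitor-watchman-debug .git/hooks/query-watchman
    git config core.fsmonitor .git/hooks/query-watchman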

Signed-off-by: Kevin Willford <Kevin.Willford@microsoft.com>
---
 t/t7519/fsmonitor-watchman       |  22 +-----
 t/t7519/fsmonitor-watchman-debug | 128 +++++++++++++++++++++++++++++++
 2 files changed, 129 insertions(+), 21 deletions(-)
 create mode 100755 t/t7519/fsmonitor-watchman-debug

diff --git a/t/t7519/fsmonitor-watchman b/t/t7519/fsmonitor-watchman
index 264b9daf834ec8..6461f625f64181 100755
--- a/t/t7519/fsmonitor-watchman
+++ b/t/t7519/fsmonitor-watchman
@@ -17,7 +17,6 @@ use IPC::Open2;
 # 'git config core.fsmonitor .git/hooks/query-watchman'
 #
 my ($version, $time) = @ARGV;
-#print STDERR "$0 $version $time\n";
 
 # Check the hook interface version
 
@@ -44,7 +43,7 @@ launch_watchman();
 
 sub launch_watchman {
 
-	my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j')
+	my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j --no-pretty')
 	    or die "open2() failed: $!\n" .
 	    "Falling back to scanning...\n";
 
@@ -62,19 +61,11 @@ sub launch_watchman {
 			"fields": ["name"]
 		}]
 	END
-	
-	open (my $fh, ">", ".git/watchman-query.json");
-	print $fh $query;
-	close $fh;
 
 	print CHLD_IN $query;
 	close CHLD_IN;
 	my $response = do {local $/; <CHLD_OUT>};
 
-	open ($fh, ">", ".git/watchman-response.json");
-	print $fh $response;
-	close $fh;
-
 	die "Watchman: command returned no output.\n" .
 	    "Falling back to scanning...\n" if $response eq "";
 	die "Watchman: command returned invalid output: $response\n" .
@@ -93,7 +84,6 @@ sub launch_watchman {
 	my $o = $json_pkg->new->utf8->decode($response);
 
 	if ($retry > 0 and $o->{error} and $o->{error} =~ m/unable to resolve root .* directory (.*) is not watched/) {
-		print STDERR "Adding '$git_work_tree' to watchman's watch list.\n";
 		$retry--;
 		qx/watchman watch "$git_work_tree"/;
 		die "Failed to make watchman watch '$git_work_tree'.\n" .
@@ -103,11 +93,6 @@ sub launch_watchman {
 		# return the fast "everything is dirty" flag to git and do the
 		# Watchman query just to get it over with now so we won't pay
 		# the cost in git to look up each individual file.
-
-		open ($fh, ">", ".git/watchman-output.out");
-		print "/\0";
-		close $fh;
-
 		print "/\0";
 		eval { launch_watchman() };
 		exit 0;
@@ -116,11 +101,6 @@ sub launch_watchman {
 	die "Watchman: $o->{error}.\n" .
 	    "Falling back to scanning...\n" if $o->{error};
 
-	open ($fh, ">", ".git/watchman-output.out");
-	binmode $fh, ":utf8";
-	print $fh @{$o->{files}};
-	close $fh;
-
 	binmode STDOUT, ":utf8";
 	local $, = "\0";
 	print @{$o->{files}};
diff --git a/t/t7519/fsmonitor-watchman-debug b/t/t7519/fsmonitor-watchman-debug
new file mode 100755
index 00000000000000..d8e7a1e5ba85c0
--- /dev/null
+++ b/t/t7519/fsmonitor-watchman-debug
@@ -0,0 +1,128 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use IPC::Open2;
+
+# An example hook script to integrate Watchman
+# (https://facebook.github.io/watchman/) with git to speed up detecting
+# new and modified files.
+#
+# The hook is passed a version (currently 1) and a time in nanoseconds
+# formatted as a string and outputs to stdout all files that have been
+# modified since the given time. Paths must be relative to the root of
+# the working tree and separated by a single NUL.
+#
+# To enable this hook, rename this file to "query-watchman" and set
+# 'git config core.fsmonitor .git/hooks/query-watchman'
+#
+my ($version, $time) = @ARGV;
+#print STDERR "$0 $version $time\n";
+
+# Check the hook interface version
+
+if ($version == 1) {
+	# convert nanoseconds to seconds
+	# subtract one second to make sure watchman will return all changes
+	$time = int ($time / 1000000000) - 1;
+} else {
+	die "Unsupported query-fsmonitor hook version '$version'.\n" .
+	    "Falling back to scanning...\n";
+}
+
+my $git_work_tree;
+if ($^O =~ 'msys' || $^O =~ 'cygwin') {
+	$git_work_tree = Win32::GetCwd();
+	$git_work_tree =~ tr/\\/\//;
+} else {
+	require Cwd;
+	$git_work_tree = Cwd::cwd();
+}
+
+my $retry = 1;
+
+launch_watchman();
+
+sub launch_watchman {
+
+	my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j')
+	    or die "open2() failed: $!\n" .
+	    "Falling back to scanning...\n";
+
+	# In the query expression below we're asking for names of files that
+	# changed since $time but were not transient (ie created after
+	# $time but no longer exist).
+	#
+	# To accomplish this, we're using the "since" generator to use the
+	# recency index to select candidate nodes and "fields" to limit the
+	# output to file names only.
+
+	my $query = <<"	END";
+		["query", "$git_work_tree", {
+			"since": $time,
+			"fields": ["name"]
+		}]
+	END
+	
+	open (my $fh, ">", ".git/watchman-query.json");
+	print $fh $query;
+	close $fh;
+
+	print CHLD_IN $query;
+	close CHLD_IN;
+	my $response = do {local $/; <CHLD_OUT>};
+
+	open ($fh, ">", ".git/watchman-response.json");
+	print $fh $response;
+	close $fh;
+
+	die "Watchman: command returned no output.\n" .
+	    "Falling back to scanning...\n" if $response eq "";
+	die "Watchman: command returned invalid output: $response\n" .
+	    "Falling back to scanning...\n" unless $response =~ /^\{/;
+
+	my $json_pkg;
+	eval {
+		require JSON::XS;
+		$json_pkg = "JSON::XS";
+		1;
+	} or do {
+		require JSON::PP;
+		$json_pkg = "JSON::PP";
+	};
+
+	my $o = $json_pkg->new->utf8->decode($response);
+
+	if ($retry > 0 and $o->{error} and $o->{error} =~ m/unable to resolve root .* directory (.*) is not watched/) {
+		print STDERR "Adding '$git_work_tree' to watchman's watch list.\n";
+		$retry--;
+		qx/watchman watch "$git_work_tree"/;
+		die "Failed to make watchman watch '$git_work_tree'.\n" .
+		    "Falling back to scanning...\n" if $? != 0;
+
+		# Watchman will always return all files on the first query so
+		# return the fast "everything is dirty" flag to git and do the
+		# Watchman query just to get it over with now so we won't pay
+		# the cost in git to look up each individual file.
+
+		open ($fh, ">", ".git/watchman-output.out");
+		print "/\0";
+		close $fh;
+
+		print "/\0";
+		eval { launch_watchman() };
+		exit 0;
+	}
+
+	die "Watchman: $o->{error}.\n" .
+	    "Falling back to scanning...\n" if $o->{error};
+
+	open ($fh, ">", ".git/watchman-output.out");
+	binmode $fh, ":utf8";
+	print $fh @{$o->{files}};
+	close $fh;
+
+	binmode STDOUT, ":utf8";
+	local $, = "\0";
+	print @{$o->{files}};
+}

From 8c0fd3c7ffa08910bc4534fda44b03bcac67b8a7 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 9 Jul 2019 14:43:47 -0400
Subject: [PATCH 077/207] trace2:gvfs:experiment: read-cache: time read/write
 of cache-tree extension

Add regions around code to read and write the cache-tree extension
when the index is read or written.

This is an experiment and may be dropped in future releases if
we don't need it anymore.

This experiment demonstrates that it takes more time to parse and
deserialize the cache-tree extension than it does to read the
cache-entries.

Commits [1] and [2] spread cache-entry reading across N-1 cores
and dedicate a single core to reading the index extensions concurrently.

Local testing (on my machine) shows that reading the cache-tree extension
takes ~0.28 seconds.  The 11 cache-entry threads take ~0.08 seconds.
The main thread is blocked for 0.15 to 0.20 seconds waiting for the
extension thread to finish.

Let's use this commit to gather some telemetry and confirm this.

The point is that improvements such as index V5, which makes the
cache entries smaller, may improve performance, but the gains may
be limited because of this extension.  We may need to look inside
the cache-tree extension to truly improve do_read_index()
performance.

[1] abb4bb83845 read-cache: load cache extensions on a worker thread
[2] 77ff1127a4c read-cache: load cache entries on worker threads

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 read-cache.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/read-cache.c b/read-cache.c
index 60f9f32b1fd7b0..ce3abdb063f208 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -1764,7 +1764,10 @@ static int read_index_extension(struct index_state *istate,
 {
 	switch (CACHE_EXT(ext)) {
 	case CACHE_EXT_TREE:
+		trace2_region_enter("index", "read/extension/cache_tree", NULL);
 		istate->cache_tree = cache_tree_read(data, sz);
+		trace2_data_intmax("index", NULL, "read/extension/cache_tree/bytes", (intmax_t)sz);
+		trace2_region_leave("index", "read/extension/cache_tree", NULL);
 		break;
 	case CACHE_EXT_RESOLVE_UNDO:
 		istate->resolve_undo = resolve_undo_read(data, sz);
@@ -3051,9 +3054,13 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
 	    !drop_cache_tree && istate->cache_tree) {
 		strbuf_reset(&sb);
 
+		trace2_region_enter("index", "write/extension/cache_tree", NULL);
 		cache_tree_write(&sb, istate->cache_tree);
 		err = write_index_ext_header(f, eoie_c, CACHE_EXT_TREE, sb.len) < 0;
 		hashwrite(f, sb.buf, sb.len);
+		trace2_data_intmax("index", NULL, "write/extension/cache_tree/bytes", (intmax_t)sb.len);
+		trace2_region_leave("index", "write/extension/cache_tree", NULL);
+
 		if (err) {
 			ret = -1;
 			goto out;

From 48a225fad6834cd987cdb6a45a1957a2c310e4a4 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Fri, 10 Apr 2020 21:14:44 -0400
Subject: [PATCH 078/207] status: disable deserialize when verbose output
 requested.

Disable deserialization when verbose output is requested.

Verbose mode causes Git to print diffs for modified files.
This requires the index to be loaded to have the currently
staged OID values.  Without loading the index, verbose output
makes it look like everything was deleted.

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 builtin/commit.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/builtin/commit.c b/builtin/commit.c
index f1afb753ab0878..10e43fea0713b0 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -42,6 +42,7 @@
 #include "commit-graph.h"
 #include "pretty.h"
 #include "trailer.h"
+#include "trace2.h"
 
 static const char * const builtin_commit_usage[] = {
 	N_("git commit [-a | --interactive | --patch] [-s] [-v] [-u<mode>] [--amend]\n"
@@ -1737,6 +1738,22 @@ struct repository *repo UNUSED)
 	 */
 	try_deserialize = (!do_serialize &&
 			   (do_implicit_deserialize || do_explicit_deserialize));
+
+	/*
+	 * Disable deserialize when verbose is set because it causes us to
+	 * print diffs for each modified file, but that requires us to have
+	 * the index loaded and we don't want to do that (at least not now for
+	 * this seldom-used feature).  My fear is that it would further tangle
+	 * the merge conflict with upstream.
+	 *
+	 * TODO Reconsider this in the future.
+	 */
+	if (try_deserialize && verbose) {
+		trace2_data_string("status", the_repository, "deserialize/reject",
+				   "args/verbose");
+		try_deserialize = 0;
+	}
+
 	if (try_deserialize)
 		goto skip_init;
 

From 6eaefc6dddb57303a95e449ad1de351c93a87a10 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 16 Jul 2019 09:09:53 -0400
Subject: [PATCH 079/207] trace2:gvfs:experiment: add region to
 apply_virtualfilesystem()

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 virtualfilesystem.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/virtualfilesystem.c b/virtualfilesystem.c
index aff9cfd3eb8086..66117dc8447757 100644
--- a/virtualfilesystem.c
+++ b/virtualfilesystem.c
@@ -268,6 +268,8 @@ void apply_virtualfilesystem(struct index_state *istate)
 	if (!repo_config_get_virtualfilesystem(istate->repo))
 		return;
 
+	trace2_region_enter("vfs", "apply", the_repository);
+
 	if (!virtual_filesystem_data.len)
 		get_virtual_filesystem_data(istate->repo, &virtual_filesystem_data);
 
@@ -338,6 +340,8 @@ void apply_virtualfilesystem(struct index_state *istate)
 		trace2_data_intmax("vfs", the_repository, "apply/nr_bulk_skip", nr_bulk_skip);
 		trace2_data_intmax("vfs", the_repository, "apply/nr_explicit_skip", nr_explicit_skip);
 	}
+
+	trace2_region_leave("vfs", "apply", the_repository);
 }
 
 /*

From 4b0c554b12eaea193ff3de8d182d9b886f65779f Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Fri, 10 Apr 2020 21:18:41 -0400
Subject: [PATCH 080/207] t7522: add test for verbose status deserialization

Verify that `git status --deserialize=x -v` does not crash and
generates the same output as a normal (scanning) status command.

These issues are described in the previous 2 commits.

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 t/t7522-serialized-status.sh | 39 ++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

diff --git a/t/t7522-serialized-status.sh b/t/t7522-serialized-status.sh
index b52a9b7fa2f520..6010fcd31635cd 100755
--- a/t/t7522-serialized-status.sh
+++ b/t/t7522-serialized-status.sh
@@ -400,4 +400,43 @@ EOF
 
 '
 
+test_expect_success 'ensure deserialize -v does not crash' '
+
+	git init -b main verbose_test &&
+	touch verbose_test/a &&
+	touch verbose_test/b &&
+	touch verbose_test/c &&
+	git -C verbose_test add a b c &&
+	git -C verbose_test commit -m abc &&
+
+	echo green >>verbose_test/a &&
+	git -C verbose_test add a &&
+	echo red_1 >>verbose_test/b &&
+	echo red_2 >verbose_test/dirt &&
+
+	git -C verbose_test status    >output.ref &&
+	git -C verbose_test status -v >output.ref_v &&
+
+	git -C verbose_test --no-optional-locks status --serialize=../verbose_test.dat      >output.ser.long &&
+	git -C verbose_test --no-optional-locks status --serialize=../verbose_test.dat_v -v >output.ser.long_v &&
+
+	# Verify that serialization does not affect the status output itself.
+	test_cmp output.ref   output.ser.long &&
+	test_cmp output.ref_v output.ser.long_v &&
+
+	GIT_TRACE2_PERF="$(pwd)"/verbose_test.log \
+	git -C verbose_test status --deserialize=../verbose_test.dat >output.des.long &&
+
+	# Verify that normal deserialize was actually used and produces the same result.
+	test_cmp output.ser.long output.des.long &&
+	grep -q "deserialize/result:ok" verbose_test.log &&
+
+	GIT_TRACE2_PERF="$(pwd)"/verbose_test.log_v \
+	git -C verbose_test status --deserialize=../verbose_test.dat_v -v >output.des.long_v &&
+
+	# Verify that verbose mode produces the same result because verbose was rejected.
+	test_cmp output.ser.long_v output.des.long_v &&
+	grep -q "deserialize/reject:args/verbose" verbose_test.log_v
+'
+
 test_done

From cd1ce85cbfd3f1db240c657b1533c637a8b388bd Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 16 Jul 2019 10:08:08 -0400
Subject: [PATCH 081/207] trace2:gvfs:experiment: add region around
 unpack_trees()

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 unpack-trees.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/unpack-trees.c b/unpack-trees.c
index 0486b2285075c9..71e208f11b5ba4 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -1914,6 +1914,8 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
 	if (o->df_conflict_entry)
 		BUG("o->df_conflict_entry is an output only field");
 
+	trace2_region_enter("exp", "unpack_trees", NULL);
+
 	trace_performance_enter();
 	trace2_region_enter("unpack_trees", "unpack_trees", the_repository);
 
@@ -2123,6 +2125,7 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
 	}
 	trace2_region_leave("unpack_trees", "unpack_trees", the_repository);
 	trace_performance_leave("unpack_trees");
+	trace2_region_leave("exp", "unpack_trees", NULL);
 	return ret;
 
 return_failed:

From 82cbf980d39675ba2c4b2fa63540e5137c0d2ef2 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Wed, 13 May 2020 17:38:50 -0400
Subject: [PATCH 082/207] deserialize-status: silently fallback if we cannot
 read cache file

Teach Git to not throw a fatal error when an explicitly-specified
status-cache file (`git status --deserialize=<foo>`) cannot be
found or opened for reading, and to silently fall back to a
traditional scan instead.

This matches the behavior when the status-cache file is implicitly
given via a config setting.

Note: the current version causes a test to start failing. Mark this as
an expected result for now.

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 builtin/commit.c             | 18 ++++++++++++------
 t/t7522-serialized-status.sh | 16 ++++++++++++++++
 2 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/builtin/commit.c b/builtin/commit.c
index 10e43fea0713b0..9826e5c26dc122 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -229,12 +229,18 @@ static int opt_parse_deserialize(const struct option *opt UNUSED, const char *ar
 			free(deserialize_path);
 			deserialize_path = xstrdup(arg);
 		}
-		if (deserialize_path && *deserialize_path
-		    && (access(deserialize_path, R_OK) != 0))
-			die("cannot find serialization file '%s'",
-			    deserialize_path);
-
-		do_explicit_deserialize = 1;
+		if (!deserialize_path || !*deserialize_path)
+			do_explicit_deserialize = 1; /* read stdin */
+		else if (access(deserialize_path, R_OK) == 0)
+			do_explicit_deserialize = 1; /* can read from this file */
+		else {
+			/*
+			 * otherwise, silently fall back to the normal
+			 * collection scan
+			 */
+			do_implicit_deserialize = 0;
+			do_explicit_deserialize = 0;
+		}
 	}
 
 	return 0;
diff --git a/t/t7522-serialized-status.sh b/t/t7522-serialized-status.sh
index 6010fcd31635cd..230e1e24cfc1c4 100755
--- a/t/t7522-serialized-status.sh
+++ b/t/t7522-serialized-status.sh
@@ -439,4 +439,20 @@ test_expect_success 'ensure deserialize -v does not crash' '
 	grep -q "deserialize/reject:args/verbose" verbose_test.log_v
 '
 
+test_expect_success 'fallback when implicit' '
+	git init -b main implicit_fallback_test &&
+	git -C implicit_fallback_test -c status.deserializepath=foobar status
+'
+
+test_expect_success 'fallback when explicit' '
+	git init -b main explicit_fallback_test &&
+	git -C explicit_fallback_test status --deserialize=foobar
+'
+
+test_expect_success 'deserialize from stdin' '
+	git init -b main stdin_test &&
+	git -C stdin_test status --serialize >serialized_status.dat &&
+	cat serialized_status.dat | git -C stdin_test status --deserialize
+'
+
 test_done

From aefc9ae076185d1b69757afc7eec678b7a1dba64 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Thu, 3 Oct 2019 13:21:26 -0400
Subject: [PATCH 083/207] credential: set trace2_child_class for credential
 manager children

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 credential.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/credential.c b/credential.c
index a995031c5f5d84..a00eedba31a97e 100644
--- a/credential.c
+++ b/credential.c
@@ -444,6 +444,8 @@ static int run_credential_helper(struct credential *c,
 	else
 		helper.no_stdout = 1;
 
+	helper.trace2_child_class = "cred";
+
 	if (start_command(&helper) < 0)
 		return -1;
 

From 9a9734bb96488577caa2a51067c86e2d865ed6b5 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Wed, 18 Sep 2019 10:35:45 -0400
Subject: [PATCH 084/207] sub-process: do not borrow cmd pointer from caller

Teach subprocess_start() to use a copy of the passed `cmd` string
rather than borrowing the buffer from the caller.

Some callers of subprocess_start() pass the value returned from
find_hook(), which points to a static buffer and is therefore only
valid until the next call to find_hook().  This causes problems for
the long-running background processes managed by sub-process.c: a
later call to subprocess_find_entry() to look up an existing process
can fail to find it, and more than one long-running process may be
created, as sketched below.
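
To illustrate the hazard, here is a hypothetical sketch (the hook
names, the 'map' and 'entry' variables, and 'start_fn' are made up
for this example and do not come from an actual caller):

    const char *cmd = find_hook("my-hook");       /* static buffer */
    subprocess_start(&map, entry, cmd, start_fn); /* entry->cmd aliases cmd */

    find_hook("another-hook");              /* overwrites the buffer */

    /*
     * This lookup can now miss the entry created above, so a second
     * copy of the long-running background process gets started.
     */
    subprocess_find_entry(&map, find_hook("my-hook"));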

TODO Need to confirm, but if only read_object_hook() uses
TODO subprocess_start() in this manner, we could drop this
TODO commit when we drop support for read_object_hook().

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 sub-process.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/sub-process.c b/sub-process.c
index 1daf5a975254b9..3327b1e4396c0a 100644
--- a/sub-process.c
+++ b/sub-process.c
@@ -81,7 +81,12 @@ int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, co
 	int err;
 	struct child_process *process;
 
-	entry->cmd = cmd;
+	// BUGBUG most callers to subprocess_start() pass in "cmd" the value
+	// BUGBUG of find_hook() which returns a static buffer (that's only
+	// BUGBUG good until the next call to find_hook()).
+	// BUGFIX Defer assignment until we copy the string in our argv.
+	// entry->cmd = cmd;
+
 	process = &entry->process;
 
 	child_process_init(process);
@@ -93,6 +98,8 @@ int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, co
 	process->clean_on_exit_handler = subprocess_exit_handler;
 	process->trace2_child_class = "subprocess";
 
+	entry->cmd = process->args.v[0];
+
 	err = start_command(process);
 	if (err) {
 		error("cannot fork to run subprocess '%s'", cmd);

From 87d4eb2d59c0e3feac8fda6200af42388691f9da Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Wed, 18 Sep 2019 10:45:58 -0400
Subject: [PATCH 085/207] sub-process: add subprocess_start_strvec()

Add a function, subprocess_start_strvec(), to start a subprocess from
a strvec of arguments rather than a single command string.
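
A minimal usage sketch (the 'my_map', 'my_entry', and 'my_start_fn'
names and the "my-helper" command are placeholders, not an actual
caller):

    struct strvec args = STRVEC_INIT;

    strvec_pushl(&args, "my-helper", "server", NULL);
    if (subprocess_start_strvec(&my_map, &my_entry, 0 /* not a git cmd */,
                                &args, my_start_fn))
        die("failed to start 'my-helper'");
    strvec_clear(&args);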

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 sub-process.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 sub-process.h |  6 ++++++
 2 files changed, 53 insertions(+)

diff --git a/sub-process.c b/sub-process.c
index 3327b1e4396c0a..29a65f8aebbd9d 100644
--- a/sub-process.c
+++ b/sub-process.c
@@ -5,6 +5,7 @@
 #include "sub-process.h"
 #include "sigchain.h"
 #include "pkt-line.h"
+#include "quote.h"
 
 int cmd2process_cmp(const void *cmp_data UNUSED,
 		    const struct hashmap_entry *eptr,
@@ -119,6 +120,52 @@ int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, co
 	return 0;
 }
 
+int subprocess_start_strvec(struct hashmap *hashmap,
+			  struct subprocess_entry *entry,
+			  int is_git_cmd,
+			  const struct strvec *argv,
+			  subprocess_start_fn startfn)
+{
+	int err;
+	size_t k;
+	struct child_process *process;
+	struct strbuf quoted = STRBUF_INIT;
+
+	process = &entry->process;
+
+	child_process_init(process);
+	for (k = 0; k < argv->nr; k++)
+		strvec_push(&process->args, argv->v[k]);
+	process->use_shell = 1;
+	process->in = -1;
+	process->out = -1;
+	process->git_cmd = is_git_cmd;
+	process->clean_on_exit = 1;
+	process->clean_on_exit_handler = subprocess_exit_handler;
+	process->trace2_child_class = "subprocess";
+
+	sq_quote_argv_pretty(&quoted, argv->v);
+	entry->cmd = strbuf_detach(&quoted, NULL);
+
+	err = start_command(process);
+	if (err) {
+		error("cannot fork to run subprocess '%s'", entry->cmd);
+		return err;
+	}
+
+	hashmap_entry_init(&entry->ent, strhash(entry->cmd));
+
+	err = startfn(entry);
+	if (err) {
+		error("initialization for subprocess '%s' failed", entry->cmd);
+		subprocess_stop(hashmap, entry);
+		return err;
+	}
+
+	hashmap_add(hashmap, &entry->ent);
+	return 0;
+}
+
 static int handshake_version(struct child_process *process,
 			     const char *welcome_prefix, int *versions,
 			     int *chosen_version)
diff --git a/sub-process.h b/sub-process.h
index 6a61638a8ace0b..73cc536646df79 100644
--- a/sub-process.h
+++ b/sub-process.h
@@ -56,6 +56,12 @@ typedef int(*subprocess_start_fn)(struct subprocess_entry *entry);
 int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, const char *cmd,
 		subprocess_start_fn startfn);
 
+int subprocess_start_strvec(struct hashmap *hashmap,
+			  struct subprocess_entry *entry,
+			  int is_git_cmd,
+			  const struct strvec *argv,
+			  subprocess_start_fn startfn);
+
 /* Kill a subprocess and remove it from the subprocess hashmap. */
 void subprocess_stop(struct hashmap *hashmap, struct subprocess_entry *entry);
 

From 0a0520d588b3da9776a32bd15191b1c44ed8beb6 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 24 Sep 2019 14:31:10 -0400
Subject: [PATCH 086/207] sha1-file: add function to update existing loose
 object cache

Create a function to add a new object to the loose object cache
after the existing odb/xx/ directory was scanned.

This will be used in a later commit to keep the loose object
cache fresh after dynamically fetching an individual object,
without requiring the odb/xx/ directory to be rescanned.
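
A sketch of the intended call pattern (the 'odb' and 'oid' variables
are illustrative only):

    /* after writing <oid> as a loose object into <odb> ... */
    odb_loose_cache_add_new_oid(odb, &oid);

    /*
     * ... later lookups via odb_loose_cache(odb, &oid) see the new
     * object without rescanning the odb/xx/ directory.
     */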

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 object-file.c     | 7 +++++++
 object-store-ll.h | 8 ++++++++
 2 files changed, 15 insertions(+)

diff --git a/object-file.c b/object-file.c
index adb3cafa425992..36e32dde4a3ad1 100644
--- a/object-file.c
+++ b/object-file.c
@@ -3149,6 +3149,13 @@ struct oidtree *odb_loose_cache(struct object_directory *odb,
 	return odb->loose_objects_cache;
 }
 
+void odb_loose_cache_add_new_oid(struct object_directory *odb,
+				 const struct object_id *oid)
+{
+	struct oidtree *cache = odb_loose_cache(odb, oid);
+	append_loose_object(oid, NULL, cache);
+}
+
 void odb_clear_loose_cache(struct object_directory *odb)
 {
 	oidtree_clear(odb->loose_objects_cache);
diff --git a/object-store-ll.h b/object-store-ll.h
index f9eee3433a112c..90f28e9c6f1ecf 100644
--- a/object-store-ll.h
+++ b/object-store-ll.h
@@ -95,6 +95,14 @@ void restore_primary_odb(struct object_directory *restore_odb, const char *old_p
 struct oidtree *odb_loose_cache(struct object_directory *odb,
 				  const struct object_id *oid);
 
+/*
+ * Add a new object to the loose object cache (possibly after the
+ * cache was populated).  This might be used after dynamically
+ * fetching a missing object.
+ */
+void odb_loose_cache_add_new_oid(struct object_directory *odb,
+				 const struct object_id *oid);
+
 /* Empty the loose object cache for the specified object directory. */
 void odb_clear_loose_cache(struct object_directory *odb);
 

From b7c6146948bf72bbe39ab0f32e041998e6bd6b9a Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Wed, 25 Sep 2019 13:36:54 -0400
Subject: [PATCH 087/207] packfile: add install_packed_git_and_mru()

Create a function to install a new packfile into the packed-git
list and add it to the head of the MRU list.

This function will be used in a later commit to install packfiles
created by dynamic object fetching.
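
A sketch of that usage (the 'idx_path' strbuf and 'is_local' flag are
illustrative; compare the call site added to gvfs-helper-client.c
later in this series):

    struct packed_git *p;

    p = add_packed_git(the_repository, idx_path.buf, idx_path.len,
                       is_local);
    if (p)
        install_packed_git_and_mru(the_repository, p);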

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 packfile.c | 6 ++++++
 packfile.h | 1 +
 2 files changed, 7 insertions(+)

diff --git a/packfile.c b/packfile.c
index 9f4a71b3f0f2d4..5e596bd8a944ff 100644
--- a/packfile.c
+++ b/packfile.c
@@ -782,6 +782,12 @@ void install_packed_git(struct repository *r, struct packed_git *pack)
 	hashmap_add(&r->objects->pack_map, &pack->packmap_ent);
 }
 
+void install_packed_git_and_mru(struct repository *r, struct packed_git *pack)
+{
+	install_packed_git(r, pack);
+	list_add(&pack->mru, &r->objects->packed_git_mru);
+}
+
 void (*report_garbage)(unsigned seen_bits, const char *path);
 
 static void report_helper(const struct string_list *list,
diff --git a/packfile.h b/packfile.h
index 7c9edf7526c54a..c0024cfa64e153 100644
--- a/packfile.h
+++ b/packfile.h
@@ -68,6 +68,7 @@ extern void (*report_garbage)(unsigned seen_bits, const char *path);
 
 void reprepare_packed_git(struct repository *r);
 void install_packed_git(struct repository *r, struct packed_git *pack);
+void install_packed_git_and_mru(struct repository *r, struct packed_git *pack);
 
 struct packed_git *get_packed_git(struct repository *r);
 struct list_head *get_packed_git_mru(struct repository *r);

From 79bf2b68f8b5c1cbed0567aadf462f877c1e5bd1 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 24 Sep 2019 15:51:16 -0400
Subject: [PATCH 088/207] index-pack: avoid immediate object fetch while
 parsing packfile

Prevent packfile parsing from accidentally dynamically fetching
each individual object found in the packfile.

When index-pack parses the input packfile, it does a lookup in the
ODB to test for conflicts/collisions.  This can accidentally cause
the object to be individually fetched when gvfs-helper (or
read-object-hook or partial-clone) is enabled.
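
For reference, OBJECT_INFO_FOR_PREFETCH builds on OBJECT_INFO_QUICK
and additionally suppresses on-demand fetching of missing objects; at
the time of writing it is defined in the object-store headers roughly
as:

    #define OBJECT_INFO_FOR_PREFETCH (OBJECT_INFO_SKIP_FETCH_OBJECT | \
                                      OBJECT_INFO_QUICK)

so the collision test keeps its cheap-lookup behavior but no longer
triggers a download of each object in the incoming pack.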

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 builtin/index-pack.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 99c7fdc744d305..0c11a1bcb49352 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -889,7 +889,7 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
 		read_lock();
 		collision_test_needed =
 			repo_has_object_file_with_flags(the_repository, oid,
-							OBJECT_INFO_QUICK);
+							OBJECT_INFO_FOR_PREFETCH);
 		read_unlock();
 	}
 

From 369298213fcdbabbcb6d294558d84ffc2fe94a13 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 16 Jul 2019 10:16:37 -0400
Subject: [PATCH 089/207] trace2:gvfs:experiment: add region to
 cache_tree_fully_valid()

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 cache-tree.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/cache-tree.c b/cache-tree.c
index 0ad97555013f5b..03d23737523c52 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -234,7 +234,7 @@ static void discard_unused_subtrees(struct cache_tree *it)
 	}
 }
 
-int cache_tree_fully_valid(struct cache_tree *it)
+static int cache_tree_fully_valid_1(struct cache_tree *it)
 {
 	int i;
 	if (!it)
@@ -242,7 +242,7 @@ int cache_tree_fully_valid(struct cache_tree *it)
 	if (it->entry_count < 0 || !repo_has_object_file(the_repository, &it->oid))
 		return 0;
 	for (i = 0; i < it->subtree_nr; i++) {
-		if (!cache_tree_fully_valid(it->down[i]->cache_tree))
+		if (!cache_tree_fully_valid_1(it->down[i]->cache_tree))
 			return 0;
 	}
 	return 1;
@@ -253,6 +253,17 @@ static int must_check_existence(const struct cache_entry *ce)
 	return !(repo_has_promisor_remote(the_repository) && ce_skip_worktree(ce));
 }
 
+int cache_tree_fully_valid(struct cache_tree *it)
+{
+	int result;
+
+	trace2_region_enter("cache_tree", "fully_valid", NULL);
+	result = cache_tree_fully_valid_1(it);
+	trace2_region_leave("cache_tree", "fully_valid", NULL);
+
+	return result;
+}
+
 static int update_one(struct cache_tree *it,
 		      struct cache_entry **cache,
 		      int entries,

From 0df6c6350522325cd56010fd4fbb2d9397f2fe2c Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 13 Aug 2019 12:12:08 -0400
Subject: [PATCH 090/207] gvfs-helper: create tool to fetch objects using the
 GVFS Protocol

Create gvfs-helper.  This is a helper tool that uses the GVFS Protocol
REST API to fetch objects and configuration data from a GVFS cache-server
or the main Git server.  It uses libcurl to send object requests to
either server and creates loose objects and/or packfiles from the
responses.

Create gvfs-helper-client.  This code resides within git proper and
uses the sub-process API to manage gvfs-helper as a long-running background
process.
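
From inside Git, the expected calling pattern is roughly as follows
(a sketch only; it assumes a populated 'struct oid_array oids' and a
'struct object_id oid', and omits setup and error handling):

    enum gh_client__created ghc = GHC__CREATED__NOTHING;

    /* batch up a set of missing objects, then fetch them in one request */
    gh_client__queue_oid_array(oids.oid, oids.nr);
    if (gh_client__drain_queue(&ghc))
        die("gvfs-helper: bulk fetch failed");

    /* or fetch a single object immediately */
    if (gh_client__get_immediate(&oid, &ghc))
        die("gvfs-helper: fetch of %s failed", oid_to_hex(&oid));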

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 .gitignore                          |    1 +
 Documentation/config.txt            |    2 +
 Documentation/config/core.txt       |    3 +
 Documentation/config/gvfs.txt       |    5 +
 Documentation/lint-manpages.sh      |    1 +
 Makefile                            |    7 +
 config.c                            |   42 +
 contrib/buildsystems/CMakeLists.txt |    5 +-
 environment.c                       |    3 +
 environment.h                       |    3 +
 gvfs-helper-client.c                |  372 +++++
 gvfs-helper-client.h                |   68 +
 gvfs-helper.c                       | 2311 +++++++++++++++++++++++++++
 meson.build                         |    9 +
 object-file.c                       |   31 +-
 promisor-remote.c                   |   13 +-
 t/helper/.gitignore                 |    1 +
 17 files changed, 2874 insertions(+), 3 deletions(-)
 create mode 100644 Documentation/config/gvfs.txt
 create mode 100644 gvfs-helper-client.c
 create mode 100644 gvfs-helper-client.h
 create mode 100644 gvfs-helper.c

diff --git a/.gitignore b/.gitignore
index 9dd1fbc61e6c82..a461bdd35b2985 100644
--- a/.gitignore
+++ b/.gitignore
@@ -74,6 +74,7 @@
 /git-gc
 /git-get-tar-commit-id
 /git-grep
+/git-gvfs-helper
 /git-hash-object
 /git-help
 /git-hook
diff --git a/Documentation/config.txt b/Documentation/config.txt
index 939cc1387992f8..b87cb7a593b368 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -448,6 +448,8 @@ include::config/gui.txt[]
 
 include::config/guitool.txt[]
 
+include::config/gvfs.txt[]
+
 include::config/help.txt[]
 
 include::config/http.txt[]
diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index 6f408dd89398d4..6f9cba3b4c7ed7 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -793,6 +793,9 @@ core.gvfs::
 		flag just blocks them from occurring at all.
 --
 
+core.useGvfsHelper::
+	TODO
+
 core.sparseCheckout::
 	Enable "sparse checkout" feature. See linkgit:git-sparse-checkout[1]
 	for more information.
diff --git a/Documentation/config/gvfs.txt b/Documentation/config/gvfs.txt
new file mode 100644
index 00000000000000..6ab221ded36c91
--- /dev/null
+++ b/Documentation/config/gvfs.txt
@@ -0,0 +1,5 @@
+gvfs.cache-server::
+	TODO
+
+gvfs.sharedcache::
+	TODO
diff --git a/Documentation/lint-manpages.sh b/Documentation/lint-manpages.sh
index 92cfc0a15abd56..8bc316ba7646e3 100755
--- a/Documentation/lint-manpages.sh
+++ b/Documentation/lint-manpages.sh
@@ -27,6 +27,7 @@ check_missing_docs () (
 		git-init-db) continue;;
 		git-remote-*) continue;;
 		git-stage) continue;;
+		git-gvfs-helper) continue;;
 		git-legacy-*) continue;;
 		git-?*--?* ) continue ;;
 		esac
diff --git a/Makefile b/Makefile
index 801e4111f18b9f..9269f9fd8c2a1d 100644
--- a/Makefile
+++ b/Makefile
@@ -1039,6 +1039,7 @@ LIB_OBJS += gpg-interface.o
 LIB_OBJS += graph.o
 LIB_OBJS += grep.o
 LIB_OBJS += gvfs.o
+LIB_OBJS += gvfs-helper-client.o
 LIB_OBJS += hash-lookup.o
 LIB_OBJS += hashmap.o
 LIB_OBJS += help.o
@@ -1686,6 +1687,8 @@ endif
         endif
 	BASIC_CFLAGS += $(CURL_CFLAGS)
 
+	PROGRAM_OBJS += gvfs-helper.o
+
 	REMOTE_CURL_PRIMARY = git-remote-http$X
 	REMOTE_CURL_ALIASES = git-remote-https$X git-remote-ftp$X git-remote-ftps$X
 	REMOTE_CURL_NAMES = $(REMOTE_CURL_PRIMARY) $(REMOTE_CURL_ALIASES)
@@ -2936,6 +2939,10 @@ scalar$X: scalar.o GIT-LDFLAGS $(GITLIBS)
 	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) \
 		$(filter %.o,$^) $(LIBS)
 
+git-gvfs-helper$X: gvfs-helper.o http.o GIT-LDFLAGS $(GITLIBS) $(LAZYLOAD_LIBCURL_OBJ)
+	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
+		$(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS)
+
 $(LIB_FILE): $(LIB_OBJS)
 	$(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^
 
diff --git a/config.c b/config.c
index 0d66812d3f3c98..0a9a2856d242d1 100644
--- a/config.c
+++ b/config.c
@@ -43,6 +43,7 @@
 #include "wildmatch.h"
 #include "ws.h"
 #include "write-or-die.h"
+#include "transport.h"
 
 struct config_source {
 	struct config_source *prev;
@@ -1627,6 +1628,11 @@ int git_default_core_config(const char *var, const char *value,
 		return 0;
 	}
 
+	if (!strcmp(var, "core.usegvfshelper")) {
+		core_use_gvfs_helper = git_config_bool(var, value);
+		return 0;
+	}
+
 	if (!strcmp(var, "core.sparsecheckout")) {
 		/* virtual file system relies on the sparse checkout logic so force it on */
 		if (core_virtualfilesystem)
@@ -1778,6 +1784,39 @@ static int git_default_mailmap_config(const char *var, const char *value)
 	return 0;
 }
 
+static int git_default_gvfs_config(const char *var, const char *value)
+{
+	if (!strcmp(var, "gvfs.cache-server")) {
+		char *v2 = NULL;
+
+		if (!git_config_string(&v2, var, value) && v2 && *v2) {
+			free(gvfs_cache_server_url);
+			gvfs_cache_server_url = transport_anonymize_url(v2);
+		}
+		free(v2);
+		return 0;
+	}
+
+	if (!strcmp(var, "gvfs.sharedcache") && value && *value) {
+		struct strbuf buf = STRBUF_INIT;
+		strbuf_addstr(&buf, value);
+		if (strbuf_normalize_path(&buf) < 0) {
+			/*
+			 * Pretend it wasn't set.  This will cause us to
+			 * fallback to ".git/objects" effectively.
+			 */
+			strbuf_release(&buf);
+			return 0;
+		}
+		strbuf_trim_trailing_dir_sep(&buf);
+
+		gvfs_shared_cache_pathname = strbuf_detach(&buf, NULL);
+		return 0;
+	}
+
+	return 0;
+}
+
 static int git_default_attr_config(const char *var, const char *value)
 {
 	if (!strcmp(var, "attr.tree")) {
@@ -1845,6 +1884,9 @@ int git_default_config(const char *var, const char *value,
 	if (starts_with(var, "sparse."))
 		return git_default_sparse_config(var, value);
 
+	if (starts_with(var, "gvfs."))
+		return git_default_gvfs_config(var, value);
+
 	/* Add other config variables here and to Documentation/config.txt. */
 	return 0;
 }
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index 5b2c600c2db1b7..d1635d13270fd7 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -635,7 +635,7 @@ if(NOT CURL_FOUND)
 	add_compile_definitions(NO_CURL)
 	message(WARNING "git-http-push and git-http-fetch will not be built")
 else()
-	list(APPEND PROGRAMS_BUILT git-http-fetch git-http-push git-imap-send git-remote-http)
+	list(APPEND PROGRAMS_BUILT git-http-fetch git-http-push git-imap-send git-remote-http git-gvfs-helper)
 	if(CURL_VERSION_STRING VERSION_GREATER_EQUAL 7.34.0)
 		add_compile_definitions(USE_CURL_FOR_IMAP_SEND)
 	endif()
@@ -823,6 +823,9 @@ if(CURL_FOUND)
 		add_executable(git-http-push ${CMAKE_SOURCE_DIR}/http-push.c)
 		target_link_libraries(git-http-push http_obj common-main ${CURL_LIBRARIES} ${EXPAT_LIBRARIES})
 	endif()
+
+	add_executable(git-gvfs-helper ${CMAKE_SOURCE_DIR}/gvfs-helper.c)
+	target_link_libraries(git-gvfs-helper http_obj common-main ${CURL_LIBRARIES} )
 endif()
 
 parse_makefile_for_executables(git_builtin_extra "BUILT_INS")
diff --git a/environment.c b/environment.c
index 84bbc2a7172109..f7393925730d87 100644
--- a/environment.c
+++ b/environment.c
@@ -98,6 +98,9 @@ int protect_hfs = PROTECT_HFS_DEFAULT;
 #define PROTECT_NTFS_DEFAULT 1
 #endif
 int protect_ntfs = PROTECT_NTFS_DEFAULT;
+int core_use_gvfs_helper;
+char *gvfs_cache_server_url;
+const char *gvfs_shared_cache_pathname;
 
 /*
  * The character that begins a commented line in user-editable file
diff --git a/environment.h b/environment.h
index 6245b95b6873f8..4d92b6a8b64ee9 100644
--- a/environment.h
+++ b/environment.h
@@ -175,6 +175,9 @@ extern int core_gvfs;
 extern int precomposed_unicode;
 extern int protect_hfs;
 extern int protect_ntfs;
+extern int core_use_gvfs_helper;
+extern char *gvfs_cache_server_url;
+extern const char *gvfs_shared_cache_pathname;
 
 extern int core_apply_sparse_checkout;
 extern int core_sparse_checkout_cone;
diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c
new file mode 100644
index 00000000000000..b81f38a4fe721f
--- /dev/null
+++ b/gvfs-helper-client.c
@@ -0,0 +1,372 @@
+#define USE_THE_REPOSITORY_VARIABLE
+#include "git-compat-util.h"
+#include "hex.h"
+#include "strvec.h"
+#include "trace2.h"
+#include "oidset.h"
+#include "object.h"
+#include "object-store.h"
+#include "gvfs-helper-client.h"
+#include "sub-process.h"
+#include "sigchain.h"
+#include "pkt-line.h"
+#include "quote.h"
+#include "packfile.h"
+
+static struct oidset gh_client__oidset_queued = OIDSET_INIT;
+static unsigned long gh_client__oidset_count;
+static int gh_client__includes_immediate;
+
+struct gh_server__process {
+	struct subprocess_entry subprocess; /* must be first */
+	unsigned int supported_capabilities;
+};
+
+static int gh_server__subprocess_map_initialized;
+static struct hashmap gh_server__subprocess_map;
+static struct object_directory *gh_client__chosen_odb;
+
+#define CAP_GET      (1u<<1)
+
+static int gh_client__start_fn(struct subprocess_entry *subprocess)
+{
+	static int versions[] = {1, 0};
+	static struct subprocess_capability capabilities[] = {
+		{ "get", CAP_GET },
+		{ NULL, 0 }
+	};
+
+	struct gh_server__process *entry = (struct gh_server__process *)subprocess;
+
+	return subprocess_handshake(subprocess, "gvfs-helper", versions,
+				    NULL, capabilities,
+				    &entry->supported_capabilities);
+}
+
+/*
+ * Send:
+ *
+ *     get LF
+ *     (<hex-oid> LF)*
+ *     <flush>
+ *
+ */
+static int gh_client__get__send_command(struct child_process *process)
+{
+	struct oidset_iter iter;
+	struct object_id *oid;
+	int err;
+
+	/*
+	 * We assume that all of the packet_ routines call error()
+	 * so that we don't have to.
+	 */
+
+	err = packet_write_fmt_gently(process->in, "get\n");
+	if (err)
+		return err;
+
+	oidset_iter_init(&gh_client__oidset_queued, &iter);
+	while ((oid = oidset_iter_next(&iter))) {
+		err = packet_write_fmt_gently(process->in, "%s\n",
+					      oid_to_hex(oid));
+		if (err)
+			return err;
+	}
+
+	err = packet_flush_gently(process->in);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/*
+ * Update the loose object cache to include the newly created
+ * object.
+ */
+static void gh_client__update_loose_cache(const char *line)
+{
+	const char *v1_oid;
+	struct object_id oid;
+
+	if (!skip_prefix(line, "loose ", &v1_oid))
+		BUG("update_loose_cache: invalid line '%s'", line);
+
+	if (get_oid_hex(v1_oid, &oid))
+		BUG("update_loose_cache: invalid oid in line '%s'", line);
+
+	odb_loose_cache_add_new_oid(gh_client__chosen_odb, &oid);
+}
+
+/*
+ * Update the packed-git list to include the newly created packfile.
+ */
+static void gh_client__update_packed_git(const char *line)
+{
+	struct strbuf path = STRBUF_INIT;
+	const char *v1_filename;
+	struct packed_git *p;
+	int is_local;
+
+	if (!skip_prefix(line, "packfile ", &v1_filename))
+		BUG("update_packed_git: invalid line '%s'", line);
+
+	/*
+	 * ODB[0] is the local .git/objects.  All others are alternates.
+	 */
+	is_local = (gh_client__chosen_odb == the_repository->objects->odb);
+
+	strbuf_addf(&path, "%s/pack/%s",
+		    gh_client__chosen_odb->path, v1_filename);
+	strbuf_strip_suffix(&path, ".pack");
+	strbuf_addstr(&path, ".idx");
+
+	p = add_packed_git(the_repository, path.buf, path.len, is_local);
+	if (p)
+		install_packed_git_and_mru(the_repository, p);
+	strbuf_release(&path);
+}
+
+/*
+ * We expect:
+ *
+ *    <odb>
+ *    <data>*
+ *    <status>
+ *    <flush>
+ *
+ * Where:
+ *
+ * <odb>      ::= odb SP <directory> LF
+ *
+ * <data>     ::= <packfile> / <loose>
+ *
+ * <packfile> ::= packfile SP <filename> LF
+ *
+ * <loose>    ::= loose SP <hex-oid> LF
+ *
+ * <status>   ::=   ok LF
+ *                / partial LF
+ *                / error SP <message> LF
+ *
+ * Note that `gvfs-helper` controls how/if it chunks the request when
+ * it talks to the cache-server and/or main Git server.  So it is
+ * possible for us to receive many packfiles and/or loose objects *AND
+ * THEN* get a hard network error or a 404 on an individual object.
+ *
+ * If we get a partial result, we can let the caller try to continue
+ * -- for example, maybe an immediate request for a tree object was
+ * grouped with a queued request for a blob.  The tree-walk *might* be
+ * able to continue and let the 404 blob be handled later.
+ */
+static int gh_client__get__receive_response(
+	struct child_process *process,
+	enum gh_client__created *p_ghc,
+	int *p_nr_loose, int *p_nr_packfile)
+{
+	enum gh_client__created ghc = GHC__CREATED__NOTHING;
+	const char *v1;
+	char *line;
+	int len;
+	int err = 0;
+
+	while (1) {
+		/*
+		 * Warning: packet_read_line_gently() calls die()
+		 * despite the _gently moniker.
+		 */
+		len = packet_read_line_gently(process->out, NULL, &line);
+		if ((len < 0) || !line)
+			break;
+
+		if (starts_with(line, "odb")) {
+			/* trust that this matches what we expect */
+		}
+
+		else if (starts_with(line, "packfile")) {
+			gh_client__update_packed_git(line);
+			ghc |= GHC__CREATED__PACKFILE;
+			*p_nr_packfile += 1;
+		}
+
+		else if (starts_with(line, "loose")) {
+			gh_client__update_loose_cache(line);
+			ghc |= GHC__CREATED__LOOSE;
+			*p_nr_loose += 1;
+		}
+
+		else if (starts_with(line, "ok"))
+			;
+		else if (starts_with(line, "partial"))
+			;
+		else if (skip_prefix(line, "error ", &v1)) {
+			error("gvfs-helper error: '%s'", v1);
+			err = -1;
+		}
+	}
+
+	*p_ghc = ghc;
+
+	return err;
+}
+
+static void gh_client__choose_odb(void)
+{
+	if (gh_client__chosen_odb)
+		return;
+
+	prepare_alt_odb(the_repository);
+	gh_client__chosen_odb = the_repository->objects->odb;
+}
+
+static int gh_client__get(enum gh_client__created *p_ghc)
+{
+	struct gh_server__process *entry;
+	struct child_process *process;
+	struct strvec argv = STRVEC_INIT;
+	struct strbuf quoted = STRBUF_INIT;
+	int nr_loose = 0;
+	int nr_packfile = 0;
+	int err = 0;
+
+	trace2_region_enter("gh-client", "get", the_repository);
+
+	gh_client__choose_odb();
+
+	/*
+	 * TODO decide what defaults we want.
+	 */
+	strvec_push(&argv, "gvfs-helper");
+	strvec_push(&argv, "--fallback");
+	strvec_push(&argv, "--cache-server=trust");
+	strvec_pushf(&argv, "--shared-cache=%s",
+			 gh_client__chosen_odb->path);
+	strvec_push(&argv, "server");
+
+	sq_quote_argv_pretty(&quoted, argv.v);
+
+	if (!gh_server__subprocess_map_initialized) {
+		gh_server__subprocess_map_initialized = 1;
+		hashmap_init(&gh_server__subprocess_map,
+			     (hashmap_cmp_fn)cmd2process_cmp, NULL, 0);
+		entry = NULL;
+	} else
+		entry = (struct gh_server__process *)subprocess_find_entry(
+			&gh_server__subprocess_map, quoted.buf);
+
+	if (!entry) {
+		entry = xmalloc(sizeof(*entry));
+		entry->supported_capabilities = 0;
+
+		err = subprocess_start_strvec(
+			&gh_server__subprocess_map, &entry->subprocess, 1,
+			&argv, gh_client__start_fn);
+		if (err) {
+			free(entry);
+			goto leave_region;
+		}
+	}
+
+	process = &entry->subprocess.process;
+
+	if (!(CAP_GET & entry->supported_capabilities)) {
+		error("gvfs-helper: does not support GET");
+		subprocess_stop(&gh_server__subprocess_map,
+				(struct subprocess_entry *)entry);
+		free(entry);
+		err = -1;
+		goto leave_region;
+	}
+
+	sigchain_push(SIGPIPE, SIG_IGN);
+
+	err = gh_client__get__send_command(process);
+	if (!err)
+		err = gh_client__get__receive_response(process, p_ghc,
+						 &nr_loose, &nr_packfile);
+
+	sigchain_pop(SIGPIPE);
+
+	if (err) {
+		subprocess_stop(&gh_server__subprocess_map,
+				(struct subprocess_entry *)entry);
+		free(entry);
+	}
+
+leave_region:
+	strvec_clear(&argv);
+	strbuf_release(&quoted);
+
+	trace2_data_intmax("gh-client", the_repository,
+			   "get/immediate", gh_client__includes_immediate);
+
+	trace2_data_intmax("gh-client", the_repository,
+			   "get/nr_objects", gh_client__oidset_count);
+
+	if (nr_loose)
+		trace2_data_intmax("gh-client", the_repository,
+				   "get/nr_loose", nr_loose);
+
+	if (nr_packfile)
+		trace2_data_intmax("gh-client", the_repository,
+				   "get/nr_packfile", nr_packfile);
+
+	if (err)
+		trace2_data_intmax("gh-client", the_repository,
+				   "get/error", err);
+
+	trace2_region_leave("gh-client", "get", the_repository);
+
+	oidset_clear(&gh_client__oidset_queued);
+	gh_client__oidset_count = 0;
+	gh_client__includes_immediate = 0;
+
+	return err;
+}
+
+void gh_client__queue_oid(const struct object_id *oid)
+{
+	// TODO consider removing this trace2.  it is useful for interactive
+	// TODO debugging, but may generate way too much noise for a data
+	// TODO event.
+	trace2_printf("gh_client__queue_oid: %s", oid_to_hex(oid));
+
+	if (!oidset_insert(&gh_client__oidset_queued, oid))
+		gh_client__oidset_count++;
+}
+
+/*
+ * This routine should actually take a "const struct oid_array *"
+ * rather than the component parts, but fetch_objects() uses
+ * this model (because of the call in sha1-file.c).
+ */
+void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr)
+{
+	int k;
+
+	for (k = 0; k < oid_nr; k++)
+		gh_client__queue_oid(&oids[k]);
+}
+
+int gh_client__drain_queue(enum gh_client__created *p_ghc)
+{
+	*p_ghc = GHC__CREATED__NOTHING;
+
+	if (!gh_client__oidset_count)
+		return 0;
+
+	return gh_client__get(p_ghc);
+}
+int gh_client__get_immediate(const struct object_id *oid,
+			     enum gh_client__created *p_ghc)
+{
+	gh_client__includes_immediate = 1;
+
+	// TODO consider removing this trace2.  it is useful for interactive
+	// TODO debugging, but may generate way too much noise for a data
+	// TODO event.
+	trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid));
+
+	if (!oidset_insert(&gh_client__oidset_queued, oid))
+		gh_client__oidset_count++;
+
+	return gh_client__drain_queue(p_ghc);
+}
diff --git a/gvfs-helper-client.h b/gvfs-helper-client.h
new file mode 100644
index 00000000000000..a5a951ff5b5bfe
--- /dev/null
+++ b/gvfs-helper-client.h
@@ -0,0 +1,68 @@
+#ifndef GVFS_HELPER_CLIENT_H
+#define GVFS_HELPER_CLIENT_H
+
+struct repository;
+struct commit;
+
+enum gh_client__created {
+	/*
+	 * The _get_ operation did not create anything.  It doesn't
+	 * matter whether `gvfs-helper` had errors or not -- just that
+	 * nothing was created.
+	 */
+	GHC__CREATED__NOTHING  = 0,
+
+	/*
+	 * The _get_ operation created one or more packfiles.
+	 */
+	GHC__CREATED__PACKFILE = 1<<1,
+
+	/*
+	 * The _get_ operation created one or more loose objects.
+	 * (Not necessarily for the individual OID you requested.)
+	 */
+	GHC__CREATED__LOOSE    = 1<<2,
+
+	/*
+	 * The _get_ operation created one or more packfiles *and*
+	 * one or more loose objects.
+	 */
+	GHC__CREATED__PACKFILE_AND_LOOSE = (GHC__CREATED__PACKFILE |
+					    GHC__CREATED__LOOSE),
+};
+
+/*
+ * Ask `gvfs-helper server` to immediately fetch a single object
+ * using "/gvfs/objects" GET semantics.
+ *
+ * A long-running background process is used to make subsequent
+ * requests more efficient.
+ *
+ * A loose object will be created in the shared-cache ODB and
+ * the in-memory cache updated.
+ */
+int gh_client__get_immediate(const struct object_id *oid,
+			     enum gh_client__created *p_ghc);
+
+/*
+ * Queue this OID for a future fetch using `gvfs-helper server`.
+ * It does not wait.
+ *
+ * Callers should not rely on the queued object being on disk until
+ * the queue has been drained.
+ */
+void gh_client__queue_oid(const struct object_id *oid);
+void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr);
+
+/*
+ * Ask `gvfs-helper server` to fetch the set of queued OIDs using
+ * "/gvfs/objects" POST semantics.
+ *
+ * A long-running background process is used to make subsequent
+ * requests more efficient.
+ *
+ * One or more packfiles will be created in the shared-cache ODB.
+ */
+int gh_client__drain_queue(enum gh_client__created *p_ghc);
+
+#endif /* GVFS_HELPER_CLIENT_H */
diff --git a/gvfs-helper.c b/gvfs-helper.c
new file mode 100644
index 00000000000000..abf97c95e1ce9e
--- /dev/null
+++ b/gvfs-helper.c
@@ -0,0 +1,2311 @@
+// TODO Write a man page.  Here are some notes for dogfooding.
+// TODO
+//
+// Usage: git gvfs-helper [<main_options>] <sub-command> [<sub-command-options>]
+//
+// <main_options>:
+//
+//     --remote=<remote-name>         // defaults to "origin"
+//
+//     --fallback                     // boolean. defaults to off
+//
+//            When a fetch from the cache-server fails, automatically
+//            fallback to the main Git server.  This option has no effect
+//            if no cache-server is defined.
+//
+//     --cache-server=<use>  // defaults to "verify"
+//
+//            verify   := lookup the set of defined cache-servers using
+//                        "gvfs/config" and confirm that the selected
+//                        cache-server is well-known.  Silently disable the
+//                        cache-server if not.  (See security notes later.)
+//
+//            error    := verify cache-server and abort if not well-known.
+//
+//            trust    := do not verify cache-server.  just use it.
+//
+//            disable  := disable the cache-server and always use the main
+//                        Git server.
+//
+//     --shared-cache=<odb-directory-pathname>
+//
+//            A relative or absolute pathname to the ODB directory to store
+//            fetched objects.
+//
+//            If this option is not specified, we default to the value
+//            in the "gvfs.sharedcache" config setting and then to the
+//            local ".git/objects" directory.
+//
+// <sub-command>:
+//
+//     config
+//
+//            Fetch the "gvfs/config" string from the main Git server.
+//            (The cache-server setting is ignored because cache-servers
+//            do not support this REST API.)
+//
+//     get
+//
+//            Fetch 1 or more objects.  If a cache-server is configured,
+//            try it first.  Optionally fallback to the main Git server.
+//
+//            The set of objects is given on stdin and is assumed to be
+//            a list of <oid>, one per line.
+//
+//            <get-options>:
+//
+//                 --block-size=<n>      // defaults to "4000"
+//
+//                       Request objects from server in batches of at
+//                       most n objects (not bytes).
+//
+//                 --depth=<depth>       // defaults to "1"
+//
+//     server
+//
+//            Interactive/sub-process mode.  Listen for a series of commands
+//            and data on stdin and return results on stdout.  This command
+//            uses pkt-line format [1] and implements the long-running process
+//            protocol [2] to communicate with the foreground/parent process.
+//
+//            <server-options>:
+//
+//                 --block-size=<n>      // defaults to "4000"
+//
+//                       Request objects from server in batches of at
+//                       most n objects (not bytes).
+//
+//                 --depth=<depth>       // defaults to "1"
+//
+//            Interactive verb: get
+//
+//                 Fetch 1 or more objects.  If a cache-server is configured,
+//                 try it first.  Optionally fallback to the main Git server.
+//                 Create 1 or more loose objects and/or packfiles in the
+//                 requested shared-cache directory (given on the command
+//                 line and which is reported at the beginning of the
+//                 response).
+//
+//                 git> get
+//                 git> <oid>
+//                 git> <oid>
+//                 git> ...
+//                 git> <oid>
+//                 git> 0000
+//
+//                 git< odb <directory>
+//                 git< loose <oid> | packfile <filename.pack>
+//                 git< loose <oid> | packfile <filename.pack>
+//                 git< ...
+//                 git< loose <oid> | packfile <filename.pack>
+//                 git< ok | partial | error <message>
+//                 git< 0000
+//
+//            [1] Documentation/technical/protocol-common.txt
+//            [2] Documentation/technical/long-running-process-protocol.txt
+//            [3] See GIT_TRACE_PACKET
+//
+// Example:
+//
+// $ git -c core.virtualizeobjects=false -c core.usegvfshelper=false
+//           rev-list --objects --no-walk --missing=print HEAD
+//     | grep "^?"
+//     | sed 's/^?//'
+//     | git gvfs-helper get
+//
+// Note: In this example, we need to turn off "core.virtualizeobjects" and
+//       "core.usegvfshelper" when building the list of objects.  This prevents
+//       rev-list (in oid_object_info_extended()) from automatically fetching
+//       them with read-object-hook or "gvfs-helper server" sub-process (and
+//       defeating the whole purpose of this example).
+//
+//////////////////////////////////////////////////////////////////
+
+#define USE_THE_REPOSITORY_VARIABLE
+#include "git-compat-util.h"
+#include "git-curl-compat.h"
+#include "environment.h"
+#include "hex.h"
+#include "setup.h"
+#include "config.h"
+#include "remote.h"
+#include "connect.h"
+#include "strbuf.h"
+#include "walker.h"
+#include "http.h"
+#include "exec-cmd.h"
+#include "run-command.h"
+#include "pkt-line.h"
+#include "string-list.h"
+#include "sideband.h"
+#include "strvec.h"
+#include "credential.h"
+#include "oid-array.h"
+#include "send-pack.h"
+#include "protocol.h"
+#include "quote.h"
+#include "transport.h"
+#include "parse-options.h"
+#include "object-file.h"
+#include "object-store.h"
+#include "json-writer.h"
+#include "tempfile.h"
+#include "oidset.h"
+#include "dir.h"
+#include "url.h"
+#include "abspath.h"
+#include "progress.h"
+#include "trace2.h"
+
+static const char * const main_usage[] = {
+	N_("git gvfs-helper [<main_options>] config      [<options>]"),
+	N_("git gvfs-helper [<main_options>] get         [<options>]"),
+	N_("git gvfs-helper [<main_options>] server      [<options>]"),
+	NULL
+};
+
+static const char *const get_usage[] = {
+	N_("git gvfs-helper [<main_options>] get [<options>]"),
+	NULL
+};
+
+static const char *const server_usage[] = {
+	N_("git gvfs-helper [<main_options>] server [<options>]"),
+	NULL
+};
+
+#define GH__DEFAULT_BLOCK_SIZE 4000
+
+/*
+ * Our exit-codes.
+ */
+enum gh__error_code {
+	GH__ERROR_CODE__USAGE = -1, /* will be mapped to usage() */
+	GH__ERROR_CODE__OK = 0,
+	GH__ERROR_CODE__ERROR = 1, /* unspecified */
+//	GH__ERROR_CODE__CACHE_SERVER_NOT_FOUND = 2,
+	GH__ERROR_CODE__CURL_ERROR = 3,
+	GH__ERROR_CODE__HTTP_401 = 4,
+	GH__ERROR_CODE__HTTP_404 = 5,
+	GH__ERROR_CODE__HTTP_UNEXPECTED_CODE = 6,
+	GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE = 7,
+	GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE = 8,
+	GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE = 9,
+	GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE = 10,
+	GH__ERROR_CODE__SUBPROCESS_SYNTAX = 11,
+};
+
+enum gh__cache_server_mode {
+	/* verify URL. disable if unknown. */
+	GH__CACHE_SERVER_MODE__VERIFY_DISABLE = 0,
+	/* verify URL. error if unknown. */
+	GH__CACHE_SERVER_MODE__VERIFY_ERROR,
+	/* disable the cache-server, if defined */
+	GH__CACHE_SERVER_MODE__DISABLE,
+	/* trust any cache-server */
+	GH__CACHE_SERVER_MODE__TRUST_WITHOUT_VERIFY,
+};
+
+/*
+ * The set of command line, config, and environment variables
+ * that we use as input to decide how we should operate.
+ */
+static struct gh__cmd_opts {
+	const char *remote_name;
+
+	int try_fallback; /* to git server if cache-server fails */
+	int show_progress;
+
+	int depth;
+	int block_size;
+
+	enum gh__cache_server_mode cache_server_mode;
+} gh__cmd_opts;
+
+/*
+ * The chosen global state derived from the inputs in gh__cmd_opts.
+ */
+static struct gh__global {
+	struct remote *remote;
+
+	struct credential main_creds;
+	struct credential cache_creds;
+
+	const char *main_url;
+	const char *cache_server_url;
+
+	struct strbuf buf_odb_path;
+
+	int http_is_initialized;
+	int cache_server_is_initialized; /* did sub-command look for one */
+	int main_creds_need_approval; /* try to only approve them once */
+
+} gh__global;
+
+/*
+ * Stolen from http.c
+ */
+static CURLcode gh__curlinfo_strbuf(CURL *curl, CURLINFO info, struct strbuf *buf)
+{
+	char *ptr;
+	CURLcode ret;
+
+	strbuf_reset(buf);
+	ret = curl_easy_getinfo(curl, info, &ptr);
+	if (!ret && ptr)
+		strbuf_addstr(buf, ptr);
+	return ret;
+}
+
+enum gh__progress_state {
+	GH__PROGRESS_STATE__START = 0,
+	GH__PROGRESS_STATE__PHASE1,
+	GH__PROGRESS_STATE__PHASE2,
+	GH__PROGRESS_STATE__PHASE3,
+};
+
+/*
+ * Parameters to drive an HTTP request (with any necessary retries).
+ */
+struct gh__request_params {
+	int b_is_post;            /* POST=1 or GET=0 */
+	int b_write_to_file;      /* write to file=1 or strbuf=0 */
+	int b_no_cache_server;    /* force main server only */
+
+	unsigned long object_count; /* number of objects being fetched */
+
+	const struct strbuf *post_payload; /* POST body to send */
+
+	struct curl_slist *headers; /* additional http headers to send */
+	struct tempfile *tempfile; /* for response content when file */
+	struct strbuf *buffer;     /* for response content when strbuf */
+	struct strbuf label;       /* for trace2 regions */
+
+	struct strbuf loose_path;
+
+	/*
+	 * Note that I am putting all of the progress-related instance data
+	 * inside the request-params in the hope that we can eventually
+	 * do multi-threaded/concurrent HTTP requests when chunking
+	 * large requests.  However, the underlying "struct progress" API
+	 * is not thread safe (that is, it doesn't allow concurrent progress
+	 * reports (since that might require multiple lines on the screen
+	 * or something)).
+	 */
+	enum gh__progress_state progress_state;
+	struct strbuf progress_base_phase2_msg;
+	struct strbuf progress_base_phase3_msg;
+
+	/*
+	 * The buffer for the formatted progress message is shared by the
+	 * "struct progress" API and must remain valid for the duration of
+	 * the start_progress..stop_progress lifespan.
+	 */
+	struct strbuf progress_msg;
+	struct progress *progress;
+};
+
+#define GH__REQUEST_PARAMS_INIT { \
+	.b_is_post = 0, \
+	.b_write_to_file = 0, \
+	.b_no_cache_server = 0, \
+	.object_count = 0, \
+	.post_payload = NULL, \
+	.headers = NULL, \
+	.tempfile = NULL, \
+	.buffer = NULL, \
+	.label = STRBUF_INIT, \
+	.loose_path = STRBUF_INIT, \
+	.progress_state = GH__PROGRESS_STATE__START, \
+	.progress_base_phase2_msg = STRBUF_INIT, \
+	.progress_base_phase3_msg = STRBUF_INIT, \
+	.progress_msg = STRBUF_INIT, \
+	.progress = NULL, \
+	}
+
+static void gh__request_params__release(struct gh__request_params *params)
+{
+	if (!params)
+		return;
+
+	params->post_payload = NULL; /* we do not own this */
+
+	curl_slist_free_all(params->headers);
+	params->headers = NULL;
+
+	delete_tempfile(&params->tempfile);
+
+	params->buffer = NULL; /* we do not own this */
+
+	strbuf_release(&params->label);
+	strbuf_release(&params->loose_path);
+
+	strbuf_release(&params->progress_base_phase2_msg);
+	strbuf_release(&params->progress_base_phase3_msg);
+	strbuf_release(&params->progress_msg);
+
+	stop_progress(&params->progress);
+	params->progress = NULL;
+}
+
+/*
+ * Bucket to describe the results of an HTTP request (may be
+ * overwritten during retries so that it describes the final attempt).
+ */
+struct gh__response_status {
+	struct strbuf error_message;
+	struct strbuf content_type;
+	long response_code; /* http response code */
+	CURLcode curl_code;
+	enum gh__error_code ec;
+	intmax_t bytes_received;
+};
+
+#define GH__RESPONSE_STATUS_INIT { \
+	.error_message = STRBUF_INIT, \
+	.content_type = STRBUF_INIT, \
+	.response_code = 0, \
+	.curl_code = CURLE_OK, \
+	.ec = GH__ERROR_CODE__OK, \
+	.bytes_received = 0, \
+	}
+
+static void gh__response_status__zero(struct gh__response_status *s)
+{
+	strbuf_setlen(&s->error_message, 0);
+	strbuf_setlen(&s->content_type, 0);
+	s->response_code = 0;
+	s->curl_code = CURLE_OK;
+	s->ec = GH__ERROR_CODE__OK;
+	s->bytes_received = 0;
+}
+
+/*
+ * Create a single normalized 'ec' error-code from the status we
+ * received from the HTTP request.  Map a few of the expected HTTP
+ * status codes to 'ec', but don't get too crazy here.
+ */
+static void gh__response_status__set_from_slot(
+	struct gh__request_params *params,
+	struct gh__response_status *status,
+	const struct active_request_slot *slot)
+{
+	status->curl_code = slot->results->curl_result;
+	gh__curlinfo_strbuf(slot->curl, CURLINFO_CONTENT_TYPE,
+			    &status->content_type);
+	curl_easy_getinfo(slot->curl, CURLINFO_RESPONSE_CODE,
+			  &status->response_code);
+
+	strbuf_setlen(&status->error_message, 0);
+
+	if (status->response_code == 200)
+		status->ec = GH__ERROR_CODE__OK;
+
+	else if (status->response_code == 401) {
+		strbuf_addstr(&status->error_message, "401 Not Authorized");
+		status->ec = GH__ERROR_CODE__HTTP_401;
+
+	} else if (status->response_code == 404) {
+		strbuf_addstr(&status->error_message, "404 Not Found");
+		status->ec = GH__ERROR_CODE__HTTP_404;
+
+	} else if (status->curl_code != CURLE_OK) {
+		strbuf_addf(&status->error_message, "%s (curl)",
+			    curl_easy_strerror(status->curl_code));
+		status->ec = GH__ERROR_CODE__CURL_ERROR;
+	} else {
+		strbuf_addf(&status->error_message, "HTTP %ld Unexpected",
+			    status->response_code);
+		status->ec = GH__ERROR_CODE__HTTP_UNEXPECTED_CODE;
+	}
+
+	if (status->ec != GH__ERROR_CODE__OK)
+		status->bytes_received = 0;
+	else if (params->b_write_to_file)
+		status->bytes_received = (intmax_t)ftell(params->tempfile->fp);
+	else
+		status->bytes_received = (intmax_t)params->buffer->len;
+}
+
+static void gh__response_status__release(struct gh__response_status *status)
+{
+	if (!status)
+		return;
+	strbuf_release(&status->error_message);
+	strbuf_release(&status->content_type);
+}
+
+/*
+ * The cache-server sends a somewhat bogus 400 instead of
+ * the normal 401 when AUTH is required.  Fix up the status
+ * to hide that.
+ */
+static void fixup_cache_server_400_to_401(struct gh__response_status *status)
+{
+	if (status->response_code != 400)
+		return;
+
+	/*
+	 * TODO Technically, the cache-server could send a 400
+	 * TODO for many reasons, not just for their bogus
+	 * TODO pseudo-401, but we're going to assume it is a
+	 * TODO 401 for now.  We should confirm the expected
+	 * TODO error message in the response-body.
+	 */
+	status->response_code = 401;
+}
+
+static int gh__curl_progress_cb(void *clientp,
+				curl_off_t dltotal, curl_off_t dlnow,
+				curl_off_t ultotal, curl_off_t ulnow)
+{
+	struct gh__request_params *params = clientp;
+
+	/*
+	 * From what I can tell, CURL progress arrives in 3 phases.
+	 *
+	 * [1] An initial connection setup phase where we get [0,0] [0,0].
+	 * [2] An upload phase where we start sending the request headers
+	 *     and body. ulnow will be > 0.  ultotal may or may not be 0.
+	 * [3] A download phase where we start receiving the response
+	 *     headers and payload body.  dlnow will be > 0. dltotal may
+	 *     or may not be 0.
+	 *
+	 * If we pass zero for the total to the "struct progress" API, we
+	 * get simple numbers rather than percentages.  So our progress
+	 * output format may vary accordingly.
+	 *
+	 * It is unclear if CURL will give us a final callback after
+	 * everything is finished, so we leave the progress handle open
+	 * and let the caller issue the final stop_progress().
+	 *
+	 * There is a bit of a mismatch between the CURL API and the
+	 * "struct progress" API.  The latter requires us to set the
+	 * progress message when we call one of the start_progress
+	 * methods.  We cannot change the progress message while we are
+	 * showing progress state.  And we cannot change the denominator
+	 * (total) after we start.  CURL may or may not give us the total
+	 * sizes for each phase.
+	 *
+	 * Also be advised that the "struct progress" API eats messages
+	 * so that the screen is only updated every second or so, and it
+	 * may not print anything if the start..stop happens in less than
+	 * 2 seconds, whereas CURL calls this callback very frequently.
+	 * The net-net is that we may not actually see this progress
+	 * message for small/fast HTTP requests.
+	 */
+
+	switch (params->progress_state) {
+	case GH__PROGRESS_STATE__START: /* first callback */
+		if (dlnow == 0 && ulnow == 0)
+			goto enter_phase_1;
+
+		if (ulnow)
+			goto enter_phase_2;
+		else
+			goto enter_phase_3;
+
+	case GH__PROGRESS_STATE__PHASE1:
+		if (dlnow == 0 && ulnow == 0)
+			return 0;
+
+		if (ulnow)
+			goto enter_phase_2;
+		else
+			goto enter_phase_3;
+
+	case GH__PROGRESS_STATE__PHASE2:
+		display_progress(params->progress, ulnow);
+		if (dlnow == 0)
+			return 0;
+
+		stop_progress(&params->progress);
+		goto enter_phase_3;
+
+	case GH__PROGRESS_STATE__PHASE3:
+		display_progress(params->progress, dlnow);
+		return 0;
+
+	default:
+		return 0;
+	}
+
+enter_phase_1:
+	/*
+	 * Don't bother to create a progress handle during phase [1].
+	 * Because we get [0,0,0,0], we don't have any data to report
+	 * and would just have to synthesize some type of progress.
+	 * From my testing, phase [1] is fairly quick (probably just
+	 * the SSL handshake), so the "struct progress" API will most
+	 * likely completely eat any messages that we did produce.
+	 */
+	params->progress_state = GH__PROGRESS_STATE__PHASE1;
+	return 0;
+
+enter_phase_2:
+	strbuf_setlen(&params->progress_msg, 0);
+	if (params->progress_base_phase2_msg.len) {
+		strbuf_addf(&params->progress_msg, "%s (bytes sent)",
+			    params->progress_base_phase2_msg.buf);
+		params->progress = start_progress(params->progress_msg.buf, ultotal);
+		display_progress(params->progress, ulnow);
+	}
+	params->progress_state = GH__PROGRESS_STATE__PHASE2;
+	return 0;
+
+enter_phase_3:
+	strbuf_setlen(&params->progress_msg, 0);
+	if (params->progress_base_phase3_msg.len) {
+		strbuf_addf(&params->progress_msg, "%s (bytes received)",
+			    params->progress_base_phase3_msg.buf);
+		params->progress = start_progress(params->progress_msg.buf, dltotal);
+		display_progress(params->progress, dlnow);
+	}
+	params->progress_state = GH__PROGRESS_STATE__PHASE3;
+	return 0;
+}
+
+/*
+ * Run the request without using "run_one_slot()" because we
+ * don't want the post-request normalization, error handling,
+ * and auto-reauth handling in http.c.
+ */
+static void gh__run_one_slot(struct active_request_slot *slot,
+			     struct gh__request_params *params,
+			     struct gh__response_status *status)
+{
+	trace2_region_enter("gvfs-helper", params->label.buf, NULL);
+
+	if (!start_active_slot(slot)) {
+		status->curl_code = CURLE_FAILED_INIT; /* a bit of a lie */
+		strbuf_addstr(&status->error_message,
+			      "failed to start HTTP request");
+	} else {
+		run_active_slot(slot);
+		if (params->b_write_to_file)
+			fflush(params->tempfile->fp);
+
+		gh__response_status__set_from_slot(params, status, slot);
+
+		if (status->ec == GH__ERROR_CODE__OK) {
+			int old_len = params->label.len;
+
+			strbuf_addstr(&params->label, "/nr_objects");
+			trace2_data_intmax("gvfs-helper", NULL,
+					   params->label.buf,
+					   params->object_count);
+			strbuf_setlen(&params->label, old_len);
+
+			strbuf_addstr(&params->label, "/nr_bytes");
+			trace2_data_intmax("gvfs-helper", NULL,
+					   params->label.buf,
+					   status->bytes_received);
+			strbuf_setlen(&params->label, old_len);
+		}
+	}
+
+	if (params->progress)
+		stop_progress(&params->progress);
+
+	trace2_region_leave("gvfs-helper", params->label.buf, NULL);
+}
+
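+/*
+ * Parse the value of the "--cache-server=<mode>" command line option
+ * into the corresponding GH__CACHE_SERVER_MODE__ value (verify, error,
+ * disable, or trust).
+ */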
+static int option_parse_cache_server_mode(const struct option *opt,
+					  const char *arg, int unset)
+{
+	if (unset) /* should not happen */
+		return error(_("missing value for switch '%s'"),
+			     opt->long_name);
+
+	else if (!strcmp(arg, "verify"))
+		gh__cmd_opts.cache_server_mode =
+			GH__CACHE_SERVER_MODE__VERIFY_DISABLE;
+
+	else if (!strcmp(arg, "error"))
+		gh__cmd_opts.cache_server_mode =
+			GH__CACHE_SERVER_MODE__VERIFY_ERROR;
+
+	else if (!strcmp(arg, "disable"))
+		gh__cmd_opts.cache_server_mode =
+			GH__CACHE_SERVER_MODE__DISABLE;
+
+	else if (!strcmp(arg, "trust"))
+		gh__cmd_opts.cache_server_mode =
+			GH__CACHE_SERVER_MODE__TRUST_WITHOUT_VERIFY;
+
+	else
+		return error(_("invalid value for switch '%s'"),
+			     opt->long_name);
+
+	return 0;
+}
+
+/*
+ * Let command line args override "gvfs.sharedcache" config setting.
+ *
+ * It would be nice to move this to parse-options.c as an
+ * OPTION_PATHNAME handler.  And maybe have flags for exists()
+ * and is_directory().
+ */
+static int option_parse_shared_cache_directory(const struct option *opt,
+					       const char *arg, int unset)
+{
+	if (unset) /* should not happen */
+		return error(_("missing value for switch '%s'"),
+			     opt->long_name);
+
+	if (!is_directory(arg))
+		return error(_("value for switch '%s' is not a directory: '%s'"),
+			     opt->long_name, arg);
+
+	gvfs_shared_cache_pathname = arg;
+
+	return 0;
+}
+
+/*
+ * Lookup the URL for this remote (defaults to 'origin').
+ */
+static void lookup_main_url(void)
+{
+	/*
+	 * Both VFS and Scalar only work with 'origin', so we expect this.
+	 * The command line arg is mainly for debugging.
+	 */
+	if (!gh__cmd_opts.remote_name || !*gh__cmd_opts.remote_name)
+		gh__cmd_opts.remote_name = "origin";
+
+	gh__global.remote = remote_get(gh__cmd_opts.remote_name);
+	if (!gh__global.remote->url.v[0] || !*gh__global.remote->url.v[0])
+		die("unknown remote '%s'", gh__cmd_opts.remote_name);
+
+	/*
+	 * Strip out any in-line auth in the origin server URL so that
+	 * we can control which creds we fetch.
+	 *
+	 * Azure DevOps has been known to suggest HTTPS URLs of the
+	 * form "https://<account>@dev.azure.com/<account>/<path>".
+	 *
+	 * Break that so that we can force the use of a PAT.
+	 */
+	gh__global.main_url = transport_anonymize_url(gh__global.remote->url.v[0]);
+
+	trace2_data_string("gvfs-helper", NULL, "remote/url", gh__global.main_url);
+}
+
+static void do__gvfs_config(struct gh__response_status *status,
+			    struct strbuf *config_data);
+
+/*
+ * Find the URL of the cache-server, if we have one.
+ *
+ * This routine is called by the initialization code and is allowed
+ * to call die() rather than returning an 'ec'.
+ */
+static void select_cache_server(void)
+{
+	struct gh__response_status status = GH__RESPONSE_STATUS_INIT;
+	struct strbuf config_data = STRBUF_INIT;
+	const char *match = NULL;
+
+	/*
+	 * This only indicates that the sub-command actually called
+	 * this routine.  We rely on gh__global.cache_server_url to tell
+	 * us if we actually have a cache-server configured.
+	 */
+	gh__global.cache_server_is_initialized = 1;
+	gh__global.cache_server_url = NULL;
+
+	if (gh__cmd_opts.cache_server_mode == GH__CACHE_SERVER_MODE__DISABLE) {
+		trace2_data_string("gvfs-helper", NULL, "cache/url", "disabled");
+		return;
+	}
+
+	/*
+	 * If the cache-server and main Git server have the same URL, we
+	 * can silently disable the cache-server (by NOT setting the field
+	 * in gh__global and explicitly disabling the fallback logic).
+	 */
+	if (!strcmp(gvfs_cache_server_url, gh__global.main_url)) {
+		gh__cmd_opts.try_fallback = 0;
+		trace2_data_string("gvfs-helper", NULL, "cache/url", "same");
+		return;
+	}
+
+	if (gh__cmd_opts.cache_server_mode ==
+	    GH__CACHE_SERVER_MODE__TRUST_WITHOUT_VERIFY) {
+		gh__global.cache_server_url = gvfs_cache_server_url;
+		trace2_data_string("gvfs-helper", NULL, "cache/url",
+				   gvfs_cache_server_url);
+		return;
+	}
+
+	/*
+	 * GVFS cache-servers use the main Git server's creds rather
+	 * than having their own creds.  This feels like a security
+	 * hole.  For example, if the cache-server URL is pointed to a
+	 * bad site, we'll happily send it our creds for the main Git
+	 * server with each request to the cache-server.  This would
+	 * allow an attacker to later use our creds to impersonate us
+	 * on the main Git server.
+	 *
+	 * So we optionally verify that the URL to the cache-server is
+	 * well-known by the main Git server.
+	 */
+
+	do__gvfs_config(&status, &config_data);
+
+	if (status.ec == GH__ERROR_CODE__OK) {
+		/*
+		 * The gvfs/config response is in JSON, but I don't think
+		 * we need to parse it and all that.  Let's just do a simple
+		 * strstr() and assume it is sufficient.
+		 *
+		 * We do add some context to the pattern to guard against
+		 * some attacks.
+		 */
+		struct strbuf pattern = STRBUF_INIT;
+
+		strbuf_addf(&pattern, "\"Url\":\"%s\"", gvfs_cache_server_url);
+		match = strstr(config_data.buf, pattern.buf);
+
+		strbuf_release(&pattern);
+	}
+
+	strbuf_release(&config_data);
+
+	if (match) {
+		gh__global.cache_server_url = gvfs_cache_server_url;
+		trace2_data_string("gvfs-helper", NULL, "cache/url",
+				   gvfs_cache_server_url);
+	}
+
+	else if (gh__cmd_opts.cache_server_mode ==
+		 GH__CACHE_SERVER_MODE__VERIFY_ERROR) {
+		if (status.ec != GH__ERROR_CODE__OK)
+			die("could not verify cache-server '%s': %s",
+			    gvfs_cache_server_url,
+			    status.error_message.buf);
+		else
+			die("could not verify cache-server '%s'",
+			    gvfs_cache_server_url);
+	}
+
+	else if (gh__cmd_opts.cache_server_mode ==
+		 GH__CACHE_SERVER_MODE__VERIFY_DISABLE) {
+		if (status.ec != GH__ERROR_CODE__OK)
+			warning("could not verify cache-server '%s': %s",
+				gvfs_cache_server_url,
+				status.error_message.buf);
+		else
+			warning("could not verify cache-server '%s'",
+				gvfs_cache_server_url);
+		trace2_data_string("gvfs-helper", NULL, "cache/url",
+				   "disabled");
+	}
+
+	gh__response_status__release(&status);
+}
+
+/*
+ * Read stdin until EOF (or a blank line) and add the desired OIDs
+ * to the oidset.
+ *
+ * Stdin should contain a list of OIDs.  It may have additional
+ * decoration that we need to strip out.
+ *
+ * We expect:
+ * <hex_oid> [<path>]   // present OIDs
+ */
+static unsigned long read_stdin_from_rev_list(struct oidset *oids)
+{
+	struct object_id oid;
+	struct strbuf buf_stdin = STRBUF_INIT;
+	unsigned long count = 0;
+
+	do {
+		if (strbuf_getline(&buf_stdin, stdin) == EOF || !buf_stdin.len)
+			break;
+
+		if (get_oid_hex(buf_stdin.buf, &oid))
+			continue; /* just silently eat it */
+
+		if (!oidset_insert(oids, &oid))
+			count++;
+	} while (1);
+
+	strbuf_release(&buf_stdin);
+	return count;
+}
+
+/*
+ * Build a complete JSON payload for a gvfs/objects POST request
+ * containing the first n OIDs in an OIDSET indexed by the iterator.
+ *
+ * https://github.com/microsoft/VFSForGit/blob/master/Protocol.md
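+ *
+ * The generated request body has (roughly) this shape:
+ *
+ *     {"commitDepth":<depth>,"objectIds":["<hex_oid>","<hex_oid>",...]}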
+ */
+static unsigned long build_json_payload__gvfs_objects(
+	struct json_writer *jw_req,
+	struct oidset_iter *iter,
+	unsigned long nr_in_block)
+{
+	unsigned long k;
+	const struct object_id *oid;
+
+	k = 0;
+
+	jw_init(jw_req);
+	jw_object_begin(jw_req, 0);
+	jw_object_intmax(jw_req, "commitDepth", gh__cmd_opts.depth);
+	jw_object_inline_begin_array(jw_req, "objectIds");
+	while (k < nr_in_block && (oid = oidset_iter_next(iter))) {
+		jw_array_string(jw_req, oid_to_hex(oid));
+		k++;
+	}
+	jw_end(jw_req);
+	jw_end(jw_req);
+
+	return k;
+}
+
+/*
+ * Lookup the creds for the main/origin Git server.
+ */
+static void lookup_main_creds(void)
+{
+	if (gh__global.main_creds.username && *gh__global.main_creds.username)
+		return;
+
+	credential_from_url(&gh__global.main_creds, gh__global.main_url);
+	credential_fill(&gh__global.main_creds, 0);
+	gh__global.main_creds_need_approval = 1;
+}
+
+/*
+ * If we have a set of creds for the main Git server, tell the credential
+ * manager to throw them away and ask it to reacquire them.
+ */
+static void refresh_main_creds(void)
+{
+	if (gh__global.main_creds.username && *gh__global.main_creds.username)
+		credential_reject(&gh__global.main_creds);
+
+	lookup_main_creds();
+
+	// TODO should we compare before and after values of u/p and
+	// TODO shortcut reauth if we already know it will fail?
+	// TODO if so, return a bool if same/different.
+}
+
+static void approve_main_creds(void)
+{
+	if (!gh__global.main_creds_need_approval)
+		return;
+
+	credential_approve(&gh__global.main_creds);
+	gh__global.main_creds_need_approval = 0;
+}
+
+/*
+ * Build a set of creds for the cache-server based upon the main Git
+ * server (assuming we have a cache-server configured).
+ *
+ * That is, we NEVER fill them directly for the cache-server -- we
+ * only synthesize them from the filled main creds.
+ */
+static void synthesize_cache_server_creds(void)
+{
+	if (!gh__global.cache_server_is_initialized)
+		BUG("sub-command did not initialize cache-server vars");
+
+	if (!gh__global.cache_server_url)
+		return;
+
+	if (gh__global.cache_creds.username && *gh__global.cache_creds.username)
+		return;
+
+	/*
+	 * Get the main Git server creds so we can borrow the username
+	 * and password when we talk to the cache-server.
+	 */
+	lookup_main_creds();
+	free(gh__global.cache_creds.username);
+	gh__global.cache_creds.username = xstrdup(gh__global.main_creds.username);
+	free(gh__global.cache_creds.password);
+	gh__global.cache_creds.password = xstrdup(gh__global.main_creds.password);
+}
+
+/*
+ * Flush and refresh the cache-server creds.  Because the cache-server
+ * does not do 401s (or manage creds), we have to reload the main Git
+ * server creds first.
+ *
+ * That is, we NEVER reject them directly because we never filled them.
+ */
+static void refresh_cache_server_creds(void)
+{
+	credential_clear(&gh__global.cache_creds);
+
+	refresh_main_creds();
+	synthesize_cache_server_creds();
+}
+
+/*
+ * We NEVER approve cache-server creds directly because we never directly
+ * filled them.  However, we should be able to infer that the main ones
+ * are valid and can approve them if necessary.
+ */
+static void approve_cache_server_creds(void)
+{
+	approve_main_creds();
+}
+
+/*
+ * Select the ODB directory where we will write objects that we
+ * download.  If a shared-cache directory was given on the command
+ * line or defined in the config, use it; otherwise use the local
+ * ODB (in ".git/objects").
+ */
+static void select_odb(void)
+{
+	const char *odb_path = NULL;
+
+	strbuf_init(&gh__global.buf_odb_path, 0);
+
+	if (gvfs_shared_cache_pathname && *gvfs_shared_cache_pathname)
+		odb_path = gvfs_shared_cache_pathname;
+	else {
+		prepare_alt_odb(the_repository);
+		odb_path = the_repository->objects->odb->path;
+	}
+
+	strbuf_addstr(&gh__global.buf_odb_path, odb_path);
+}
+
+/*
+ * Create a tempfile to stream the packfile into.
+ *
+ * We create a tempfile in the chosen ODB directory and let CURL
+ * automatically stream data to the file.  If successful, we can
+ * later rename it to a proper .pack and run "git index-pack" on
+ * it to create the corresponding .idx file.
+ *
+ * TODO I would rather just stream the packfile directly into
+ * TODO "git index-pack --stdin" (and save some I/O) because it
+ * TODO will automatically take care of the rename of both files
+ * TODO and any other cleanup.  BUT INDEX-PACK WILL ONLY WRITE
+ * TODO TO THE PRIMARY ODB -- it will not write into the alternates
+ * TODO (this is considered bad form).  So we would need to add
+ * TODO an option to index-pack to handle this.  I don't want to
+ * TODO deal with this issue right now.
+ *
+ * TODO Consider using lockfile for this rather than naked tempfile.
+ */
+static struct tempfile *create_tempfile_for_packfile(void)
+{
+	static unsigned int nth = 0;
+	static struct timeval tv = {0};
+	static struct tm tm = {0};
+	static time_t secs = 0;
+	static char tbuf[32] = {0};
+
+	struct tempfile *tempfile = NULL;
+	struct strbuf buf_path = STRBUF_INIT;
+
+	if (!nth) {
+		gettimeofday(&tv, NULL);
+		secs = tv.tv_sec;
+		gmtime_r(&secs, &tm);
+
+		xsnprintf(tbuf, sizeof(tbuf), "%4d%02d%02d-%02d%02d%02d-%06ld",
+			  tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+			  tm.tm_hour, tm.tm_min, tm.tm_sec,
+			  (long)tv.tv_usec);
+	}
+
+	// TODO should this be in the "<ODB>/pack/tempPacks/"
+	// TODO directory instead? YES
+
+	strbuf_addbuf(&buf_path, &gh__global.buf_odb_path);
+	strbuf_complete(&buf_path, '/');
+	strbuf_addf(&buf_path, "pack/vfs-%s-%04d.temp", tbuf, nth++);
+
+	tempfile = create_tempfile(buf_path.buf);
+	fdopen_tempfile(tempfile, "w");
+
+	strbuf_release(&buf_path);
+
+	return tempfile;
+}
+
+/*
+ * Create a tempfile to stream a loose object into.
+ *
+ * We create a tempfile in the chosen ODB directory and let CURL
+ * automatically stream data to the file.
+ *
+ * We put it directly in the "<odb>/xx/" directory.
+ */
+static void create_tempfile_for_loose(
+	struct gh__request_params *params,
+	struct gh__response_status *status,
+	const struct object_id *oid)
+{
+	struct strbuf buf_path = STRBUF_INIT;
+	const char *hex;
+
+	gh__response_status__zero(status);
+
+	hex = oid_to_hex(oid);
+
+	strbuf_addbuf(&buf_path, &gh__global.buf_odb_path);
+	strbuf_complete(&buf_path, '/');
+	strbuf_add(&buf_path, hex, 2);
+
+	if (!file_exists(buf_path.buf) &&
+	    mkdir(buf_path.buf, 0777) == -1 &&
+	    !file_exists(buf_path.buf)) {
+		strbuf_addf(&status->error_message,
+			    "cannot create directory for loose object '%s'",
+			    buf_path.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
+		goto cleanup;
+	}
+
+	strbuf_addch(&buf_path, '/');
+	strbuf_addstr(&buf_path, hex+2);
+
+	/* Remember the full path of the final destination. */
+	strbuf_setlen(&params->loose_path, 0);
+	strbuf_addbuf(&params->loose_path, &buf_path);
+
+	/*
+	 * Build a unique tempfile pathname based upon it.  We avoid
+	 * using lockfiles to avoid issues with stale locks after
+	 * crashes.
+	 */
+	strbuf_addf(&buf_path, ".%08u.temp", (unsigned int)getpid());
+
+	params->tempfile = create_tempfile(buf_path.buf);
+	if (!params->tempfile) {
+		strbuf_addstr(&status->error_message,
+			      "could not create tempfile for loose object");
+		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
+		goto cleanup;
+	}
+
+	fdopen_tempfile(params->tempfile, "w");
+
+cleanup:
+	strbuf_release(&buf_path);
+}
+
+/*
+ * Extract the filename portion of the given pathname.
+ *
+ * TODO Wish I could find a strbuf_filename() function for this.
+ */
+static void extract_filename(struct strbuf *filename,
+			     const struct strbuf *pathname)
+{
+	size_t len = pathname->len;
+
+	strbuf_setlen(filename, 0);
+
+	while (len > 0 && !is_dir_sep(pathname->buf[len - 1]))
+		len--;
+
+	strbuf_addstr(filename, &pathname->buf[len]);
+}
+
+/*
+ * Convert the tempfile into a permanent .pack packfile in the ODB.
+ * Create the corresponding .idx file.
+ *
+ * Return the filename (not pathname) of the resulting packfile.
+ */
+static void install_packfile(struct gh__response_status *status,
+			     struct tempfile **pp_tempfile,
+			     struct strbuf *packfile_filename)
+{
+	struct child_process ip = CHILD_PROCESS_INIT;
+	struct strbuf pack_name_tmp = STRBUF_INIT;
+	struct strbuf pack_name_dst = STRBUF_INIT;
+	struct strbuf idx_name_tmp = STRBUF_INIT;
+	struct strbuf idx_name_dst = STRBUF_INIT;
+	size_t len_base;
+
+	gh__response_status__zero(status);
+
+	strbuf_setlen(packfile_filename, 0);
+
+	/*
+	 * start with "<base>.temp" (that is owned by tempfile class).
+	 * rename to "<base>.pack.temp" to break ownership.
+	 *
+	 * create "<base>.idx.temp" on provisional packfile.
+	 *
+	 * officially install both "<base>.{pack,idx}.temp" as
+	 * "<base>.{pack,idx}".
+	 */
+
+	strbuf_addstr(&pack_name_tmp, get_tempfile_path(*pp_tempfile));
+	if (!strip_suffix(pack_name_tmp.buf, ".temp", &len_base)) {
+		/*
+		 * This is more of a BUG(), but I want the error
+		 * code propagated.
+		 */
+		strbuf_addf(&status->error_message,
+			    "packfile tempfile does not end in '.temp': '%s'",
+			    pack_name_tmp.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
+		goto cleanup;
+	}
+
+	strbuf_setlen(&pack_name_tmp, (int)len_base);
+	strbuf_addbuf(&pack_name_dst, &pack_name_tmp);
+	strbuf_addbuf(&idx_name_tmp, &pack_name_tmp);
+	strbuf_addbuf(&idx_name_dst, &pack_name_tmp);
+
+	strbuf_addstr(&pack_name_tmp, ".pack.temp");
+	strbuf_addstr(&pack_name_dst, ".pack");
+	strbuf_addstr(&idx_name_tmp, ".idx.temp");
+	strbuf_addstr(&idx_name_dst, ".idx");
+
+	// TODO if either pack_name_dst or idx_name_dst already
+	// TODO exists in the ODB, create alternate names so that
+	// TODO we don't step on them.
+
+	if (rename_tempfile(pp_tempfile, pack_name_tmp.buf) == -1) {
+		strbuf_addf(&status->error_message,
+			    "could not rename packfile to '%s'",
+			    pack_name_tmp.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
+		goto cleanup;
+	}
+
+	strvec_push(&ip.args, "index-pack");
+	if (gh__cmd_opts.show_progress)
+		strvec_push(&ip.args, "-v");
+	strvec_pushl(&ip.args, "-o", idx_name_tmp.buf, NULL);
+	strvec_push(&ip.args, pack_name_tmp.buf);
+	ip.git_cmd = 1;
+	ip.no_stdin = 1;
+	ip.no_stdout = 1;
+
+	// TODO consider capturing stdout from index-pack because
+	// TODO it will contain the SHA of the packfile and we can
+	// TODO (should?) add it to the .pack and .idx pathnames
+	// TODO when we install them.
+	// TODO
+	// TODO See pipe_command() rather than run_command().
+	// TODO
+	// TODO Or should we SHA it ourselves (or read the last 20 bytes)?
+
+	/*
+	 * Note that I DO NOT have a region around the index-pack process.
+	 * The region in gh__run_one_slot() currently only covers the
+	 * download time.  This index-pack is a separate step not covered
+	 * in the above region.  Later, if/when we have CURL directly stream
+	 * to index-pack, that region will be the combined download+index
+	 * time.  So, I'm not going to introduce it here.
+	 */
+	if (run_command(&ip)) {
+		unlink(pack_name_tmp.buf);
+		unlink(idx_name_tmp.buf);
+		strbuf_addf(&status->error_message,
+			    "index-pack failed on '%s'", pack_name_tmp.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
+		goto cleanup;
+	}
+
+	if (finalize_object_file(pack_name_tmp.buf, pack_name_dst.buf) ||
+	    finalize_object_file(idx_name_tmp.buf, idx_name_dst.buf)) {
+		unlink(pack_name_tmp.buf);
+		unlink(pack_name_dst.buf);
+		unlink(idx_name_tmp.buf);
+		unlink(idx_name_dst.buf);
+		strbuf_addf(&status->error_message,
+			    "could not install packfile '%s'",
+			    pack_name_dst.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
+		goto cleanup;
+	}
+
+	extract_filename(packfile_filename, &pack_name_dst);
+
+cleanup:
+	child_process_clear(&ip);
+	strbuf_release(&pack_name_tmp);
+	strbuf_release(&pack_name_dst);
+	strbuf_release(&idx_name_tmp);
+	strbuf_release(&idx_name_dst);
+}
+
+/*
+ * Convert the tempfile into a permanent loose object in the ODB.
+ */
+static void install_loose(struct gh__request_params *params,
+			  struct gh__response_status *status)
+{
+	struct strbuf tmp_path = STRBUF_INIT;
+
+	gh__response_status__zero(status);
+
+	/*
+	 * close tempfile to steal ownership away from tempfile class.
+	 */
+	strbuf_addstr(&tmp_path, get_tempfile_path(params->tempfile));
+	close_tempfile_gently(params->tempfile);
+
+	/*
+	 * Try to install the tempfile as the actual loose object.
+	 *
+	 * If the loose object already exists, finalize_object_file()
+	 * will NOT overwrite/replace it.  It will silently eat the
+	 * EEXIST error and unlink the tempfile as if it was
+	 * successful.  We just let it lie to us.
+	 *
+	 * Our job is to back-fill missing objects needed by a
+	 * foreground git process -- git should have called
+	 * oid_object_info_extended() and loose_object_info() BEFORE
+	 * asking us to download the missing object.  So if we get a
+	 * collision we have to assume something else is happening in
+	 * parallel and we lost the race.  And that's OK.
+	 */
+	if (finalize_object_file(tmp_path.buf, params->loose_path.buf)) {
+		unlink(tmp_path.buf);
+		strbuf_addf(&status->error_message,
+			    "could not install loose object '%s'",
+			    params->loose_path.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE;
+	}
+
+	strbuf_release(&tmp_path);
+}
+
+/*
+ * Our wrapper to initialize the HTTP layer.
+ *
+ * We always use the real origin server, not the cache-server, when
+ * initializing the http/curl layer.
+ */
+static void gh_http_init(void)
+{
+	if (gh__global.http_is_initialized)
+		return;
+
+	http_init(gh__global.remote, gh__global.main_url, 0);
+	gh__global.http_is_initialized = 1;
+}
+
+static void gh_http_cleanup(void)
+{
+	if (!gh__global.http_is_initialized)
+		return;
+
+	http_cleanup();
+	gh__global.http_is_initialized = 0;
+}
+
+static void set_main_creds_on_slot(struct active_request_slot *slot,
+				   const struct credential *creds)
+{
+	assert(creds == &gh__global.main_creds);
+
+	/*
+	 * When talking to the main/origin server, we have 3 modes
+	 * of operation:
+	 *
+	 * [1] The initial request is sent without loading creds
+	 *     and with ANY-AUTH set.  (And the `":"` is a magic
+	 *     value.)
+	 *
+	 *     This allows libcurl to negotiate for us if it can.
+	 *     For example, this allows NTLM to work by magic and
+	 *     we get 200s without ever seeing a 401.  If libcurl
+	 *     cannot negotiate for us, it gives us a 401 (and all
+	 *     of the 401 code in this file responds to that).
+	 *
+	 * [2] A 401 retry will load the main creds and try again.
+	 *     This causes `creds->username` to be non-NULL (even
+	 *     if it refers to a zero-length string).  And we assume
+	 *     BASIC Authentication.  (And a zero-length username
+	 *     is a convention for PATs, but then sometimes users
+	 *     put the PAT in their `username` field and leave the
+	 *     `password` field blank.  And that works too.)
+	 *
+	 * [3] Subsequent requests on the same connection use
+	 *     whatever worked before.
+	 */
+	if (creds && creds->username) {
+		curl_easy_setopt(slot->curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC);
+		curl_easy_setopt(slot->curl, CURLOPT_USERNAME, creds->username);
+		curl_easy_setopt(slot->curl, CURLOPT_PASSWORD, creds->password);
+	} else {
+		curl_easy_setopt(slot->curl, CURLOPT_HTTPAUTH, CURLAUTH_ANY);
+		curl_easy_setopt(slot->curl, CURLOPT_USERPWD, ":");
+	}
+}
+
+static void set_cache_server_creds_on_slot(struct active_request_slot *slot,
+					   const struct credential *creds)
+{
+	assert(creds == &gh__global.cache_creds);
+	assert(creds->username);
+
+	/*
+	 * Things are weird when talking to a cache-server:
+	 *
+	 * [1] They don't send 401s on an auth error, rather they send
+	 *     a 400 (with a nice human-readable string in the html body).
+	 *     This prevents libcurl from doing any negotiation for us.
+	 *
+	 * [2] Cache-servers don't manage their own passwords, but
+	 *     rather require us to send the Basic Authentication
+	 *     username & password that we would send to the main
+	 *     server.  (So yes, we have to get creds validated
+	 *     against the main server creds and substitute them when
+	 *     talking to the cache-server.)
+	 *
+	 * This means that:
+	 *
+	 * [a] We cannot support cache-servers that want to use NTLM.
+	 *
+	 * [b] If we want to talk to a cache-server, we have to get the
+	 *     Basic Auth creds for the main server.  And this may be
+	 *     problematic if libcurl and/or the credential manager
+	 *     insists on using NTLM and prevents us from getting them.
+	 *
+	 * So we never try AUTH-ANY and force Basic Auth (if possible).
+	 */
+	if (creds && creds->username) {
+		curl_easy_setopt(slot->curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC);
+		curl_easy_setopt(slot->curl, CURLOPT_USERNAME, creds->username);
+		curl_easy_setopt(slot->curl, CURLOPT_PASSWORD, creds->password);
+	}
+}
+
+/*
+ * Do a single HTTP request without auth-retry or fallback.
+ */
+static void do_req(const char *url_base,
+		   const char *url_component,
+		   const struct credential *creds,
+		   struct gh__request_params *params,
+		   struct gh__response_status *status)
+{
+	struct active_request_slot *slot;
+	struct slot_results results;
+	struct strbuf rest_url = STRBUF_INIT;
+
+	gh__response_status__zero(status);
+
+	if (params->b_write_to_file) {
+		// TODO ftruncate tempfile ??
+	} else {
+		strbuf_setlen(params->buffer, 0);
+	}
+
+	end_url_with_slash(&rest_url, url_base);
+	strbuf_addstr(&rest_url, url_component);
+
+	slot = get_active_slot();
+	slot->results = &results;
+
+	curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 0); /* not a HEAD request */
+	curl_easy_setopt(slot->curl, CURLOPT_URL, rest_url.buf);
+	curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, params->headers);
+
+	if (params->b_is_post) {
+		curl_easy_setopt(slot->curl, CURLOPT_POST, 1);
+		curl_easy_setopt(slot->curl, CURLOPT_ENCODING, NULL);
+		curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDS,
+				 params->post_payload->buf);
+		curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDSIZE,
+				 (long)params->post_payload->len);
+	} else {
+		curl_easy_setopt(slot->curl, CURLOPT_POST, 0);
+	}
+
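+	/*
+	 * Stream the response body either into the caller-provided
+	 * tempfile or into the in-memory buffer, depending on the
+	 * request parameters.
+	 */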
+	if (params->b_write_to_file) {
+		curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite);
+		curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA,
+				 (void*)params->tempfile->fp);
+	} else {
+		curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION,
+				 fwrite_buffer);
+		curl_easy_setopt(slot->curl, CURLOPT_FILE, params->buffer);
+	}
+
+	if (url_base == gh__global.main_url)
+		set_main_creds_on_slot(slot, creds);
+	else
+		set_cache_server_creds_on_slot(slot, creds);
+
+	if (params->progress_base_phase2_msg.len ||
+	    params->progress_base_phase3_msg.len) {
+		curl_easy_setopt(slot->curl, CURLOPT_XFERINFOFUNCTION,
+				 gh__curl_progress_cb);
+		curl_easy_setopt(slot->curl, CURLOPT_XFERINFODATA, params);
+		curl_easy_setopt(slot->curl, CURLOPT_NOPROGRESS, 0);
+	} else {
+		curl_easy_setopt(slot->curl, CURLOPT_NOPROGRESS, 1);
+	}
+
+	gh__run_one_slot(slot, params, status);
+	strbuf_release(&rest_url);
+}
+
+static void do_req__to_main(const char *url_component,
+			    struct gh__request_params *params,
+			    struct gh__response_status *status)
+{
+//	lookup_main_creds();
+
+	/*
+	 * When talking to the main Git server, we DO NOT preload the
+	 * creds before the first request.
+	 */
+
+	do_req(gh__global.main_url, url_component, &gh__global.main_creds,
+	       params, status);
+
+	if (status->response_code == 401) {
+		refresh_main_creds();
+
+		do_req(gh__global.main_url, url_component, &gh__global.main_creds,
+		       params, status);
+	}
+
+	if (status->response_code == 200)
+		approve_main_creds();
+}
+
+static void do_req__to_cache_server(const char *url_component,
+				    struct gh__request_params *params,
+				    struct gh__response_status *status)
+{
+	/*
+	 * When talking to a cache-server, DO force load the creds.
+	 * This implicitly preloads the creds to the main server.
+	 */
+	synthesize_cache_server_creds();
+
+	do_req(gh__global.cache_server_url, url_component, &gh__global.cache_creds,
+	       params, status);
+	fixup_cache_server_400_to_401(status);
+
+	if (status->response_code == 401) {
+		refresh_cache_server_creds();
+
+		do_req(gh__global.cache_server_url, url_component,
+		       &gh__global.cache_creds, params, status);
+		fixup_cache_server_400_to_401(status);
+	}
+
+	if (status->response_code == 200)
+		approve_cache_server_creds();
+}
+
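+/*
+ * Send the request to the cache-server, if one is configured and
+ * allowed for this request, and optionally fall back to the main
+ * Git server when that fails (but not on auth failures, since the
+ * two share creds).
+ */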
+static void do_req__with_fallback(const char *url_component,
+				  struct gh__request_params *params,
+				  struct gh__response_status *status)
+{
+	if (gh__global.cache_server_url && !params->b_no_cache_server) {
+		do_req__to_cache_server(url_component, params, status);
+
+		if (status->response_code == 200)
+			return;
+
+		if (!gh__cmd_opts.try_fallback)
+			return;
+
+		/*
+		 * The cache-server shares creds with the main Git server,
+		 * so if our creds failed against the cache-server, they
+		 * will also fail against the main Git server.  We just let
+		 * this fail.
+		 *
+		 * Falling-back would likely just cause the 3rd (or maybe
+		 * 4th) cred prompt.
+		 */
+		if (status->response_code == 401)
+			return;
+	}
+
+	do_req__to_main(url_component, params, status);
+}
+
+/*
+ * Call "gvfs/config" REST API.
+ *
+ * Return the server's response buffer.  This is probably a raw JSON string.
+ */
+static void do__gvfs_config(struct gh__response_status *status,
+			    struct strbuf *config_data)
+{
+	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
+
+	strbuf_addstr(&params.label, "GET/config");
+
+	params.b_is_post = 0;
+	params.b_write_to_file = 0;
+	params.b_no_cache_server = 1; /* they don't handle gvfs/config API */
+	params.buffer = config_data;
+
+	params.object_count = 1; /* a bit of a lie */
+
+	/*
+	 * "X-TFS-FedAuthRedirect: Suppress" disables the 302 + 203 redirect
+	 * sequence to a login page and forces the main Git server to send a
+	 * normal 401.
+	 */
+	params.headers = http_copy_default_headers();
+	params.headers = curl_slist_append(params.headers,
+					   "X-TFS-FedAuthRedirect: Suppress");
+	params.headers = curl_slist_append(params.headers,
+					   "Pragma: no-cache");
+
+	if (gh__cmd_opts.show_progress) {
+		/*
+		 * gvfs/config has a very small request payload, so I don't
+		 * see any need to report progress on the upload side of
+		 * the GET.  So just report progress on the download side.
+		 */
+		strbuf_addstr(&params.progress_base_phase3_msg,
+			      "Receiving gvfs/config");
+	}
+
+	do_req__with_fallback("gvfs/config", &params, status);
+
+	gh__request_params__release(&params);
+}
+
+/*
+ * Call "gvfs/objects/<oid>" REST API to fetch a loose object
+ * and write it to the ODB.
+ */
+static void do__loose__gvfs_object(struct gh__response_status *status,
+				   const struct object_id *oid)
+{
+	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
+	struct strbuf component_url = STRBUF_INIT;
+
+	gh__response_status__zero(status);
+
+	strbuf_addf(&component_url, "gvfs/objects/%s", oid_to_hex(oid));
+
+	strbuf_addstr(&params.label, "GET/objects");
+
+	params.b_is_post = 0;
+	params.b_write_to_file = 1;
+	params.b_no_cache_server = 0;
+
+	params.object_count = 1;
+
+	params.headers = http_copy_default_headers();
+	params.headers = curl_slist_append(params.headers,
+					   "X-TFS-FedAuthRedirect: Suppress");
+	params.headers = curl_slist_append(params.headers,
+					   "Pragma: no-cache");
+
+	create_tempfile_for_loose(&params, status, oid);
+	if (!params.tempfile)
+		goto cleanup;
+
+	if (gh__cmd_opts.show_progress) {
+		/*
+		 * Likewise, a gvfs/objects/{oid} GET has a very small request
+		 * payload, so I don't see any need to report progress on
+		 * the upload side of the GET.  So just report progress
+		 * on the download side.
+		 */
+		strbuf_addstr(&params.progress_base_phase3_msg,
+			      "Receiving 1 loose object");
+	}
+
+	do_req__with_fallback(component_url.buf, &params, status);
+
+	if (status->ec == GH__ERROR_CODE__OK)
+		install_loose(&params, status);
+
+cleanup:
+	gh__request_params__release(&params);
+	strbuf_release(&component_url);
+}
+
+/*
+ * Call "gvfs/objects" POST REST API to fetch a packfile containing
+ * the objects in the requested OIDSET.  Returns the filename (not
+ * pathname) of the new packfile.
+ */
+static void do__packfile__gvfs_objects(struct gh__response_status *status,
+				       struct oidset_iter *iter,
+				       unsigned long nr_wanted_in_block,
+				       struct strbuf *output_filename,
+				       unsigned long *nr_taken)
+{
+	struct json_writer jw_req = JSON_WRITER_INIT;
+	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
+
+	gh__response_status__zero(status);
+
+	params.object_count = build_json_payload__gvfs_objects(
+		&jw_req, iter, nr_wanted_in_block);
+	*nr_taken = params.object_count;
+
+	strbuf_addstr(&params.label, "POST/objects");
+
+	params.b_is_post = 1;
+	params.b_write_to_file = 1;
+	params.b_no_cache_server = 0;
+
+	params.post_payload = &jw_req.json;
+
+	params.headers = http_copy_default_headers();
+	params.headers = curl_slist_append(params.headers,
+					   "X-TFS-FedAuthRedirect: Suppress");
+	params.headers = curl_slist_append(params.headers,
+					   "Pragma: no-cache");
+	params.headers = curl_slist_append(params.headers,
+					   "Content-Type: application/json");
+	/*
+	 * We really always want a packfile.  But if the payload only
+	 * requests 1 OID, the server will/may send us a single loose
+	 * object instead.  (Apparently the server ignores us when we
+	 * only send application/x-git-packfile and does it anyway.)
+	 *
+	 * So to make it clear to my future self, go ahead and add
+	 * an accept header for loose objects and own it.
+	 */
+	params.headers = curl_slist_append(params.headers,
+					   "Accept: application/x-git-packfile");
+	params.headers = curl_slist_append(params.headers,
+					   "Accept: application/x-git-loose-object");
+
+	params.tempfile = create_tempfile_for_packfile();
+	if (!params.tempfile) {
+		strbuf_addstr(&status->error_message,
+			      "could not create tempfile for packfile");
+		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
+		goto cleanup;
+	}
+
+	if (gh__cmd_opts.show_progress) {
+		strbuf_addf(&params.progress_base_phase2_msg,
+			    "Requesting packfile with %ld objects",
+			    params.object_count);
+		strbuf_addf(&params.progress_base_phase3_msg,
+			    "Receiving packfile with %ld objects",
+			    params.object_count);
+	}
+
+	do_req__with_fallback("gvfs/objects", &params, status);
+
+	if (status->ec == GH__ERROR_CODE__OK) {
+		if (!strcmp(status->content_type.buf,
+			    "application/x-git-packfile")) {
+
+			// TODO Consider having a worker thread to manage
+			// TODO running index-pack and then install the
+			// TODO resulting .idx and .pack files.  This would
+			// TODO let us interleave those steps with our thread
+			// TODO fetching the next block of objects from the
+			// TODO server.  (Need to think about how progress
+			// TODO messages from our thread and index-pack
+			// TODO would mesh.)
+			// TODO
+			// TODO But then again, if we hack index-pack to write
+			// TODO to our alternate and stream the data thru it,
+			// TODO it won't matter.
+
+			install_packfile(status, &params.tempfile,
+					 output_filename);
+			goto cleanup;
+		}
+
+		if (!strcmp(status->content_type.buf,
+			    "application/x-git-loose-object")) {
+			/*
+			 * This should not happen (when we request
+			 * more than one object).  The server can send
+			 * us a loose object (even when we use the
+			 * POST form) if there is only one object in
+			 * the payload (and despite the set of accept
+			 * headers we send), so I'm going to leave
+			 * this here.
+			 */
+			strbuf_addstr(&status->error_message,
+				      "received loose object when packfile expected");
+			status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE;
+			goto cleanup;
+		}
+
+		strbuf_addf(&status->error_message,
+			    "received unknown content-type '%s'",
+			    status->content_type.buf);
+		status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE;
+		goto cleanup;
+	}
+
+cleanup:
+	gh__request_params__release(&params);
+	jw_release(&jw_req);
+}
+
+/*
+ * Bulk or individually fetch a list of objects in one or more http requests.
+ * Create one or more packfiles and/or loose objects.
+ *
+ * We accumulate results for each request in `result_list` until we get a
+ * hard error and have to stop.
+ */
+static void do_fetch_oidset(struct gh__response_status *status,
+			    struct oidset *oids,
+			    unsigned long nr_total,
+			    struct string_list *result_list)
+{
+	struct oidset_iter iter;
+	struct strbuf output_filename = STRBUF_INIT;
+	struct strbuf msg = STRBUF_INIT;
+	struct strbuf err404 = STRBUF_INIT;
+	const struct object_id *oid;
+	unsigned long k;
+	unsigned long nr_taken;
+	int had_404 = 0;
+
+	gh__response_status__zero(status);
+	if (!nr_total)
+		return;
+
+	oidset_iter_init(oids, &iter);
+
+	for (k = 0; k < nr_total; k += nr_taken) {
+		if (nr_total - k == 1 || gh__cmd_opts.block_size == 1) {
+			oid = oidset_iter_next(&iter);
+			nr_taken = 1;
+
+			do__loose__gvfs_object(status, oid);
+
+			/*
+			 * If we get a 404 for an individual object, ignore
+			 * it and get the rest.  We'll fixup the 'ec' later.
+			 */
+			if (status->ec == GH__ERROR_CODE__HTTP_404) {
+				if (!err404.len)
+					strbuf_addf(&err404, "%s: loose object %s",
+						    status->error_message.buf,
+						    oid_to_hex(oid));
+				/*
+				 * Mark the fetch as "incomplete", but don't
+				 * stop trying to get other chunks.
+				 */
+				had_404 = 1;
+				continue;
+			}
+
+			if (status->ec != GH__ERROR_CODE__OK) {
+				/* Stop at the first hard error. */
+				strbuf_addf(&status->error_message, ": loose %s",
+					    oid_to_hex(oid));
+				goto cleanup;
+			}
+
+			strbuf_setlen(&msg, 0);
+			strbuf_addf(&msg, "loose %s", oid_to_hex(oid));
+			string_list_append(result_list, msg.buf);
+
+		} else {
+			strbuf_setlen(&output_filename, 0);
+
+			do__packfile__gvfs_objects(status, &iter,
+						   gh__cmd_opts.block_size,
+						   &output_filename,
+						   &nr_taken);
+
+			/*
+			 * Because the oidset iterator has random
+			 * order, it does no good to say the k-th or
+			 * n-th chunk was incomplete; the client
+			 * cannot use that index for anything.
+			 *
+			 * We get a 404 when at least one object in
+			 * the chunk was not found.
+			 *
+			 * TODO Consider various retry strategies (such as
+			 * TODO loose or bisect) on the members within this
+			 * TODO chunk to reduce the impact of the miss.
+			 *
+			 * For now, ignore the 404 and go on to the
+			 * next chunk and then fixup the 'ec' later.
+			 */
+			if (status->ec == GH__ERROR_CODE__HTTP_404) {
+				if (!err404.len)
+					strbuf_addf(&err404,
+						    "%s: packfile object",
+						    status->error_message.buf);
+				/*
+				 * Mark the fetch as "incomplete", but don't
+				 * stop trying to get other chunks.
+				 */
+				had_404 = 1;
+				continue;
+			}
+
+			if (status->ec != GH__ERROR_CODE__OK) {
+				/* Stop at the first hard error. */
+				strbuf_addstr(&status->error_message,
+					      ": in packfile");
+				goto cleanup;
+			}
+
+			strbuf_setlen(&msg, 0);
+			strbuf_addf(&msg, "packfile %s", output_filename.buf);
+			string_list_append(result_list, msg.buf);
+		}
+	}
+
+cleanup:
+	strbuf_release(&msg);
+	strbuf_release(&err404);
+	strbuf_release(&output_filename);
+
+	if (had_404 && status->ec == GH__ERROR_CODE__OK) {
+		strbuf_setlen(&status->error_message, 0);
+		strbuf_addstr(&status->error_message, "404 Not Found");
+		status->ec = GH__ERROR_CODE__HTTP_404;
+	}
+}
+
+/*
+ * Finish initialization.  This happens after the main option
+ * parsing, dispatch to sub-command, and sub-command option parsing
+ * and before actually doing anything.
+ *
+ * Optionally configure the cache-server if the sub-command will
+ * use it.
+ */
+static void finish_init(int setup_cache_server)
+{
+	select_odb();
+
+	lookup_main_url();
+	gh_http_init();
+
+	if (setup_cache_server)
+		select_cache_server();
+}
+
+/*
+ * Request gvfs/config from main Git server.  (Config data is not
+ * available from a GVFS cache-server.)
+ *
+ * Print the received server configuration (as the raw JSON string).
+ */
+static enum gh__error_code do_sub_cmd__config(int argc UNUSED, const char **argv UNUSED)
+{
+	struct gh__response_status status = GH__RESPONSE_STATUS_INIT;
+	struct strbuf config_data = STRBUF_INIT;
+	enum gh__error_code ec = GH__ERROR_CODE__OK;
+
+	trace2_cmd_mode("config");
+
+	finish_init(0);
+
+	do__gvfs_config(&status, &config_data);
+	ec = status.ec;
+
+	if (ec == GH__ERROR_CODE__OK)
+		printf("%s\n", config_data.buf);
+	else
+		error("config: %s", status.error_message.buf);
+
+	gh__response_status__release(&status);
+	strbuf_release(&config_data);
+
+	return ec;
+}
+
+/*
+ * Read a list of objects from stdin and fetch them in a single request (or
+ * multiple block-size requests).
+ */
+static enum gh__error_code do_sub_cmd__get(int argc, const char **argv)
+{
+	static struct option get_options[] = {
+		OPT_MAGNITUDE('b', "block-size", &gh__cmd_opts.block_size,
+			      N_("number of objects to request at a time")),
+		OPT_INTEGER('d', "depth", &gh__cmd_opts.depth,
+			    N_("Commit depth")),
+		OPT_END(),
+	};
+
+	struct gh__response_status status = GH__RESPONSE_STATUS_INIT;
+	struct oidset oids = OIDSET_INIT;
+	struct string_list result_list = STRING_LIST_INIT_DUP;
+	enum gh__error_code ec = GH__ERROR_CODE__OK;
+	unsigned long nr_total;
+	size_t k;
+
+	trace2_cmd_mode("get");
+
+	if (argc > 1 && !strcmp(argv[1], "-h"))
+		usage_with_options(get_usage, get_options);
+
+	argc = parse_options(argc, argv, NULL, get_options, get_usage, 0);
+	if (gh__cmd_opts.depth < 1)
+		gh__cmd_opts.depth = 1;
+
+	finish_init(1);
+
+	nr_total = read_stdin_from_rev_list(&oids);
+
+	trace2_region_enter("gvfs-helper", "get", NULL);
+	trace2_data_intmax("gvfs-helper", NULL, "get/nr_objects", nr_total);
+	do_fetch_oidset(&status, &oids, nr_total, &result_list);
+	trace2_region_leave("gvfs-helper", "get", NULL);
+
+	ec = status.ec;
+
+	for (k = 0; k < result_list.nr; k++)
+		printf("%s\n", result_list.items[k].string);
+
+	if (ec != GH__ERROR_CODE__OK)
+		error("get: %s", status.error_message.buf);
+
+	gh__response_status__release(&status);
+	oidset_clear(&oids);
+	string_list_clear(&result_list, 0);
+
+	return ec;
+}
+
+/*
+ * Handle the 'get' command when in "server mode".  Only call error()
+ * for hard errors where we cannot communicate correctly with the foreground
+ * client process.  Pass any actual data errors (such as 404's or 401's from
+ * the fetch) back to the client process.
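+ *
+ * The exchange looks roughly like this (pkt-line framing implied):
+ *
+ *     client: get
+ *     client: <hex_oid>
+ *     client: <hex_oid>
+ *     client: <flush>
+ *     server: odb <pathname>
+ *     server: packfile <filename> | loose <oid>   (zero or more)
+ *     server: ok | partial | error <message>
+ *     server: <flush>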
+ */
+static enum gh__error_code do_server_subprocess_get(void)
+{
+	struct gh__response_status status = GH__RESPONSE_STATUS_INIT;
+	struct oidset oids = OIDSET_INIT;
+	struct object_id oid;
+	struct string_list result_list = STRING_LIST_INIT_DUP;
+	enum gh__error_code ec = GH__ERROR_CODE__OK;
+	char *line;
+	int len;
+	int err;
+	size_t k;
+	unsigned long nr_total = 0;
+
+	/*
+	 * Inside the "get" command, we expect a list of OIDs
+	 * and a flush.
+	 */
+	while (1) {
+		len = packet_read_line_gently(0, NULL, &line);
+		if (len < 0 || !line)
+			break;
+
+		if (get_oid_hex(line, &oid)) {
+			error("server: invalid oid syntax '%s'", line);
+			ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
+			goto cleanup;
+		}
+
+		if (!oidset_insert(&oids, &oid))
+			nr_total++;
+	}
+
+	if (!nr_total) {
+		if (packet_write_fmt_gently(1, "ok\n")) {
+			error("server: cannot write 'get' result to client");
+			ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
+		} else
+			ec = GH__ERROR_CODE__OK;
+		goto cleanup;
+	}
+
+	trace2_region_enter("gvfs-helper", "server/get", NULL);
+	trace2_data_intmax("gvfs-helper", NULL, "server/get/nr_objects", nr_total);
+	do_fetch_oidset(&status, &oids, nr_total, &result_list);
+	trace2_region_leave("gvfs-helper", "server/get", NULL);
+
+	/*
+	 * Write the pathname of the ODB where we wrote all of the objects
+	 * we fetched.
+	 */
+	if (packet_write_fmt_gently(1, "odb %s\n",
+				    gh__global.buf_odb_path.buf)) {
+		error("server: cannot write 'odb' to client");
+		ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
+		goto cleanup;
+	}
+
+	for (k = 0; k < result_list.nr; k++)
+		if (packet_write_fmt_gently(1, "%s\n",
+					    result_list.items[k].string)) {
+			error("server: cannot write result to client: '%s'",
+			      result_list.items[k].string);
+			ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
+			goto cleanup;
+		}
+
+	err = 0;
+	if (ec == GH__ERROR_CODE__OK)
+		err = packet_write_fmt_gently(1, "ok\n");
+	else if (ec == GH__ERROR_CODE__HTTP_404)
+		err = packet_write_fmt_gently(1, "partial\n");
+	else
+		err = packet_write_fmt_gently(1, "error %s\n",
+					      status.error_message.buf);
+	if (err) {
+		error("server: cannot write result to client");
+		ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
+		goto cleanup;
+	}
+
+	if (packet_flush_gently(1)) {
+		error("server: cannot flush result to client");
+		ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
+		goto cleanup;
+	}
+
+cleanup:
+	oidset_clear(&oids);
+	string_list_clear(&result_list, 0);
+	gh__response_status__release(&status);
+
+	return ec;
+}
+
+typedef enum gh__error_code (fn_subprocess_cmd)(void);
+
+struct subprocess_capability {
+	const char *name;
+	int client_has;
+	fn_subprocess_cmd *pfn;
+};
+
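+/*
+ * Table of commands understood in "server" (long-running subprocess)
+ * mode.  Each capability is advertised during the handshake and then
+ * dispatched to its handler from the command loop.
+ */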
+static struct subprocess_capability caps[] = {
+	{ "get", 0, do_server_subprocess_get },
+	{ NULL, 0, NULL },
+};
+
+/*
+ * Handle the subprocess protocol handshake as described in:
+ * [] Documentation/technical/protocol-common.txt
+ * [] Documentation/technical/long-running-process-protocol.txt
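+ *
+ * Roughly (each line is a pkt-line; <flush> is a flush packet):
+ *
+ *     client: gvfs-helper-client
+ *     client: version=1
+ *     client: <flush>
+ *     server: gvfs-helper-server
+ *     server: version=1
+ *     server: <flush>
+ *     client: capability=get
+ *     client: <flush>
+ *     server: capability=get
+ *     server: <flush>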
+ */
+static int do_protocol_handshake(void)
+{
+#define OUR_SUBPROCESS_VERSION "1"
+
+	char *line;
+	int len;
+	int k;
+	int b_support_our_version = 0;
+
+	len = packet_read_line_gently(0, NULL, &line);
+	if (len < 0 || !line || strcmp(line, "gvfs-helper-client")) {
+		error("server: subprocess welcome handshake failed: %s", line);
+		return -1;
+	}
+
+	while (1) {
+		const char *v;
+		len = packet_read_line_gently(0, NULL, &line);
+		if (len < 0 || !line)
+			break;
+		if (!skip_prefix(line, "version=", &v)) {
+			error("server: subprocess version handshake failed: %s",
+			      line);
+			return -1;
+		}
+		b_support_our_version |= (!strcmp(v, OUR_SUBPROCESS_VERSION));
+	}
+	if (!b_support_our_version) {
+		error("server: client does not support our version: %s",
+		      OUR_SUBPROCESS_VERSION);
+		return -1;
+	}
+
+	if (packet_write_fmt_gently(1, "gvfs-helper-server\n") ||
+	    packet_write_fmt_gently(1, "version=%s\n",
+				    OUR_SUBPROCESS_VERSION) ||
+	    packet_flush_gently(1)) {
+		error("server: cannot write version handshake");
+		return -1;
+	}
+
+	while (1) {
+		const char *v;
+		int k;
+
+		len = packet_read_line_gently(0, NULL, &line);
+		if (len < 0 || !line)
+			break;
+		if (!skip_prefix(line, "capability=", &v)) {
+			error("server: subprocess capability handshake failed: %s",
+			      line);
+			return -1;
+		}
+		for (k = 0; caps[k].name; k++)
+			if (!strcmp(v, caps[k].name))
+				caps[k].client_has = 1;
+	}
+
+	for (k = 0; caps[k].name; k++)
+		if (caps[k].client_has)
+			if (packet_write_fmt_gently(1, "capability=%s\n",
+						    caps[k].name)) {
+				error("server: cannot write capabilities handshake: %s",
+				      caps[k].name);
+				return -1;
+			}
+	if (packet_flush_gently(1)) {
+		error("server: cannot write capabilities handshake");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Interactively listen to stdin for a series of commands and execute them.
+ */
+static enum gh__error_code do_sub_cmd__server(int argc, const char **argv)
+{
+	static struct option server_options[] = {
+		OPT_MAGNITUDE('b', "block-size", &gh__cmd_opts.block_size,
+			      N_("number of objects to request at a time")),
+		OPT_INTEGER('d', "depth", &gh__cmd_opts.depth,
+			    N_("Commit depth")),
+		OPT_END(),
+	};
+
+	enum gh__error_code ec = GH__ERROR_CODE__OK;
+	char *line;
+	int len;
+	int k;
+
+	trace2_cmd_mode("server");
+
+	if (argc > 1 && !strcmp(argv[1], "-h"))
+		usage_with_options(server_usage, server_options);
+
+	argc = parse_options(argc, argv, NULL, server_options, server_usage, 0);
+	if (gh__cmd_opts.depth < 1)
+		gh__cmd_opts.depth = 1;
+
+	finish_init(1);
+
+	if (do_protocol_handshake()) {
+		ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
+		goto cleanup;
+	}
+
+top_of_loop:
+	while (1) {
+		len = packet_read_line_gently(0, NULL, &line);
+		if (len < 0 || !line) {
+			/* use extra FLUSH as a QUIT */
+			ec = GH__ERROR_CODE__OK;
+			goto cleanup;
+		}
+
+		for (k = 0; caps[k].name; k++) {
+			if (caps[k].client_has && !strcmp(line, caps[k].name)) {
+				ec = (caps[k].pfn)();
+				if (ec != GH__ERROR_CODE__OK)
+					goto cleanup;
+				goto top_of_loop;
+			}
+		}
+
+		error("server: unknown command '%s'", line);
+		ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
+		goto cleanup;
+	}
+
+cleanup:
+	return ec;
+}
+
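+/*
+ * Dispatch to the requested sub-command ("get", "config", or "server").
+ */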
+static enum gh__error_code do_sub_cmd(int argc, const char **argv)
+{
+	if (!strcmp(argv[0], "get"))
+		return do_sub_cmd__get(argc, argv);
+
+	if (!strcmp(argv[0], "config"))
+		return do_sub_cmd__config(argc, argv);
+
+	if (!strcmp(argv[0], "server"))
+		return do_sub_cmd__server(argc, argv);
+
+	// TODO have "test" mode that could be used to drive
+	// TODO unit testing.
+
+	return GH__ERROR_CODE__USAGE;
+}
+
+/*
+ * Communicate with the primary Git server or a GVFS cache-server using the
+ * GVFS Protocol.
+ *
+ * https://github.com/microsoft/VFSForGit/blob/master/Protocol.md
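+ *
+ * Example (hypothetical) invocation to fetch one object by OID:
+ *
+ *     echo <hex_oid> | git gvfs-helper get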
+ */
+int cmd_main(int argc, const char **argv)
+{
+	static struct option main_options[] = {
+		OPT_STRING('r', "remote", &gh__cmd_opts.remote_name,
+			   N_("remote"),
+			   N_("Remote name")),
+		OPT_BOOL('f', "fallback", &gh__cmd_opts.try_fallback,
+			 N_("Fallback to Git server if cache-server fails")),
+		OPT_CALLBACK(0, "cache-server", NULL,
+			     N_("cache-server"),
+			     N_("cache-server=disable|trust|verify|error"),
+			     option_parse_cache_server_mode),
+		OPT_CALLBACK(0, "shared-cache", NULL,
+			     N_("pathname"),
+			     N_("Pathname to shared objects directory"),
+			     option_parse_shared_cache_directory),
+		OPT_BOOL('p', "progress", &gh__cmd_opts.show_progress,
+			 N_("Show progress")),
+		OPT_END(),
+	};
+
+	enum gh__error_code ec = GH__ERROR_CODE__OK;
+
+	if (argc > 1 && !strcmp(argv[1], "-h"))
+		usage_with_options(main_usage, main_options);
+
+	trace2_cmd_name("gvfs-helper");
+
+	setup_git_directory_gently(NULL);
+
+	git_config(git_default_config, NULL);
+
+	/* Set any non-zero initial values in gh__cmd_opts. */
+	gh__cmd_opts.depth = 1;
+	gh__cmd_opts.block_size = GH__DEFAULT_BLOCK_SIZE;
+	gh__cmd_opts.show_progress = !!isatty(2);
+
+	argc = parse_options(argc, argv, NULL, main_options, main_usage,
+			     PARSE_OPT_STOP_AT_NON_OPTION);
+	if (argc == 0)
+		usage_with_options(main_usage, main_options);
+
+	ec = do_sub_cmd(argc, argv);
+
+	gh_http_cleanup();
+
+	if (ec == GH__ERROR_CODE__USAGE)
+		usage_with_options(main_usage, main_options);
+
+	return ec;
+}
diff --git a/meson.build b/meson.build
index 4e7a219b6517c6..e977eda6e6a349 100644
--- a/meson.build
+++ b/meson.build
@@ -295,6 +295,7 @@ libgit_sources = [
   'graph.c',
   'grep.c',
   'gvfs.c',
+  'gvfs-helper-client.c',
   'hash-lookup.c',
   'hashmap.c',
   'help.c',
@@ -1643,6 +1644,13 @@ if get_option('curl').enabled()
     )
   endif
 
+  test_dependencies += executable('git-gvfs-helper',
+    sources: curl_sources + 'gvfs-helper.c',
+    dependencies: [libgit, common_main],
+    install: true,
+    install_dir: get_option('libexecdir') / 'git-core',
+  )
+
   foreach alias : [ 'git-remote-https', 'git-remote-ftp', 'git-remote-ftps' ]
     test_dependencies += executable(alias,
       objects: git_remote_http.extract_all_objects(recursive: false),
@@ -1682,6 +1690,7 @@ endforeach
 
 foreach symlink : [
   'git',
+  'git-gvfs-helper',
   'git-receive-pack',
   'git-shell',
   'git-upload-archive',
diff --git a/object-file.c b/object-file.c
index 36e32dde4a3ad1..6b77ac219b3c76 100644
--- a/object-file.c
+++ b/object-file.c
@@ -47,6 +47,7 @@
 #include "sigchain.h"
 #include "sub-process.h"
 #include "pkt-line.h"
+#include "gvfs-helper-client.h"
 
 /* The maximum size for an object header. */
 #define MAX_HEADER_LEN 32
@@ -1767,7 +1768,7 @@ static int do_oid_object_info_extended(struct repository *r,
 	const struct object_id *real = oid;
 	int already_retried = 0;
 	int tried_hook = 0;
-
+	int tried_gvfs_helper = 0;
 
 	if (flags & OBJECT_INFO_LOOKUP_REPLACE)
 		real = lookup_replace_object(r, oid);
@@ -1805,13 +1806,41 @@ static int do_oid_object_info_extended(struct repository *r,
 		if (!loose_object_info(r, real, oi, flags))
 			return 0;
 
+		if (core_use_gvfs_helper && !tried_gvfs_helper) {
+			enum gh_client__created ghc;
+
+			if (flags & OBJECT_INFO_SKIP_FETCH_OBJECT)
+				return -1;
+
+			gh_client__get_immediate(real, &ghc);
+			tried_gvfs_helper = 1;
+
+			/*
+			 * Retry the lookup iff `gvfs-helper` created one
+			 * or more new packfiles or loose objects.
+			 */
+			if (ghc != GHC__CREATED__NOTHING)
+				continue;
+
+			/*
+			 * If `gvfs-helper` fails, we just want to return -1.
+			 * But allow the other providers to have a shot at it.
+			 * (At least until we have a chance to consolidate
+			 * them.)
+			 */
+		}
+
 		/* Not a loose object; someone else may have just packed it. */
 		if (!(flags & OBJECT_INFO_QUICK)) {
 			reprepare_packed_git(r);
 			if (find_pack_entry(r, real, &e))
 				break;
 			if (core_virtualize_objects && !tried_hook) {
+				// TODO Assert or at least trace2 if gvfs-helper
+				// TODO was tried and failed and then read-object-hook
+				// TODO is successful at getting this object.
 				tried_hook = 1;
+				// TODO BUG? Should 'oid' be 'real' ?
 				if (!read_object_process(oid))
 					goto retry;
 			}
diff --git a/promisor-remote.c b/promisor-remote.c
index c714f4f00728e4..243ac157fa87d5 100644
--- a/promisor-remote.c
+++ b/promisor-remote.c
@@ -1,9 +1,11 @@
 #define USE_THE_REPOSITORY_VARIABLE
 
 #include "git-compat-util.h"
+#include "environment.h"
 #include "gettext.h"
 #include "hex.h"
 #include "object-store-ll.h"
+#include "gvfs-helper-client.h"
 #include "promisor-remote.h"
 #include "config.h"
 #include "trace2.h"
@@ -218,7 +220,7 @@ struct promisor_remote *repo_promisor_remote_find(struct repository *r,
 
 int repo_has_promisor_remote(struct repository *r)
 {
-	return !!repo_promisor_remote_find(r, NULL);
+	return core_use_gvfs_helper || !!repo_promisor_remote_find(r, NULL);
 }
 
 static int remove_fetched_oids(struct repository *repo,
@@ -265,6 +267,15 @@ void promisor_remote_get_direct(struct repository *repo,
 
 	if (oid_nr == 0)
 		return;
+	if (core_use_gvfs_helper) {
+		enum gh_client__created ghc = GHC__CREATED__NOTHING;
+
+		trace2_data_intmax("bug", the_repository, "fetch_objects/gvfs-helper", oid_nr);
+		gh_client__queue_oid_array(oids, oid_nr);
+		if (!gh_client__drain_queue(&ghc))
+			return;
+		die(_("failed to fetch missing objects from the remote"));
+	}
 
 	promisor_remote_init(repo);
 
diff --git a/t/helper/.gitignore b/t/helper/.gitignore
index 8c2ddcce95f7aa..4687ed470c5978 100644
--- a/t/helper/.gitignore
+++ b/t/helper/.gitignore
@@ -1,2 +1,3 @@
+/test-gvfs-protocol
 /test-tool
 /test-fake-ssh

From 4629b895b1bfab03f84f02758980c18678af5066 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 16 Jul 2019 10:40:56 -0400
Subject: [PATCH 091/207] trace2:gvfs:experiment: add unpack_entry() counter to
 unpack_trees() and report_tracking()

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 builtin/checkout.c | 6 ++++++
 packfile.c         | 9 +++++++++
 packfile.h         | 5 +++++
 unpack-trees.c     | 5 +++++
 4 files changed, 25 insertions(+)

diff --git a/builtin/checkout.c b/builtin/checkout.c
index 7fbf5a0dfe2e5f..a2c9eec1c94910 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -20,6 +20,7 @@
 #include "merge-recursive.h"
 #include "object-name.h"
 #include "object-store-ll.h"
+#include "packfile.h"
 #include "parse-options.h"
 #include "path.h"
 #include "preload-index.h"
@@ -1050,8 +1051,13 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
 	if (!opts->quiet &&
 	    !opts->force_detach &&
 	    (new_branch_info->path || !strcmp(new_branch_info->name, "HEAD"))) {
+		unsigned long nr_unpack_entry_at_start;
+
 		trace2_region_enter("exp", "report_tracking", the_repository);
+		nr_unpack_entry_at_start = get_nr_unpack_entry();
 		report_tracking(new_branch_info);
+		trace2_data_intmax("exp", NULL, "report_tracking/nr_unpack_entries",
+				   (intmax_t)(get_nr_unpack_entry() - nr_unpack_entry_at_start));
 		trace2_region_leave("exp", "report_tracking", the_repository);
 	}
 }
diff --git a/packfile.c b/packfile.c
index cc7ab6403ae5fd..9f4a71b3f0f2d4 100644
--- a/packfile.c
+++ b/packfile.c
@@ -1695,6 +1695,13 @@ struct unpack_entry_stack_ent {
 	unsigned long size;
 };
 
+static unsigned long g_nr_unpack_entry;
+
+unsigned long get_nr_unpack_entry(void)
+{
+	return g_nr_unpack_entry;
+}
+
 void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset,
 		   enum object_type *final_type, unsigned long *final_size)
 {
@@ -1708,6 +1715,8 @@ void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset,
 	int delta_stack_nr = 0, delta_stack_alloc = UNPACK_ENTRY_STACK_PREALLOC;
 	int base_from_cache = 0;
 
+	g_nr_unpack_entry++;
+
 	prepare_repo_settings(p->repo);
 
 	write_pack_access_log(p, obj_offset);
diff --git a/packfile.h b/packfile.h
index 58104fa009d601..7c9edf7526c54a 100644
--- a/packfile.h
+++ b/packfile.h
@@ -216,4 +216,9 @@ int is_promisor_object(struct repository *r, const struct object_id *oid);
 int load_idx(const char *path, const unsigned int hashsz, void *idx_map,
 	     size_t idx_size, struct packed_git *p);
 
+/*
+ * Return the number of objects unpacked from packfiles.
+ */
+unsigned long get_nr_unpack_entry(void);
+
 #endif
diff --git a/unpack-trees.c b/unpack-trees.c
index 71e208f11b5ba4..5c97e47572819a 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -17,6 +17,7 @@
 #include "tree-walk.h"
 #include "cache-tree.h"
 #include "unpack-trees.h"
+#include "packfile.h"
 #include "progress.h"
 #include "refs.h"
 #include "attr.h"
@@ -1901,6 +1902,7 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
 	struct pattern_list pl;
 	int free_pattern_list = 0;
 	struct dir_struct dir = DIR_INIT;
+	unsigned long nr_unpack_entry_at_start;
 
 	if (o->reset == UNPACK_RESET_INVALID)
 		BUG("o->reset had a value of 1; should be UNPACK_TREES_*_UNTRACKED");
@@ -1915,6 +1917,7 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
 		BUG("o->df_conflict_entry is an output only field");
 
 	trace2_region_enter("exp", "unpack_trees", NULL);
+	nr_unpack_entry_at_start = get_nr_unpack_entry();
 
 	trace_performance_enter();
 	trace2_region_enter("unpack_trees", "unpack_trees", the_repository);
@@ -2125,6 +2128,8 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
 	}
 	trace2_region_leave("unpack_trees", "unpack_trees", the_repository);
 	trace_performance_leave("unpack_trees");
+	trace2_data_intmax("unpack_trees", NULL, "unpack_trees/nr_unpack_entries",
+			   (intmax_t)(get_nr_unpack_entry() - nr_unpack_entry_at_start));
 	trace2_region_leave("exp", "unpack_trees", NULL);
 	return ret;
 

From d1c3e0af662434f315f5e0d4a802f70bfe235665 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Mon, 7 Oct 2019 13:55:42 -0400
Subject: [PATCH 092/207] sha1-file: create shared-cache directory if it
 doesn't exist

The config variable `gvfs.sharedCache` contains the pathname to an alternate
<odb> that will be used by `gvfs-helper` to store dynamically-fetched missing
objects.  If this directory does not exist on disk, `prepare_alt_odb()` omits
this directory from the in-memory list of alternates.  This causes `git`
commands (and `gvfs-helper` in particular) to fall back to `.git/objects` for
storage of these objects.  This disables the shared-cache and leads to poorer
performance.

Teach `alt_odb_usable()` and `prepare_alt_odb()` to match up the directory
named in `gvfs.sharedCache` with an entry in `.git/objects/info/alternates`
and force-create the `<odb>` root directory (and the associated `<odb>/pack`
directory) if necessary.

If the value of `gvfs.sharedCache` refers to a directory that is NOT listed
as an alternate, create an in-memory alternate entry in the odb-list.  (This
is similar to how GIT_ALTERNATE_OBJECT_DIRECTORIES works.)

This work happens the first time that `prepare_alt_odb()` is called.

Furthermore, teach the `--shared-cache=<odb>` command line option in
`gvfs-helper` (which runs after the first call to `prepare_alt_odb()`)
to override the inherited shared-cache (and again, create the ODB directory
if necessary).
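
For illustration, a minimal sketch of the force-create trick used below
(all names come from the patch itself; the trailing "foo" component exists
only so that safe_create_leading_directories() treats "<odb>/pack" as a
leading directory and creates it):

    struct strbuf buf_pack_foo = STRBUF_INIT;
    enum scld_error scld;

    /* "<odb>/pack/foo" makes "<odb>" and "<odb>/pack" leading dirs */
    strbuf_addf(&buf_pack_foo, "%s/pack/foo",
                gvfs_shared_cache_pathname.buf);

    scld = safe_create_leading_directories(buf_pack_foo.buf);
    if (scld != SCLD_OK && scld != SCLD_EXISTS)
            /* pretend gvfs.sharedCache was never set */
            strbuf_release(&gvfs_shared_cache_pathname);

    strbuf_release(&buf_pack_foo);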

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 config.c             |  12 ++---
 environment.c        |   2 +-
 environment.h        |   2 +-
 gvfs-helper-client.c |  20 ++++++++
 gvfs-helper.c        | 107 +++++++++++++++++++++++++++++++++----------
 object-file.c        |  75 ++++++++++++++++++++++++++++++
 6 files changed, 185 insertions(+), 33 deletions(-)

diff --git a/config.c b/config.c
index 0a9a2856d242d1..8b5143518eced9 100644
--- a/config.c
+++ b/config.c
@@ -1798,19 +1798,17 @@ static int git_default_gvfs_config(const char *var, const char *value)
 	}
 
 	if (!strcmp(var, "gvfs.sharedcache") && value && *value) {
-		struct strbuf buf = STRBUF_INIT;
-		strbuf_addstr(&buf, value);
-		if (strbuf_normalize_path(&buf) < 0) {
+		strbuf_setlen(&gvfs_shared_cache_pathname, 0);
+		strbuf_addstr(&gvfs_shared_cache_pathname, value);
+		if (strbuf_normalize_path(&gvfs_shared_cache_pathname) < 0) {
 			/*
 			 * Pretend it wasn't set.  This will cause us to
 			 * fallback to ".git/objects" effectively.
 			 */
-			strbuf_release(&buf);
+			strbuf_release(&gvfs_shared_cache_pathname);
 			return 0;
 		}
-		strbuf_trim_trailing_dir_sep(&buf);
-
-		gvfs_shared_cache_pathname = strbuf_detach(&buf, NULL);
+		strbuf_trim_trailing_dir_sep(&gvfs_shared_cache_pathname);
 		return 0;
 	}
 
diff --git a/environment.c b/environment.c
index f7393925730d87..d7832244da25c9 100644
--- a/environment.c
+++ b/environment.c
@@ -100,7 +100,7 @@ int protect_hfs = PROTECT_HFS_DEFAULT;
 int protect_ntfs = PROTECT_NTFS_DEFAULT;
 int core_use_gvfs_helper;
 char *gvfs_cache_server_url;
-const char *gvfs_shared_cache_pathname;
+struct strbuf gvfs_shared_cache_pathname = STRBUF_INIT;
 
 /*
  * The character that begins a commented line in user-editable file
diff --git a/environment.h b/environment.h
index 4d92b6a8b64ee9..abb3366957fb1d 100644
--- a/environment.h
+++ b/environment.h
@@ -177,7 +177,7 @@ extern int protect_hfs;
 extern int protect_ntfs;
 extern int core_use_gvfs_helper;
 extern char *gvfs_cache_server_url;
-extern const char *gvfs_shared_cache_pathname;
+extern struct strbuf gvfs_shared_cache_pathname;
 
 extern int core_apply_sparse_checkout;
 extern int core_sparse_checkout_cone;
diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c
index b81f38a4fe721f..50ff4c672d2418 100644
--- a/gvfs-helper-client.c
+++ b/gvfs-helper-client.c
@@ -1,5 +1,6 @@
 #define USE_THE_REPOSITORY_VARIABLE
 #include "git-compat-util.h"
+#include "environment.h"
 #include "hex.h"
 #include "strvec.h"
 #include "trace2.h"
@@ -208,13 +209,32 @@ static int gh_client__get__receive_response(
 	return err;
 }
 
+/*
+ * Select the preferred ODB for fetching missing objects.
+ * This should be the alternate with the same directory
+ * name as set in `gvfs.sharedCache`.
+ *
+ * Fallback to .git/objects if necessary.
+ */
 static void gh_client__choose_odb(void)
 {
+	struct object_directory *odb;
+
 	if (gh_client__chosen_odb)
 		return;
 
 	prepare_alt_odb(the_repository);
 	gh_client__chosen_odb = the_repository->objects->odb;
+
+	if (!gvfs_shared_cache_pathname.len)
+		return;
+
+	for (odb = the_repository->objects->odb->next; odb; odb = odb->next) {
+		if (!fspathcmp(odb->path, gvfs_shared_cache_pathname.buf)) {
+			gh_client__chosen_odb = odb;
+			return;
+		}
+	}
 }
 
 static int gh_client__get(enum gh_client__created *p_ghc)
diff --git a/gvfs-helper.c b/gvfs-helper.c
index abf97c95e1ce9e..6d31eb808532de 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -81,10 +81,11 @@
 //
 //                 Fetch 1 or more objects.  If a cache-server is configured,
 //                 try it first.  Optionally fallback to the main Git server.
+//
 //                 Create 1 or more loose objects and/or packfiles in the
-//                 requested shared-cache directory (given on the command
-//                 line and which is reported at the beginning of the
-//                 response).
+//                 shared-cache ODB.  (The pathname of the selected ODB is
+//                 reported at the beginning of the response; this should
+//                 match the pathname given on the command line).
 //
 //                 git> get
 //                 git> <oid>
@@ -641,26 +642,88 @@ static int option_parse_cache_server_mode(const struct option *opt,
 }
 
 /*
- * Let command line args override "gvfs.sharedcache" config setting.
+ * Let command line args override "gvfs.sharedcache" config setting
+ * and override the value set by git_default_config().
+ *
+ * The command line is parsed *AFTER* the config is loaded, so
+ * prepare_alt_odb() has already been called and any default or inherited
+ * shared-cache has already been set.
  *
- * It would be nice to move this to parse-options.c as an
- * OPTION_PATHNAME handler.  And maybe have flags for exists()
- * and is_directory().
+ * We have a chance to override it here.
  */
 static int option_parse_shared_cache_directory(const struct option *opt,
 					       const char *arg, int unset)
 {
+	struct strbuf buf_arg = STRBUF_INIT;
+
 	if (unset) /* should not happen */
 		return error(_("missing value for switch '%s'"),
 			     opt->long_name);
 
-	if (!is_directory(arg))
-		return error(_("value for switch '%s' is not a directory: '%s'"),
-			     opt->long_name, arg);
+	strbuf_addstr(&buf_arg, arg);
+	if (strbuf_normalize_path(&buf_arg) < 0) {
+		/*
+		 * Pretend command line wasn't given.  Use whatever
+		 * settings we already have from the config.
+		 */
+		strbuf_release(&buf_arg);
+		return 0;
+	}
+	strbuf_trim_trailing_dir_sep(&buf_arg);
+
+	if (!strbuf_cmp(&buf_arg, &gvfs_shared_cache_pathname)) {
+		/*
+		 * The command line argument matches what we got from
+		 * the config, so we're already setup correctly. (And
+		 * we have already verified that the directory exists
+		 * on disk.)
+		 */
+		strbuf_release(&buf_arg);
+		return 0;
+	}
+
+	else if (!gvfs_shared_cache_pathname.len) {
+		/*
+		 * A shared-cache was requested and we did not inherit one.
+		 * Try it, but let alt_odb_usable() secretly disable it if
+		 * it cannot create the directory on disk.
+		 */
+		strbuf_addbuf(&gvfs_shared_cache_pathname, &buf_arg);
 
-	gvfs_shared_cache_pathname = arg;
+		add_to_alternates_memory(buf_arg.buf);
 
-	return 0;
+		strbuf_release(&buf_arg);
+		return 0;
+	}
+
+	else {
+		/*
+		 * The requested shared-cache is different from the one
+		 * we inherited.  Replace the inherited value with this
+		 * one, but smartly fallback if necessary.
+		 */
+		struct strbuf buf_prev = STRBUF_INIT;
+
+		strbuf_addbuf(&buf_prev, &gvfs_shared_cache_pathname);
+
+		strbuf_setlen(&gvfs_shared_cache_pathname, 0);
+		strbuf_addbuf(&gvfs_shared_cache_pathname, &buf_arg);
+
+		add_to_alternates_memory(buf_arg.buf);
+
+		/*
+		 * alt_odb_usable() releases gvfs_shared_cache_pathname
+		 * if it cannot create the directory on disk, so fallback
+		 * to the previous choice when it fails.
+		 */
+		if (!gvfs_shared_cache_pathname.len)
+			strbuf_addbuf(&gvfs_shared_cache_pathname,
+				      &buf_prev);
+
+		strbuf_release(&buf_arg);
+		strbuf_release(&buf_prev);
+		return 0;
+	}
 }
 
 /*
@@ -961,24 +1024,20 @@ static void approve_cache_server_creds(void)
 }
 
 /*
- * Select the ODB directory where we will write objects that we
- * download.  If was given on the command line or define in the
- * config, use the local ODB (in ".git/objects").
+ * Get the pathname to the ODB where we write objects that we download.
  */
 static void select_odb(void)
 {
-	const char *odb_path = NULL;
+	prepare_alt_odb(the_repository);
 
 	strbuf_init(&gh__global.buf_odb_path, 0);
 
-	if (gvfs_shared_cache_pathname && *gvfs_shared_cache_pathname)
-		odb_path = gvfs_shared_cache_pathname;
-	else {
-		prepare_alt_odb(the_repository);
-		odb_path = the_repository->objects->odb->path;
-	}
-
-	strbuf_addstr(&gh__global.buf_odb_path, odb_path);
+	if (gvfs_shared_cache_pathname.len)
+		strbuf_addbuf(&gh__global.buf_odb_path,
+			      &gvfs_shared_cache_pathname);
+	else
+		strbuf_addstr(&gh__global.buf_odb_path,
+			      the_repository->objects->odb->path);
 }
 
 /*
diff --git a/object-file.c b/object-file.c
index 6b77ac219b3c76..559254405c4766 100644
--- a/object-file.c
+++ b/object-file.c
@@ -527,6 +527,8 @@ const char *loose_object_path(struct repository *r, struct strbuf *buf,
 	return odb_loose_path(r->objects->odb, buf, oid);
 }
 
+static int gvfs_matched_shared_cache_to_alternate;
+
 /*
  * Return non-zero iff the path is usable as an alternate object database.
  */
@@ -536,6 +538,52 @@ static int alt_odb_usable(struct raw_object_store *o,
 {
 	int r;
 
+	if (!strbuf_cmp(path, &gvfs_shared_cache_pathname)) {
+		/*
+		 * `gvfs.sharedCache` is the preferred alternate that we
+		 * will use with `gvfs-helper.exe` to dynamically fetch
+		 * missing objects.  It is set during git_default_config().
+		 *
+		 * Make sure the directory exists on disk before we let the
+		 * stock code discredit it.
+		 */
+		struct strbuf buf_pack_foo = STRBUF_INIT;
+		enum scld_error scld;
+
+		/*
+		 * Force create the "<odb>" and "<odb>/pack" directories, if
+		 * not present on disk.  Append an extra bogus directory to
+		 * get safe_create_leading_directories() to see "<odb>/pack"
+		 * as a leading directory of something deeper (which it
+		 * won't create).
+		 */
+		strbuf_addf(&buf_pack_foo, "%s/pack/foo", path->buf);
+
+		scld = safe_create_leading_directories(buf_pack_foo.buf);
+		if (scld != SCLD_OK && scld != SCLD_EXISTS) {
+			error_errno(_("could not create shared-cache ODB '%s'"),
+				    gvfs_shared_cache_pathname.buf);
+
+			strbuf_release(&buf_pack_foo);
+
+			/*
+			 * Pretend no shared-cache was requested and
+			 * effectively fallback to ".git/objects" for
+			 * fetching missing objects.
+			 */
+			strbuf_release(&gvfs_shared_cache_pathname);
+			return 0;
+		}
+
+		/*
+		 * We know that there is an alternate (either from
+		 * .git/objects/info/alternates or from a memory-only
+		 * entry) associated with the shared-cache directory.
+		 */
+		gvfs_matched_shared_cache_to_alternate++;
+		strbuf_release(&buf_pack_foo);
+	}
+
 	/* Detect cases where alternate disappeared */
 	if (!is_directory(path->buf)) {
 		error(_("object directory %s does not exist; "
@@ -1019,6 +1067,33 @@ void prepare_alt_odb(struct repository *r)
 	link_alt_odb_entries(r, r->objects->alternate_db, PATH_SEP, NULL, 0);
 
 	read_info_alternates(r, r->objects->odb->path, 0);
+
+	if (gvfs_shared_cache_pathname.len &&
+	    !gvfs_matched_shared_cache_to_alternate) {
+		/*
+		 * There is no entry in .git/objects/info/alternates for
+		 * the requested shared-cache directory.  Therefore, the
+		 * odb-list does not contain this directory.
+		 *
+		 * Force this directory into the odb-list as an in-memory
+		 * alternate.  Implicitly create the directory on disk, if
+		 * necessary.
+		 *
+		 * See GIT_ALTERNATE_OBJECT_DIRECTORIES for another example
+		 * of this kind of usage.
+		 *
+		 * Note: This has the net-effect of allowing Git to treat
+		 * `gvfs.sharedCache` as an unofficial alternate.  This
+		 * usage should be discouraged for compatibility reasons
+		 * with other tools in the overall Git ecosystem (that
+		 * won't know about this trick).  It would be much better
+		 * for us to update .git/objects/info/alternates instead.
+		 * The code here is considered a backstop.
+		 */
+		link_alt_odb_entries(r, gvfs_shared_cache_pathname.buf,
+				     '\n', NULL, 0);
+	}
+
 	r->objects->loaded_alternates = 1;
 }
 

From 384f7b4ab95ee87f9daa3d1fe88104b3a603fc9e Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Thu, 25 Jul 2019 15:43:50 -0400
Subject: [PATCH 093/207] trace2:gvfs:experiment: increase default event depth
 for unpack-tree data

---
 trace2/tr2_tgt_event.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/trace2/tr2_tgt_event.c b/trace2/tr2_tgt_event.c
index 69ee40449fa4a7..b0a1c39a378b73 100644
--- a/trace2/tr2_tgt_event.c
+++ b/trace2/tr2_tgt_event.c
@@ -39,7 +39,7 @@ static struct tr2_dst tr2dst_event = {
  * event target.  Use the TR2_SYSENV_EVENT_NESTING setting to increase
  * region details in the event target.
  */
-static int tr2env_event_max_nesting_levels = 2;
+static int tr2env_event_max_nesting_levels = 4;
 
 /*
  * Use the TR2_SYSENV_EVENT_BRIEF to omit the <time>, <file>, and

From dcccb28824b328330551aca039c6319bd8e8b2cf Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 8 Oct 2019 14:01:26 -0400
Subject: [PATCH 094/207] gvfs-helper: better handling of network errors

Add trace2 messages for CURL and HTTP errors.

Fix a typo when reporting the network error code back to gvfs-helper-client.

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 gvfs-helper.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index 6d31eb808532de..197fce1ff39757 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -414,10 +414,16 @@ static void gh__response_status__set_from_slot(
 		strbuf_addf(&status->error_message, "%s (curl)",
 			    curl_easy_strerror(status->curl_code));
 		status->ec = GH__ERROR_CODE__CURL_ERROR;
+
+		trace2_data_string("gvfs-helper", NULL,
+				   "error/curl", status->error_message.buf);
 	} else {
 		strbuf_addf(&status->error_message, "HTTP %ld Unexpected",
 			    status->response_code);
 		status->ec = GH__ERROR_CODE__HTTP_UNEXPECTED_CODE;
+
+		trace2_data_string("gvfs-helper", NULL,
+				   "error/http", status->error_message.buf);
 	}
 
 	if (status->ec != GH__ERROR_CODE__OK)
@@ -2041,7 +2047,7 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv)
 }
 
 /*
- * Handle the 'get' command when in "server mode".  Only call error()
+ * Handle the 'get' command when in "server mode".  Only call error() and set ec
  * for hard errors where we cannot communicate correctly with the foreground
  * client process.  Pass any actual data errors (such as 404's or 401's from
  * the fetch back to the client process.
@@ -2113,10 +2119,15 @@ static enum gh__error_code do_server_subprocess_get(void)
 			goto cleanup;
 		}
 
+	/*
+	 * We only use status.ec to tell the client whether the request
+	 * was complete, incomplete, or had IO errors.  We DO NOT return
+	 * this value to our caller.
+	 */
 	err = 0;
-	if (ec == GH__ERROR_CODE__OK)
+	if (status.ec == GH__ERROR_CODE__OK)
 		err = packet_write_fmt_gently(1, "ok\n");
-	else if (ec == GH__ERROR_CODE__HTTP_404)
+	else if (status.ec == GH__ERROR_CODE__HTTP_404)
 		err = packet_write_fmt_gently(1, "partial\n");
 	else
 		err = packet_write_fmt_gently(1, "error %s\n",
@@ -2344,6 +2355,7 @@ int cmd_main(int argc, const char **argv)
 		usage_with_options(main_usage, main_options);
 
 	trace2_cmd_name("gvfs-helper");
+	packet_trace_identity("gvfs-helper");
 
 	setup_git_directory_gently(NULL);
 

From 1489a114fd2f94ec97942c9191c9196b289df9ff Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Thu, 25 Jul 2019 14:52:33 -0400
Subject: [PATCH 095/207] trace2:gvfs:experiment: add data for check_updates()
 in unpack_trees()

Add data for the number of files created/overwritten and deleted during the checkout.

Give a proper category name to all events in unpack-trees.c and eliminate "exp".

This is modified slightly from the original version due to interactions with 26f924d
(unpack-trees: exit check_updates() early if updates are not wanted, 2020-01-07).

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 unpack-trees.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/unpack-trees.c b/unpack-trees.c
index 5c97e47572819a..00d47b319b9349 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -431,8 +431,12 @@ static int check_updates(struct unpack_trees_options *o,
 	struct progress *progress;
 	struct checkout state = CHECKOUT_INIT;
 	int i, pc_workers, pc_threshold;
+	intmax_t sum_unlink = 0;
+	intmax_t sum_prefetch = 0;
+	intmax_t sum_checkout = 0;
 
 	trace_performance_enter();
+	trace2_region_enter("unpack_trees", "check_updates", NULL);
 	state.super_prefix = o->super_prefix;
 	state.force = 1;
 	state.quiet = 1;
@@ -442,8 +446,7 @@ static int check_updates(struct unpack_trees_options *o,
 
 	if (!o->update || o->dry_run) {
 		remove_marked_cache_entries(index, 0);
-		trace_performance_leave("check_updates");
-		return 0;
+		goto done;
 	}
 
 	if (o->clone)
@@ -465,6 +468,7 @@ static int check_updates(struct unpack_trees_options *o,
 		if (ce->ce_flags & CE_WT_REMOVE) {
 			display_progress(progress, ++cnt);
 			unlink_entry(ce, o->super_prefix);
+			sum_unlink++;
 		}
 	}
 
@@ -500,6 +504,7 @@ static int check_updates(struct unpack_trees_options *o,
 
 			if (last_pc_queue_size == pc_queue_size())
 				display_progress(progress, ++cnt);
+			sum_checkout++;
 		}
 	}
 	if (pc_workers > 1)
@@ -512,6 +517,15 @@ static int check_updates(struct unpack_trees_options *o,
 	if (o->clone)
 		report_collided_checkout(index);
 
+	if (sum_unlink > 0)
+		trace2_data_intmax("unpack_trees", NULL, "check_updates/nr_unlink", sum_unlink);
+	if (sum_prefetch > 0)
+		trace2_data_intmax("unpack_trees", NULL, "check_updates/nr_prefetch", sum_prefetch);
+	if (sum_checkout > 0)
+		trace2_data_intmax("unpack_trees", NULL, "check_updates/nr_write", sum_checkout);
+
+done:
+	trace2_region_leave("unpack_trees", "check_updates", NULL);
 	trace_performance_leave("check_updates");
 	return errs != 0;
 }
@@ -1789,10 +1803,9 @@ static int clear_ce_flags(struct index_state *istate,
 					_("Updating index flags"),
 					istate->cache_nr);
 
-	xsnprintf(label, sizeof(label), "clear_ce_flags(0x%08lx,0x%08lx)",
+	xsnprintf(label, sizeof(label), "clear_ce_flags/0x%08lx_0x%08lx",
 		  (unsigned long)select_mask, (unsigned long)clear_mask);
 	trace2_region_enter("unpack_trees", label, the_repository);
-
 	rval = clear_ce_flags_1(istate,
 				istate->cache,
 				istate->cache_nr,
@@ -1916,7 +1929,7 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
 	if (o->df_conflict_entry)
 		BUG("o->df_conflict_entry is an output only field");
 
-	trace2_region_enter("exp", "unpack_trees", NULL);
+	trace2_region_enter("unpack_trees", "unpack_trees", NULL);
 	nr_unpack_entry_at_start = get_nr_unpack_entry();
 
 	trace_performance_enter();
@@ -2130,7 +2143,7 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
 	trace_performance_leave("unpack_trees");
 	trace2_data_intmax("unpack_trees", NULL, "unpack_trees/nr_unpack_entries",
 			   (intmax_t)(get_nr_unpack_entry() - nr_unpack_entry_at_start));
-	trace2_region_leave("exp", "unpack_trees", NULL);
+	trace2_region_leave("unpack_trees", "unpack_trees", NULL);
 	return ret;
 
 return_failed:

From 947384cfef3f285710afa3a17813547532133acb Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 8 Oct 2019 14:30:25 -0400
Subject: [PATCH 096/207] gvfs-helper-client: properly update loose cache with
 fetched OID

Fix parsing of the "loose <oid>" response from `gvfs-helper` and
use the parsed OID when updating the loose oid cache.

Previously, an uninitialized "struct object_id" was used to update
the cache.  This did not cause any corruption, but could cause
extra fetches for objects visited multiple times.
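
As a minimal sketch, the corrected path (mirroring the hunk below) parses
the OID from the "loose <oid>" line before touching the cache:

    struct object_id oid;
    const char *v1_oid;

    if (!skip_prefix(line, "loose ", &v1_oid) ||
        get_oid_hex(v1_oid, &oid))
            BUG("update_loose_cache: invalid line '%s'", line);

    odb_loose_cache_add_new_oid(gh_client__chosen_odb, &oid);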

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 gvfs-helper-client.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c
index 50ff4c672d2418..9b699e082931b4 100644
--- a/gvfs-helper-client.c
+++ b/gvfs-helper-client.c
@@ -13,6 +13,7 @@
 #include "pkt-line.h"
 #include "quote.h"
 #include "packfile.h"
+#include "hex.h"
 
 static struct oidset gh_client__oidset_queued = OIDSET_INIT;
 static unsigned long gh_client__oidset_count;
@@ -94,6 +95,9 @@ static void gh_client__update_loose_cache(const char *line)
 	if (!skip_prefix(line, "loose ", &v1_oid))
 		BUG("update_loose_cache: invalid line '%s'", line);
 
+	if (get_oid_hex(v1_oid, &oid))
+		BUG("update_loose_cache: invalid line '%s'", line);
+
 	odb_loose_cache_add_new_oid(gh_client__chosen_odb, &oid);
 }
 

From ed48c16c43f423a904714834fdf5fb7db40fea1e Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Fri, 26 Jul 2019 08:51:17 -0400
Subject: [PATCH 097/207] trace2:gvfs:experiment: capture more 'tracking'
 details

Update tracing around report_tracking() to use 'tracking' category
rather than 'exp' category.

Add ahead/behind results from stat_tracking_info().

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 builtin/checkout.c |  6 +++---
 remote.c           | 10 ++++++++++
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/builtin/checkout.c b/builtin/checkout.c
index a2c9eec1c94910..f19bc559505fdc 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -1053,12 +1053,12 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
 	    (new_branch_info->path || !strcmp(new_branch_info->name, "HEAD"))) {
 		unsigned long nr_unpack_entry_at_start;
 
-		trace2_region_enter("exp", "report_tracking", the_repository);
+		trace2_region_enter("tracking", "report_tracking", the_repository);
 		nr_unpack_entry_at_start = get_nr_unpack_entry();
 		report_tracking(new_branch_info);
-		trace2_data_intmax("exp", NULL, "report_tracking/nr_unpack_entries",
+		trace2_data_intmax("tracking", NULL, "report_tracking/nr_unpack_entries",
 				   (intmax_t)(get_nr_unpack_entry() - nr_unpack_entry_at_start));
-		trace2_region_leave("exp", "report_tracking", the_repository);
+		trace2_region_leave("tracking", "report_tracking", the_repository);
 	}
 }
 
diff --git a/remote.c b/remote.c
index 18e5ccf3918445..5a76af08d3d828 100644
--- a/remote.c
+++ b/remote.c
@@ -21,6 +21,7 @@
 #include "setup.h"
 #include "string-list.h"
 #include "strvec.h"
+#include "trace2.h"
 #include "commit-reach.h"
 #include "advice.h"
 #include "connect.h"
@@ -2374,7 +2375,16 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb,
 	char *base;
 	int upstream_is_gone = 0;
 
+	trace2_region_enter("tracking", "stat_tracking_info", NULL);
 	sti = stat_tracking_info(branch, &ours, &theirs, &full_base, 0, abf);
+	trace2_data_intmax("tracking", NULL, "stat_tracking_info/ab_flags", abf);
+	trace2_data_intmax("tracking", NULL, "stat_tracking_info/ab_result", sti);
+	if (sti >= 0 && abf == AHEAD_BEHIND_FULL) {
+	    trace2_data_intmax("tracking", NULL, "stat_tracking_info/ab_ahead", ours);
+	    trace2_data_intmax("tracking", NULL, "stat_tracking_info/ab_behind", theirs);
+	}
+	trace2_region_leave("tracking", "stat_tracking_info", NULL);
+
 	if (sti < 0) {
 		if (!full_base)
 			return 0;

From 0b58c532f48be14144cf03de1d8f7b6203ff76a4 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Thu, 10 Oct 2019 10:58:07 -0400
Subject: [PATCH 098/207] gvfs-helper: V2 robust retry and throttling

Add robust-retry mechanism to automatically retry a request after network
errors.  This includes retry after:
   [] transient network problems reported by CURL.
   [] http 429 throttling (with associated Retry-After)
   [] http 503 server unavailable (with associated Retry-After)

Add voluntary throttling using Azure X-RateLimit-* hints to avoid being
soft-throttled (tarpitted) or hard-throttled (429) on later requests.

Add global (outside of a single request) azure-throttle data to track the
rate limit hints from the cache-server and main Git server independently.
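
As a rough sketch of that bookkeeping (the struct and its fields match the
patch below; the exact header names are an assumption based on the usual
Azure DevOps rate-limit headers):

    /* sketch: fold one response header into the per-server throttle data */
    static void update_throttle_hint(struct gh__azure_throttle *azure,
                                     const char *key, const char *val)
    {
            if (!strcasecmp(key, "X-RateLimit-Limit"))
                    azure->tstu_limit = strtoul(val, NULL, 10);
            else if (!strcasecmp(key, "X-RateLimit-Remaining"))
                    azure->tstu_remaining = strtoul(val, NULL, 10);
            else if (!strcasecmp(key, "X-RateLimit-Reset"))
                    azure->reset_sec = strtoul(val, NULL, 10);
            else if (!strcasecmp(key, "Retry-After"))
                    azure->retry_after_sec = strtoul(val, NULL, 10);
    }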

Add exponential retry backoff.  This is used for transient network problems
when we don't have a Retry-After hint.
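
A minimal sketch of that backoff (field and option names match the patch;
the doubling policy and the plain sleep() call are only illustrative):

    /* sketch: double the delay after each transient failure, up to a cap */
    if (!params->k_transient_delay_sec)
            params->k_transient_delay_sec = 1;
    else
            params->k_transient_delay_sec *= 2;

    if (params->k_transient_delay_sec > gh__cmd_opts.max_transient_backoff_sec)
            params->k_transient_delay_sec = gh__cmd_opts.max_transient_backoff_sec;

    sleep(params->k_transient_delay_sec);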

Move the call to index-pack earlier in the response/error handling sequence
so that if we receive a 200 yet the packfile is truncated/corrupted, we
can use the regular retry logic to get it again.

Refactor the way we create tempfiles for packfiles to use
<odb>/pack/tempPacks/ rather than working directly in the <odb>/pack/
directory.

Move the code to create a new tempfile to the start of a single request
attempt (initial and retry attempts), rather than at the overall start
of a request.  This gives us a fresh tempfile for each network request
attempt.  This simplifies the retry mechanism, isolates us from the file
ownership issues hidden within the tempfile class, and avoids the need to
truncate previous incomplete results.  This was necessary because index-pack
was pulled into the retry loop.

Minor: Add support for logging X-VSS-E2EID to telemetry on network errors.

Minor: rename variable:
    params.b_no_cache_server --> params.b_permit_cache_server_if_defined.
This variable is used to indicate whether we should try to use the
cache-server when it is defined.  Got rid of double-negative logic.

Minor: rename variable:
    params.label --> params.tr2_label
Clarify that this variable is only used with trace2 logging.

Minor: Move the code to automatically map cache-server 400 responses
to normal 401 response earlier in the response/error handling sequence
to simplify later retry logic.

Minor: Decorate trace2 messages with "(cs)" or "(main)" to identify the
server in log messages.  Add params->server_type to simplify this.

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 gvfs-helper.c | 1403 ++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 1101 insertions(+), 302 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index 197fce1ff39757..197a40771bcff6 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -61,6 +61,11 @@
 //
 //                 --depth=<depth>       // defaults to "1"
 //
+//                 --max-retries=<n>     // defaults to "6"
+//
+//                       Number of retries after transient network errors.
+//                       Set to zero to disable such retries.
+//
 //     server
 //
 //            Interactive/sub-process mode.  Listen for a series of commands
@@ -77,6 +82,11 @@
 //
 //                 --depth=<depth>       // defaults to "1"
 //
+//                 --max-retries=<n>     // defaults to "6"
+//
+//                       Number of retries after transient network errors.
+//                       Set to zero to disable such retries.
+//
 //            Interactive verb: get
 //
 //                 Fetch 1 or more objects.  If a cache-server is configured,
@@ -175,8 +185,26 @@ static const char *const server_usage[] = {
 	NULL
 };
 
+/*
+ * "commitDepth" field in gvfs protocol
+ */
+#define GH__DEFAULT_COMMIT_DEPTH 1
+
+/*
+ * Chunk/block size in number of objects we request in each packfile
+ */
 #define GH__DEFAULT_BLOCK_SIZE 4000
 
+/*
+ * Retry attempts (after the initial request) for transient errors and 429s.
+ */
+#define GH__DEFAULT_MAX_RETRIES 6
+
+/*
+ * Maximum delay in seconds for transient (network) error retries.
+ */
+#define GH__DEFAULT_MAX_TRANSIENT_BACKOFF_SEC 300
+
 /*
  * Our exit-codes.
  */
@@ -184,16 +212,18 @@ enum gh__error_code {
 	GH__ERROR_CODE__USAGE = -1, /* will be mapped to usage() */
 	GH__ERROR_CODE__OK = 0,
 	GH__ERROR_CODE__ERROR = 1, /* unspecified */
-//	GH__ERROR_CODE__CACHE_SERVER_NOT_FOUND = 2,
-	GH__ERROR_CODE__CURL_ERROR = 3,
-	GH__ERROR_CODE__HTTP_401 = 4,
-	GH__ERROR_CODE__HTTP_404 = 5,
-	GH__ERROR_CODE__HTTP_UNEXPECTED_CODE = 6,
-	GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE = 7,
+	GH__ERROR_CODE__CURL_ERROR = 2,
+	GH__ERROR_CODE__HTTP_401 = 3,
+	GH__ERROR_CODE__HTTP_404 = 4,
+	GH__ERROR_CODE__HTTP_429 = 5,
+	GH__ERROR_CODE__HTTP_503 = 6,
+	GH__ERROR_CODE__HTTP_OTHER = 7,
+	GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE = 8,
 	GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE = 8,
-	GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE = 9,
-	GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE = 10,
-	GH__ERROR_CODE__SUBPROCESS_SYNTAX = 11,
+	GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE = 10,
+	GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE = 11,
+	GH__ERROR_CODE__SUBPROCESS_SYNTAX = 12,
+	GH__ERROR_CODE__INDEX_PACK_FAILED = 13,
 };
 
 enum gh__cache_server_mode {
@@ -219,6 +249,8 @@ static struct gh__cmd_opts {
 
 	int depth;
 	int block_size;
+	int max_retries;
+	int max_transient_backoff_sec;
 
 	enum gh__cache_server_mode cache_server_mode;
 } gh__cmd_opts;
@@ -243,6 +275,47 @@ static struct gh__global {
 
 } gh__global;
 
+enum gh__server_type {
+	GH__SERVER_TYPE__MAIN = 0,
+	GH__SERVER_TYPE__CACHE = 1,
+
+	GH__SERVER_TYPE__NR,
+};
+
+static const char *gh__server_type_label[GH__SERVER_TYPE__NR] = {
+	"(main)",
+	"(cs)"
+};
+
+struct gh__azure_throttle
+{
+	unsigned long tstu_limit;
+	unsigned long tstu_remaining;
+
+	unsigned long reset_sec;
+	unsigned long retry_after_sec;
+};
+
+static void gh__azure_throttle__zero(struct gh__azure_throttle *azure)
+{
+	azure->tstu_limit = 0;
+	azure->tstu_remaining = 0;
+	azure->reset_sec = 0;
+	azure->retry_after_sec = 0;
+}
+
+#define GH__AZURE_THROTTLE_INIT { \
+	.tstu_limit = 0, \
+	.tstu_remaining = 0, \
+	.reset_sec = 0, \
+	.retry_after_sec = 0, \
+	}
+
+static struct gh__azure_throttle gh__global_throttle[GH__SERVER_TYPE__NR] = {
+	GH__AZURE_THROTTLE_INIT,
+	GH__AZURE_THROTTLE_INIT,
+};
+
 /*
  * Stolen from http.c
  */
@@ -271,7 +344,12 @@ enum gh__progress_state {
 struct gh__request_params {
 	int b_is_post;            /* POST=1 or GET=0 */
 	int b_write_to_file;      /* write to file=1 or strbuf=0 */
-	int b_no_cache_server;    /* force main server only */
+	int b_permit_cache_server_if_defined;
+
+	enum gh__server_type server_type;
+
+	int k_attempt; /* robust retry attempt */
+	int k_transient_delay_sec; /* delay before transient error retries */
 
 	unsigned long object_count; /* number of objects being fetched */
 
@@ -280,9 +358,16 @@ struct gh__request_params {
 	struct curl_slist *headers; /* additional http headers to send */
 	struct tempfile *tempfile; /* for response content when file */
 	struct strbuf *buffer;     /* for response content when strbuf */
-	struct strbuf label;       /* for trace2 regions */
+	struct strbuf tr2_label;   /* for trace2 regions */
 
 	struct strbuf loose_path;
+	struct object_id loose_oid;
+
+	struct strbuf temp_path_pack;
+	struct strbuf temp_path_idx;
+	struct strbuf final_path_pack;
+	struct strbuf final_path_idx;
+	struct strbuf final_packfile_filename;
 
 	/*
 	 * Note that I am putting all of the progress-related instance data
@@ -304,24 +389,36 @@ struct gh__request_params {
 	 */
 	struct strbuf progress_msg;
 	struct progress *progress;
+
+	struct strbuf e2eid;
 };
 
 #define GH__REQUEST_PARAMS_INIT { \
 	.b_is_post = 0, \
 	.b_write_to_file = 0, \
-	.b_no_cache_server = 0, \
+	.b_permit_cache_server_if_defined = 1, \
+	.server_type = GH__SERVER_TYPE__MAIN, \
+	.k_attempt = 0, \
+	.k_transient_delay_sec = 0, \
 	.object_count = 0, \
 	.post_payload = NULL, \
 	.headers = NULL, \
 	.tempfile = NULL, \
 	.buffer = NULL, \
-	.label = STRBUF_INIT, \
+	.tr2_label = STRBUF_INIT, \
 	.loose_path = STRBUF_INIT, \
+	.loose_oid = {{0}}, \
+	.temp_path_pack = STRBUF_INIT, \
+	.temp_path_idx = STRBUF_INIT, \
+	.final_path_pack = STRBUF_INIT, \
+	.final_path_idx = STRBUF_INIT, \
+	.final_packfile_filename = STRBUF_INIT, \
 	.progress_state = GH__PROGRESS_STATE__START, \
 	.progress_base_phase2_msg = STRBUF_INIT, \
 	.progress_base_phase3_msg = STRBUF_INIT, \
 	.progress_msg = STRBUF_INIT, \
 	.progress = NULL, \
+	.e2eid = STRBUF_INIT, \
 	}
 
 static void gh__request_params__release(struct gh__request_params *params)
@@ -338,8 +435,13 @@ static void gh__request_params__release(struct gh__request_params *params)
 
 	params->buffer = NULL; /* we do not own this */
 
-	strbuf_release(&params->label);
+	strbuf_release(&params->tr2_label);
 	strbuf_release(&params->loose_path);
+	strbuf_release(&params->temp_path_pack);
+	strbuf_release(&params->temp_path_idx);
+	strbuf_release(&params->final_path_pack);
+	strbuf_release(&params->final_path_idx);
+	strbuf_release(&params->final_packfile_filename);
 
 	strbuf_release(&params->progress_base_phase2_msg);
 	strbuf_release(&params->progress_base_phase3_msg);
@@ -347,8 +449,55 @@ static void gh__request_params__release(struct gh__request_params *params)
 
 	stop_progress(&params->progress);
 	params->progress = NULL;
+
+	strbuf_release(&params->e2eid);
 }
 
+/*
+ * How we handle retries for various unexpected network errors.
+ */
+enum gh__retry_mode {
+	/*
+	 * The operation was successful, so no retry is needed.
+	 * Use this for HTTP 200, for example.
+	 */
+	GH__RETRY_MODE__SUCCESS = 0,
+
+	/*
+	 * Retry using the normal 401 Auth mechanism.
+	 */
+	GH__RETRY_MODE__HTTP_401,
+
+	/*
+	 * Fail because at least one of the requested OIDs does not exist.
+	 */
+	GH__RETRY_MODE__FAIL_404,
+
+	/*
+	 * A transient network error, such as dropped connection
+	 * or network IO error.  Our belief is that a retry MAY
+	 * succeed.  (See Gremlins and Cosmic Rays....)
+	 */
+	GH__RETRY_MODE__TRANSIENT,
+
+	/*
+	 * Request was blocked completely because of a 429.
+	 */
+	GH__RETRY_MODE__HTTP_429,
+
+	/*
+	 * Request failed because the server was (temporarily?) offline.
+	 */
+	GH__RETRY_MODE__HTTP_503,
+
+	/*
+	 * The operation had a hard failure and we have no
+	 * expectation that a second attempt will give a different
+	 * answer, such as a bad hostname or a mal-formed URL.
+	 */
+	GH__RETRY_MODE__HARD_FAIL,
+};
+
 /*
  * Bucket to describe the results of an HTTP requests (may be
  * overwritten during retries so that it describes the final attempt).
@@ -359,7 +508,9 @@ struct gh__response_status {
 	long response_code; /* http response code */
 	CURLcode curl_code;
 	enum gh__error_code ec;
+	enum gh__retry_mode retry;
 	intmax_t bytes_received;
+	struct gh__azure_throttle *azure;
 };
 
 #define GH__RESPONSE_STATUS_INIT { \
@@ -368,7 +519,9 @@ struct gh__response_status {
 	.response_code = 0, \
 	.curl_code = CURLE_OK, \
 	.ec = GH__ERROR_CODE__OK, \
+	.retry = GH__RETRY_MODE__SUCCESS, \
 	.bytes_received = 0, \
+	.azure = NULL, \
 	}
 
 static void gh__response_status__zero(struct gh__response_status *s)
@@ -378,7 +531,323 @@ static void gh__response_status__zero(struct gh__response_status *s)
 	s->response_code = 0;
 	s->curl_code = CURLE_OK;
 	s->ec = GH__ERROR_CODE__OK;
+	s->retry = GH__RETRY_MODE__SUCCESS;
 	s->bytes_received = 0;
+	s->azure = NULL;
+}
+
+static void install_packfile(struct gh__request_params *params,
+			     struct gh__response_status *status);
+static void install_loose(struct gh__request_params *params,
+			  struct gh__response_status *status);
+
+/*
+ * Log the E2EID for the current request.
+ *
+ * Since every HTTP request to the cache-server and to the main Git server
+ * will send back a unique E2EID (probably a GUID), we don't want to overload
+ * telemetry with each ID -- rather, only the ones for which there was a
+ * problem and that may be helpful in a post mortem.
+ */
+static void log_e2eid(struct gh__request_params *params,
+		      struct gh__response_status *status)
+{
+	if (!params->e2eid.len)
+		return;
+
+	switch (status->retry) {
+	default:
+	case GH__RETRY_MODE__SUCCESS:
+	case GH__RETRY_MODE__HTTP_401:
+	case GH__RETRY_MODE__FAIL_404:
+		return;
+
+	case GH__RETRY_MODE__HARD_FAIL:
+	case GH__RETRY_MODE__TRANSIENT:
+	case GH__RETRY_MODE__HTTP_429:
+	case GH__RETRY_MODE__HTTP_503:
+		break;
+	}
+
+	if (trace2_is_enabled()) {
+		struct strbuf key = STRBUF_INIT;
+
+		strbuf_addstr(&key, "e2eid");
+		strbuf_addstr(&key, gh__server_type_label[params->server_type]);
+
+		trace2_data_string("gvfs-helper", NULL, key.buf,
+				   params->e2eid.buf);
+
+		strbuf_release(&key);
+	}
+}
+
+/*
+ * Normalize a few error codes before we try to decide
+ * how to dispatch on them.
+ */
+static void gh__response_status__normalize_odd_codes(
+	struct gh__request_params *params,
+	struct gh__response_status *status)
+{
+	if (params->server_type == GH__SERVER_TYPE__CACHE &&
+	    status->response_code == 400) {
+		/*
+		 * The cache-server sends a somewhat bogus 400 instead of
+		 * the normal 401 when AUTH is required.  Fixup the status
+		 * to hide that.
+		 *
+		 * TODO Technically, the cache-server could send a 400
+		 * TODO for many reasons, not just for their bogus
+		 * TODO pseudo-401, but we're going to assume it is a
+		 * TODO 401 for now.  We should confirm the expected
+		 * TODO error message in the response-body.
+		 */
+		status->response_code = 401;
+	}
+
+	if (status->response_code == 203) {
+		/*
+		 * A proxy server transformed a 200 from the origin server
+		 * into a 203.  We don't care about the subtle distinction.
+		 */
+		status->response_code = 200;
+	}
+}
+
+/*
+ * Map HTTP response codes into a retry strategy.
+ * See https://en.wikipedia.org/wiki/List_of_HTTP_status_codes
+ *
+ * https://docs.microsoft.com/en-us/azure/devops/integrate/concepts/rate-limits?view=azure-devops
+ */
+static void compute_retry_mode_from_http_response(
+	struct gh__response_status *status)
+{
+	switch (status->response_code) {
+
+	case 200:
+		status->retry = GH__RETRY_MODE__SUCCESS;
+		status->ec = GH__ERROR_CODE__OK;
+		return;
+
+	case 301: /* all the various flavors of HTTP Redirect */
+	case 302:
+	case 303:
+	case 304:
+	case 305:
+	case 306:
+	case 307:
+	case 308:
+		/*
+		 * TODO Consider a redirected-retry (with or without
+		 * TODO a Retry-After header).
+		 */
+		goto hard_fail;
+
+	case 401:
+		strbuf_addstr(&status->error_message,
+			      "(http:401) Not Authorized");
+		status->retry = GH__RETRY_MODE__HTTP_401;
+		status->ec = GH__ERROR_CODE__HTTP_401;
+		return;
+
+	case 404:
+		/*
+		 * TODO if params->object_count > 1, consider
+		 * TODO splitting the request into 2 halves
+		 * TODO and retrying each half in series.
+		 */
+		strbuf_addstr(&status->error_message,
+			      "(http:404) Not Found");
+		status->retry = GH__RETRY_MODE__FAIL_404;
+		status->ec = GH__ERROR_CODE__HTTP_404;
+		return;
+
+	case 429:
+		/*
+		 * This is a hard block because we've been bad.
+		 */
+		strbuf_addstr(&status->error_message,
+			      "(http:429) Too Many Requests [throttled]");
+		status->retry = GH__RETRY_MODE__HTTP_429;
+		status->ec = GH__ERROR_CODE__HTTP_429;
+
+		trace2_data_string("gvfs-helper", NULL, "error/http",
+				   status->error_message.buf);
+		return;
+
+	case 503:
+		/*
+		 * We assume that this comes with a "Retry-After" header like 429s.
+		 */
+		strbuf_addstr(&status->error_message,
+			      "(http:503) Server Unavailable [throttled]");
+		status->retry = GH__RETRY_MODE__HTTP_503;
+		status->ec = GH__ERROR_CODE__HTTP_503;
+
+		trace2_data_string("gvfs-helper", NULL, "error/http",
+				   status->error_message.buf);
+		return;
+
+	default:
+		goto hard_fail;
+	}
+
+hard_fail:
+	strbuf_addf(&status->error_message, "(http:%d) Other [hard_fail]",
+		    (int)status->response_code);
+	status->retry = GH__RETRY_MODE__HARD_FAIL;
+	status->ec = GH__ERROR_CODE__HTTP_OTHER;
+
+	trace2_data_string("gvfs-helper", NULL, "error/http",
+			   status->error_message.buf);
+	return;
+}
+
+/*
+ * Map CURLE errors code to a retry strategy.
+ * See <curl/curl.h> and
+ * https://curl.haxx.se/libcurl/c/libcurl-errors.html
+ *
+ * This could be a static table rather than a switch, but
+ * that is harder to debug and we may want to selectively
+ * log errors.
+ *
+ * I've commented out all of the hard-fail cases for now
+ * and let the default handle them.  This is to indicate
+ * that I considered them and found them to be not actionable.
+ * Also, the spelling of some of the CURLE_ symbols seems
+ * to change between curl releases on different platforms,
+ * so I'm not going to fight that.
+ */
+static void compute_retry_mode_from_curl_error(
+	struct gh__response_status *status)
+{
+	switch (status->curl_code) {
+	case CURLE_OK:
+		status->retry = GH__RETRY_MODE__SUCCESS;
+		status->ec = GH__ERROR_CODE__OK;
+		return;
+
+	//se CURLE_UNSUPPORTED_PROTOCOL:     goto hard_fail;
+	//se CURLE_FAILED_INIT:              goto hard_fail;
+	//se CURLE_URL_MALFORMAT:            goto hard_fail;
+	//se CURLE_NOT_BUILT_IN:             goto hard_fail;
+	//se CURLE_COULDNT_RESOLVE_PROXY:    goto hard_fail;
+	//se CURLE_COULDNT_RESOLVE_HOST:     goto hard_fail;
+	case CURLE_COULDNT_CONNECT:          goto transient;
+	//se CURLE_WEIRD_SERVER_REPLY:       goto hard_fail;
+	//se CURLE_REMOTE_ACCESS_DENIED:     goto hard_fail;
+	//se CURLE_FTP_ACCEPT_FAILED:        goto hard_fail;
+	//se CURLE_FTP_WEIRD_PASS_REPLY:     goto hard_fail;
+	//se CURLE_FTP_ACCEPT_TIMEOUT:       goto hard_fail;
+	//se CURLE_FTP_WEIRD_PASV_REPLY:     goto hard_fail;
+	//se CURLE_FTP_WEIRD_227_FORMAT:     goto hard_fail;
+	//se CURLE_FTP_CANT_GET_HOST:        goto hard_fail;
+	case CURLE_HTTP2:                    goto transient;
+	//se CURLE_FTP_COULDNT_SET_TYPE:     goto hard_fail;
+	case CURLE_PARTIAL_FILE:             goto transient;
+	//se CURLE_FTP_COULDNT_RETR_FILE:    goto hard_fail;
+	//se CURLE_OBSOLETE20:               goto hard_fail;
+	//se CURLE_QUOTE_ERROR:              goto hard_fail;
+	//se CURLE_HTTP_RETURNED_ERROR:      goto hard_fail;
+	case CURLE_WRITE_ERROR:              goto transient;
+	//se CURLE_OBSOLETE24:               goto hard_fail;
+	case CURLE_UPLOAD_FAILED:            goto transient;
+	//se CURLE_READ_ERROR:               goto hard_fail;
+	//se CURLE_OUT_OF_MEMORY:            goto hard_fail;
+	case CURLE_OPERATION_TIMEDOUT:       goto transient;
+	//se CURLE_OBSOLETE29:               goto hard_fail;
+	//se CURLE_FTP_PORT_FAILED:          goto hard_fail;
+	//se CURLE_FTP_COULDNT_USE_REST:     goto hard_fail;
+	//se CURLE_OBSOLETE32:               goto hard_fail;
+	//se CURLE_RANGE_ERROR:              goto hard_fail;
+	case CURLE_HTTP_POST_ERROR:          goto transient;
+	//se CURLE_SSL_CONNECT_ERROR:        goto hard_fail;
+	//se CURLE_BAD_DOWNLOAD_RESUME:      goto hard_fail;
+	//se CURLE_FILE_COULDNT_READ_FILE:   goto hard_fail;
+	//se CURLE_LDAP_CANNOT_BIND:         goto hard_fail;
+	//se CURLE_LDAP_SEARCH_FAILED:       goto hard_fail;
+	//se CURLE_OBSOLETE40:               goto hard_fail;
+	//se CURLE_FUNCTION_NOT_FOUND:       goto hard_fail;
+	//se CURLE_ABORTED_BY_CALLBACK:      goto hard_fail;
+	//se CURLE_BAD_FUNCTION_ARGUMENT:    goto hard_fail;
+	//se CURLE_OBSOLETE44:               goto hard_fail;
+	//se CURLE_INTERFACE_FAILED:         goto hard_fail;
+	//se CURLE_OBSOLETE46:               goto hard_fail;
+	//se CURLE_TOO_MANY_REDIRECTS:       goto hard_fail;
+	//se CURLE_UNKNOWN_OPTION:           goto hard_fail;
+	//se CURLE_TELNET_OPTION_SYNTAX:     goto hard_fail;
+	//se CURLE_OBSOLETE50:               goto hard_fail;
+	//se CURLE_PEER_FAILED_VERIFICATION: goto hard_fail;
+	//se CURLE_GOT_NOTHING:              goto hard_fail;
+	//se CURLE_SSL_ENGINE_NOTFOUND:      goto hard_fail;
+	//se CURLE_SSL_ENGINE_SETFAILED:     goto hard_fail;
+	case CURLE_SEND_ERROR:               goto transient;
+	case CURLE_RECV_ERROR:               goto transient;
+	//se CURLE_OBSOLETE57:               goto hard_fail;
+	//se CURLE_SSL_CERTPROBLEM:          goto hard_fail;
+	//se CURLE_SSL_CIPHER:               goto hard_fail;
+	//se CURLE_SSL_CACERT:               goto hard_fail;
+	//se CURLE_BAD_CONTENT_ENCODING:     goto hard_fail;
+	//se CURLE_LDAP_INVALID_URL:         goto hard_fail;
+	//se CURLE_FILESIZE_EXCEEDED:        goto hard_fail;
+	//se CURLE_USE_SSL_FAILED:           goto hard_fail;
+	//se CURLE_SEND_FAIL_REWIND:         goto hard_fail;
+	//se CURLE_SSL_ENGINE_INITFAILED:    goto hard_fail;
+	//se CURLE_LOGIN_DENIED:             goto hard_fail;
+	//se CURLE_TFTP_NOTFOUND:            goto hard_fail;
+	//se CURLE_TFTP_PERM:                goto hard_fail;
+	//se CURLE_REMOTE_DISK_FULL:         goto hard_fail;
+	//se CURLE_TFTP_ILLEGAL:             goto hard_fail;
+	//se CURLE_TFTP_UNKNOWNID:           goto hard_fail;
+	//se CURLE_REMOTE_FILE_EXISTS:       goto hard_fail;
+	//se CURLE_TFTP_NOSUCHUSER:          goto hard_fail;
+	//se CURLE_CONV_FAILED:              goto hard_fail;
+	//se CURLE_CONV_REQD:                goto hard_fail;
+	//se CURLE_SSL_CACERT_BADFILE:       goto hard_fail;
+	//se CURLE_REMOTE_FILE_NOT_FOUND:    goto hard_fail;
+	//se CURLE_SSH:                      goto hard_fail;
+	//se CURLE_SSL_SHUTDOWN_FAILED:      goto hard_fail;
+	case CURLE_AGAIN:                    goto transient;
+	//se CURLE_SSL_CRL_BADFILE:          goto hard_fail;
+	//se CURLE_SSL_ISSUER_ERROR:         goto hard_fail;
+	//se CURLE_FTP_PRET_FAILED:          goto hard_fail;
+	//se CURLE_RTSP_CSEQ_ERROR:          goto hard_fail;
+	//se CURLE_RTSP_SESSION_ERROR:       goto hard_fail;
+	//se CURLE_FTP_BAD_FILE_LIST:        goto hard_fail;
+	//se CURLE_CHUNK_FAILED:             goto hard_fail;
+	//se CURLE_NO_CONNECTION_AVAILABLE:  goto hard_fail;
+	//se CURLE_SSL_PINNEDPUBKEYNOTMATCH: goto hard_fail;
+	//se CURLE_SSL_INVALIDCERTSTATUS:    goto hard_fail;
+#ifdef CURLE_HTTP2_STREAM
+	case CURLE_HTTP2_STREAM:             goto transient;
+#endif
+	default:                             goto hard_fail;
+	}
+
+hard_fail:
+	strbuf_addf(&status->error_message, "(curl:%d) %s [hard_fail]",
+		    status->curl_code,
+		    curl_easy_strerror(status->curl_code));
+	status->retry = GH__RETRY_MODE__HARD_FAIL;
+	status->ec = GH__ERROR_CODE__CURL_ERROR;
+
+	trace2_data_string("gvfs-helper", NULL, "error/curl",
+			   status->error_message.buf);
+	return;
+
+transient:
+	strbuf_addf(&status->error_message, "(curl:%d) %s [transient]",
+		    status->curl_code,
+		    curl_easy_strerror(status->curl_code));
+	status->retry = GH__RETRY_MODE__TRANSIENT;
+	status->ec = GH__ERROR_CODE__CURL_ERROR;
+
+	trace2_data_string("gvfs-helper", NULL, "error/curl",
+			   status->error_message.buf);
+	return;
 }
 
 /*
@@ -399,32 +868,18 @@ static void gh__response_status__set_from_slot(
 
 	strbuf_setlen(&status->error_message, 0);
 
-	if (status->response_code == 200)
-		status->ec = GH__ERROR_CODE__OK;
-
-	else if (status->response_code == 401) {
-		strbuf_addstr(&status->error_message, "401 Not Authorized");
-		status->ec = GH__ERROR_CODE__HTTP_401;
-
-	} else if (status->response_code == 404) {
-		strbuf_addstr(&status->error_message, "404 Not Found");
-		status->ec = GH__ERROR_CODE__HTTP_404;
-
-	} else if (status->curl_code != CURLE_OK) {
-		strbuf_addf(&status->error_message, "%s (curl)",
-			    curl_easy_strerror(status->curl_code));
-		status->ec = GH__ERROR_CODE__CURL_ERROR;
+	gh__response_status__normalize_odd_codes(params, status);
 
-		trace2_data_string("gvfs-helper", NULL,
-				   "error/curl", status->error_message.buf);
-	} else {
-		strbuf_addf(&status->error_message, "HTTP %ld Unexpected",
-			    status->response_code);
-		status->ec = GH__ERROR_CODE__HTTP_UNEXPECTED_CODE;
-
-		trace2_data_string("gvfs-helper", NULL,
-				   "error/http", status->error_message.buf);
-	}
+	/*
+	 * Use normalized response/status codes from curl/http to decide
+	 * how to set the error-code we propagate *AND* to decide if we
+	 * should retry because of transient network problems.
+	 */
+	if (status->curl_code == CURLE_OK ||
+	    status->curl_code == CURLE_HTTP_RETURNED_ERROR)
+		compute_retry_mode_from_http_response(status);
+	else
+		compute_retry_mode_from_curl_error(status);
 
 	if (status->ec != GH__ERROR_CODE__OK)
 		status->bytes_received = 0;
@@ -442,26 +897,6 @@ static void gh__response_status__release(struct gh__response_status *status)
 	strbuf_release(&status->content_type);
 }
 
-/*
- * The cache-server sends a somewhat bogus 400 instead of
- * the normal 401 when AUTH is required.  Fixup the status
- * to hide that.
- */
-static void fixup_cache_server_400_to_401(struct gh__response_status *status)
-{
-	if (status->response_code != 400)
-		return;
-
-	/*
-	 * TODO Technically, the cache-server could send a 400
-	 * TODO for many reasons, not just for their bogus
-	 * TODO pseudo-401, but we're going to assume it is a
-	 * TODO 401 for now.  We should confirm the expected
-	 * TODO error message in the response-body.
-	 */
-	status->response_code = 401;
-}
-
 static int gh__curl_progress_cb(void *clientp,
 				curl_off_t dltotal, curl_off_t dlnow,
 				curl_off_t ultotal, curl_off_t ulnow)
@@ -552,8 +987,13 @@ static int gh__curl_progress_cb(void *clientp,
 enter_phase_2:
 	strbuf_setlen(&params->progress_msg, 0);
 	if (params->progress_base_phase2_msg.len) {
-		strbuf_addf(&params->progress_msg, "%s (bytes sent)",
-			    params->progress_base_phase2_msg.buf);
+		if (params->k_attempt > 0)
+			strbuf_addf(&params->progress_msg, "%s [retry %d/%d] (bytes sent)",
+				    params->progress_base_phase2_msg.buf,
+				    params->k_attempt, gh__cmd_opts.max_retries);
+		else
+			strbuf_addf(&params->progress_msg, "%s (bytes sent)",
+				    params->progress_base_phase2_msg.buf);
 		params->progress = start_progress(params->progress_msg.buf, ultotal);
 		display_progress(params->progress, ulnow);
 	}
@@ -563,8 +1003,13 @@ static int gh__curl_progress_cb(void *clientp,
 enter_phase_3:
 	strbuf_setlen(&params->progress_msg, 0);
 	if (params->progress_base_phase3_msg.len) {
-		strbuf_addf(&params->progress_msg, "%s (bytes received)",
-			    params->progress_base_phase3_msg.buf);
+		if (params->k_attempt > 0)
+			strbuf_addf(&params->progress_msg, "%s [retry %d/%d] (bytes received)",
+				    params->progress_base_phase3_msg.buf,
+				    params->k_attempt, gh__cmd_opts.max_retries);
+		else
+			strbuf_addf(&params->progress_msg, "%s (bytes received)",
+				    params->progress_base_phase3_msg.buf);
 		params->progress = start_progress(params->progress_msg.buf, dltotal);
 		display_progress(params->progress, dlnow);
 	}
@@ -581,12 +1026,19 @@ static void gh__run_one_slot(struct active_request_slot *slot,
 			     struct gh__request_params *params,
 			     struct gh__response_status *status)
 {
-	trace2_region_enter("gvfs-helper", params->label.buf, NULL);
+	struct strbuf key = STRBUF_INIT;
+
+	strbuf_addbuf(&key, &params->tr2_label);
+	strbuf_addstr(&key, gh__server_type_label[params->server_type]);
+
+	params->progress_state = GH__PROGRESS_STATE__START;
+	strbuf_setlen(&params->e2eid, 0);
+
+	trace2_region_enter("gvfs-helper", key.buf, NULL);
 
 	if (!start_active_slot(slot)) {
 		status->curl_code = CURLE_FAILED_INIT; /* a bit of a lie */
-		strbuf_addstr(&status->error_message,
-			      "failed to start HTTP request");
+		compute_retry_mode_from_curl_error(status);
 	} else {
 		run_active_slot(slot);
 		if (params->b_write_to_file)
@@ -594,27 +1046,38 @@ static void gh__run_one_slot(struct active_request_slot *slot,
 
 		gh__response_status__set_from_slot(params, status, slot);
 
-		if (status->ec == GH__ERROR_CODE__OK) {
-			int old_len = params->label.len;
+		log_e2eid(params, status);
 
-			strbuf_addstr(&params->label, "/nr_objects");
-			trace2_data_intmax("gvfs-helper", NULL,
-					   params->label.buf,
-					   params->object_count);
-			strbuf_setlen(&params->label, old_len);
+		if (status->ec == GH__ERROR_CODE__OK) {
+			int old_len = key.len;
 
-			strbuf_addstr(&params->label, "/nr_bytes");
+			/*
+			 * We only log the number of bytes received.
+			 * We do not log the number of objects requested
+			 * because the server may give us more than that
+			 * (such as when we request a commit).
+			 */
+			strbuf_addstr(&key, "/nr_bytes");
 			trace2_data_intmax("gvfs-helper", NULL,
-					   params->label.buf,
+					   key.buf,
 					   status->bytes_received);
-			strbuf_setlen(&params->label, old_len);
+			strbuf_setlen(&key, old_len);
 		}
 	}
 
 	if (params->progress)
 		stop_progress(&params->progress);
 
-	trace2_region_leave("gvfs-helper", params->label.buf, NULL);
+	if (status->ec == GH__ERROR_CODE__OK && params->b_write_to_file) {
+		if (params->b_is_post)
+			install_packfile(params, status);
+		else
+			install_loose(params, status);
+	}
+
+	trace2_region_leave("gvfs-helper", key.buf, NULL);
+
+	strbuf_release(&key);
 }
 
 static int option_parse_cache_server_mode(const struct option *opt,
@@ -1065,41 +1528,115 @@ static void select_odb(void)
  *
  * TODO Consider using lockfile for this rather than naked tempfile.
  */
-static struct tempfile *create_tempfile_for_packfile(void)
+static void create_tempfile_for_packfile(
+	struct gh__request_params *params,
+	struct gh__response_status *status)
 {
 	static unsigned int nth = 0;
 	static struct timeval tv = {0};
 	static struct tm tm = {0};
 	static time_t secs = 0;
-	static char tbuf[32] = {0};
+	static char date[32] = {0};
 
-	struct tempfile *tempfile = NULL;
-	struct strbuf buf_path = STRBUF_INIT;
+	struct strbuf basename = STRBUF_INIT;
+	struct strbuf buf = STRBUF_INIT;
+	int len_p;
+	enum scld_error scld;
+
+	gh__response_status__zero(status);
 
 	if (!nth) {
+		/*
+		 * Create a <date> string to use in the name of all packfiles
+		 * created by this process.
+		 */
 		gettimeofday(&tv, NULL);
 		secs = tv.tv_sec;
 		gmtime_r(&secs, &tm);
 
-		xsnprintf(tbuf, sizeof(tbuf), "%4d%02d%02d-%02d%02d%02d-%06ld",
+		xsnprintf(date, sizeof(date), "%4d%02d%02d-%02d%02d%02d-%06ld",
 			  tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
 			  tm.tm_hour, tm.tm_min, tm.tm_sec,
 			  (long)tv.tv_usec);
 	}
 
-	// TODO should this be in the "<ODB>/pack/tempPacks/"
-	// TODO directory instead? YES
+	/*
+	 * Create a <basename> for this packfile using a series number <n>,
+	 * so that all of the chunks we download will group together.
+	 */
+	strbuf_addf(&basename, "vfs-%s-%04d", date, nth++);
 
-	strbuf_addbuf(&buf_path, &gh__global.buf_odb_path);
-	strbuf_complete(&buf_path, '/');
-	strbuf_addf(&buf_path, "pack/vfs-%s-%04d.temp", tbuf, nth++);
+	/*
+	 * We will stream the data into a managed tempfile() in:
+	 *
+	 *     "<odb>/pack/tempPacks/vfs-<date>-<n>.temp"
+	 */
+	strbuf_setlen(&buf, 0);
+	strbuf_addbuf(&buf, &gh__global.buf_odb_path);
+	strbuf_complete(&buf, '/');
+	strbuf_addstr(&buf, "pack/");
+	len_p = buf.len;
+	strbuf_addstr(&buf, "tempPacks/");
+	strbuf_addbuf(&buf, &basename);
+	strbuf_addstr(&buf, ".temp");
+
+	scld = safe_create_leading_directories(buf.buf);
+	if (scld != SCLD_OK && scld != SCLD_EXISTS) {
+		strbuf_addf(&status->error_message,
+			    "could not create directory for packfile: '%s'",
+			    buf.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
+		goto cleanup;
+	}
 
-	tempfile = create_tempfile(buf_path.buf);
-	fdopen_tempfile(tempfile, "w");
+	params->tempfile = create_tempfile(buf.buf);
+	if (!params->tempfile) {
+		strbuf_addf(&status->error_message,
+			    "could not create tempfile for packfile: '%s'",
+			    buf.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
+		goto cleanup;
+	}
 
-	strbuf_release(&buf_path);
+	fdopen_tempfile(params->tempfile, "w");
+
+	/*
+	 * After the download is complete, we will need to steal the file
+	 * from the tempfile() class (so that it doesn't magically delete
+	 * it when we close the file handle) and then index it.
+	 *
+	 * We do this into the tempPacks directory to avoid contaminating
+	 * the real pack directory until we know there is no corruption.
+	 *
+	 *     "<odb>/pack/tempPacks/vfs-<date>-<n>.temp.pack"
+	 *     "<odb>/pack/tempPacks/vfs-<date>-<n>.temp.idx"
+	 */
+	strbuf_setlen(&params->temp_path_pack, 0);
+	strbuf_addf(&params->temp_path_pack, "%s.pack", buf.buf);
+
+	strbuf_setlen(&params->temp_path_idx, 0);
+	strbuf_addf(&params->temp_path_idx, "%s.idx", buf.buf);
 
-	return tempfile;
+	/*
+	 * Later, if all goes well, we will install them as:
+	 *
+	 *     "<odb>/pack/vfs-<date>-<n>.pack"
+	 *     "<odb>/pack/vfs-<date>-<n>.idx"
+	 */
+	strbuf_setlen(&buf, len_p);
+	strbuf_setlen(&params->final_path_pack, 0);
+	strbuf_addf(&params->final_path_pack, "%s%s.pack",
+		    buf.buf, basename.buf);
+	strbuf_setlen(&params->final_path_idx, 0);
+	strbuf_addf(&params->final_path_idx, "%s%s.idx",
+		    buf.buf, basename.buf);
+	strbuf_setlen(&params->final_packfile_filename, 0);
+	strbuf_addf(&params->final_packfile_filename, "%s.pack",
+		    basename.buf);
+
+cleanup:
+	strbuf_release(&buf);
+	strbuf_release(&basename);
 }
 
 /*
@@ -1112,15 +1649,15 @@ static struct tempfile *create_tempfile_for_packfile(void)
  */
 static void create_tempfile_for_loose(
 	struct gh__request_params *params,
-	struct gh__response_status *status,
-	const struct object_id *oid)
+	struct gh__response_status *status)
 {
+	static int nth = 0;
 	struct strbuf buf_path = STRBUF_INIT;
 	const char *hex;
 
 	gh__response_status__zero(status);
 
-	hex = oid_to_hex(oid);
+	hex = oid_to_hex(&params->loose_oid);
 
 	strbuf_addbuf(&buf_path, &gh__global.buf_odb_path);
 	strbuf_complete(&buf_path, '/');
@@ -1148,7 +1685,7 @@ static void create_tempfile_for_loose(
 	 * using lockfiles to avoid issues with stale locks after
 	 * crashes.
 	 */
-	strbuf_addf(&buf_path, ".%08u.temp", getpid());
+	strbuf_addf(&buf_path, ".%08u.%.06u.temp", getpid(), nth++);
 
 	params->tempfile = create_tempfile(buf_path.buf);
 	if (!params->tempfile) {
@@ -1165,85 +1702,34 @@ static void create_tempfile_for_loose(
 }
 
 /*
- * Extract the filename portion of the given pathname.
- *
- * TODO Wish I could find a strbuf_filename() function for this.
+ * Convert the tempfile into a temporary .pack, index it into a temporary .idx
+ * file, and then install the pair into the ODB.
  */
-static void extract_filename(struct strbuf *filename,
-			     const struct strbuf *pathname)
-{
-	size_t len = pathname->len;
-
-	strbuf_setlen(filename, 0);
-
-	while (len > 0 && !is_dir_sep(pathname->buf[len - 1]))
-		len--;
-
-	strbuf_addstr(filename, &pathname->buf[len]);
-}
-
-/*
- * Convert the tempfile into a permanent .pack packfile in the ODB.
- * Create the corresponding .idx file.
- *
- * Return the filename (not pathname) of the resulting packfile.
- */
-static void install_packfile(struct gh__response_status *status,
-			     struct tempfile **pp_tempfile,
-			     struct strbuf *packfile_filename)
+static void install_packfile(struct gh__request_params *params,
+			     struct gh__response_status *status)
 {
 	struct child_process ip = CHILD_PROCESS_INIT;
-	struct strbuf pack_name_tmp = STRBUF_INIT;
-	struct strbuf pack_name_dst = STRBUF_INIT;
-	struct strbuf idx_name_tmp = STRBUF_INIT;
-	struct strbuf idx_name_dst = STRBUF_INIT;
-	size_t len_base;
-
-	gh__response_status__zero(status);
-
-	strbuf_setlen(packfile_filename, 0);
 
 	/*
-	 * start with "<base>.temp" (that is owned by tempfile class).
-	 * rename to "<base>.pack.temp" to break ownership.
-	 *
-	 * create "<base>.idx.temp" on provisional packfile.
-	 *
-	 * officially install both "<base>.{pack,idx}.temp" as
-	 * "<base>.{pack,idx}".
+	 * When we request more than 1 object, the server should always
+	 * send us a packfile.
 	 */
-
-	strbuf_addstr(&pack_name_tmp, get_tempfile_path(*pp_tempfile));
-	if (!strip_suffix(pack_name_tmp.buf, ".temp", &len_base)) {
-		/*
-		 * This is more of a BUG(), but I want the error
-		 * code propagated.
-		 */
+	if (strcmp(status->content_type.buf,
+		   "application/x-git-packfile")) {
 		strbuf_addf(&status->error_message,
-			    "packfile tempfile does not end in '.temp': '%s'",
-			    pack_name_tmp.buf);
-		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
+			    "received unknown content-type '%s'",
+			    status->content_type.buf);
+		status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE;
 		goto cleanup;
 	}
 
-	strbuf_setlen(&pack_name_tmp, (int)len_base);
-	strbuf_addbuf(&pack_name_dst, &pack_name_tmp);
-	strbuf_addbuf(&idx_name_tmp, &pack_name_tmp);
-	strbuf_addbuf(&idx_name_dst, &pack_name_tmp);
-
-	strbuf_addstr(&pack_name_tmp, ".pack.temp");
-	strbuf_addstr(&pack_name_dst, ".pack");
-	strbuf_addstr(&idx_name_tmp, ".idx.temp");
-	strbuf_addstr(&idx_name_dst, ".idx");
-
-	// TODO if either pack_name_dst or idx_name_dst already
-	// TODO exists in the ODB, create alternate names so that
-	// TODO we don't step on them.
+	gh__response_status__zero(status);
 
-	if (rename_tempfile(pp_tempfile, pack_name_tmp.buf) == -1) {
+	if (rename_tempfile(&params->tempfile,
+			    params->temp_path_pack.buf) == -1) {
 		strbuf_addf(&status->error_message,
 			    "could not rename packfile to '%s'",
-			    pack_name_tmp.buf);
+			    params->temp_path_pack.buf);
 		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
 		goto cleanup;
 	}
@@ -1251,59 +1737,54 @@ static void install_packfile(struct gh__response_status *status,
 	strvec_push(&ip.args, "index-pack");
 	if (gh__cmd_opts.show_progress)
 		strvec_push(&ip.args, "-v");
-	strvec_pushl(&ip.args, "-o", idx_name_tmp.buf, NULL);
-	strvec_push(&ip.args, pack_name_tmp.buf);
+	strvec_pushl(&ip.args, "-o", params->temp_path_idx.buf, NULL);
+	strvec_push(&ip.args, params->temp_path_pack.buf);
 	ip.git_cmd = 1;
 	ip.no_stdin = 1;
 	ip.no_stdout = 1;
 
-	// TODO consider capturing stdout from index-pack because
-	// TODO it will contain the SHA of the packfile and we can
-	// TODO (should?) add it to the .pack and .idx pathnames
-	// TODO when we install them.
-	// TODO
-	// TODO See pipe_command() rather than run_command().
-	// TODO
-	// TODO Or should be SHA-it ourselves (or read the last 20 bytes)?
-
 	/*
-	 * Note that I DO NOT have a region around the index-pack process.
-	 * The region in gh__run_one_slot() currently only covers the
-	 * download time.  This index-pack is a separate step not covered
-	 * in the above region.  Later, if/when we have CURL directly stream
-	 * to index-pack, that region will be the combined download+index
-	 * time.  So, I'm not going to introduce it here.
+	 * Note that I DO NOT have a trace2 region around the
+	 * index-pack process by itself.  Currently, we are inside the
+	 * trace2 region for running the request and that's fine.
+	 * Later, if/when we stream the download directly to
+	 * index-pack, it will be under the same region anyway.
+	 * So, I'm not going to introduce it here.
 	 */
 	if (run_command(&ip)) {
-		unlink(pack_name_tmp.buf);
-		unlink(idx_name_tmp.buf);
+		unlink(params->temp_path_pack.buf);
+		unlink(params->temp_path_idx.buf);
 		strbuf_addf(&status->error_message,
-			    "index-pack failed on '%s'", pack_name_tmp.buf);
-		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
+			    "index-pack failed on '%s'",
+			    params->temp_path_pack.buf);
+		/*
+		 * Let's assume that index-pack failed because the
+		 * downloaded file is corrupt (truncated).
+		 *
+		 * Retry it as if the network had dropped.
+		 */
+		status->retry = GH__RETRY_MODE__TRANSIENT;
+		status->ec = GH__ERROR_CODE__INDEX_PACK_FAILED;
 		goto cleanup;
 	}
 
-	if (finalize_object_file(pack_name_tmp.buf, pack_name_dst.buf) ||
-	    finalize_object_file(idx_name_tmp.buf, idx_name_dst.buf)) {
-		unlink(pack_name_tmp.buf);
-		unlink(pack_name_dst.buf);
-		unlink(idx_name_tmp.buf);
-		unlink(idx_name_dst.buf);
+	if (finalize_object_file(params->temp_path_pack.buf,
+				 params->final_path_pack.buf) ||
+	    finalize_object_file(params->temp_path_idx.buf,
+				 params->final_path_idx.buf)) {
+		unlink(params->temp_path_pack.buf);
+		unlink(params->temp_path_idx.buf);
+		unlink(params->final_path_pack.buf);
+		unlink(params->final_path_idx.buf);
 		strbuf_addf(&status->error_message,
 			    "could not install packfile '%s'",
-			    pack_name_dst.buf);
+			    params->final_path_pack.buf);
 		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
 		goto cleanup;
 	}
 
-	extract_filename(packfile_filename, &pack_name_dst);
-
 cleanup:
 	child_process_clear(&ip);
-	strbuf_release(&pack_name_tmp);
-	strbuf_release(&pack_name_dst);
-	strbuf_release(&idx_name_tmp);
-	strbuf_release(&idx_name_dst);
 }
 
 /*
@@ -1372,6 +1853,249 @@ static void gh_http_cleanup(void)
 	gh__global.http_is_initialized = 0;
 }
 
+/*
+ * buffer has "<key>: <value>[\r]\n"
+ */
+static void parse_resp_hdr_1(const char *buffer, size_t size, size_t nitems,
+			     struct strbuf *key, struct strbuf *value)
+{
+	const char *end = buffer + (size * nitems);
+	const char *p;
+
+	p = strchr(buffer, ':');
+
+	strbuf_setlen(key, 0);
+	strbuf_add(key, buffer, (p - buffer));
+
+	p++; /* skip ':' */
+	p++; /* skip ' ' */
+
+	strbuf_setlen(value, 0);
+	strbuf_add(value, p, (end - p));
+	strbuf_trim_trailing_newline(value);
+}
+
+static size_t parse_resp_hdr(char *buffer, size_t size, size_t nitems,
+			     void *void_params)
+{
+	struct gh__request_params *params = void_params;
+	struct gh__azure_throttle *azure = &gh__global_throttle[params->server_type];
+
+	if (starts_with(buffer, "X-RateLimit-")) {
+		struct strbuf key = STRBUF_INIT;
+		struct strbuf val = STRBUF_INIT;
+
+		parse_resp_hdr_1(buffer, size, nitems, &key, &val);
+
+		/*
+		 * The following X- headers are specific to AzureDevOps.
+		 * Other servers have similar sets of values, but I haven't
+		 * compared them in depth.
+		 *
+		 * TODO Remove this.
+		 */
+		trace2_printf("Throttle: %s %s", key.buf, val.buf);
+
+		if (!strcmp(key.buf, "X-RateLimit-Resource")) {
+			/*
+			 * The name of the resource that is complaining.
+			 * Just log it because we can't do anything with it.
+			 */
+			strbuf_setlen(&key, 0);
+			strbuf_addstr(&key, "ratelimit/resource");
+			strbuf_addstr(&key, gh__server_type_label[params->server_type]);
+
+			trace2_data_string("gvfs-helper", NULL, key.buf, val.buf);
+		}
+
+		else if (!strcmp(key.buf, "X-RateLimit-Delay")) {
+			/*
+			 * The amount of delay added to our response.
+			 * Just log it because we can't do anything with it.
+			 */
+			unsigned long tarpit_delay_ms;
+
+			strbuf_setlen(&key, 0);
+			strbuf_addstr(&key, "ratelimit/delay_ms");
+			strbuf_addstr(&key, gh__server_type_label[params->server_type]);
+
+			git_parse_ulong(val.buf, &tarpit_delay_ms);
+
+			trace2_data_intmax("gvfs-helper", NULL, key.buf, tarpit_delay_ms);
+		}
+
+		else if (!strcmp(key.buf, "X-RateLimit-Limit")) {
+			/*
+			 * The resource limit/quota before we get a 429.
+			 */
+			git_parse_ulong(val.buf, &azure->tstu_limit);
+		}
+
+		else if (!strcmp(key.buf, "X-RateLimit-Remaining")) {
+			/*
+			 * The amount of our quota remaining.  When zero, we
+			 * should get 429s on futher requests until the reset
+			 * should get 429s on further requests until the reset
+			 */
+			git_parse_ulong(val.buf, &azure->tstu_remaining);
+		}
+
+		else if (!strcmp(key.buf, "X-RateLimit-Reset")) {
+			/*
+			 * The server gave us a time-in-seconds-since-the-epoch
+			 * for when our quota will be reset (if we stop all
+			 * activity right now).
+			 *
+			 * Checkpoint the local system clock so we can do some
+			 * sanity checks on any clock skew.  Also, since we get
+			 * the headers before we get the content, we can adjust
+			 * our delay to compensate for the full download time.
+			 */
+			unsigned long now = time(NULL);
+			unsigned long reset_time;
+
+			git_parse_ulong(val.buf, &reset_time);
+			if (reset_time > now)
+				azure->reset_sec = reset_time - now;
+		}
+
+		strbuf_release(&key);
+		strbuf_release(&val);
+	}
+
+	else if (starts_with(buffer, "Retry-After")) {
+		struct strbuf key = STRBUF_INIT;
+		struct strbuf val = STRBUF_INIT;
+
+		parse_resp_hdr_1(buffer, size, nitems, &key, &val);
+
+		/*
+		 * We get this header with a 429 and 503 and possibly a 30x.
+		 *
+		 * Curl does have CURLINFO_RETRY_AFTER that nicely parses and
+		 * normalizes the value (and supports HTTP/1.1 usage), but it
+		 * is not present yet in the version shipped with the Mac, so
+		 * we do it directly here.
+		 */
+		git_parse_ulong(val.buf, &azure->retry_after_sec);
+
+		strbuf_release(&key);
+		strbuf_release(&val);
+	}
+
+	else if (starts_with(buffer, "X-VSS-E2EID")) {
+		struct strbuf key = STRBUF_INIT;
+
+		/*
+		 * Capture the E2EID as it goes by, but don't log it until we
+		 * know the request result.
+		 */
+		parse_resp_hdr_1(buffer, size, nitems, &key, &params->e2eid);
+
+		strbuf_release(&key);
+	}
+
+	return nitems * size;
+}
+
+/*
+ * Wait "duration" seconds and drive the progress mechanism.
+ *
+ * We spin slightly faster than strictly necessary to keep the progress bar
+ * drawn (especially if the user presses return while waiting) and to
+ * compensate for delay factors built into the progress class (which
+ * might wait for 2 seconds before drawing the first message).
+ */
+static void do_throttle_spin(struct gh__request_params *params,
+			     const char *tr2_label,
+			     const char *progress_msg,
+			     int duration)
+{
+	struct strbuf region = STRBUF_INIT;
+	struct progress *progress = NULL;
+	unsigned long begin = time(NULL);
+	unsigned long now = begin;
+	unsigned long end = begin + duration;
+
+	strbuf_addstr(&region, tr2_label);
+	strbuf_addstr(&region, gh__server_type_label[params->server_type]);
+	trace2_region_enter("gvfs-helper", region.buf, NULL);
+
+	progress = start_progress(progress_msg, duration);
+	while (now < end) {
+		display_progress(progress, (now - begin));
+
+		sleep_millisec(100);
+
+		now = time(NULL);
+	}
+	display_progress(progress, duration);
+	stop_progress(&progress);
+
+	trace2_region_leave("gvfs-helper", region.buf, NULL);
+	strbuf_release(&region);
+}
+
+/*
+ * Delay the outbound request if necessary in response to previous throttle
+ * blockages or hints.  Throttle data is somewhat orthogonal to the status
+ * results from any previous request and/or the request params of the next
+ * request.
+ *
+ * Note that the throttle info is also cross-process information; for
+ * example, 2 concurrent fetches in 2 different terminal windows to the
+ * same server will share the same server quota.  These could be
+ * coordinated too, so that a blockage received in one process would
+ * prevent the other process from starting another request (and also
+ * block or extend the delay interval).  We're NOT going to do that
+ * level of integration.  We will let both processes independently
+ * attempt the next request.
+ * This may cause us to miss the end-of-quota boundary if the server
+ * extends it because of the second request.
+ *
+ * TODO Should we have a max-wait option and then return a hard-error
+ * TODO of some type?
+ */
+static void do_throttle_wait(struct gh__request_params *params,
+			     struct gh__response_status *status UNUSED)
+{
+	struct gh__azure_throttle *azure =
+		&gh__global_throttle[params->server_type];
+
+	if (azure->retry_after_sec) {
+		/*
+		 * We were given a hard delay (such as after a 429).
+		 * Spin until the requested time.
+		 */
+		do_throttle_spin(params, "throttle/hard",
+				 "Waiting on hard throttle (sec)",
+				 azure->retry_after_sec);
+		return;
+	}
+
+	if (azure->reset_sec > 0) {
+		/*
+		 * We were given a hint that we are overloading
+		 * the server.  Voluntarily backoff (before we
+		 * get tarpitted or blocked).
+		 */
+		do_throttle_spin(params, "throttle/soft",
+				 "Waiting on soft throttle (sec)",
+				 azure->reset_sec);
+		return;
+	}
+
+	if (params->k_transient_delay_sec) {
+		/*
+		 * Insert an arbitrary delay before retrying after a
+		 * transient (network) failure.
+		 */
+		do_throttle_spin(params, "throttle/transient",
+				 "Waiting to retry after network error (sec)",
+				 params->k_transient_delay_sec);
+		return;
+	}
+}
+
 static void set_main_creds_on_slot(struct active_request_slot *slot,
 				   const struct credential *creds)
 {
@@ -1451,7 +2175,7 @@ static void set_cache_server_creds_on_slot(struct active_request_slot *slot,
 }
 
 /*
- * Do a single HTTP request without auth-retry or fallback.
+ * Do a single HTTP request WITHOUT robust-retry, auth-retry or fallback.
  */
 static void do_req(const char *url_base,
 		   const char *url_component,
@@ -1466,14 +2190,27 @@ static void do_req(const char *url_base,
 	gh__response_status__zero(status);
 
 	if (params->b_write_to_file) {
-		// TODO ftruncate tempfile ??
+		/* Delete dirty tempfile from a previous attempt. */
+		if (params->tempfile)
+			delete_tempfile(&params->tempfile);
+
+		if (params->b_is_post)
+			create_tempfile_for_packfile(params, status);
+		else
+			create_tempfile_for_loose(params, status);
+		if (!params->tempfile || status->ec != GH__ERROR_CODE__OK)
+			return;
 	} else {
+		/* Guard against caller using dirty buffer */
 		strbuf_setlen(params->buffer, 0);
 	}
 
 	end_url_with_slash(&rest_url, url_base);
 	strbuf_addstr(&rest_url, url_component);
 
+	do_throttle_wait(params, status);
+	gh__azure_throttle__zero(&gh__global_throttle[params->server_type]);
+
 	slot = get_active_slot();
 	slot->results = &results;
 
@@ -1502,7 +2239,10 @@ static void do_req(const char *url_base,
 		curl_easy_setopt(slot->curl, CURLOPT_FILE, params->buffer);
 	}
 
-	if (url_base == gh__global.main_url)
+	curl_easy_setopt(slot->curl, CURLOPT_HEADERFUNCTION, parse_resp_hdr);
+	curl_easy_setopt(slot->curl, CURLOPT_HEADERDATA, params);
+
+	if (params->server_type == GH__SERVER_TYPE__MAIN)
 		set_main_creds_on_slot(slot, creds);
 	else
 		set_cache_server_creds_on_slot(slot, creds);
@@ -1521,25 +2261,104 @@ static void do_req(const char *url_base,
 	strbuf_release(&rest_url);
 }
 
+/*
+ * Compute the delay for the nth attempt.
+ *
+ * No delay for the first attempt. Then use a normal exponential backoff
+ * starting from 8 seconds.
+ */
+static int compute_transient_delay(int attempt)
+{
+	int v;
+
+	if (attempt < 1)
+		return 0;
+
+	/*
+	 * Cap the shift (for integer overflow protection).  With attempt
+	 * capped at 10, the computed value tops out at 8 << 9 = 4096 seconds.
+	 */
+	if (attempt > 10)
+		attempt = 10;
+
+	v = 8 << (attempt - 1);
+
+	if (v > gh__cmd_opts.max_transient_backoff_sec)
+		v = gh__cmd_opts.max_transient_backoff_sec;
+
+	return v;
+}
+
+/*
+ * Robustly make an HTTP request.  Retry if necessary to hide common
+ * transient network errors and/or 429 blockages.
+ *
+ * For a transient (network) failure (where we do not have a throttle
+ * delay factor), we should insert a small delay to let the network
+ * recover.  The outage might be because the VPN dropped or the
+ * machine went to sleep, and we want to give the network
+ * time to come back up.  Insert AI here :-)
+ */
+static void do_req__with_robust_retry(const char *url_base,
+				      const char *url_component,
+				      const struct credential *creds,
+				      struct gh__request_params *params,
+				      struct gh__response_status *status)
+{
+	for (params->k_attempt = 0;
+	     params->k_attempt < gh__cmd_opts.max_retries + 1;
+	     params->k_attempt++) {
+
+		do_req(url_base, url_component, creds, params, status);
+
+		switch (status->retry) {
+		default:
+		case GH__RETRY_MODE__SUCCESS:
+		case GH__RETRY_MODE__HTTP_401: /* caller does auth-retry */
+		case GH__RETRY_MODE__HARD_FAIL:
+		case GH__RETRY_MODE__FAIL_404:
+			return;
+
+		case GH__RETRY_MODE__HTTP_429:
+		case GH__RETRY_MODE__HTTP_503:
+			/*
+			 * We should have gotten a "Retry-After" header with
+			 * these and that gives us the wait time.  If not,
+			 * fallthru and use the backoff delay.
+			 */
+			if (gh__global_throttle[params->server_type].retry_after_sec)
+				continue;
+			/*fallthru*/
+
+		case GH__RETRY_MODE__TRANSIENT:
+			params->k_transient_delay_sec =
+				compute_transient_delay(params->k_attempt);
+			continue;
+		}
+	}
+}
+
 static void do_req__to_main(const char *url_component,
 			    struct gh__request_params *params,
 			    struct gh__response_status *status)
 {
-//	lookup_main_creds();
+	params->server_type = GH__SERVER_TYPE__MAIN;
 
 	/*
 	 * When talking to the main Git server, we DO NOT preload the
 	 * creds before the first request.
 	 */
 
-	do_req(gh__global.main_url, url_component, &gh__global.main_creds,
-	       params, status);
+	do_req__with_robust_retry(gh__global.main_url, url_component,
+				  &gh__global.main_creds,
+				  params, status);
 
 	if (status->response_code == 401) {
 		refresh_main_creds();
 
-		do_req(gh__global.main_url, url_component, &gh__global.main_creds,
-		       params, status);
+		do_req__with_robust_retry(gh__global.main_url, url_component,
+					  &gh__global.main_creds,
+					  params, status);
 	}
 
 	if (status->response_code == 200)
@@ -1550,33 +2369,40 @@ static void do_req__to_cache_server(const char *url_component,
 				    struct gh__request_params *params,
 				    struct gh__response_status *status)
 {
+	params->server_type = GH__SERVER_TYPE__CACHE;
+
 	/*
 	 * When talking to a cache-server, DO force load the creds.
 	 * This implicitly preloads the creds to the main server.
 	 */
 	synthesize_cache_server_creds();
 
-	do_req(gh__global.cache_server_url, url_component, &gh__global.cache_creds,
-	       params, status);
-	fixup_cache_server_400_to_401(status);
+	do_req__with_robust_retry(gh__global.cache_server_url, url_component,
+				  &gh__global.cache_creds,
+				  params, status);
 
 	if (status->response_code == 401) {
 		refresh_cache_server_creds();
 
-		do_req(gh__global.cache_server_url, url_component,
-		       &gh__global.cache_creds, params, status);
-		fixup_cache_server_400_to_401(status);
+		do_req__with_robust_retry(gh__global.cache_server_url,
+					  url_component,
+					  &gh__global.cache_creds,
+					  params, status);
 	}
 
 	if (status->response_code == 200)
 		approve_cache_server_creds();
 }
 
+/*
+ * Try the cache-server (if configured), then fall back to the main Git server.
+ */
 static void do_req__with_fallback(const char *url_component,
 				  struct gh__request_params *params,
 				  struct gh__response_status *status)
 {
-	if (gh__global.cache_server_url && !params->b_no_cache_server) {
+	if (gh__global.cache_server_url &&
+	    params->b_permit_cache_server_if_defined) {
 		do_req__to_cache_server(url_component, params, status);
 
 		if (status->response_code == 200)
@@ -1611,11 +2437,12 @@ static void do__gvfs_config(struct gh__response_status *status,
 {
 	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
 
-	strbuf_addstr(&params.label, "GET/config");
+	strbuf_addstr(&params.tr2_label, "GET/config");
 
 	params.b_is_post = 0;
 	params.b_write_to_file = 0;
-	params.b_no_cache_server = 1; /* they don't handle gvfs/config API */
+	/* cache-servers do not handle gvfs/config REST calls */
+	params.b_permit_cache_server_if_defined = 0;
 	params.buffer = config_data;
 
 	params.object_count = 1; /* a bit of a lie */
@@ -1660,11 +2487,11 @@ static void do__loose__gvfs_object(struct gh__response_status *status,
 
 	strbuf_addf(&component_url, "gvfs/objects/%s", oid_to_hex(oid));
 
-	strbuf_addstr(&params.label, "GET/objects");
+	strbuf_addstr(&params.tr2_label, "GET/objects");
 
 	params.b_is_post = 0;
 	params.b_write_to_file = 1;
-	params.b_no_cache_server = 0;
+	params.b_permit_cache_server_if_defined = 1;
 
 	params.object_count = 1;
 
@@ -1674,9 +2501,7 @@ static void do__loose__gvfs_object(struct gh__response_status *status,
 	params.headers = curl_slist_append(params.headers,
 					   "Pragma: no-cache");
 
-	create_tempfile_for_loose(&params, status, oid);
-	if (!params.tempfile)
-		goto cleanup;
+	oidcpy(&params.loose_oid, oid);
 
 	if (gh__cmd_opts.show_progress) {
 		/*
@@ -1691,10 +2516,6 @@ static void do__loose__gvfs_object(struct gh__response_status *status,
 
 	do_req__with_fallback(component_url.buf, &params, status);
 
-	if (status->ec == GH__ERROR_CODE__OK)
-		install_loose(&params, status);
-
-cleanup:
 	gh__request_params__release(&params);
 	strbuf_release(&component_url);
 }
@@ -1707,23 +2528,26 @@ static void do__loose__gvfs_object(struct gh__response_status *status,
 static void do__packfile__gvfs_objects(struct gh__response_status *status,
 				       struct oidset_iter *iter,
 				       unsigned long nr_wanted_in_block,
+				       int j_pack_num, int j_pack_den,
 				       struct strbuf *output_filename,
-				       unsigned long *nr_taken)
+				       unsigned long *nr_oid_taken)
 {
 	struct json_writer jw_req = JSON_WRITER_INIT;
 	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
 
+	strbuf_setlen(output_filename, 0);
+
 	gh__response_status__zero(status);
 
 	params.object_count = build_json_payload__gvfs_objects(
 		&jw_req, iter, nr_wanted_in_block);
-	*nr_taken = params.object_count;
+	*nr_oid_taken = params.object_count;
 
-	strbuf_addstr(&params.label, "POST/objects");
+	strbuf_addstr(&params.tr2_label, "POST/objects");
 
 	params.b_is_post = 1;
 	params.b_write_to_file = 1;
-	params.b_no_cache_server = 0;
+	params.b_permit_cache_server_if_defined = 1;
 
 	params.post_payload = &jw_req.json;
 
@@ -1748,73 +2572,21 @@ static void do__packfile__gvfs_objects(struct gh__response_status *status,
 	params.headers = curl_slist_append(params.headers,
 					   "Accept: application/x-git-loose-object");
 
-	params.tempfile = create_tempfile_for_packfile();
-	if (!params.tempfile) {
-		strbuf_addstr(&status->error_message,
-			      "could not create tempfile for packfile");
-		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
-		goto cleanup;
-	}
-
 	if (gh__cmd_opts.show_progress) {
 		strbuf_addf(&params.progress_base_phase2_msg,
-			    "Requesting packfile with %ld objects",
+			    "Requesting packfile %d/%d with %ld objects",
+			    j_pack_num, j_pack_den,
 			    params.object_count);
 		strbuf_addf(&params.progress_base_phase3_msg,
-			    "Receiving packfile with %ld objects",
+			    "Receiving packfile %d/%d with %ld objects",
+			    j_pack_num, j_pack_den,
 			    params.object_count);
 	}
 
 	do_req__with_fallback("gvfs/objects", &params, status);
+	if (status->ec == GH__ERROR_CODE__OK)
+		strbuf_addbuf(output_filename, &params.final_packfile_filename);
 
-	if (status->ec == GH__ERROR_CODE__OK) {
-		if (!strcmp(status->content_type.buf,
-			    "application/x-git-packfile")) {
-
-			// TODO Consider having a worker thread to manage
-			// TODO running index-pack and then install the
-			// TODO resulting .idx and .pack files.  This would
-			// TODO let us interleave those steps with our thread
-			// TODO fetching the next block of objects from the
-			// TODO server.  (Need to think about how progress
-			// TODO messages from our thread and index-pack
-			// TODO would mesh.)
-			// TODO
-			// TODO But then again, if we hack index-pack to write
-			// TODO to our alternate and stream the data thru it,
-			// TODO it won't matter.
-
-			install_packfile(status, &params.tempfile,
-					 output_filename);
-			goto cleanup;
-		}
-
-		if (!strcmp(status->content_type.buf,
-			    "application/x-git-loose-object"))
-		{
-			/*
-			 * This should not happen (when we request
-			 * more than one object).  The server can send
-			 * us a loose object (even when we use the
-			 * POST form) if there is only one object in
-			 * the payload (and despite the set of accept
-			 * headers we send), so I'm going to leave
-			 * this here.
-			 */
-			strbuf_addstr(&status->error_message,
-				      "received loose object when packfile expected");
-			status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE;
-			goto cleanup;
-		}
-
-		strbuf_addf(&status->error_message,
-			    "received unknown content-type '%s'",
-			    status->content_type.buf);
-		status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE;
-		goto cleanup;
-	}
-
-cleanup:
 	gh__request_params__release(&params);
 	jw_release(&jw_req);
 }
@@ -1828,7 +2600,7 @@ static void do__packfile__gvfs_objects(struct gh__response_status *status,
  */
 static void do_fetch_oidset(struct gh__response_status *status,
 			    struct oidset *oids,
-			    unsigned long nr_total,
+			    unsigned long nr_oid_total,
 			    struct string_list *result_list)
 {
 	struct oidset_iter iter;
@@ -1837,19 +2609,25 @@ static void do_fetch_oidset(struct gh__response_status *status,
 	struct strbuf err404 = STRBUF_INIT;
 	const struct object_id *oid;
 	unsigned long k;
-	unsigned long nr_taken;
+	unsigned long nr_oid_taken;
 	int had_404 = 0;
+	int j_pack_den = 0;
+	int j_pack_num = 0;
 
 	gh__response_status__zero(status);
-	if (!nr_total)
+	if (!nr_oid_total)
 		return;
 
+	if (nr_oid_total > 1)
+		j_pack_den = ((nr_oid_total + gh__cmd_opts.block_size - 1)
+			      / gh__cmd_opts.block_size);
+
 	oidset_iter_init(oids, &iter);
 
-	for (k = 0; k < nr_total; k += nr_taken) {
-		if (nr_total - k == 1 || gh__cmd_opts.block_size == 1) {
+	for (k = 0; k < nr_oid_total; k += nr_oid_taken) {
+		if (nr_oid_total - k == 1 || gh__cmd_opts.block_size == 1) {
 			oid = oidset_iter_next(&iter);
-			nr_taken = 1;
+			nr_oid_taken = 1;
 
 			do__loose__gvfs_object(status, oid);
 
@@ -1884,10 +2662,13 @@ static void do_fetch_oidset(struct gh__response_status *status,
 		} else {
 			strbuf_setlen(&output_filename, 0);
 
+			j_pack_num++;
+
 			do__packfile__gvfs_objects(status, &iter,
 						   gh__cmd_opts.block_size,
+						   j_pack_num, j_pack_den,
 						   &output_filename,
-						   &nr_taken);
+						   &nr_oid_taken);
 
 			/*
 			 * Because the oidset iterator has random
@@ -2003,6 +2784,8 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv)
 			      N_("number of objects to request at a time")),
 		OPT_INTEGER('d', "depth", &gh__cmd_opts.depth,
 			    N_("Commit depth")),
+		OPT_INTEGER('r', "max-retries", &gh__cmd_opts.max_retries,
+			    N_("retries for transient network errors")),
 		OPT_END(),
 	};
 
@@ -2010,7 +2793,7 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv)
 	struct oidset oids = OIDSET_INIT;
 	struct string_list result_list = STRING_LIST_INIT_DUP;
 	enum gh__error_code ec = GH__ERROR_CODE__OK;
-	unsigned long nr_total;
+	unsigned long nr_oid_total;
 	size_t k;
 
 	trace2_cmd_mode("get");
@@ -2021,14 +2804,16 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv)
 	argc = parse_options(argc, argv, NULL, get_options, get_usage, 0);
 	if (gh__cmd_opts.depth < 1)
 		gh__cmd_opts.depth = 1;
+	if (gh__cmd_opts.max_retries < 0)
+		gh__cmd_opts.max_retries = 0;
 
 	finish_init(1);
 
-	nr_total = read_stdin_from_rev_list(&oids);
+	nr_oid_total = read_stdin_from_rev_list(&oids);
 
 	trace2_region_enter("gvfs-helper", "get", NULL);
-	trace2_data_intmax("gvfs-helper", NULL, "get/nr_objects", nr_total);
-	do_fetch_oidset(&status, &oids, nr_total, &result_list);
+	trace2_data_intmax("gvfs-helper", NULL, "get/nr_objects", nr_oid_total);
+	do_fetch_oidset(&status, &oids, nr_oid_total, &result_list);
 	trace2_region_leave("gvfs-helper", "get", NULL);
 
 	ec = status.ec;
@@ -2063,7 +2848,7 @@ static enum gh__error_code do_server_subprocess_get(void)
 	int len;
 	int err;
 	size_t k;
-	unsigned long nr_total = 0;
+	unsigned long nr_oid_total = 0;
 
 	/*
 	 * Inside the "get" command, we expect a list of OIDs
@@ -2081,10 +2866,10 @@ static enum gh__error_code do_server_subprocess_get(void)
 		}
 
 		if (!oidset_insert(&oids, &oid))
-			nr_total++;
+			nr_oid_total++;
 	}
 
-	if (!nr_total) {
+	if (!nr_oid_total) {
 		if (packet_write_fmt_gently(1, "ok\n")) {
 			error("server: cannot write 'get' result to client");
 			ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
@@ -2094,8 +2879,8 @@ static enum gh__error_code do_server_subprocess_get(void)
 	}
 
 	trace2_region_enter("gvfs-helper", "server/get", NULL);
-	trace2_data_intmax("gvfs-helper", NULL, "server/get/nr_objects", nr_total);
-	do_fetch_oidset(&status, &oids, nr_total, &result_list);
+	trace2_data_intmax("gvfs-helper", NULL, "server/get/nr_objects", nr_oid_total);
+	do_fetch_oidset(&status, &oids, nr_oid_total, &result_list);
 	trace2_region_leave("gvfs-helper", "server/get", NULL);
 
 	/*
@@ -2254,6 +3039,8 @@ static enum gh__error_code do_sub_cmd__server(int argc, const char **argv)
 			      N_("number of objects to request at a time")),
 		OPT_INTEGER('d', "depth", &gh__cmd_opts.depth,
 			    N_("Commit depth")),
+		OPT_INTEGER('r', "max-retries", &gh__cmd_opts.max_retries,
+			    N_("retries for transient network errors")),
 		OPT_END(),
 	};
 
@@ -2270,6 +3057,8 @@ static enum gh__error_code do_sub_cmd__server(int argc, const char **argv)
 	argc = parse_options(argc, argv, NULL, server_options, server_usage, 0);
 	if (gh__cmd_opts.depth < 1)
 		gh__cmd_opts.depth = 1;
+	if (gh__cmd_opts.max_retries < 0)
+		gh__cmd_opts.max_retries = 0;
 
 	finish_init(1);
 
@@ -2359,13 +3148,23 @@ int cmd_main(int argc, const char **argv)
 
 	setup_git_directory_gently(NULL);
 
-	git_config(git_default_config, NULL);
-
 	/* Set any non-zero initial values in gh__cmd_opts. */
-	gh__cmd_opts.depth = 1;
+	gh__cmd_opts.depth = GH__DEFAULT_COMMIT_DEPTH;
 	gh__cmd_opts.block_size = GH__DEFAULT_BLOCK_SIZE;
+	gh__cmd_opts.max_retries = GH__DEFAULT_MAX_RETRIES;
+	gh__cmd_opts.max_transient_backoff_sec =
+		GH__DEFAULT_MAX_TRANSIENT_BACKOFF_SEC;
+
 	gh__cmd_opts.show_progress = !!isatty(2);
 
+	// TODO use existing gvfs config settings to override our GH__DEFAULT_
+	// TODO values in gh__cmd_opts.  (And maybe add/remove our command line
+	// TODO options for them.)
+	// TODO
+	// TODO See "scalar.max-retries" (and maybe "gvfs.max-retries")
+
+	git_config(git_default_config, NULL);
+
 	argc = parse_options(argc, argv, NULL, main_options, main_usage,
 			     PARSE_OPT_STOP_AT_NON_OPTION);
 	if (argc == 0)

From 4817b745e70576acbdd91b56d25c8fb78885c10a Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Mon, 21 Oct 2019 12:29:19 -0400
Subject: [PATCH 099/207] gvfs-helper: expose gvfs/objects GET and POST
 semantics

Expose the differences in the semantics of GET and POST for
the "gvfs/objects" API:

    HTTP GET: fetches a single loose object over the network.
              When a commit object is requested, it just returns
              the single object.

    HTTP POST: fetches a batch of objects over the network.
               When the oid-set contains a commit object, all
               referenced trees are also included in the response.

gvfs-helper is updated to take "get" and "post" command line options.
The gvfs-helper "server" mode is updated to take "objects.get" and
"objects.post" verbs.

For convenience, the "get" option and the "objects.get" verb
do allow more than one object to be requested.  gvfs-helper will
automatically issue a series of (single object) HTTP GET requests,
creating a series of loose objects.

The "post" option and the "objects.post" verb will perform bulk
object fetching using the batch-size chunking.  Individual HTTP
POST requests containing more than one object will be created
as a packfile.  A HTTP POST for a single object will create a
loose object.
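
For example (the object counts here are purely illustrative), with the
default block size of 4000, a "post" of 9000 objects is issued as three
POST requests of 4000, 4000, and 1000 objects, each of which is indexed
and installed as its own packfile.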

This commit also contains some refactoring to eliminate the
assumption that POST is always associated with packfiles.

In gvfs-helper-client.c, gh_client__get_immediate() now uses the
"objects.get" verb and ignores any currently queued objects.

In gvfs-helper-client.c, the OIDSET built by gh_client__queue_oid()
is only processed when gh_client__drain_queue() is called.  The queue
is processed using the "object.post" verb.

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 gvfs-helper-client.c | 226 +++++++++-----
 gvfs-helper.c        | 712 +++++++++++++++++++++++++++----------------
 2 files changed, 607 insertions(+), 331 deletions(-)

diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c
index 9b699e082931b4..ce437a64db3b74 100644
--- a/gvfs-helper-client.c
+++ b/gvfs-helper-client.c
@@ -17,7 +17,6 @@
 
 static struct oidset gh_client__oidset_queued = OIDSET_INIT;
 static unsigned long gh_client__oidset_count;
-static int gh_client__includes_immediate;
 
 struct gh_server__process {
 	struct subprocess_entry subprocess; /* must be first */
@@ -28,13 +27,20 @@ static int gh_server__subprocess_map_initialized;
 static struct hashmap gh_server__subprocess_map;
 static struct object_directory *gh_client__chosen_odb;
 
-#define CAP_GET      (1u<<1)
+/*
+ * The "objects" capability has 2 verbs: "get" and "post".
+ */
+#define CAP_OBJECTS      (1u<<1)
+#define CAP_OBJECTS_NAME "objects"
+
+#define CAP_OBJECTS__VERB_GET1_NAME "get"
+#define CAP_OBJECTS__VERB_POST_NAME "post"
 
 static int gh_client__start_fn(struct subprocess_entry *subprocess)
 {
 	static int versions[] = {1, 0};
 	static struct subprocess_capability capabilities[] = {
-		{ "get", CAP_GET },
+		{ CAP_OBJECTS_NAME, CAP_OBJECTS },
 		{ NULL, 0 }
 	};
 
@@ -46,14 +52,16 @@ static int gh_client__start_fn(struct subprocess_entry *subprocess)
 }
 
 /*
- * Send:
+ * Send the queued OIDs in the OIDSET to gvfs-helper for it to
+ * fetch from the cache-server or main Git server using "/gvfs/objects"
+ * POST semantics.
  *
- *     get LF
+ *     objects.post LF
  *     (<hex-oid> LF)*
  *     <flush>
  *
  */
-static int gh_client__get__send_command(struct child_process *process)
+static int gh_client__send__objects_post(struct child_process *process)
 {
 	struct oidset_iter iter;
 	struct object_id *oid;
@@ -64,7 +72,9 @@ static int gh_client__get__send_command(struct child_process *process)
 	 * so that we don't have to.
 	 */
 
-	err = packet_write_fmt_gently(process->in, "get\n");
+	err = packet_write_fmt_gently(
+		process->in,
+		(CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_POST_NAME "\n"));
 	if (err)
 		return err;
 
@@ -83,6 +93,46 @@ static int gh_client__get__send_command(struct child_process *process)
 	return 0;
 }
 
+/*
+ * Send the given OID to gvfs-helper for it to fetch from the
+ * cache-server or main Git server using "/gvfs/objects" GET
+ * semantics.
+ *
+ * This ignores any queued OIDs.
+ *
+ *     objects.get LF
+ *     <hex-oid> LF
+ *     <flush>
+ *
+ */
+static int gh_client__send__objects_get(struct child_process *process,
+					const struct object_id *oid)
+{
+	int err;
+
+	/*
+	 * We assume that all of the packet_ routines call error()
+	 * so that we don't have to.
+	 */
+
+	err = packet_write_fmt_gently(
+		process->in,
+		(CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_GET1_NAME "\n"));
+	if (err)
+		return err;
+
+	err = packet_write_fmt_gently(process->in, "%s\n",
+				      oid_to_hex(oid));
+	if (err)
+		return err;
+
+	err = packet_flush_gently(process->in);
+	if (err)
+		return err;
+
+	return 0;
+}
+
 /*
  * Update the loose object cache to include the newly created
  * object.
@@ -131,7 +181,7 @@ static void gh_client__update_packed_git(const char *line)
 }
 
 /*
- * We expect:
+ * Both CAP_OBJECTS verbs return the same format response:
  *
  *    <odb>
  *    <data>*
@@ -162,7 +212,7 @@ static void gh_client__update_packed_git(const char *line)
  * grouped with a queued request for a blob.  The tree-walk *might* be
  * able to continue and let the 404 blob be handled later.
  */
-static int gh_client__get__receive_response(
+static int gh_client__objects__receive_response(
 	struct child_process *process,
 	enum gh_client__created *p_ghc,
 	int *p_nr_loose, int *p_nr_packfile)
@@ -241,17 +291,12 @@ static void gh_client__choose_odb(void)
 	}
 }
 
-static int gh_client__get(enum gh_client__created *p_ghc)
+static struct gh_server__process *gh_client__find_long_running_process(
+	unsigned int cap_needed)
 {
 	struct gh_server__process *entry;
-	struct child_process *process;
 	struct strvec argv = STRVEC_INIT;
 	struct strbuf quoted = STRBUF_INIT;
-	int nr_loose = 0;
-	int nr_packfile = 0;
-	int err = 0;
-
-	trace2_region_enter("gh-client", "get", the_repository);
 
 	gh_client__choose_odb();
 
@@ -267,6 +312,11 @@ static int gh_client__get(enum gh_client__created *p_ghc)
 
 	sq_quote_argv_pretty(&quoted, argv.v);
 
+	/*
+	 * Find an existing long-running process with the above command
+	 * line -or- create a new long-running process for this and
+	 * subsequent 'get' requests.
+	 */
 	if (!gh_server__subprocess_map_initialized) {
 		gh_server__subprocess_map_initialized = 1;
 		hashmap_init(&gh_server__subprocess_map,
@@ -280,70 +330,24 @@ static int gh_client__get(enum gh_client__created *p_ghc)
 		entry = xmalloc(sizeof(*entry));
 		entry->supported_capabilities = 0;
 
-		err = subprocess_start_strvec(
-			&gh_server__subprocess_map, &entry->subprocess, 1,
-			&argv, gh_client__start_fn);
-		if (err) {
-			free(entry);
-			goto leave_region;
-		}
+		if (subprocess_start_strvec(&gh_server__subprocess_map,
+					  &entry->subprocess, 1,
+					  &argv, gh_client__start_fn))
+			FREE_AND_NULL(entry);
 	}
 
-	process = &entry->subprocess.process;
-
-	if (!(CAP_GET & entry->supported_capabilities)) {
-		error("gvfs-helper: does not support GET");
-		subprocess_stop(&gh_server__subprocess_map,
-				(struct subprocess_entry *)entry);
-		free(entry);
-		err = -1;
-		goto leave_region;
-	}
-
-	sigchain_push(SIGPIPE, SIG_IGN);
-
-	err = gh_client__get__send_command(process);
-	if (!err)
-		err = gh_client__get__receive_response(process, p_ghc,
-						 &nr_loose, &nr_packfile);
-
-	sigchain_pop(SIGPIPE);
-
-	if (err) {
+	if (entry &&
+	    (entry->supported_capabilities & cap_needed) != cap_needed) {
+		error("gvfs-helper: does not support needed capabilities");
 		subprocess_stop(&gh_server__subprocess_map,
 				(struct subprocess_entry *)entry);
-		free(entry);
+		FREE_AND_NULL(entry);
 	}
 
-leave_region:
 	strvec_clear(&argv);
 	strbuf_release(&quoted);
 
-	trace2_data_intmax("gh-client", the_repository,
-			   "get/immediate", gh_client__includes_immediate);
-
-	trace2_data_intmax("gh-client", the_repository,
-			   "get/nr_objects", gh_client__oidset_count);
-
-	if (nr_loose)
-		trace2_data_intmax("gh-client", the_repository,
-				   "get/nr_loose", nr_loose);
-
-	if (nr_packfile)
-		trace2_data_intmax("gh-client", the_repository,
-				   "get/nr_packfile", nr_packfile);
-
-	if (err)
-		trace2_data_intmax("gh-client", the_repository,
-				   "get/error", err);
-
-	trace2_region_leave("gh-client", "get", the_repository);
-
-	oidset_clear(&gh_client__oidset_queued);
-	gh_client__oidset_count = 0;
-	gh_client__includes_immediate = 0;
-
-	return err;
+	return entry;
 }
 
 void gh_client__queue_oid(const struct object_id *oid)
@@ -370,27 +374,97 @@ void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr)
 		gh_client__queue_oid(&oids[k]);
 }
 
+/*
+ * Bulk fetch all of the queued OIDs in the OIDSET.
+ */
 int gh_client__drain_queue(enum gh_client__created *p_ghc)
 {
+	struct gh_server__process *entry;
+	struct child_process *process;
+	int nr_loose = 0;
+	int nr_packfile = 0;
+	int err = 0;
+
 	*p_ghc = GHC__CREATED__NOTHING;
 
 	if (!gh_client__oidset_count)
 		return 0;
 
-	return gh_client__get(p_ghc);
+	entry = gh_client__find_long_running_process(CAP_OBJECTS);
+	if (!entry)
+		return -1;
+
+	trace2_region_enter("gh-client", "objects/post", the_repository);
+
+	process = &entry->subprocess.process;
+
+	sigchain_push(SIGPIPE, SIG_IGN);
+
+	err = gh_client__send__objects_post(process);
+	if (!err)
+		err = gh_client__objects__receive_response(
+			process, p_ghc, &nr_loose, &nr_packfile);
+
+	sigchain_pop(SIGPIPE);
+
+	if (err) {
+		subprocess_stop(&gh_server__subprocess_map,
+				(struct subprocess_entry *)entry);
+		FREE_AND_NULL(entry);
+	}
+
+	trace2_data_intmax("gh-client", the_repository,
+			   "objects/post/nr_objects", gh_client__oidset_count);
+	trace2_region_leave("gh-client", "objects/post", the_repository);
+
+	oidset_clear(&gh_client__oidset_queued);
+	gh_client__oidset_count = 0;
+
+	return err;
 }
+
+/*
+ * Get exactly 1 object immediately.
+ * Ignore any queued objects.
+ */
 int gh_client__get_immediate(const struct object_id *oid,
 			     enum gh_client__created *p_ghc)
 {
-	gh_client__includes_immediate = 1;
+	struct gh_server__process *entry;
+	struct child_process *process;
+	int nr_loose = 0;
+	int nr_packfile = 0;
+	int err = 0;
 
 	// TODO consider removing this trace2.  it is useful for interactive
 	// TODO debugging, but may generate way too much noise for a data
 	// TODO event.
 	trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid));
 
-	if (!oidset_insert(&gh_client__oidset_queued, oid))
-		gh_client__oidset_count++;
+	entry = gh_client__find_long_running_process(CAP_OBJECTS);
+	if (!entry)
+		return -1;
+
+	trace2_region_enter("gh-client", "objects/get", the_repository);
 
-	return gh_client__drain_queue(p_ghc);
+	process = &entry->subprocess.process;
+
+	sigchain_push(SIGPIPE, SIG_IGN);
+
+	err = gh_client__send__objects_get(process, oid);
+	if (!err)
+		err = gh_client__objects__receive_response(
+			process, p_ghc, &nr_loose, &nr_packfile);
+
+	sigchain_pop(SIGPIPE);
+
+	if (err) {
+		subprocess_stop(&gh_server__subprocess_map,
+				(struct subprocess_entry *)entry);
+		FREE_AND_NULL(entry);
+	}
+
+	trace2_region_leave("gh-client", "objects/get", the_repository);
+
+	return err;
 }
diff --git a/gvfs-helper.c b/gvfs-helper.c
index 197a40771bcff6..8bdbc8f7dcf250 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -46,7 +46,28 @@
 //
 //     get
 //
-//            Fetch 1 or more objects.  If a cache-server is configured,
+//            Fetch 1 or more objects, one at a time, using "/gvfs/objects"
+//            GET requests.
+//
+//            If a cache-server is configured,
+//            try it first.  Optionally fallback to the main Git server.
+//
+//            The set of objects is given on stdin and is assumed to be
+//            a list of <oid>, one per line.
+//
+//            <get-options>:
+//
+//                 --max-retries=<n>     // defaults to "6"
+//
+//                       Number of retries after transient network errors.
+//                       Set to zero to disable such retries.
+//
+//     post
+//
+//            Fetch 1 or more objects in bulk using a "/gvfs/objects" POST
+//            request.
+//
+//            If a cache-server is configured,
 //            try it first.  Optionally fallback to the main Git server.
 //
 //            The set of objects is given on stdin and is assumed to be
@@ -78,7 +99,8 @@
 //                 --block-size=<n>      // defaults to "4000"
 //
 //                       Request objects from server in batches of at
-//                       most n objects (not bytes).
+//                       most n objects (not bytes) when using POST
+//                       requests.
 //
 //                 --depth=<depth>       // defaults to "1"
 //
@@ -87,17 +109,27 @@
 //                       Number of retries after transient network errors.
 //                       Set to zero to disable such retries.
 //
-//            Interactive verb: get
+//            Interactive verb: objects.get
+//
+//                 Fetch 1 or more objects, one at a time, using
+//                 "/gvfs/objects" GET requests.
+//
+//                 Each object will be created as a loose object in the ODB.
+//
+//            Interactive verb: objects.post
 //
-//                 Fetch 1 or more objects.  If a cache-server is configured,
-//                 try it first.  Optionally fallback to the main Git server.
+//                 Fetch 1 or more objects, in bulk, using one or more
+//                 "/gvfs/objects" POST requests.
+//
+//            For both verbs, if a cache-server is configured, try it first.
+//            Optionally fallback to the main Git server.
 //
 //                 Create 1 or more loose objects and/or packfiles in the
 //                 shared-cache ODB.  (The pathname of the selected ODB is
 //                 reported at the beginning of the response; this should
 //                 match the pathname given on the command line).
 //
-//                 git> get
+//                 git> objects.get | objects.post
 //                 git> <oid>
 //                 git> <oid>
 //                 git> ...
@@ -116,20 +148,6 @@
 //            [2] Documentation/technical/long-running-process-protocol.txt
 //            [3] See GIT_TRACE_PACKET
 //
-// Example:
-//
-// $ git -c core.virtualizeobjects=false -c core.usegvfshelper=false
-//           rev-list --objects --no-walk --missing=print HEAD
-//     | grep "^?"
-//     | sed 's/^?//'
-//     | git gvfs-helper get-missing
-//
-// Note: In this example, we need to turn off "core.virtualizeobjects" and
-//       "core.usegvfshelper" when building the list of objects.  This prevents
-//       rev-list (in oid_object_info_extended() from automatically fetching
-//       them with read-object-hook or "gvfs-helper server" sub-process (and
-//       defeating the whole purpose of this example).
-//
 //////////////////////////////////////////////////////////////////
 
 #define USE_THE_REPOSITORY_VARIABLE
@@ -171,15 +189,21 @@
 static const char * const main_usage[] = {
 	N_("git gvfs-helper [<main_options>] config      [<options>]"),
 	N_("git gvfs-helper [<main_options>] get         [<options>]"),
+	N_("git gvfs-helper [<main_options>] post        [<options>]"),
 	N_("git gvfs-helper [<main_options>] server      [<options>]"),
 	NULL
 };
 
-static const char *const get_usage[] = {
+static const char *const objects_get_usage[] = {
 	N_("git gvfs-helper [<main_options>] get [<options>]"),
 	NULL
 };
 
+static const char *const objects_post_usage[] = {
+	N_("git gvfs-helper [<main_options>] post [<options>]"),
+	NULL
+};
+
 static const char *const server_usage[] = {
 	N_("git gvfs-helper [<main_options>] server [<options>]"),
 	NULL
@@ -188,12 +212,12 @@ static const char *const server_usage[] = {
 /*
  * "commitDepth" field in gvfs protocol
  */
-#define GH__DEFAULT_COMMIT_DEPTH 1
+#define GH__DEFAULT__OBJECTS_POST__COMMIT_DEPTH 1
 
 /*
  * Chunk/block size in number of objects we request in each packfile
  */
-#define GH__DEFAULT_BLOCK_SIZE 4000
+#define GH__DEFAULT__OBJECTS_POST__BLOCK_SIZE 4000
 
 /*
  * Retry attempts (after the initial request) for transient errors and 429s.
@@ -287,6 +311,28 @@ static const char *gh__server_type_label[GH__SERVER_TYPE__NR] = {
 	"(cs)"
 };
 
+enum gh__objects_mode {
+	/*
+	 * Bulk fetch objects.
+	 *
+	 * But also, force the use of HTTP POST regardless of how many
+	 * objects we are requesting.
+	 *
+	 * The GVFS Protocol treats requests for commit objects
+	 * differently in GET and POST requests WRT whether it
+	 * automatically also fetches the referenced trees.
+	 */
+	GH__OBJECTS_MODE__POST,
+
+	/*
+	 * Fetch objects one at a time using HTTP GET.
+	 *
+	 * Force the use of GET (primarily because of the commit
+	 * object treatment).
+	 */
+	GH__OBJECTS_MODE__GET,
+};
+
 struct gh__azure_throttle
 {
 	unsigned long tstu_limit;
@@ -342,7 +388,20 @@ enum gh__progress_state {
  * Parameters to drive an HTTP request (with any necessary retries).
  */
 struct gh__request_params {
-	int b_is_post;            /* POST=1 or GET=0 */
+	/*
+	 * b_is_post indicates if the current HTTP request is a POST=1 or
+	 * a GET=0.  This is a lower level field used to setup CURL and
+	 * the tempfile used to receive the content.
+	 *
+	 * It is related to, but different from the GH__OBJECTS_MODE__
+	 * field that we present to the gvfs-helper client or in the CLI
+	 * (which only concerns the semantics of the /gvfs/objects protocol
+	 * on the set of requested OIDs).
+	 *
+	 * For example, we use an HTTP GET to get the /gvfs/config data
+	 * into a buffer.
+	 */
+	int b_is_post;
 	int b_write_to_file;      /* write to file=1 or strbuf=0 */
 	int b_permit_cache_server_if_defined;
 
@@ -505,8 +564,6 @@ enum gh__retry_mode {
 struct gh__response_status {
 	struct strbuf error_message;
 	struct strbuf content_type;
-	long response_code; /* http response code */
-	CURLcode curl_code;
 	enum gh__error_code ec;
 	enum gh__retry_mode retry;
 	intmax_t bytes_received;
@@ -516,8 +573,6 @@ struct gh__response_status {
 #define GH__RESPONSE_STATUS_INIT { \
 	.error_message = STRBUF_INIT, \
 	.content_type = STRBUF_INIT, \
-	.response_code = 0, \
-	.curl_code = CURLE_OK, \
 	.ec = GH__ERROR_CODE__OK, \
 	.retry = GH__RETRY_MODE__SUCCESS, \
 	.bytes_received = 0, \
@@ -528,8 +583,6 @@ static void gh__response_status__zero(struct gh__response_status *s)
 {
 	strbuf_setlen(&s->error_message, 0);
 	strbuf_setlen(&s->content_type, 0);
-	s->response_code = 0;
-	s->curl_code = CURLE_OK;
 	s->ec = GH__ERROR_CODE__OK;
 	s->retry = GH__RETRY_MODE__SUCCESS;
 	s->bytes_received = 0;
@@ -583,15 +636,14 @@ static void log_e2eid(struct gh__request_params *params,
 }
 
 /*
- * Normalize a few error codes before we try to decide
+ * Normalize a few HTTP response codes before we try to decide
  * how to dispatch on them.
  */
-static void gh__response_status__normalize_odd_codes(
-	struct gh__request_params *params,
-	struct gh__response_status *status)
+static long gh__normalize_odd_codes(struct gh__request_params *params,
+				    long http_response_code)
 {
 	if (params->server_type == GH__SERVER_TYPE__CACHE &&
-	    status->response_code == 400) {
+	    http_response_code == 400) {
 		/*
 		 * The cache-server sends a somewhat bogus 400 instead of
 		 * the normal 401 when AUTH is required.  Fixup the status
@@ -603,16 +655,18 @@ static void gh__response_status__normalize_odd_codes(
 		 * TODO 401 for now.  We should confirm the expected
 		 * TODO error message in the response-body.
 		 */
-		status->response_code = 401;
+		return 401;
 	}
 
-	if (status->response_code == 203) {
+	if (http_response_code == 203) {
 		/*
 		 * A proxy server transformed a 200 from the origin server
 		 * into a 203.  We don't care about the subtle distinction.
 		 */
-		status->response_code = 200;
+		return 200;
 	}
+
+	return http_response_code;
 }
 
 /*
@@ -622,9 +676,10 @@ static void gh__response_status__normalize_odd_codes(
  * https://docs.microsoft.com/en-us/azure/devops/integrate/concepts/rate-limits?view=azure-devops
  */
 static void compute_retry_mode_from_http_response(
-	struct gh__response_status *status)
+	struct gh__response_status *status,
+	long http_response_code)
 {
-	switch (status->response_code) {
+	switch (http_response_code) {
 
 	case 200:
 		status->retry = GH__RETRY_MODE__SUCCESS;
@@ -696,7 +751,7 @@ static void compute_retry_mode_from_http_response(
 
 hard_fail:
 	strbuf_addf(&status->error_message, "(http:%d) Other [hard_fail]",
-		    (int)status->response_code);
+		    (int)http_response_code);
 	status->retry = GH__RETRY_MODE__HARD_FAIL;
 	status->ec = GH__ERROR_CODE__HTTP_OTHER;
 
@@ -722,9 +777,10 @@ static void compute_retry_mode_from_http_response(
  * so I'm not going to fight that.
  */
 static void compute_retry_mode_from_curl_error(
-	struct gh__response_status *status)
+	struct gh__response_status *status,
+	CURLcode curl_code)
 {
-	switch (status->curl_code) {
+	switch (curl_code) {
 	case CURLE_OK:
 		status->retry = GH__RETRY_MODE__SUCCESS;
 		status->ec = GH__ERROR_CODE__OK;
@@ -829,8 +885,7 @@ static void compute_retry_mode_from_curl_error(
 
 hard_fail:
 	strbuf_addf(&status->error_message, "(curl:%d) %s [hard_fail]",
-		    status->curl_code,
-		    curl_easy_strerror(status->curl_code));
+		    curl_code, curl_easy_strerror(curl_code));
 	status->retry = GH__RETRY_MODE__HARD_FAIL;
 	status->ec = GH__ERROR_CODE__CURL_ERROR;
 
@@ -840,8 +895,7 @@ static void compute_retry_mode_from_curl_error(
 
 transient:
 	strbuf_addf(&status->error_message, "(curl:%d) %s [transient]",
-		    status->curl_code,
-		    curl_easy_strerror(status->curl_code));
+		    curl_code, curl_easy_strerror(curl_code));
 	status->retry = GH__RETRY_MODE__TRANSIENT;
 	status->ec = GH__ERROR_CODE__CURL_ERROR;
 
@@ -860,26 +914,31 @@ static void gh__response_status__set_from_slot(
 	struct gh__response_status *status,
 	const struct active_request_slot *slot)
 {
-	status->curl_code = slot->results->curl_result;
+	long http_response_code;
+	CURLcode curl_code;
+
+	curl_code = slot->results->curl_result;
 	gh__curlinfo_strbuf(slot->curl, CURLINFO_CONTENT_TYPE,
 			    &status->content_type);
 	curl_easy_getinfo(slot->curl, CURLINFO_RESPONSE_CODE,
-			  &status->response_code);
+			  &http_response_code);
 
 	strbuf_setlen(&status->error_message, 0);
 
-	gh__response_status__normalize_odd_codes(params, status);
+	http_response_code = gh__normalize_odd_codes(params,
+						     http_response_code);
 
 	/*
 	 * Use normalized response/status codes form curl/http to decide
 	 * how to set the error-code we propagate *AND* to decide if we
 	 * we should retry because of transient network problems.
 	 */
-	if (status->curl_code == CURLE_OK ||
-	    status->curl_code == CURLE_HTTP_RETURNED_ERROR)
-		compute_retry_mode_from_http_response(status);
+	if (curl_code == CURLE_OK ||
+	    curl_code == CURLE_HTTP_RETURNED_ERROR)
+		compute_retry_mode_from_http_response(status,
+						      http_response_code);
 	else
-		compute_retry_mode_from_curl_error(status);
+		compute_retry_mode_from_curl_error(status, curl_code);
 
 	if (status->ec != GH__ERROR_CODE__OK)
 		status->bytes_received = 0;
@@ -1037,8 +1096,8 @@ static void gh__run_one_slot(struct active_request_slot *slot,
 	trace2_region_enter("gvfs-helper", key.buf, NULL);
 
 	if (!start_active_slot(slot)) {
-		status->curl_code = CURLE_FAILED_INIT; /* a bit of a lie */
-		compute_retry_mode_from_curl_error(status);
+		compute_retry_mode_from_curl_error(status,
+						   CURLE_FAILED_INIT);
 	} else {
 		run_active_slot(slot);
 		if (params->b_write_to_file)
@@ -1069,7 +1128,7 @@ static void gh__run_one_slot(struct active_request_slot *slot,
 		stop_progress(&params->progress);
 
 	if (status->ec == GH__ERROR_CODE__OK && params->b_write_to_file) {
-		if (params->b_is_post)
+		if (params->b_is_post && params->object_count > 1)
 			install_packfile(params, status);
 		else
 			install_loose(params, status);
@@ -1225,8 +1284,8 @@ static void lookup_main_url(void)
 	trace2_data_string("gvfs-helper", NULL, "remote/url", gh__global.main_url);
 }
 
-static void do__gvfs_config(struct gh__response_status *status,
-			    struct strbuf *config_data);
+static void do__http_get__gvfs_config(struct gh__response_status *status,
+				      struct strbuf *config_data);
 
 /*
  * Find the URL of the cache-server, if we have one.
@@ -1285,7 +1344,7 @@ static void select_cache_server(void)
 	 * well-known by the main Git server.
 	 */
 
-	do__gvfs_config(&status, &config_data);
+	do__http_get__gvfs_config(&status, &config_data);
 
 	if (status.ec == GH__ERROR_CODE__OK) {
 		/*
@@ -1343,13 +1402,10 @@ static void select_cache_server(void)
  * Read stdin until EOF (or a blank line) and add the desired OIDs
  * to the oidset.
  *
- * Stdin should contain a list of OIDs.  It may have additional
- * decoration that we need to strip out.
- *
- * We expect:
- * <hex_oid> [<path>]   // present OIDs
+ * Stdin should contain a list of OIDs.  Lines may have additional
+ * text following the OID that we ignore.
  */
-static unsigned long read_stdin_from_rev_list(struct oidset *oids)
+static unsigned long read_stdin_for_oids(struct oidset *oids)
 {
 	struct object_id oid;
 	struct strbuf buf_stdin = STRBUF_INIT;
@@ -1372,17 +1428,23 @@ static unsigned long read_stdin_from_rev_list(struct oidset *oids)
 
 /*
  * Build a complete JSON payload for a gvfs/objects POST request
- * containing the first n OIDs in an OIDSET index by the iterator.
+ * containing the first `nr_in_block` OIDs found in the OIDSET
+ * indexed by the given iterator.
  *
  * https://github.com/microsoft/VFSForGit/blob/master/Protocol.md
+ *
+ * Return the number of OIDs we actually put into the payload.
+ * If only 1 OID was found, also return it.
  */
 static unsigned long build_json_payload__gvfs_objects(
 	struct json_writer *jw_req,
 	struct oidset_iter *iter,
-	unsigned long nr_in_block)
+	unsigned long nr_in_block,
+	struct object_id *oid_out)
 {
 	unsigned long k;
 	const struct object_id *oid;
+	const struct object_id *oid_prev = NULL;
 
 	k = 0;
 
@@ -1393,10 +1455,18 @@ static unsigned long build_json_payload__gvfs_objects(
 	while (k < nr_in_block && (oid = oidset_iter_next(iter))) {
 		jw_array_string(jw_req, oid_to_hex(oid));
 		k++;
+		oid_prev = oid;
 	}
 	jw_end(jw_req);
 	jw_end(jw_req);
 
+	if (oid_out) {
+		if (k == 1)
+			oidcpy(oid_out, oid_prev);
+		else
+			oidclr(oid_out, the_repository->hash_algo);
+	}
+
 	return k;
 }
 
@@ -1639,6 +1709,33 @@ static void create_tempfile_for_packfile(
 	strbuf_release(&basename);
 }
 
+/*
+ * Create a pathname to the loose object in the shared-cache ODB
+ * with the given OID.  Try to "mkdir -p" to ensure the parent
+ * directories exist.
+ */
+static int create_loose_pathname_in_odb(struct strbuf *buf_path,
+					const struct object_id *oid)
+{
+	enum scld_error scld;
+	const char *hex;
+
+	hex = oid_to_hex(oid);
+
+	strbuf_setlen(buf_path, 0);
+	strbuf_addbuf(buf_path, &gh__global.buf_odb_path);
+	strbuf_complete(buf_path, '/');
+	strbuf_add(buf_path, hex, 2);
+	strbuf_addch(buf_path, '/');
+	strbuf_addstr(buf_path, hex+2);
+
+	scld = safe_create_leading_directories(buf_path->buf);
+	if (scld != SCLD_OK && scld != SCLD_EXISTS)
+		return -1;
+
+	return 0;
+}
+
 /*
  * Create a tempfile to stream a loose object into.
  *
@@ -1653,19 +1750,10 @@ static void create_tempfile_for_loose(
 {
 	static int nth = 0;
 	struct strbuf buf_path = STRBUF_INIT;
-	const char *hex;
 
 	gh__response_status__zero(status);
 
-	hex = oid_to_hex(&params->loose_oid);
-
-	strbuf_addbuf(&buf_path, &gh__global.buf_odb_path);
-	strbuf_complete(&buf_path, '/');
-	strbuf_add(&buf_path, hex, 2);
-
-	if (!file_exists(buf_path.buf) &&
-	    mkdir(buf_path.buf, 0777) == -1 &&
-		!file_exists(buf_path.buf)) {
+	if (create_loose_pathname_in_odb(&buf_path, &params->loose_oid)) {
 		strbuf_addf(&status->error_message,
 			    "cannot create directory for loose object '%s'",
 			    buf_path.buf);
@@ -1673,9 +1761,6 @@ static void create_tempfile_for_loose(
 		goto cleanup;
 	}
 
-	strbuf_addch(&buf_path, '/');
-	strbuf_addstr(&buf_path, hex+2);
-
 	/* Remember the full path of the final destination. */
 	strbuf_setlen(&params->loose_path, 0);
 	strbuf_addbuf(&params->loose_path, &buf_path);
@@ -1717,7 +1802,7 @@ static void install_packfile(struct gh__request_params *params,
 	if (strcmp(status->content_type.buf,
 		   "application/x-git-packfile")) {
 		strbuf_addf(&status->error_message,
-			    "received unknown content-type '%s'",
+			    "install_packfile: received unknown content-type '%s'",
 			    status->content_type.buf);
 		status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE;
 		goto cleanup;
@@ -1795,6 +1880,19 @@ static void install_loose(struct gh__request_params *params,
 {
 	struct strbuf tmp_path = STRBUF_INIT;
 
+	/*
+	 * We expect a loose object when we do a GET -or- when we
+	 * do a POST with only 1 object.
+	 */
+	if (strcmp(status->content_type.buf,
+		   "application/x-git-loose-object")) {
+		strbuf_addf(&status->error_message,
+			    "install_loose: received unknown content-type '%s'",
+			    status->content_type.buf);
+		status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE;
+		return;
+	}
+
 	gh__response_status__zero(status);
 
 	/*
@@ -1891,10 +1989,8 @@ static size_t parse_resp_hdr(char *buffer, size_t size, size_t nitems,
 		 * The following X- headers are specific to AzureDevOps.
 		 * Other servers have similar sets of values, but I haven't
 		 * compared them in depth.
-		 *
-		 * TODO Remove this.
 		 */
-		trace2_printf("Throttle: %s %s", key.buf, val.buf);
+		// trace2_printf("Throttle: %s %s", key.buf, val.buf);
 
 		if (!strcmp(key.buf, "X-RateLimit-Resource")) {
 			/*
@@ -2194,10 +2290,11 @@ static void do_req(const char *url_base,
 		if (params->tempfile)
 			delete_tempfile(&params->tempfile);
 
-		if (params->b_is_post)
+		if (params->b_is_post && params->object_count > 1)
 			create_tempfile_for_packfile(params, status);
 		else
 			create_tempfile_for_loose(params, status);
+
 		if (!params->tempfile || status->ec != GH__ERROR_CODE__OK)
 			return;
 	} else {
@@ -2353,7 +2450,7 @@ static void do_req__to_main(const char *url_component,
 				  &gh__global.main_creds,
 				  params, status);
 
-	if (status->response_code == 401) {
+	if (status->retry == GH__RETRY_MODE__HTTP_401) {
 		refresh_main_creds();
 
 		do_req__with_robust_retry(gh__global.main_url, url_component,
@@ -2361,7 +2458,7 @@ static void do_req__to_main(const char *url_component,
 					  params, status);
 	}
 
-	if (status->response_code == 200)
+	if (status->retry == GH__RETRY_MODE__SUCCESS)
 		approve_main_creds();
 }
 
@@ -2381,7 +2478,7 @@ static void do_req__to_cache_server(const char *url_component,
 				  &gh__global.cache_creds,
 				  params, status);
 
-	if (status->response_code == 401) {
+	if (status->retry == GH__RETRY_MODE__HTTP_401) {
 		refresh_cache_server_creds();
 
 		do_req__with_robust_retry(gh__global.cache_server_url,
@@ -2390,7 +2487,7 @@ static void do_req__to_cache_server(const char *url_component,
 					  params, status);
 	}
 
-	if (status->response_code == 200)
+	if (status->retry == GH__RETRY_MODE__SUCCESS)
 		approve_cache_server_creds();
 }
 
@@ -2405,7 +2502,7 @@ static void do_req__with_fallback(const char *url_component,
 	    params->b_permit_cache_server_if_defined) {
 		do_req__to_cache_server(url_component, params, status);
 
-		if (status->response_code == 200)
+		if (status->retry == GH__RETRY_MODE__SUCCESS)
 			return;
 
 		if (!gh__cmd_opts.try_fallback)
@@ -2420,7 +2517,7 @@ static void do_req__with_fallback(const char *url_component,
 		 * Falling-back would likely just cause the 3rd (or maybe
 		 * 4th) cred prompt.
 		 */
-		if (status->response_code == 401)
+		if (status->retry == GH__RETRY_MODE__HTTP_401)
 			return;
 	}
 
@@ -2432,8 +2529,8 @@ static void do_req__with_fallback(const char *url_component,
  *
  * Return server's response buffer.  This is probably a raw JSON string.
  */
-static void do__gvfs_config(struct gh__response_status *status,
-			    struct strbuf *config_data)
+static void do__http_get__gvfs_config(struct gh__response_status *status,
+				      struct strbuf *config_data)
 {
 	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
 
@@ -2473,12 +2570,34 @@ static void do__gvfs_config(struct gh__response_status *status,
 	gh__request_params__release(&params);
 }
 
+static void setup_gvfs_objects_progress(struct gh__request_params *params,
+					unsigned long num, unsigned long den)
+{
+	if (!gh__cmd_opts.show_progress)
+		return;
+
+	if (params->b_is_post && params->object_count > 1) {
+		strbuf_addf(&params->progress_base_phase2_msg,
+			    "Requesting packfile %ld/%ld with %ld objects",
+			    num, den, params->object_count);
+		strbuf_addf(&params->progress_base_phase3_msg,
+			    "Receiving packfile %ld/%ld with %ld objects",
+			    num, den, params->object_count);
+	} else {
+		strbuf_addf(&params->progress_base_phase3_msg,
+			    "Receiving %ld/%ld loose object",
+			    num, den);
+	}
+}
+
 /*
  * Call "gvfs/objects/<oid>" REST API to fetch a loose object
  * and write it to the ODB.
  */
-static void do__loose__gvfs_object(struct gh__response_status *status,
-				   const struct object_id *oid)
+static void do__http_get__gvfs_object(struct gh__response_status *status,
+				      const struct object_id *oid,
+				      unsigned long l_num, unsigned long l_den,
+				      struct string_list *result_list)
 {
 	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
 	struct strbuf component_url = STRBUF_INIT;
@@ -2503,44 +2622,52 @@ static void do__loose__gvfs_object(struct gh__response_status *status,
 
 	oidcpy(&params.loose_oid, oid);
 
-	if (gh__cmd_opts.show_progress) {
-		/*
-		 * Likewise, a gvfs/objects/{oid} has a very small reqest
-		 * payload, so I don't see any need to report progress on
-		 * the upload side of the GET.  So just report progress
-		 * on the download side.
-		 */
-		strbuf_addstr(&params.progress_base_phase3_msg,
-			      "Receiving 1 loose object");
-	}
+	setup_gvfs_objects_progress(&params, l_num, l_den);
 
 	do_req__with_fallback(component_url.buf, &params, status);
 
+	if (status->ec == GH__ERROR_CODE__OK) {
+		struct strbuf msg = STRBUF_INIT;
+
+		strbuf_addf(&msg, "loose %s",
+			    oid_to_hex(&params.loose_oid));
+
+		string_list_append(result_list, msg.buf);
+		strbuf_release(&msg);
+	}
+
 	gh__request_params__release(&params);
 	strbuf_release(&component_url);
 }
 
 /*
- * Call "gvfs/objects" POST REST API to fetch a packfile containing
- * the objects in the requested OIDSET.  Returns the filename (not
- * pathname) to the new packfile.
+ * Call "gvfs/objects" POST REST API to fetch a batch of objects
+ * from the OIDSET.  Normally, this results in a packfile containing
+ * `nr_wanted_in_block` objects, and we return the number actually
+ * consumed (along with the filename of the resulting packfile).
+ *
+ * However, if we only have 1 oid (remaining) in the OIDSET, the
+ * server will respond to our POST with a loose object rather than
+ * a packfile with 1 object.
+ *
+ * Append a message to the result_list describing the result.
+ *
+ * Return the number of OIDs consumed from the OIDSET.
  */
-static void do__packfile__gvfs_objects(struct gh__response_status *status,
-				       struct oidset_iter *iter,
-				       unsigned long nr_wanted_in_block,
-				       int j_pack_num, int j_pack_den,
-				       struct strbuf *output_filename,
-				       unsigned long *nr_oid_taken)
+static void do__http_post__gvfs_objects(struct gh__response_status *status,
+					struct oidset_iter *iter,
+					unsigned long nr_wanted_in_block,
+					int j_pack_num, int j_pack_den,
+					struct string_list *result_list,
+					unsigned long *nr_oid_taken)
 {
 	struct json_writer jw_req = JSON_WRITER_INIT;
 	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
 
-	strbuf_setlen(output_filename, 0);
-
 	gh__response_status__zero(status);
 
 	params.object_count = build_json_payload__gvfs_objects(
-		&jw_req, iter, nr_wanted_in_block);
+		&jw_req, iter, nr_wanted_in_block, &params.loose_oid);
 	*nr_oid_taken = params.object_count;
 
 	strbuf_addstr(&params.tr2_label, "POST/objects");
@@ -2560,7 +2687,7 @@ static void do__packfile__gvfs_objects(struct gh__response_status *status,
 					   "Content-Type: application/json");
 	/*
 	 * We really always want a packfile.  But if the payload only
-	 * requests 1 OID, the server will/may send us a single loose
+	 * requests 1 OID, the server will send us a single loose
 	 * objects instead.  (Apparently the server ignores us when we
 	 * only send application/x-git-packfile and does it anyway.)
 	 *
@@ -2572,156 +2699,172 @@ static void do__packfile__gvfs_objects(struct gh__response_status *status,
 	params.headers = curl_slist_append(params.headers,
 					   "Accept: application/x-git-loose-object");
 
-	if (gh__cmd_opts.show_progress) {
-		strbuf_addf(&params.progress_base_phase2_msg,
-			    "Requesting packfile %d/%d with %ld objects",
-			    j_pack_num, j_pack_den,
-			    params.object_count);
-		strbuf_addf(&params.progress_base_phase3_msg,
-			    "Receiving packfile %d/%d with %ld objects",
-			    j_pack_num, j_pack_den,
-			    params.object_count);
-	}
+	setup_gvfs_objects_progress(&params, j_pack_num, j_pack_den);
 
 	do_req__with_fallback("gvfs/objects", &params, status);
-	if (status->ec == GH__ERROR_CODE__OK)
-		strbuf_addbuf(output_filename, &params.final_packfile_filename);
+
+	if (status->ec == GH__ERROR_CODE__OK) {
+		struct strbuf msg = STRBUF_INIT;
+
+		if (params.object_count > 1)
+			strbuf_addf(&msg, "packfile %s",
+				    params.final_packfile_filename.buf);
+		else
+			strbuf_addf(&msg, "loose %s",
+				    oid_to_hex(&params.loose_oid));
+
+		string_list_append(result_list, msg.buf);
+		strbuf_release(&msg);
+	}
 
 	gh__request_params__release(&params);
 	jw_release(&jw_req);
 }
 
 /*
- * Bulk or individually fetch a list of objects in one or more http requests.
- * Create one or more packfiles and/or loose objects.
+ * Drive one or more HTTP GET requests to fetch the objects
+ * in the given OIDSET.  These are received into loose objects.
  *
- * We accumulate results for each request in `result_list` until we get a
+ * Accumulate results for each request in `result_list` until we get a
  * hard error and have to stop.
  */
-static void do_fetch_oidset(struct gh__response_status *status,
-			    struct oidset *oids,
-			    unsigned long nr_oid_total,
-			    struct string_list *result_list)
+static void do__http_get__fetch_oidset(struct gh__response_status *status,
+				       struct oidset *oids,
+				       unsigned long nr_oid_total,
+				       struct string_list *result_list)
 {
 	struct oidset_iter iter;
-	struct strbuf output_filename = STRBUF_INIT;
-	struct strbuf msg = STRBUF_INIT;
 	struct strbuf err404 = STRBUF_INIT;
 	const struct object_id *oid;
 	unsigned long k;
-	unsigned long nr_oid_taken;
 	int had_404 = 0;
-	int j_pack_den = 0;
-	int j_pack_num = 0;
 
 	gh__response_status__zero(status);
 	if (!nr_oid_total)
 		return;
 
-	if (nr_oid_total > 1)
-		j_pack_den = ((nr_oid_total + gh__cmd_opts.block_size - 1)
-			      / gh__cmd_opts.block_size);
-
 	oidset_iter_init(oids, &iter);
 
-	for (k = 0; k < nr_oid_total; k += nr_oid_taken) {
-		if (nr_oid_total - k == 1 || gh__cmd_opts.block_size == 1) {
-			oid = oidset_iter_next(&iter);
-			nr_oid_taken = 1;
+	for (k = 0; k < nr_oid_total; k++) {
+		oid = oidset_iter_next(&iter);
 
-			do__loose__gvfs_object(status, oid);
+		do__http_get__gvfs_object(status, oid, k+1, nr_oid_total,
+					  result_list);
 
+		/*
+		 * If we get a 404 for an individual object, ignore
+		 * it and get the rest.  We'll fixup the 'ec' later.
+		 */
+		if (status->ec == GH__ERROR_CODE__HTTP_404) {
+			if (!err404.len)
+				strbuf_addf(&err404, "%s: from GET %s",
+					    status->error_message.buf,
+					    oid_to_hex(oid));
 			/*
-			 * If we get a 404 for an individual object, ignore
-			 * it and get the rest.  We'll fixup the 'ec' later.
+			 * Mark the fetch as "incomplete", but don't
+			 * stop trying to get other chunks.
 			 */
-			if (status->ec == GH__ERROR_CODE__HTTP_404) {
-				if (!err404.len)
-					strbuf_addf(&err404, "%s: loose object %s",
-						    status->error_message.buf,
-						    oid_to_hex(oid));
-				/*
-				 * Mark the fetch as "incomplete", but don't
-				 * stop trying to get other chunks.
-				 */
-				had_404 = 1;
-				continue;
-			}
+			had_404 = 1;
+			continue;
+		}
 
-			if (status->ec != GH__ERROR_CODE__OK) {
-				/* Stop at the first hard error. */
-				strbuf_addf(&status->error_message, ": loose %s",
-					    oid_to_hex(oid));
-				goto cleanup;
-			}
+		if (status->ec != GH__ERROR_CODE__OK) {
+			/* Stop at the first hard error. */
+			strbuf_addf(&status->error_message, ": from GET %s",
+				    oid_to_hex(oid));
+			goto cleanup;
+		}
+	}
 
-			strbuf_setlen(&msg, 0);
-			strbuf_addf(&msg, "loose %s", oid_to_hex(oid));
-			string_list_append(result_list, msg.buf);
+cleanup:
+	if (had_404 && status->ec == GH__ERROR_CODE__OK) {
+		strbuf_setlen(&status->error_message, 0);
+		strbuf_addbuf(&status->error_message, &err404);
+		status->ec = GH__ERROR_CODE__HTTP_404;
+	}
 
-		} else {
-			strbuf_setlen(&output_filename, 0);
+	strbuf_release(&err404);
+}
 
-			j_pack_num++;
+/*
+ * Drive one or more HTTP POST requests to bulk fetch the objects in
+ * the given OIDSET.  Create one or more packfiles and/or loose objects.
+ *
+ * Accumulate results for each request in `result_list` until we get a
+ * hard error and have to stop.
+ */
+static void do__http_post__fetch_oidset(struct gh__response_status *status,
+					struct oidset *oids,
+					unsigned long nr_oid_total,
+					struct string_list *result_list)
+{
+	struct oidset_iter iter;
+	struct strbuf err404 = STRBUF_INIT;
+	unsigned long k;
+	unsigned long nr_oid_taken;
+	int j_pack_den = 0;
+	int j_pack_num = 0;
+	int had_404 = 0;
+
+	gh__response_status__zero(status);
+	if (!nr_oid_total)
+		return;
 
-			do__packfile__gvfs_objects(status, &iter,
-						   gh__cmd_opts.block_size,
-						   j_pack_num, j_pack_den,
-						   &output_filename,
-						   &nr_oid_taken);
+	oidset_iter_init(oids, &iter);
+
+	j_pack_den = ((nr_oid_total + gh__cmd_opts.block_size - 1)
+		      / gh__cmd_opts.block_size);
+
+	for (k = 0; k < nr_oid_total; k += nr_oid_taken) {
+		j_pack_num++;
 
+		do__http_post__gvfs_objects(status, &iter,
+					    gh__cmd_opts.block_size,
+					    j_pack_num, j_pack_den,
+					    result_list,
+					    &nr_oid_taken);
+
+		/*
+		 * Because the oidset iterator has random
+		 * order, it does no good to say the k-th or
+		 * n-th chunk was incomplete; the client
+		 * cannot use that index for anything.
+		 *
+		 * We get a 404 when at least one object in
+		 * the chunk was not found.
+		 *
+		 * For now, ignore the 404 and go on to the
+		 * next chunk and then fixup the 'ec' later.
+		 */
+		if (status->ec == GH__ERROR_CODE__HTTP_404) {
+			if (!err404.len)
+				strbuf_addf(&err404,
+					    "%s: from POST",
+					    status->error_message.buf);
 			/*
-			 * Because the oidset iterator has random
-			 * order, it does no good to say the k-th or
-			 * n-th chunk was incomplete; the client
-			 * cannot use that index for anything.
-			 *
-			 * We get a 404 when at least one object in
-			 * the chunk was not found.
-			 *
-			 * TODO Consider various retry strategies (such as
-			 * TODO loose or bisect) on the members within this
-			 * TODO chunk to reduce the impact of the miss.
-			 *
-			 * For now, ignore the 404 and go on to the
-			 * next chunk and then fixup the 'ec' later.
+			 * Mark the fetch as "incomplete", but don't
+			 * stop trying to get other chunks.
 			 */
-			if (status->ec == GH__ERROR_CODE__HTTP_404) {
-				if (!err404.len)
-					strbuf_addf(&err404,
-						    "%s: packfile object",
-						    status->error_message.buf);
-				/*
-				 * Mark the fetch as "incomplete", but don't
-				 * stop trying to get other chunks.
-				 */
-				had_404 = 1;
-				continue;
-			}
-
-			if (status->ec != GH__ERROR_CODE__OK) {
-				/* Stop at the first hard error. */
-				strbuf_addstr(&status->error_message,
-					      ": in packfile");
-				goto cleanup;
-			}
+			had_404 = 1;
+			continue;
+		}
 
-			strbuf_setlen(&msg, 0);
-			strbuf_addf(&msg, "packfile %s", output_filename.buf);
-			string_list_append(result_list, msg.buf);
+		if (status->ec != GH__ERROR_CODE__OK) {
+			/* Stop at the first hard error. */
+			strbuf_addstr(&status->error_message,
+				      ": from POST");
+			goto cleanup;
 		}
 	}
 
 cleanup:
-	strbuf_release(&msg);
-	strbuf_release(&err404);
-	strbuf_release(&output_filename);
-
 	if (had_404 && status->ec == GH__ERROR_CODE__OK) {
 		strbuf_setlen(&status->error_message, 0);
-		strbuf_addstr(&status->error_message, "404 Not Found");
+		strbuf_addbuf(&status->error_message, &err404);
 		status->ec = GH__ERROR_CODE__HTTP_404;
 	}
+
+	strbuf_release(&err404);
 }
 
 /*
@@ -2759,7 +2902,7 @@ static enum gh__error_code do_sub_cmd__config(int argc UNUSED, const char **argv
 
 	finish_init(0);
 
-	do__gvfs_config(&status, &config_data);
+	do__http_get__gvfs_config(&status, &config_data);
 	ec = status.ec;
 
 	if (ec == GH__ERROR_CODE__OK)
@@ -2774,12 +2917,61 @@ static enum gh__error_code do_sub_cmd__config(int argc UNUSED, const char **argv
 }
 
 /*
- * Read a list of objects from stdin and fetch them in a single request (or
- * multiple block-size requests).
+ * Read a list of objects from stdin and fetch them as a series of
+ * single object HTTP GET requests.
  */
 static enum gh__error_code do_sub_cmd__get(int argc, const char **argv)
 {
 	static struct option get_options[] = {
+		OPT_INTEGER('r', "max-retries", &gh__cmd_opts.max_retries,
+			    N_("retries for transient network errors")),
+		OPT_END(),
+	};
+
+	struct gh__response_status status = GH__RESPONSE_STATUS_INIT;
+	struct oidset oids = OIDSET_INIT;
+	struct string_list result_list = STRING_LIST_INIT_DUP;
+	enum gh__error_code ec = GH__ERROR_CODE__OK;
+	unsigned long nr_oid_total;
+	size_t k;
+
+	trace2_cmd_mode("get");
+
+	if (argc > 1 && !strcmp(argv[1], "-h"))
+		usage_with_options(objects_get_usage, get_options);
+
+	argc = parse_options(argc, argv, NULL, get_options, objects_get_usage, 0);
+	if (gh__cmd_opts.max_retries < 0)
+		gh__cmd_opts.max_retries = 0;
+
+	finish_init(1);
+
+	nr_oid_total = read_stdin_for_oids(&oids);
+
+	do__http_get__fetch_oidset(&status, &oids, nr_oid_total, &result_list);
+
+	ec = status.ec;
+
+	for (k = 0; k < result_list.nr; k++)
+		printf("%s\n", result_list.items[k].string);
+
+	if (ec != GH__ERROR_CODE__OK)
+		error("get: %s", status.error_message.buf);
+
+	gh__response_status__release(&status);
+	oidset_clear(&oids);
+	string_list_clear(&result_list, 0);
+
+	return ec;
+}
+
+/*
+ * Read a list of objects from stdin and fetch them using one or more
+ * block-size HTTP POST requests.
+ */
+static enum gh__error_code do_sub_cmd__post(int argc, const char **argv)
+{
+	static struct option post_options[] = {
 		OPT_MAGNITUDE('b', "block-size", &gh__cmd_opts.block_size,
 			      N_("number of objects to request at a time")),
 		OPT_INTEGER('d', "depth", &gh__cmd_opts.depth,
@@ -2796,12 +2988,12 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv)
 	unsigned long nr_oid_total;
 	size_t k;
 
-	trace2_cmd_mode("get");
+	trace2_cmd_mode("post");
 
 	if (argc > 1 && !strcmp(argv[1], "-h"))
-		usage_with_options(get_usage, get_options);
+		usage_with_options(objects_post_usage, post_options);
 
-	argc = parse_options(argc, argv, NULL, get_options, get_usage, 0);
+	argc = parse_options(argc, argv, NULL, post_options, objects_post_usage, 0);
 	if (gh__cmd_opts.depth < 1)
 		gh__cmd_opts.depth = 1;
 	if (gh__cmd_opts.max_retries < 0)
@@ -2809,12 +3001,9 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv)
 
 	finish_init(1);
 
-	nr_oid_total = read_stdin_from_rev_list(&oids);
+	nr_oid_total = read_stdin_for_oids(&oids);
 
-	trace2_region_enter("gvfs-helper", "get", NULL);
-	trace2_data_intmax("gvfs-helper", NULL, "get/nr_objects", nr_oid_total);
-	do_fetch_oidset(&status, &oids, nr_oid_total, &result_list);
-	trace2_region_leave("gvfs-helper", "get", NULL);
+	do__http_post__fetch_oidset(&status, &oids, nr_oid_total, &result_list);
 
 	ec = status.ec;
 
@@ -2822,7 +3011,7 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv)
 		printf("%s\n", result_list.items[k].string);
 
 	if (ec != GH__ERROR_CODE__OK)
-		error("get: %s", status.error_message.buf);
+		error("post: %s", status.error_message.buf);
 
 	gh__response_status__release(&status);
 	oidset_clear(&oids);
@@ -2832,12 +3021,14 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv)
 }
 
 /*
- * Handle the 'get' command when in "server mode".  Only call error() and set ec
- * for hard errors where we cannot communicate correctly with the foreground
- * client process.  Pass any actual data errors (such as 404's or 401's from
- * the fetch back to the client process.
+ * Handle the 'objects.get' and 'objects.post' verbs in "server mode".
+ *
+ * Only call error() and set ec for hard errors where we cannot
+ * communicate correctly with the foreground client process.  Pass any
+ * actual data errors (such as 404's or 401's from the fetch) back to
+ * the client process.
  */
-static enum gh__error_code do_server_subprocess_get(void)
+static enum gh__error_code do_server_subprocess__objects(const char *verb_line)
 {
 	struct gh__response_status status = GH__RESPONSE_STATUS_INIT;
 	struct oidset oids = OIDSET_INIT;
@@ -2848,12 +3039,19 @@ static enum gh__error_code do_server_subprocess_get(void)
 	int len;
 	int err;
 	size_t k;
+	enum gh__objects_mode objects_mode;
 	unsigned long nr_oid_total = 0;
 
-	/*
-	 * Inside the "get" command, we expect a list of OIDs
-	 * and a flush.
-	 */
+	if (!strcmp(verb_line, "objects.get"))
+		objects_mode = GH__OBJECTS_MODE__GET;
+	else if (!strcmp(verb_line, "objects.post"))
+		objects_mode = GH__OBJECTS_MODE__POST;
+	else {
+		error("server: unexpected objects-mode verb '%s'", verb_line);
+		ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
+		goto cleanup;
+	}
+
 	while (1) {
 		len = packet_read_line_gently(0, NULL, &line);
 		if (len < 0 || !line)
@@ -2878,10 +3076,10 @@ static enum gh__error_code do_server_subprocess_get(void)
 		goto cleanup;
 	}
 
-	trace2_region_enter("gvfs-helper", "server/get", NULL);
-	trace2_data_intmax("gvfs-helper", NULL, "server/get/nr_objects", nr_oid_total);
-	do_fetch_oidset(&status, &oids, nr_oid_total, &result_list);
-	trace2_region_leave("gvfs-helper", "server/get", NULL);
+	if (objects_mode == GH__OBJECTS_MODE__GET)
+		do__http_get__fetch_oidset(&status, &oids, nr_oid_total, &result_list);
+	else
+		do__http_post__fetch_oidset(&status, &oids, nr_oid_total, &result_list);
 
 	/*
 	 * Write pathname of the ODB where we wrote all of the objects
@@ -2937,7 +3135,7 @@ static enum gh__error_code do_server_subprocess_get(void)
 	return ec;
 }
 
-typedef enum gh__error_code (fn_subprocess_cmd)(void);
+typedef enum gh__error_code (fn_subprocess_cmd)(const char *verb_line);
 
 struct subprocess_capability {
 	const char *name;
@@ -2946,7 +3144,7 @@ struct subprocess_capability {
 };
 
 static struct subprocess_capability caps[] = {
-	{ "get", 0, do_server_subprocess_get },
+	{ "objects", 0, do_server_subprocess__objects },
 	{ NULL, 0, NULL },
 };
 
@@ -3077,8 +3275,9 @@ static enum gh__error_code do_sub_cmd__server(int argc, const char **argv)
 		}
 
 		for (k = 0; caps[k].name; k++) {
-			if (caps[k].client_has && !strcmp(line, caps[k].name)) {
-				ec = (caps[k].pfn)();
+			if (caps[k].client_has &&
+			    starts_with(line, caps[k].name)) {
+				ec = (caps[k].pfn)(line);
 				if (ec != GH__ERROR_CODE__OK)
 					goto cleanup;
 				goto top_of_loop;
@@ -3099,6 +3298,9 @@ static enum gh__error_code do_sub_cmd(int argc, const char **argv)
 	if (!strcmp(argv[0], "get"))
 		return do_sub_cmd__get(argc, argv);
 
+	if (!strcmp(argv[0], "post"))
+		return do_sub_cmd__post(argc, argv);
+
 	if (!strcmp(argv[0], "config"))
 		return do_sub_cmd__config(argc, argv);
 
@@ -3149,8 +3351,8 @@ int cmd_main(int argc, const char **argv)
 	setup_git_directory_gently(NULL);
 
 	/* Set any non-zero initial values in gh__cmd_opts. */
-	gh__cmd_opts.depth = GH__DEFAULT_COMMIT_DEPTH;
-	gh__cmd_opts.block_size = GH__DEFAULT_BLOCK_SIZE;
+	gh__cmd_opts.depth = GH__DEFAULT__OBJECTS_POST__COMMIT_DEPTH;
+	gh__cmd_opts.block_size = GH__DEFAULT__OBJECTS_POST__BLOCK_SIZE;
 	gh__cmd_opts.max_retries = GH__DEFAULT_MAX_RETRIES;
 	gh__cmd_opts.max_transient_backoff_sec =
 		GH__DEFAULT_MAX_TRANSIENT_BACKOFF_SEC;

From 40ae3a9ab4773c125d87bd58b5477fe064eeec10 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Thu, 24 Oct 2019 08:11:25 -0400
Subject: [PATCH 100/207] gvfs-helper: dramatically reduce progress noise

During development, it was very helpful to see the gvfs-helper do its
work to request a pack-file or download a loose object. When these
messages appear during normal use, however, they make the terminal
output very noisy.

Remove all progress indicators when downloading loose objects. We know
that these can number in the thousands in certain kinds of history
calls, and they would litter the terminal output with noise. This also
happens during 'git fetch' or 'git pull', when the tip commits of the
new refs are checked.

Remove the "Requesting packfile with %ld objects" message, as this
operation is very fast. We quickly follow up with the more valuable
"Receiving packfile %ld%ld with %ld objects". When a large "git
checkout" causes many pack-file downloads, it is good to know that Git
is asking for data from the server.
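
For example, with the default block size of 4000, the remaining message
looks something like this (the packfile numbers here are illustrative):

    Receiving packfile 3/7 with 4000 objects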

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 gvfs-helper.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index 8bdbc8f7dcf250..99b7f5e93acfaf 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -2577,17 +2577,11 @@ static void setup_gvfs_objects_progress(struct gh__request_params *params,
 		return;
 
 	if (params->b_is_post && params->object_count > 1) {
-		strbuf_addf(&params->progress_base_phase2_msg,
-			    "Requesting packfile %ld/%ld with %ld objects",
-			    num, den, params->object_count);
 		strbuf_addf(&params->progress_base_phase3_msg,
 			    "Receiving packfile %ld/%ld with %ld objects",
 			    num, den, params->object_count);
-	} else {
-		strbuf_addf(&params->progress_base_phase3_msg,
-			    "Receiving %ld/%ld loose object",
-			    num, den);
 	}
+	/* If requesting only one object, then do not show progress */
 }
 
 /*

From 78c7efc0d74a99ef0354f6d8083e01821ef5248e Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Mon, 4 Nov 2019 14:47:57 -0500
Subject: [PATCH 101/207] gvfs-helper-client.h: define struct object_id

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 gvfs-helper-client.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/gvfs-helper-client.h b/gvfs-helper-client.h
index a5a951ff5b5bfe..c1e38fad75f841 100644
--- a/gvfs-helper-client.h
+++ b/gvfs-helper-client.h
@@ -3,6 +3,7 @@
 
 struct repository;
 struct commit;
+struct object_id;
 
 enum gh_client__created {
 	/*

From 3fcc433b475d5975390311196d9c499f49d76faa Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Mon, 11 Nov 2019 14:56:02 -0500
Subject: [PATCH 102/207] gvfs-helper: handle pack-file after single POST
 request

If our POST request includes a commit ID, then the remote will
send a pack-file containing the commit and all trees reachable from
its root tree. With the current implementation, this causes a
failure since we call install_loose() when asking for one object.

Modify the condition to call install_packfile() when the response
content type indicates a packfile.

Also, create a tempfile for the pack-file download or else we will
have problems!
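
For illustration, using the interactive verb syntax documented in
gvfs-helper.c, a request for a single commit object might look like
this (the OID is a placeholder and "0000" denotes the flush packet):

    git> objects.post
    git> <commit-oid>
    git> 0000

When that OID names a commit, the response comes back with
"Content-Type: application/x-git-packfile" rather than a single loose
object, so the client has to dispatch on the content type instead of
the number of requested objects.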

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 gvfs-helper.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index 99b7f5e93acfaf..19223e8422c9b0 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -1128,7 +1128,9 @@ static void gh__run_one_slot(struct active_request_slot *slot,
 		stop_progress(&params->progress);
 
 	if (status->ec == GH__ERROR_CODE__OK && params->b_write_to_file) {
-		if (params->b_is_post && params->object_count > 1)
+		if (params->b_is_post &&
+		    !strcmp(status->content_type.buf,
+			    "application/x-git-packfile"))
 			install_packfile(params, status);
 		else
 			install_loose(params, status);
@@ -2290,10 +2292,10 @@ static void do_req(const char *url_base,
 		if (params->tempfile)
 			delete_tempfile(&params->tempfile);
 
-		if (params->b_is_post && params->object_count > 1)
+		if (params->b_is_post)
 			create_tempfile_for_packfile(params, status);
-		else
-			create_tempfile_for_loose(params, status);
+
+		create_tempfile_for_loose(params, status);
 
 		if (!params->tempfile || status->ec != GH__ERROR_CODE__OK)
 			return;

From eb9422fa9c551440ad6984c8e64b4ceb7249ce50 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Fri, 25 Oct 2019 17:10:25 -0400
Subject: [PATCH 103/207] test-gvfs-protocol, t5799: tests for gvfs-helper

Create t/helper/test-gvfs-protocol.c and t/t5799-gvfs-helper.sh
to test gvfs-helper.

Create t/helper/test-gvfs-protocol.c as a stand-alone web server that
speaks the GVFS Protocol [1] and serves loose objects and packfiles
to clients.  It borrows heavily from the code in daemon.c.
It includes a "mayhem" mode to cause various network and HTTP errors
to test the retry/recovery ability of gvfs-helper.

Create t/t5799-gvfs-helper.sh to test gvfs-helper.
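
For manual debugging, the server can also be started by hand. A
hypothetical invocation (option names come from the usage text in the
new helper; the port number and mayhem token are only examples):

    test-gvfs-protocol --verbose --reuseaddr \
        --listen=127.0.0.1 --port=5799 \
        --pid-file=pid.file --mayhem=http_401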

[1] https://github.com/microsoft/VFSForGit/blob/master/Protocol.md

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 Makefile                            |    1 +
 bin-wrappers/.gitignore             |    1 +
 contrib/buildsystems/CMakeLists.txt |   14 +
 gvfs-helper.c                       |   21 +-
 t/helper/meson.build                |    7 +
 t/helper/test-gvfs-protocol.c       | 1764 +++++++++++++++++++++++++++
 t/meson.build                       |    1 +
 t/t5799-gvfs-helper.sh              |  974 +++++++++++++++
 8 files changed, 2776 insertions(+), 7 deletions(-)
 create mode 100644 t/helper/test-gvfs-protocol.c
 create mode 100755 t/t5799-gvfs-helper.sh

diff --git a/Makefile b/Makefile
index 9269f9fd8c2a1d..748a9f4eb9dd2a 100644
--- a/Makefile
+++ b/Makefile
@@ -1688,6 +1688,7 @@ endif
 	BASIC_CFLAGS += $(CURL_CFLAGS)
 
 	PROGRAM_OBJS += gvfs-helper.o
+	TEST_PROGRAMS_NEED_X += test-gvfs-protocol
 
 	REMOTE_CURL_PRIMARY = git-remote-http$X
 	REMOTE_CURL_ALIASES = git-remote-https$X git-remote-ftp$X git-remote-ftps$X
diff --git a/bin-wrappers/.gitignore b/bin-wrappers/.gitignore
index 1c6c90458b7586..e481f5a45a7a0d 100644
--- a/bin-wrappers/.gitignore
+++ b/bin-wrappers/.gitignore
@@ -6,4 +6,5 @@
 /git-upload-pack
 /scalar
 /test-fake-ssh
+/test-gvfs-protocol
 /test-tool
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index d1635d13270fd7..6c562b6d633adf 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -1119,6 +1119,20 @@ set(wrapper_scripts
 set(wrapper_test_scripts
 	test-fake-ssh test-tool)
 
+if(CURL_FOUND)
+	list(APPEND wrapper_test_scripts test-gvfs-protocol)
+
+	add_executable(test-gvfs-protocol ${CMAKE_SOURCE_DIR}/t/helper/test-gvfs-protocol.c)
+	target_link_libraries(test-gvfs-protocol common-main)
+
+	if(MSVC)
+		set_target_properties(test-gvfs-protocol
+					PROPERTIES RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_BINARY_DIR}/t/helper)
+		set_target_properties(test-gvfs-protocol
+					PROPERTIES RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_BINARY_DIR}/t/helper)
+	endif()
+endif()
+
 
 foreach(script ${wrapper_scripts})
 	file(STRINGS ${CMAKE_SOURCE_DIR}/bin-wrappers/wrap-for-bin.sh content NEWLINE_CONSUME)
diff --git a/gvfs-helper.c b/gvfs-helper.c
index 19223e8422c9b0..00518cd170c071 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -1885,6 +1885,8 @@ static void install_loose(struct gh__request_params *params,
 	/*
 	 * We expect a loose object when we do a GET -or- when we
 	 * do a POST with only 1 object.
+	 *
+	 * Note that this content type is singular, not plural.
 	 */
 	if (strcmp(status->content_type.buf,
 		   "application/x-git-loose-object")) {
@@ -2119,7 +2121,9 @@ static void do_throttle_spin(struct gh__request_params *params,
 	strbuf_addstr(&region, gh__server_type_label[params->server_type]);
 	trace2_region_enter("gvfs-helper", region.buf, NULL);
 
-	progress = start_progress(progress_msg, duration);
+	if (gh__cmd_opts.show_progress)
+		progress = start_progress(progress_msg, duration);
+
 	while (now < end) {
 		display_progress(progress, (now - begin));
 
@@ -2127,6 +2131,7 @@ static void do_throttle_spin(struct gh__request_params *params,
 
 		now = time(NULL);
 	}
+
 	display_progress(progress, duration);
 	stop_progress(&progress);
 
@@ -2682,13 +2687,15 @@ static void do__http_post__gvfs_objects(struct gh__response_status *status,
 	params.headers = curl_slist_append(params.headers,
 					   "Content-Type: application/json");
 	/*
-	 * We really always want a packfile.  But if the payload only
-	 * requests 1 OID, the server will send us a single loose
-	 * objects instead.  (Apparently the server ignores us when we
-	 * only send application/x-git-packfile and does it anyway.)
+	 * If our POST contains more than one object, we want the
+	 * server to send us a packfile.  We DO NOT want the non-standard
+	 * concatenated loose object format, so we DO NOT send:
+	 *     "Accept: application/x-git-loose-objects" (plural)
 	 *
-	 * So to make it clear to my future self, go ahead and add
-	 * an accept header for loose objects and own it.
+	 * However, if the payload only requests 1 OID, the server
+	 * will send us a single loose object instead of a packfile,
+	 * so we ACK that and send:
+	 *     "Accept: application/x-git-loose-object" (singular)
 	 */
 	params.headers = curl_slist_append(params.headers,
 					   "Accept: application/x-git-packfile");
diff --git a/t/helper/meson.build b/t/helper/meson.build
index 3373d3fe5fa160..9a1eb4278d3fac 100644
--- a/t/helper/meson.build
+++ b/t/helper/meson.build
@@ -86,6 +86,13 @@ test_tool = executable('test-tool',
 bin_wrappers += test_tool
 test_dependencies += test_tool
 
+test_gvfs_protocol = executable('test-gvfs-protocol',
+  sources: 'test-gvfs-protocol.c',
+  dependencies: [libgit, common_main],
+)
+bin_wrappers += test_gvfs_protocol
+test_dependencies += test_gvfs_protocol
+
 test_fake_ssh = executable('test-fake-ssh',
   sources: 'test-fake-ssh.c',
   dependencies: [libgit, common_main],
diff --git a/t/helper/test-gvfs-protocol.c b/t/helper/test-gvfs-protocol.c
new file mode 100644
index 00000000000000..ba3faf35cdc2e2
--- /dev/null
+++ b/t/helper/test-gvfs-protocol.c
@@ -0,0 +1,1764 @@
+#define USE_THE_REPOSITORY_VARIABLE
+#include "git-compat-util.h"
+#include "environment.h"
+#include "hex.h"
+#include "alloc.h"
+#include "setup.h"
+#include "protocol.h"
+#include "config.h"
+#include "pkt-line.h"
+#include "run-command.h"
+#include "strbuf.h"
+#include "string-list.h"
+#include "trace2.h"
+#include "object.h"
+#include "object-store.h"
+#include "replace-object.h"
+#include "repository.h"
+#include "version.h"
+#include "dir.h"
+#include "json-writer.h"
+#include "oidset.h"
+#include "date.h"
+#include "wrapper.h"
+#include "git-zlib.h"
+
+#define TR2_CAT "test-gvfs-protocol"
+
+static const char *pid_file;
+static int verbose;
+static int reuseaddr;
+static struct string_list mayhem_list = STRING_LIST_INIT_DUP;
+static int mayhem_child = 0;
+static struct json_writer jw_config = JSON_WRITER_INIT;
+
+/*
+ * We look for one of these "servertypes" in the uri-base
+ * so we can behave differently when we need to.
+ */
+#define MY_SERVER_TYPE__ORIGIN "servertype/origin"
+#define MY_SERVER_TYPE__CACHE  "servertype/cache"
+
+static const char test_gvfs_protocol_usage[] =
+"gvfs-protocol [--verbose]\n"
+"           [--timeout=<n>] [--init-timeout=<n>] [--max-connections=<n>]\n"
+"           [--reuseaddr] [--pid-file=<file>]\n"
+"           [--listen=<host_or_ipaddr>]* [--port=<n>]\n"
+"           [--mayhem=<token>]*\n"
+;
+
+/* Timeout, and initial timeout */
+static unsigned int timeout;
+static unsigned int init_timeout;
+
+static void logreport(const char *label, const char *err, va_list params)
+{
+	struct strbuf msg = STRBUF_INIT;
+
+	strbuf_addf(&msg, "[%"PRIuMAX"] %s: ", (uintmax_t)getpid(), label);
+	strbuf_vaddf(&msg, err, params);
+	strbuf_addch(&msg, '\n');
+
+	fwrite(msg.buf, sizeof(char), msg.len, stderr);
+	fflush(stderr);
+
+	strbuf_release(&msg);
+}
+
+__attribute__((format (printf, 1, 2)))
+static void logerror(const char *err, ...)
+{
+	va_list params;
+	va_start(params, err);
+	logreport("error", err, params);
+	va_end(params);
+}
+
+__attribute__((format (printf, 1, 2)))
+static void loginfo(const char *err, ...)
+{
+	va_list params;
+	if (!verbose)
+		return;
+	va_start(params, err);
+	logreport("info", err, params);
+	va_end(params);
+}
+
+__attribute__((format (printf, 1, 2)))
+static void logmayhem(const char *err, ...)
+{
+	va_list params;
+	if (!verbose)
+		return;
+	va_start(params, err);
+	logreport("mayhem", err, params);
+	va_end(params);
+}
+
+static void set_keep_alive(int sockfd)
+{
+	int ka = 1;
+
+	if (setsockopt(sockfd, SOL_SOCKET, SO_KEEPALIVE, &ka, sizeof(ka)) < 0) {
+		if (errno != ENOTSOCK)
+			logerror("unable to set SO_KEEPALIVE on socket: %s",
+				strerror(errno));
+	}
+}
+
+//////////////////////////////////////////////////////////////////
+// The code in this section is used by "worker" instances to service
+// a single connection from a client.  The worker talks to the client
+// on 0 and 1.
+//////////////////////////////////////////////////////////////////
+
+enum worker_result {
+	/*
+	 * Operation successful.
+	 * Caller *might* keep the socket open and allow keep-alive.
+	 */
+	WR_OK       = 0,
+	/*
+	 * Various errors while processing the request and/or the response.
+	 * Close the socket and clean up.
+	 * Exit child-process with non-zero status.
+	 */
+	WR_IO_ERROR = 1<<0,
+	/*
+	 * Close the socket and clean up.  Does not imply an error.
+	 */
+	WR_HANGUP   = 1<<1,
+	/*
+	 * The result of a function was influenced by the mayhem settings.
+	 * Does not imply that we need to exit or close the socket.
+	 * Just advice to callers in the worker stack.
+	 */
+	WR_MAYHEM   = 1<<2,
+
+	WR_STOP_THE_MUSIC = (WR_IO_ERROR | WR_HANGUP),
+};
+
+/*
+ * Fields from a parsed HTTP request.
+ */
+struct req {
+	struct strbuf start_line;
+	struct string_list start_line_fields;
+
+	struct strbuf uri_base;
+	struct strbuf gvfs_api;
+	struct strbuf slash_args;
+	struct strbuf quest_args;
+
+	struct string_list header_list;
+};
+
+#define REQ__INIT { \
+	.start_line = STRBUF_INIT, \
+	.start_line_fields = STRING_LIST_INIT_DUP, \
+	.uri_base = STRBUF_INIT, \
+	.gvfs_api = STRBUF_INIT, \
+	.slash_args = STRBUF_INIT, \
+	.quest_args = STRBUF_INIT, \
+	.header_list = STRING_LIST_INIT_DUP, \
+	}
+
+static void req__release(struct req *req)
+{
+	strbuf_release(&req->start_line);
+	string_list_clear(&req->start_line_fields, 0);
+
+	strbuf_release(&req->uri_base);
+	strbuf_release(&req->gvfs_api);
+	strbuf_release(&req->slash_args);
+	strbuf_release(&req->quest_args);
+
+	string_list_clear(&req->header_list, 0);
+}
+
+/*
+ * Generate a somewhat bogus UUID/GUID that is good enough for
+ * a test suite, but without requiring platform-specific UUID
+ * or GUID libraries.
+ */
+static void gen_fake_uuid(struct strbuf *uuid)
+{
+	static unsigned int seq = 0;
+	static struct timeval tv;
+	static struct tm tm;
+	static time_t secs;
+
+	strbuf_setlen(uuid, 0);
+
+	if (!seq) {
+		gettimeofday(&tv, NULL);
+		secs = tv.tv_sec;
+		gmtime_r(&secs, &tm);
+	}
+
+	/*
+	 * Build a string that looks like:
+	 *
+	 *     "ffffffff-eeee-dddd-cccc-bbbbbbbbbbbb"
+	 *
+	 * Note that the first digit in the "dddd" section gives the
+	 * UUID type.  We set it to zero so that we won't collide with
+	 * any "real" UUIDs.
+	 */
+	strbuf_addf(uuid, "%04d%02d%02d-%02d%02d-00%02d-%04x-%08x%04x",
+		    tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+		    tm.tm_hour, tm.tm_min,
+		    tm.tm_sec,
+		    (unsigned)(getpid() & 0xffff),
+		    (unsigned)(tv.tv_usec & 0xffffffff),
+		    (seq++ & 0xffff));
+}
+
+/*
+ * Send a chunk of data to the client using HTTP chunked
+ * transfer coding rules.
+ *
+ * https://tools.ietf.org/html/rfc7230#section-4.1
+ */
+static enum worker_result send_chunk(int fd, const unsigned char *buf,
+					 size_t len_buf)
+{
+	char chunk_size[100];
+	int chunk_size_len = xsnprintf(chunk_size, sizeof(chunk_size),
+				       "%x\r\n", (unsigned int)len_buf);
+
+	if ((write_in_full(fd, chunk_size, chunk_size_len) < 0) ||
+	    (write_in_full(fd, buf, len_buf) < 0) ||
+	    (write_in_full(fd, "\r\n", 2) < 0)) {
+		logerror("unable to send chunk");
+		return WR_IO_ERROR;
+	}
+
+	return WR_OK;
+}
+
+static enum worker_result send_final_chunk(int fd)
+{
+	if (write_in_full(fd, "0\r\n\r\n", 5) < 0) {
+		logerror("unable to send final chunk");
+		return WR_IO_ERROR;
+	}
+
+	return WR_OK;
+}
+
+static enum worker_result send_http_error(
+	int fd,
+	int http_code, const char *http_code_name,
+	int retry_after_seconds, enum worker_result wr_in)
+{
+	struct strbuf response_header = STRBUF_INIT;
+	struct strbuf response_content = STRBUF_INIT;
+	struct strbuf uuid = STRBUF_INIT;
+	enum worker_result wr;
+
+	strbuf_addf(&response_content, "Error: %d %s\r\n",
+		    http_code, http_code_name);
+	if (retry_after_seconds > 0)
+		strbuf_addf(&response_content, "Retry-After: %d\r\n",
+			    retry_after_seconds);
+
+	strbuf_addf  (&response_header, "HTTP/1.1 %d %s\r\n", http_code, http_code_name);
+	strbuf_addstr(&response_header, "Cache-Control: private\r\n");
+	strbuf_addstr(&response_header,	"Content-Type: text/plain\r\n");
+	strbuf_addf  (&response_header,	"Content-Length: %d\r\n", (int)response_content.len);
+	if (retry_after_seconds > 0)
+		strbuf_addf  (&response_header, "Retry-After: %d\r\n", retry_after_seconds);
+	strbuf_addf(  &response_header,	"Server: test-gvfs-protocol/%s\r\n", git_version_string);
+	strbuf_addf(  &response_header, "Date: %s\r\n", show_date(time(NULL), 0, DATE_MODE(RFC2822)));
+	gen_fake_uuid(&uuid);
+	strbuf_addf(  &response_header, "X-VSS-E2EID: %s\r\n", uuid.buf);
+	strbuf_addstr(&response_header, "\r\n");
+
+	if (write_in_full(fd, response_header.buf, response_header.len) < 0) {
+		logerror("unable to write response header");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	if (write_in_full(fd, response_content.buf, response_content.len) < 0) {
+		logerror("unable to write response content body");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	wr = wr_in;
+
+done:
+	strbuf_release(&uuid);
+	strbuf_release(&response_header);
+	strbuf_release(&response_content);
+
+	return wr;
+}
+
+/*
+ * Return 1 if we send an AUTH error to the client.
+ */
+static int mayhem_try_auth(struct req *req, enum worker_result *wr_out)
+{
+	*wr_out = WR_OK;
+
+	if (string_list_has_string(&mayhem_list, "http_401")) {
+		struct string_list_item *item;
+		int has_auth = 0;
+		for_each_string_list_item(item, &req->header_list) {
+			if (starts_with(item->string, "Authorization: Basic")) {
+				has_auth = 1;
+				break;
+			}
+		}
+		if (!has_auth) {
+			if (strstr(req->uri_base.buf, MY_SERVER_TYPE__ORIGIN)) {
+				logmayhem("http_401 (origin)");
+				*wr_out = send_http_error(1, 401, "Unauthorized", -1,
+							  WR_MAYHEM);
+				return 1;
+			}
+
+			else if (strstr(req->uri_base.buf, MY_SERVER_TYPE__CACHE)) {
+				/*
+				 * Cache servers use a non-standard 400 rather than a 401.
+				 */
+				logmayhem("http_400 (cacheserver)");
+				*wr_out = send_http_error(1, 400, "Bad Request", -1,
+							  WR_MAYHEM);
+				return 1;
+			}
+
+			else {
+				/*
+				 * Non-qualified server type.
+				 */
+				logmayhem("http_401");
+				*wr_out = send_http_error(1, 401, "Unauthorized", -1,
+							  WR_MAYHEM);
+				return 1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Build fake gvfs/config data using our IP address and port.
+ *
+ * The Min/Max data is just random noise copied from the example
+ * in the documentation.
+ */
+static void build_gvfs_config_json(struct json_writer *jw,
+				   struct string_list *listen_addr,
+				   int listen_port)
+{
+	jw_object_begin(jw, 0);
+	{
+		jw_object_inline_begin_array(jw, "AllowedGvfsClientVersions");
+		{
+			jw_array_inline_begin_object(jw);
+			{
+				jw_object_inline_begin_object(jw, "Max");
+				{
+					jw_object_intmax(jw, "Major", 0);
+					jw_object_intmax(jw, "Minor", 4);
+					jw_object_intmax(jw, "Build", 0);
+					jw_object_intmax(jw, "Revision", 0);
+				}
+				jw_end(jw);
+
+				jw_object_inline_begin_object(jw, "Min");
+				{
+					jw_object_intmax(jw, "Major", 0);
+					jw_object_intmax(jw, "Minor", 2);
+					jw_object_intmax(jw, "Build", 0);
+					jw_object_intmax(jw, "Revision", 0);
+				}
+				jw_end(jw);
+			}
+			jw_end(jw);
+
+			jw_array_inline_begin_object(jw);
+			{
+				jw_object_null(jw, "Max");
+				jw_object_inline_begin_object(jw, "Min");
+				{
+					jw_object_intmax(jw, "Major", 0);
+					jw_object_intmax(jw, "Minor", 5);
+					jw_object_intmax(jw, "Build", 16326);
+					jw_object_intmax(jw, "Revision", 1);
+				}
+				jw_end(jw);
+			}
+			jw_end(jw);
+		}
+		jw_end(jw);
+
+		jw_object_inline_begin_array(jw, "CacheServers");
+		{
+			struct string_list_item *item;
+			int k = 0;
+
+			for_each_string_list_item(item, listen_addr) {
+				jw_array_inline_begin_object(jw);
+				{
+					struct strbuf buf = STRBUF_INIT;
+
+					strbuf_addf(&buf, "http://%s:%d/%s",
+						    item->string,
+						    listen_port,
+						    MY_SERVER_TYPE__CACHE);
+					jw_object_string(jw, "Url", buf.buf);
+					strbuf_release(&buf);
+
+					strbuf_addf(&buf, "cs%02d", k);
+					jw_object_string(jw, "Name", buf.buf);
+					strbuf_release(&buf);
+
+					jw_object_bool(jw, "GlobalDefault",
+						       k++ == 0);
+				}
+				jw_end(jw);
+			}
+		}
+		jw_end(jw);
+	}
+	jw_end(jw);
+}
+/*
+ * Per the GVFS Protocol, this should only be recognized on the origin
+ * server (not the cache-server).  It returns a JSON payload of config
+ * data.
+ */
+static enum worker_result do__gvfs_config__get(struct req *req)
+{
+	struct strbuf response_header = STRBUF_INIT;
+	struct strbuf uuid = STRBUF_INIT;
+	enum worker_result wr;
+
+	if (strstr(req->uri_base.buf, MY_SERVER_TYPE__CACHE))
+		return send_http_error(1, 404, "Not Found", -1, WR_OK);
+
+	strbuf_addstr(&response_header, "HTTP/1.1 200 OK\r\n");
+	strbuf_addstr(&response_header, "Cache-Control: private\r\n");
+	strbuf_addstr(&response_header,	"Content-Type: text/plain\r\n");
+	strbuf_addf(  &response_header,	"Content-Length: %d\r\n", (int)jw_config.json.len);
+	strbuf_addf(  &response_header,	"Server: test-gvfs-protocol/%s\r\n", git_version_string);
+	strbuf_addf(  &response_header, "Date: %s\r\n", show_date(time(NULL), 0, DATE_MODE(RFC2822)));
+	gen_fake_uuid(&uuid);
+	strbuf_addf(  &response_header, "X-VSS-E2EID: %s\r\n", uuid.buf);
+	strbuf_addstr(&response_header, "\r\n");
+
+	if (write_in_full(1, response_header.buf, response_header.len) < 0) {
+		logerror("unable to write response header");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	if (write_in_full(1, jw_config.json.buf, jw_config.json.len) < 0) {
+		logerror("unable to write response content body");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	wr = WR_OK;
+
+done:
+	strbuf_release(&uuid);
+	strbuf_release(&response_header);
+
+	return wr;
+}
+
+/*
+ * Send the contents of the in-memory inflated object in "compressed
+ * loose object" format over the socket.
+ *
+ * Because we are using keep-alive and are streaming the compressed
+ * chunks as we produce them, we set the transport-encoding and not
+ * the content-length.
+ *
+ * Our usage here is different from `git-http-backend` because it will
+ * only send a loose object if it exists as a loose object in the ODB
+ * (see the "/objects/[0-9a-f]{2}/[0-9a-f]{38}$" regex_t declarations)
+ * by doing a file-copy.
+ *
+ * We want to send an arbitrary object without regard for how it is
+ * currently stored in the local ODB.
+ *
+ * Also, we don't want any of the type-specific branching found in the
+ * sha1-file.c functions (such as special casing BLOBs).  Specifically,
+ * we DO NOT want any of the content conversion filters.  We just want
+ * to send the raw content as is.
+ *
+ * So, we steal freely from sha1-file.c routines:
+ *     write_object_file_prepare()
+ *     write_loose_object()
+ */
+static enum worker_result send_loose_object(const struct object_info *oi,
+					    const struct object_id *oid,
+					    int fd)
+{
+#define MAX_HEADER_LEN 32
+	struct strbuf response_header = STRBUF_INIT;
+	struct strbuf uuid = STRBUF_INIT;
+	char object_header[MAX_HEADER_LEN];
+	unsigned char compressed[4096];
+	git_zstream stream;
+	struct object_id oid_check;
+	git_hash_ctx c;
+	int object_header_len;
+	int ret;
+
+	/*
+	 * We are blending several somewhat independent concepts here:
+	 *
+	 * [1] reconstructing the object format in parts:
+	 *
+	 *           <object>          ::= <object_header> <object_content>
+	 *
+	 *      [1a] <object_header>   ::= <object_type> SP <object_length> NUL
+	 *      [1b] <object_content>  ::= <array_of_bytes>
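+	 *
+	 *      For example, a 1234-byte blob yields the header "blob 1234\0"
+	 *      followed by the 1234 raw content bytes.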
+	 *
+	 * [2] verify that we constructed [1] correctly by computing
+	 *     the hash of [1] and verify it matches the passed OID.
+	 *
+	 * [3] compress [1] because that is how loose objects are
+	 *     stored on disk.  We compress it as we stream it to
+	 *     the client.
+	 *
+	 * [4] send HTTP response headers to the client.
+	 *
+	 * [5] stream each chunk from [3] to the client using the HTTP
+	 *     chunked transfer coding.
+	 *
+	 * [6] for extra credit, we repeat the hash construction in [2]
+	 *     as we stream it.
+	 */
+
+	/* [4] */
+	strbuf_addstr(&response_header, "HTTP/1.1 200 OK\r\n");
+	strbuf_addstr(&response_header, "Cache-Control: private\r\n");
+	strbuf_addstr(&response_header,	"Content-Type: application/x-git-loose-object\r\n");
+	strbuf_addf(  &response_header,	"Server: test-gvfs-protocol/%s\r\n", git_version_string);
+	strbuf_addstr(&response_header, "Transfer-Encoding: chunked\r\n");
+	strbuf_addf(  &response_header, "Date: %s\r\n", show_date(time(NULL), 0, DATE_MODE(RFC2822)));
+	gen_fake_uuid(&uuid);
+	strbuf_addf(  &response_header, "X-VSS-E2EID: %s\r\n", uuid.buf);
+	strbuf_addstr(&response_header, "\r\n");
+
+	if (write_in_full(fd, response_header.buf, response_header.len) < 0) {
+		logerror("unable to write response header");
+		return WR_IO_ERROR;
+	}
+
+	strbuf_release(&uuid);
+	strbuf_release(&response_header);
+
+	if (string_list_has_string(&mayhem_list, "close_write")) {
+		logmayhem("close_write");
+		return WR_MAYHEM | WR_HANGUP;
+	}
+
+	/* [1a] */
+	object_header_len = 1 + xsnprintf(object_header, MAX_HEADER_LEN,
+					  "%s %"PRIuMAX,
+					  type_name(*oi->typep),
+					  (uintmax_t)*oi->sizep);
+
+	/* [2] */
+	memset(&oid_check, 0, sizeof(oid_check));
+	the_hash_algo->init_fn(&c);
+	the_hash_algo->update_fn(&c, object_header, object_header_len);
+	the_hash_algo->update_fn(&c, *oi->contentp, *oi->sizep);
+	the_hash_algo->final_fn(oid_check.hash, &c);
+	if (!oideq(oid, &oid_check))
+		BUG("send_loose_object[2]: invalid construction '%s' '%s'",
+		    oid_to_hex(oid), oid_to_hex(&oid_check));
+
+	/* [3, 6] */
+	git_deflate_init(&stream, zlib_compression_level);
+	stream.next_out = compressed;
+	stream.avail_out = sizeof(compressed);
+	the_hash_algo->init_fn(&c);
+
+	/* [3, 1a, 6] */
+	stream.next_in = (unsigned char *)object_header;
+	stream.avail_in = object_header_len;
+	while (git_deflate(&stream, 0) == Z_OK)
+		; /* nothing */
+	the_hash_algo->update_fn(&c, object_header, object_header_len);
+
+	/* [3, 1b, 5, 6] */
+	stream.next_in = *oi->contentp;
+	stream.avail_in = *oi->sizep;
+	do {
+		enum worker_result wr;
+		unsigned char *in0 = stream.next_in;
+		ret = git_deflate(&stream, Z_FINISH);
+		the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
+
+		/* [5] */
+		wr = send_chunk(fd, compressed, stream.next_out - compressed);
+		if (wr & WR_STOP_THE_MUSIC) {
+			return wr;
+		}
+
+		stream.next_out = compressed;
+		stream.avail_out = sizeof(compressed);
+
+	} while (ret == Z_OK);
+
+	/* [3] */
+	if (ret != Z_STREAM_END)
+		BUG("unable to deflate object '%s' (%d)", oid_to_hex(oid), ret);
+	ret = git_deflate_end_gently(&stream);
+	if (ret != Z_OK)
+		BUG("deflateEnd on object '%s' failed (%d)", oid_to_hex(oid), ret);
+
+	/* [6] */
+	the_hash_algo->final_fn(oid_check.hash, &c);
+	if (!oideq(oid, &oid_check))
+		BUG("send_loose_object[6]: invalid construction '%s' '%s'",
+		    oid_to_hex(oid), oid_to_hex(&oid_check));
+
+	/* [5] */
+	return send_final_chunk(fd);
+}
+
+/*
+ * Per the GVFS Protocol, a single OID should be in the slash-arg:
+ *
+ *     GET /gvfs/objects/fc3fff3a25559d2d30d1719c4f4a6d9fe7e05170 HTTP/1.1
+ *
+ * Look it up in our repo (loose or packed) and send it to gvfs-helper
+ * over the socket as a loose object.
+ */
+static enum worker_result do__gvfs_objects__get(struct req *req)
+{
+	struct object_id oid;
+	void *content;
+	unsigned long size;
+	enum object_type type;
+	struct object_info oi = OBJECT_INFO_INIT;
+	unsigned flags = 0;
+
+	/*
+	 * Since `test-gvfs-protocol` is mocking a real GVFS server (cache or
+	 * main), we don't want a request for a missing object to cause the
+	 * implicit dynamic fetch mechanism to try to fault-it-in (and cause
+	 * our call to oid_object_info_extended() to launch another instance
+	 * of `gvfs-helper` to magically fetch it (which would connect to a
+	 * new instance of `test-gvfs-protocol`)).
+	 *
+	 * Rather, we want a missing object to fail, so we can respond with
+	 * a 404, for example.
+	 */
+	flags |= OBJECT_INFO_FOR_PREFETCH;
+	flags |= OBJECT_INFO_LOOKUP_REPLACE;
+
+	if (!req->slash_args.len ||
+	    get_oid_hex(req->slash_args.buf, &oid)) {
+		logerror("invalid OID in GET gvfs/objects: '%s'",
+			 req->slash_args.buf);
+		return WR_IO_ERROR;
+	}
+
+	trace2_printf("%s: GET %s", TR2_CAT, oid_to_hex(&oid));
+
+	oi.typep = &type;
+	oi.sizep = &size;
+	oi.contentp = &content;
+
+	if (oid_object_info_extended(the_repository, &oid, &oi, flags)) {
+		logerror("Could not find OID: '%s'", oid_to_hex(&oid));
+		return send_http_error(1, 404, "Not Found", -1, WR_OK);
+	}
+
+	if (string_list_has_string(&mayhem_list, "http_404")) {
+		logmayhem("http_404");
+		return send_http_error(1, 404, "Not Found", -1, WR_MAYHEM);
+	}
+
+	trace2_printf("%s: OBJECT type=%d len=%lu '%.40s'", TR2_CAT,
+		      type, size, (const char *)content);
+
+	return send_loose_object(&oi, &oid, 1);
+}
+
+static enum worker_result read_json_post_body(
+	struct req *req,
+	struct oidset *oids)
+{
+	struct object_id oid;
+	struct string_list_item *item;
+	char *post_body = NULL;
+	const char *v;
+	ssize_t len_expected = 0;
+	ssize_t len_received;
+	const char *pkey;
+	const char *plbracket;
+	const char *pstart;
+	const char *pend;
+
+	for_each_string_list_item(item, &req->header_list) {
+		if (skip_prefix(item->string, "Content-Length: ", &v)) {
+			char *p;
+			len_expected = strtol(v, &p, 10);
+			break;
+		}
+	}
+	if (!len_expected) {
+		logerror("no content length in POST");
+		return WR_IO_ERROR;
+	}
+	post_body = xcalloc(1, len_expected + 1);
+	if (!post_body) {
+		logerror("could not malloc buffer for POST body");
+		return WR_IO_ERROR;
+	}
+	len_received = read_in_full(0, post_body, len_expected);
+	if (len_received != len_expected) {
+		logerror("short read in POST (expected %d, received %d)",
+			 (int)len_expected, (int)len_received);
+		return WR_IO_ERROR;
+	}
+
+	/*
+	 * A very primitive JSON parser for a very fixed and well-known
+	 * message format.  Please don't judge me.
+	 *
+	 * We expect:
+	 *
+	 *     ..."objectIds":["<oid_1>","<oid_2>",..."<oid_n>"]...
+	 *
+	 * We expect compact (non-pretty) JSON, but do tolerate extra
+	 * whitespace between tokens.
+	 */
+	pkey = strstr(post_body, "\"objectIds\"");
+	if (!pkey)
+		goto could_not_parse_json;
+	plbracket = strchr(pkey, '[');
+	if (!plbracket)
+		goto could_not_parse_json;
+	pstart = plbracket + 1;
+
+	while (1) {
+		/* Eat leading whitespace before opening DQUOTE */
+		while (*pstart && isspace(*pstart))
+			pstart++;
+		if (!*pstart)
+			goto could_not_parse_json;
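+		/* skip the (assumed) opening DQUOTE */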
+		pstart++;
+
+		/* find trailing DQUOTE */
+		pend = strchr(pstart, '"');
+		if (!pend)
+			goto could_not_parse_json;
+
+		if (get_oid_hex(pstart, &oid))
+			goto could_not_parse_json;
+		oidset_insert(oids, &oid);
+		trace2_printf("%s: POST %s", TR2_CAT, oid_to_hex(&oid));
+
+		/* Eat trailing whitespace after trailing DQUOTE */
+		pend++;
+		while (*pend && isspace(*pend))
+			pend++;
+		if (!*pend)
+			goto could_not_parse_json;
+
+		/* End of list or is there another OID */
+		if (*pend == ']')
+			break;
+		if (*pend != ',')
+			goto could_not_parse_json;
+
+		pstart = pend + 1;
+	}
+
+	/*
+	 * We do not care about the "commitDepth" parameter.
+	 */
+
+	free(post_body);
+	return WR_OK;
+
+could_not_parse_json:
+	logerror("could not parse JSON in POST body");
+	free(post_body);
+	return WR_IO_ERROR;
+}
+
+/*
+ * Since this is a test helper, I'm going to be lazy and
+ * run pack-objects as a background child using pipe_command
+ * and get the resulting packfile into a buffer.  And then
+ * the caller can pump it to the client over the socket.
+ *
+ * This avoids the need to set up a custom loop (like in
+ * upload-pack) to drive it and/or the use of a bunch of
+ * tempfiles.
+ *
+ * My assumption here is that we're not testing with GBs
+ * of data....
+ *
+ * Note: The GVFS Protocol POST verb behaves like GET for
+ * Note: non-commit objects (in that it just returns the
+ * Note: requested object), but for commit objects POST
+ * Note: *also* returns all trees referenced by the commit.
+ * Note:
+ * Note: Since the goal of this test is to confirm that
+ * Note: gvfs-helper can request and receive a packfile
+ * Note: *at all*, I'm not going to blur the issue and
+ * Note: support the extra semantics for commit objects.
+ */
+static enum worker_result get_packfile_from_oids(
+	struct oidset *oids,
+	struct strbuf *buf_packfile)
+{
+	struct child_process pack_objects = CHILD_PROCESS_INIT;
+	struct strbuf buf_child_stdin = STRBUF_INIT;
+	struct strbuf buf_child_stderr = STRBUF_INIT;
+	struct oidset_iter iter;
+	struct object_id *oid;
+	enum worker_result wr;
+	int result;
+
+	strvec_push(&pack_objects.args, "git");
+	strvec_push(&pack_objects.args, "pack-objects");
+	strvec_push(&pack_objects.args, "-q");
+	strvec_push(&pack_objects.args, "--revs");
+	strvec_push(&pack_objects.args, "--delta-base-offset");
+	strvec_push(&pack_objects.args, "--window=0");
+	strvec_push(&pack_objects.args, "--depth=4095");
+	strvec_push(&pack_objects.args, "--compression=1");
+	strvec_push(&pack_objects.args, "--stdout");
+
+	pack_objects.in = -1;
+	pack_objects.out = -1;
+	pack_objects.err = -1;
+
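+	/*
+	 * Feed the requested OIDs, one per line, to "pack-objects --revs
+	 * --stdout"; a blank line terminates the list on stdin.
+	 */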
+	oidset_iter_init(oids, &iter);
+	while ((oid = oidset_iter_next(&iter)))
+		strbuf_addf(&buf_child_stdin, "%s\n", oid_to_hex(oid));
+	strbuf_addstr(&buf_child_stdin, "\n");
+
+	result = pipe_command(&pack_objects,
+			      buf_child_stdin.buf, buf_child_stdin.len,
+			      buf_packfile, 0,
+			      &buf_child_stderr, 0);
+	if (result) {
+		logerror("pack-objects failed: %s", buf_child_stderr.buf);
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	trace2_printf("%s: pack-objects returned %"PRIuMAX" bytes", TR2_CAT,
+		      (uintmax_t)buf_packfile->len);
+	wr = WR_OK;
+
+done:
+	strbuf_release(&buf_child_stdin);
+	strbuf_release(&buf_child_stderr);
+
+	return wr;
+}
+
+static enum worker_result send_packfile_from_buffer(const struct strbuf *packfile)
+{
+	struct strbuf response_header = STRBUF_INIT;
+	struct strbuf uuid = STRBUF_INIT;
+	enum worker_result wr;
+
+	strbuf_addstr(&response_header, "HTTP/1.1 200 OK\r\n");
+	strbuf_addstr(&response_header, "Cache-Control: private\r\n");
+	strbuf_addstr(&response_header,	"Content-Type: application/x-git-packfile\r\n");
+	strbuf_addf(  &response_header,	"Content-Length: %d\r\n", (int)packfile->len);
+	strbuf_addf(  &response_header,	"Server: test-gvfs-protocol/%s\r\n", git_version_string);
+	strbuf_addf(  &response_header, "Date: %s\r\n", show_date(time(NULL), 0, DATE_MODE(RFC2822)));
+	gen_fake_uuid(&uuid);
+	strbuf_addf(  &response_header, "X-VSS-E2EID: %s\r\n", uuid.buf);
+	strbuf_addstr(&response_header, "\r\n");
+
+	if (write_in_full(1, response_header.buf, response_header.len) < 0) {
+		logerror("unable to write response header");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	if (write_in_full(1, packfile->buf, packfile->len) < 0) {
+		logerror("unable to write response content body");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	wr = WR_OK;
+
+done:
+	strbuf_release(&uuid);
+	strbuf_release(&response_header);
+
+	return wr;
+}
+
+static enum worker_result do__gvfs_objects__post(struct req *req)
+{
+	struct oidset oids = OIDSET_INIT;
+	struct strbuf packfile = STRBUF_INIT;
+	enum worker_result wr;
+
+	wr = read_json_post_body(req, &oids);
+	if (wr & WR_STOP_THE_MUSIC)
+		goto done;
+
+	wr = get_packfile_from_oids(&oids, &packfile);
+	if (wr & WR_STOP_THE_MUSIC)
+		goto done;
+
+	wr = send_packfile_from_buffer(&packfile);
+
+done:
+	oidset_clear(&oids);
+	strbuf_release(&packfile);
+
+	return wr;
+}
+
+/*
+ * Read the HTTP request up to the start of the optional message-body.
+ * We do this byte-by-byte because we have keep-alive turned on and
+ * cannot rely on an EOF.
+ *
+ * https://tools.ietf.org/html/rfc7230
+ * https://github.com/microsoft/VFSForGit/blob/master/Protocol.md
+ *
+ * We cannot call die() here because our caller needs to properly
+ * respond to the client and/or close the socket before this
+ * child exits so that the client doesn't get a connection reset
+ * by peer error.
+ */
+static enum worker_result req__read(struct req *req, int fd)
+{
+	struct strbuf h = STRBUF_INIT;
+	int nr_start_line_fields;
+	const char *uri_target;
+	const char *http_version;
+	const char *gvfs;
+
+	/*
+	 * Read line 0 of the request and split it into component parts:
+	 *
+	 *    <method> SP <uri-target> SP <HTTP-version> CRLF
+	 *
+	 */
+	if (strbuf_getwholeline_fd(&req->start_line, fd, '\n') == EOF)
+		return WR_OK | WR_HANGUP;
+
+	if (string_list_has_string(&mayhem_list, "close_read")) {
+		logmayhem("close_read");
+		return WR_MAYHEM | WR_HANGUP;
+	}
+
+	if (string_list_has_string(&mayhem_list, "close_read_1") &&
+	    mayhem_child == 0) {
+		/*
+		 * Mayhem: fail the first request, but let retries succeed.
+		 */
+		logmayhem("close_read_1");
+		return WR_MAYHEM | WR_HANGUP;
+	}
+
+	strbuf_trim_trailing_newline(&req->start_line);
+
+	nr_start_line_fields = string_list_split(&req->start_line_fields,
+						 req->start_line.buf,
+						 ' ', -1);
+	if (nr_start_line_fields != 3) {
+		logerror("could not parse request start-line '%s'",
+			 req->start_line.buf);
+		return WR_IO_ERROR;
+	}
+	uri_target = req->start_line_fields.items[1].string;
+	http_version = req->start_line_fields.items[2].string;
+
+	if (strcmp(http_version, "HTTP/1.1")) {
+		logerror("unsupported version '%s' (expecting HTTP/1.1)",
+			 http_version);
+		return WR_IO_ERROR;
+	}
+
+	/*
+	 * Next, extract the GVFS terms from the <uri-target>.  The
+	 * GVFS Protocol defines a REST API containing several GVFS
+	 * commands of the form:
+	 *
+	 *     [<uri-base>]/gvfs/<token>[/<args>]
+	 *     [<uri-base>]/gvfs/<token>[?<args>]
+	 *
+	 * For example:
+	 *     "GET /gvfs/config HTTP/1.1"
+	 *     "GET /gvfs/objects/aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd HTTP/1.1"
+	 *     "GET /gvfs/prefetch?lastPackTimestamp=123456789 HTTP/1.1"
+	 *
+	 *     "GET /<uri-base>/gvfs/config HTTP/1.1"
+	 *     "GET /<uri-base>/gvfs/objects/aaaaaaaaaabbbbbbbbbbccccccccccdddddddddd HTTP/1.1"
+	 *     "GET /<uri-base>/gvfs/prefetch?lastPackTimestamp=123456789 HTTP/1.1"
+	 *
+	 *     "POST /<uri-base>/gvfs/objects HTTP/1.1"
+	 *
+	 * For other testing later, we also allow non-gvfs URLs of the form:
+	 *     "GET /<uri>[?<args>] HTTP/1.1"
+	 *
+	 * We do not attempt to split the query-params within the args.
+	 * The caller can do that if they need to.
+	 */
+	gvfs = strstr(uri_target, "/gvfs/");
+	if (gvfs) {
+		strbuf_add(&req->uri_base, uri_target, (gvfs - uri_target));
+		strbuf_trim_trailing_dir_sep(&req->uri_base);
+
+		gvfs += 6; /* skip "/gvfs/" */
+		strbuf_add(&req->gvfs_api, "gvfs/", 5);
+		while (*gvfs && *gvfs != '/' && *gvfs != '?')
+			strbuf_addch(&req->gvfs_api, *gvfs++);
+
+		/*
+		 * Capture the rest of the uri-target as either
+		 * slash-args or query-args.
+		 */
+		if (*gvfs == '/')
+			strbuf_addstr(&req->slash_args, gvfs + 1);
+		else if (*gvfs == '?')
+			strbuf_addstr(&req->quest_args, gvfs + 1);
+	} else {
+
+		const char *quest = strchr(uri_target, '?');
+
+		if (quest) {
+			strbuf_add(&req->uri_base, uri_target, (quest - uri_target));
+			strbuf_trim_trailing_dir_sep(&req->uri_base);
+			strbuf_addstr(&req->quest_args, quest + 1);
+		} else {
+			strbuf_addstr(&req->uri_base, uri_target);
+			strbuf_trim_trailing_dir_sep(&req->uri_base);
+		}
+	}
+
+	/*
+	 * Read the set of HTTP headers into a string-list.
+	 */
+	while (1) {
+		if (strbuf_getwholeline_fd(&h, fd, '\n') == EOF)
+			goto done;
+		strbuf_trim_trailing_newline(&h);
+
+		if (!h.len)
+			goto done; /* a blank line ends the header */
+
+		string_list_append(&req->header_list, h.buf);
+	}
+
+	/*
+	 * TODO If the set of HTTP headers includes things like:
+	 * TODO
+	 * TODO     Connection: Upgrade, HTTP2-Settings
+	 * TODO     Upgrade: h2c
+	 * TODO     HTTP2-Settings: AAMAAABkAARAAAAAAAIAAAAA
+	 * TODO
+	 * TODO then the client is asking to optionally switch to HTTP/2.
+	 * TODO
+	 * TODO We currently DO NOT support that (and I don't currently
+	 * TODO see a need to do so (because we don't need the multiplexed
+	 * TODO streams feature (because the client never asks for n packfiles
+	 * TODO at the same time))).
+	 * TODO
+	 * TODO https://en.wikipedia.org/wiki/HTTP/1.1_Upgrade_header
+	 */
+
+	/*
+	 * We do not attempt to read the <message-body>, if it exists.
+	 * We let our caller read/chunk it in as appropriate.
+	 */
+done:
+	if (trace2_is_enabled()) {
+		struct string_list_item *item;
+		trace2_printf("%s: %s", TR2_CAT, req->start_line.buf);
+		for_each_string_list_item(item, &req->start_line_fields)
+			trace2_printf("%s: Field: %s", TR2_CAT, item->string);
+		trace2_printf("%s: [uri-base '%s'][gvfs '%s'][args '%s' '%s']",
+			      TR2_CAT,
+			      req->uri_base.buf,
+			      req->gvfs_api.buf,
+			      req->slash_args.buf,
+			      req->quest_args.buf);
+		for_each_string_list_item(item, &req->header_list)
+			trace2_printf("%s: Hdrs: %s", TR2_CAT, item->string);
+	}
+
+	strbuf_release(&h);
+
+	return WR_OK;
+}
+
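+/*
+ * Route the request to the appropriate handler, after giving any
+ * requested mayhem events a chance to inject an error response.
+ */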
+static enum worker_result dispatch(struct req *req)
+{
+	const char *method;
+	enum worker_result wr;
+
+	if (string_list_has_string(&mayhem_list, "close_no_write")) {
+		logmayhem("close_no_write");
+		return WR_MAYHEM | WR_HANGUP;
+	}
+	if (string_list_has_string(&mayhem_list, "http_503")) {
+		logmayhem("http_503");
+		return send_http_error(1, 503, "Service Unavailable", 2,
+				       WR_MAYHEM | WR_HANGUP);
+	}
+	if (string_list_has_string(&mayhem_list, "http_429")) {
+		logmayhem("http_429");
+		return send_http_error(1, 429, "Too Many Requests", 2,
+				       WR_MAYHEM | WR_HANGUP);
+	}
+	if (string_list_has_string(&mayhem_list, "http_429_1") &&
+	    mayhem_child == 0) {
+		logmayhem("http_429_1");
+		return send_http_error(1, 429, "Too Many Requests", 2,
+				       WR_MAYHEM | WR_HANGUP);
+	}
+	if (mayhem_try_auth(req, &wr))
+		return wr;
+
+	method = req->start_line_fields.items[0].string;
+
+	if (!strcmp(req->gvfs_api.buf, "gvfs/objects")) {
+
+		if (!strcmp(method, "GET"))
+			return do__gvfs_objects__get(req);
+		if (!strcmp(method, "POST"))
+			return do__gvfs_objects__post(req);
+	}
+
+	if (!strcmp(req->gvfs_api.buf, "gvfs/config")) {
+
+		if (!strcmp(method, "GET"))
+			return do__gvfs_config__get(req);
+	}
+
+	return send_http_error(1, 501, "Not Implemented", -1,
+			       WR_OK | WR_HANGUP);
+}
+
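+/*
+ * Handle a single connected client.  The socket is on stdin/stdout
+ * (fds 0 and 1), so we loop reading and dispatching requests until
+ * an I/O error, hangup, or mayhem event tells us to stop.
+ */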
+static enum worker_result worker(void)
+{
+	struct req req = REQ__INIT;
+	char *client_addr = getenv("REMOTE_ADDR");
+	char *client_port = getenv("REMOTE_PORT");
+	enum worker_result wr = WR_OK;
+
+	if (client_addr)
+		loginfo("Connection from %s:%s", client_addr, client_port);
+
+	set_keep_alive(0);
+
+	while (1) {
+		req__release(&req);
+
+		alarm(init_timeout ? init_timeout : timeout);
+		wr = req__read(&req, 0);
+		alarm(0);
+
+		if (wr & WR_STOP_THE_MUSIC)
+			break;
+
+		wr = dispatch(&req);
+		if (wr & WR_STOP_THE_MUSIC)
+			break;
+	}
+
+	close(0);
+	close(1);
+
+	req__release(&req);
+	return !!(wr & WR_IO_ERROR);
+}
+
+//////////////////////////////////////////////////////////////////
+// This section contains the listener and child-process management
+// code used by the primary instance to accept incoming connections
+// and dispatch them to async child process "worker" instances.
+//////////////////////////////////////////////////////////////////
+
+static int addrcmp(const struct sockaddr_storage *s1,
+		   const struct sockaddr_storage *s2)
+{
+	const struct sockaddr *sa1 = (const struct sockaddr*) s1;
+	const struct sockaddr *sa2 = (const struct sockaddr*) s2;
+
+	if (sa1->sa_family != sa2->sa_family)
+		return sa1->sa_family - sa2->sa_family;
+	if (sa1->sa_family == AF_INET)
+		return memcmp(&((struct sockaddr_in *)s1)->sin_addr,
+		    &((struct sockaddr_in *)s2)->sin_addr,
+		    sizeof(struct in_addr));
+#ifndef NO_IPV6
+	if (sa1->sa_family == AF_INET6)
+		return memcmp(&((struct sockaddr_in6 *)s1)->sin6_addr,
+		    &((struct sockaddr_in6 *)s2)->sin6_addr,
+		    sizeof(struct in6_addr));
+#endif
+	return 0;
+}
+
+static int max_connections = 32;
+
+static unsigned int live_children;
+
+static struct child {
+	struct child *next;
+	struct child_process cld;
+	struct sockaddr_storage address;
+} *firstborn;
+
+static void add_child(struct child_process *cld, struct sockaddr *addr, socklen_t addrlen)
+{
+	struct child *newborn, **cradle;
+
+	newborn = xcalloc(1, sizeof(*newborn));
+	live_children++;
+	memcpy(&newborn->cld, cld, sizeof(*cld));
+	memcpy(&newborn->address, addr, addrlen);
+	for (cradle = &firstborn; *cradle; cradle = &(*cradle)->next)
+		if (!addrcmp(&(*cradle)->address, &newborn->address))
+			break;
+	newborn->next = *cradle;
+	*cradle = newborn;
+}
+
+/*
+ * This gets called if the number of connections grows
+ * past "max_connections".
+ *
+ * We kill the newest connection from a duplicate IP.
+ */
+static void kill_some_child(void)
+{
+	const struct child *blanket, *next;
+
+	if (!(blanket = firstborn))
+		return;
+
+	for (; (next = blanket->next); blanket = next)
+		if (!addrcmp(&blanket->address, &next->address)) {
+			kill(blanket->cld.pid, SIGTERM);
+			break;
+		}
+}
+
+static void check_dead_children(void)
+{
+	int status;
+	pid_t pid;
+
+	struct child **cradle, *blanket;
+	for (cradle = &firstborn; (blanket = *cradle);)
+		if ((pid = waitpid(blanket->cld.pid, &status, WNOHANG)) > 1) {
+			const char *dead = "";
+			if (status)
+				dead = " (with error)";
+			loginfo("[%"PRIuMAX"] Disconnected%s", (uintmax_t)pid, dead);
+
+			/* remove the child */
+			*cradle = blanket->next;
+			live_children--;
+			child_process_clear(&blanket->cld);
+			free(blanket);
+		} else
+			cradle = &blanket->next;
+}
+
+static struct strvec cld_argv = STRVEC_INIT;
+static void handle(int incoming, struct sockaddr *addr, socklen_t addrlen)
+{
+	struct child_process cld = CHILD_PROCESS_INIT;
+
+	if (max_connections >= 0 && live_children >= (unsigned int)max_connections) {
+		kill_some_child();
+		sleep(1);  /* give it some time to die */
+		check_dead_children();
+		if (live_children >= (unsigned int)max_connections) {
+			close(incoming);
+			logerror("Too many children, dropping connection");
+			return;
+		}
+	}
+
+	if (addr->sa_family == AF_INET) {
+		char buf[128] = "";
+		struct sockaddr_in *sin_addr = (void *) addr;
+		inet_ntop(addr->sa_family, &sin_addr->sin_addr, buf, sizeof(buf));
+		strvec_pushf(&cld.env, "REMOTE_ADDR=%s", buf);
+		strvec_pushf(&cld.env, "REMOTE_PORT=%d",
+				 ntohs(sin_addr->sin_port));
+#ifndef NO_IPV6
+	} else if (addr->sa_family == AF_INET6) {
+		char buf[128] = "";
+		struct sockaddr_in6 *sin6_addr = (void *) addr;
+		inet_ntop(AF_INET6, &sin6_addr->sin6_addr, buf, sizeof(buf));
+		strvec_pushf(&cld.env, "REMOTE_ADDR=[%s]", buf);
+		strvec_pushf(&cld.env, "REMOTE_PORT=%d",
+				 ntohs(sin6_addr->sin6_port));
+#endif
+	}
+
+	if (mayhem_list.nr) {
+		strvec_pushf(&cld.env, "MAYHEM_CHILD=%d",
+				 mayhem_child++);
+	}
+
+	strvec_pushv(&cld.args, cld_argv.v);
+	cld.in = incoming;
+	cld.out = dup(incoming);
+
+	if (cld.out < 0)
+		logerror("could not dup() `incoming`");
+	else if (start_command(&cld))
+		logerror("unable to fork");
+	else
+		add_child(&cld, addr, addrlen);
+}
+
+static void child_handler(int signo UNUSED)
+{
+	/*
+	 * Otherwise empty handler because system calls will get interrupted
+	 * upon signal receipt.
+	 * SysV needs the handler to be rearmed.
+	 */
+	signal(SIGCHLD, child_handler);
+}
+
+static int set_reuse_addr(int sockfd)
+{
+	int on = 1;
+
+	if (!reuseaddr)
+		return 0;
+	return setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR,
+			  &on, sizeof(on));
+}
+
+struct socketlist {
+	int *list;
+	size_t nr;
+	size_t alloc;
+};
+
+static const char *ip2str(int family, struct sockaddr *sin, socklen_t len)
+{
+#ifdef NO_IPV6
+	static char ip[INET_ADDRSTRLEN];
+#else
+	static char ip[INET6_ADDRSTRLEN];
+#endif
+
+	switch (family) {
+#ifndef NO_IPV6
+	case AF_INET6:
+		inet_ntop(family, &((struct sockaddr_in6*)sin)->sin6_addr, ip, len);
+		break;
+#endif
+	case AF_INET:
+		inet_ntop(family, &((struct sockaddr_in*)sin)->sin_addr, ip, len);
+		break;
+	default:
+		xsnprintf(ip, sizeof(ip), "<unknown>");
+	}
+	return ip;
+}
+
+#ifndef NO_IPV6
+
+static int setup_named_sock(const char *listen_addr, int listen_port, struct socketlist *socklist)
+{
+	int socknum = 0;
+	char pbuf[NI_MAXSERV];
+	struct addrinfo hints, *ai0, *ai;
+	int gai;
+	long flags;
+
+	xsnprintf(pbuf, sizeof(pbuf), "%d", listen_port);
+	memset(&hints, 0, sizeof(hints));
+	hints.ai_family = AF_UNSPEC;
+	hints.ai_socktype = SOCK_STREAM;
+	hints.ai_protocol = IPPROTO_TCP;
+	hints.ai_flags = AI_PASSIVE;
+
+	gai = getaddrinfo(listen_addr, pbuf, &hints, &ai0);
+	if (gai) {
+		logerror("getaddrinfo() for %s failed: %s", listen_addr, gai_strerror(gai));
+		return 0;
+	}
+
+	for (ai = ai0; ai; ai = ai->ai_next) {
+		int sockfd;
+
+		sockfd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
+		if (sockfd < 0)
+			continue;
+		if (sockfd >= FD_SETSIZE) {
+			logerror("Socket descriptor too large");
+			close(sockfd);
+			continue;
+		}
+
+#ifdef IPV6_V6ONLY
+		if (ai->ai_family == AF_INET6) {
+			int on = 1;
+			setsockopt(sockfd, IPPROTO_IPV6, IPV6_V6ONLY,
+				   &on, sizeof(on));
+			/* Note: error is not fatal */
+		}
+#endif
+
+		if (set_reuse_addr(sockfd)) {
+			logerror("Could not set SO_REUSEADDR: %s", strerror(errno));
+			close(sockfd);
+			continue;
+		}
+
+		set_keep_alive(sockfd);
+
+		if (bind(sockfd, ai->ai_addr, ai->ai_addrlen) < 0) {
+			logerror("Could not bind to %s: %s",
+				 ip2str(ai->ai_family, ai->ai_addr, ai->ai_addrlen),
+				 strerror(errno));
+			close(sockfd);
+			continue;	/* not fatal */
+		}
+		if (listen(sockfd, 5) < 0) {
+			logerror("Could not listen to %s: %s",
+				 ip2str(ai->ai_family, ai->ai_addr, ai->ai_addrlen),
+				 strerror(errno));
+			close(sockfd);
+			continue;	/* not fatal */
+		}
+
+		flags = fcntl(sockfd, F_GETFD, 0);
+		if (flags >= 0)
+			fcntl(sockfd, F_SETFD, flags | FD_CLOEXEC);
+
+		ALLOC_GROW(socklist->list, socklist->nr + 1, socklist->alloc);
+		socklist->list[socklist->nr++] = sockfd;
+		socknum++;
+	}
+
+	freeaddrinfo(ai0);
+
+	return socknum;
+}
+
+#else /* NO_IPV6 */
+
+static int setup_named_sock(char *listen_addr, int listen_port, struct socketlist *socklist)
+{
+	struct sockaddr_in sin;
+	int sockfd;
+	long flags;
+
+	memset(&sin, 0, sizeof sin);
+	sin.sin_family = AF_INET;
+	sin.sin_port = htons(listen_port);
+
+	if (listen_addr) {
+		/* Well, host better be an IP address here. */
+		if (inet_pton(AF_INET, listen_addr, &sin.sin_addr.s_addr) <= 0)
+			return 0;
+	} else {
+		sin.sin_addr.s_addr = htonl(INADDR_ANY);
+	}
+
+	sockfd = socket(AF_INET, SOCK_STREAM, 0);
+	if (sockfd < 0)
+		return 0;
+
+	if (set_reuse_addr(sockfd)) {
+		logerror("Could not set SO_REUSEADDR: %s", strerror(errno));
+		close(sockfd);
+		return 0;
+	}
+
+	set_keep_alive(sockfd);
+
+	if ( bind(sockfd, (struct sockaddr *)&sin, sizeof sin) < 0 ) {
+		logerror("Could not bind to %s: %s",
+			 ip2str(AF_INET, (struct sockaddr *)&sin, sizeof(sin)),
+			 strerror(errno));
+		close(sockfd);
+		return 0;
+	}
+
+	if (listen(sockfd, 5) < 0) {
+		logerror("Could not listen to %s: %s",
+			 ip2str(AF_INET, (struct sockaddr *)&sin, sizeof(sin)),
+			 strerror(errno));
+		close(sockfd);
+		return 0;
+	}
+
+	flags = fcntl(sockfd, F_GETFD, 0);
+	if (flags >= 0)
+		fcntl(sockfd, F_SETFD, flags | FD_CLOEXEC);
+
+	ALLOC_GROW(socklist->list, socklist->nr + 1, socklist->alloc);
+	socklist->list[socklist->nr++] = sockfd;
+	return 1;
+}
+
+#endif
+
+static void socksetup(struct string_list *listen_addr, int listen_port, struct socketlist *socklist)
+{
+	if (!listen_addr->nr)
+		setup_named_sock("127.0.0.1", listen_port, socklist);
+	else {
+		size_t i;
+		int socknum;
+		for (i = 0; i < listen_addr->nr; i++) {
+			socknum = setup_named_sock(listen_addr->items[i].string,
+						   listen_port, socklist);
+
+			if (socknum == 0)
+				logerror("unable to allocate any listen sockets for host %s on port %u",
+					 listen_addr->items[i].string, listen_port);
+		}
+	}
+}
+
+static int service_loop(struct socketlist *socklist)
+{
+	struct pollfd *pfd;
+	size_t i;
+
+	CALLOC_ARRAY(pfd, socklist->nr);
+
+	for (i = 0; i < socklist->nr; i++) {
+		pfd[i].fd = socklist->list[i];
+		pfd[i].events = POLLIN;
+	}
+
+	signal(SIGCHLD, child_handler);
+
+	for (;;) {
+		size_t i;
+		int nr_ready;
+		int timeout = (pid_file ? 100 : -1);
+
+		check_dead_children();
+
+		nr_ready = poll(pfd, socklist->nr, timeout);
+		if (nr_ready < 0) {
+			if (errno != EINTR) {
+				logerror("Poll failed, resuming: %s",
+				      strerror(errno));
+				sleep(1);
+			}
+			continue;
+		}
+		else if (nr_ready == 0) {
+			/*
+			 * If we have a pid_file, then we watch it.
+			 * If someone deletes it, we shutdown the service.
+			 * The shell scripts in the test suite will use this.
+			 */
+			if (!pid_file || file_exists(pid_file))
+				continue;
+			goto shutdown;
+		}
+
+		for (i = 0; i < socklist->nr; i++) {
+			if (pfd[i].revents & POLLIN) {
+				union {
+					struct sockaddr sa;
+					struct sockaddr_in sai;
+#ifndef NO_IPV6
+					struct sockaddr_in6 sai6;
+#endif
+				} ss;
+				socklen_t sslen = sizeof(ss);
+				int incoming = accept(pfd[i].fd, &ss.sa, &sslen);
+				if (incoming < 0) {
+					switch (errno) {
+					case EAGAIN:
+					case EINTR:
+					case ECONNABORTED:
+						continue;
+					default:
+						die_errno("accept returned");
+					}
+				}
+				handle(incoming, &ss.sa, sslen);
+			}
+		}
+	}
+
+shutdown:
+	loginfo("Starting graceful shutdown (pid-file gone)");
+	for (i = 0; i < socklist->nr; i++)
+		close(socklist->list[i]);
+	free(socklist->list);
+	free(pfd);
+
+	return 0;
+}
+
+static int serve(struct string_list *listen_addr, int listen_port)
+{
+	struct socketlist socklist = { NULL, 0, 0 };
+
+	socksetup(listen_addr, listen_port, &socklist);
+	if (socklist.nr == 0)
+		die("unable to allocate any listen sockets on port %u",
+		    listen_port);
+
+	loginfo("Ready to rumble");
+
+	/*
+	 * Wait to create the pid-file until we've setup the sockets
+	 * and are open for business.
+	 */
+	if (pid_file)
+		write_file(pid_file, "%"PRIuMAX, (uintmax_t) getpid());
+
+	return service_loop(&socklist);
+}
+
+//////////////////////////////////////////////////////////////////
+// This section is executed by both the primary instance and all
+// worker instances.  So, yes, each child-process re-parses the
+// command line argument and re-discovers how it should behave.
+//////////////////////////////////////////////////////////////////
+
+int cmd_main(int argc, const char **argv)
+{
+	int listen_port = 0;
+	static struct string_list listen_addr = STRING_LIST_INIT_NODUP;
+	int worker_mode = 0;
+	int i;
+
+	trace2_cmd_name("test-gvfs-protocol");
+	setup_git_directory_gently(NULL);
+
+	for (i = 1; i < argc; i++) {
+		const char *arg = argv[i];
+		const char *v;
+
+		if (skip_prefix(arg, "--listen=", &v)) {
+			string_list_append_nodup(&listen_addr, xstrdup_tolower(v));
+			continue;
+		}
+		if (skip_prefix(arg, "--port=", &v)) {
+			char *end;
+			unsigned long n;
+			n = strtoul(v, &end, 0);
+			if (*v && !*end) {
+				listen_port = n;
+				continue;
+			}
+		}
+		if (!strcmp(arg, "--worker")) {
+			worker_mode = 1;
+			trace2_cmd_mode("worker");
+			continue;
+		}
+		if (!strcmp(arg, "--verbose")) {
+			verbose = 1;
+			continue;
+		}
+		if (skip_prefix(arg, "--timeout=", &v)) {
+			timeout = atoi(v);
+			continue;
+		}
+		if (skip_prefix(arg, "--init-timeout=", &v)) {
+			init_timeout = atoi(v);
+			continue;
+		}
+		if (skip_prefix(arg, "--max-connections=", &v)) {
+			max_connections = atoi(v);
+			if (max_connections < 0)
+				max_connections = 0; /* unlimited */
+			continue;
+		}
+		if (!strcmp(arg, "--reuseaddr")) {
+			reuseaddr = 1;
+			continue;
+		}
+		if (skip_prefix(arg, "--pid-file=", &v)) {
+			pid_file = v;
+			continue;
+		}
+		if (skip_prefix(arg, "--mayhem=", &v)) {
+			string_list_append(&mayhem_list, v);
+			continue;
+		}
+
+		usage(test_gvfs_protocol_usage);
+	}
+
+	/* avoid splitting a message in the middle */
+	setvbuf(stderr, NULL, _IOFBF, 4096);
+
+	if (listen_port == 0)
+		listen_port = DEFAULT_GIT_PORT;
+
+	/*
+	 * If no --listen=<addr> args are given, the setup_named_sock()
+	 * code will receive a NULL address and use INADDR_ANY.
+	 * This exposes both internal and external interfaces on the
+	 * port.
+	 *
+	 * Disallow that and default to the internal-use-only loopback
+	 * address.
+	 */
+	if (!listen_addr.nr)
+		string_list_append(&listen_addr, "127.0.0.1");
+
+	/*
+	 * worker_mode is set in our own child process instances
+	 * (that are bound to a connected socket from a client).
+	 */
+	if (worker_mode) {
+		if (mayhem_list.nr) {
+			const char *string = getenv("MAYHEM_CHILD");
+			if (string && *string)
+				mayhem_child = atoi(string);
+		}
+
+		build_gvfs_config_json(&jw_config, &listen_addr, listen_port);
+
+		return worker();
+	}
+
+	/*
+	 * `cld_argv` is a bit of a clever hack.  The top-level instance
+	 * of test-gvfs-protocol.exe does the normal bind/listen/accept
+	 * stuff.  For each incoming socket, the top-level process spawns
+	 * a child instance of test-gvfs-protocol.exe *WITH* the additional
+	 * `--worker` argument.  This causes the child to set `worker_mode`
+	 * and immediately call `worker()` using the connected socket (and
+	 * without the usual need for fork() or threads).
+	 *
+	 * The magic here is made possible because `cld_argv` is static
+	 * and handle() (called by service_loop()) knows about it.
+	 */
+	strvec_push(&cld_argv, argv[0]);
+	strvec_push(&cld_argv, "--worker");
+	for (i = 1; i < argc; ++i)
+		strvec_push(&cld_argv, argv[i]);
+
+	/*
+	 * Setup primary instance to listen for connections.
+	 */
+	return serve(&listen_addr, listen_port);
+}
diff --git a/t/meson.build b/t/meson.build
index 014fd228740e88..3e8fd774d6c0eb 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -741,6 +741,7 @@ integration_tests = [
   't5731-protocol-v2-bundle-uri-git.sh',
   't5732-protocol-v2-bundle-uri-http.sh',
   't5750-bundle-uri-parse.sh',
+  't5799-gvfs-helper.sh',
   't5801-remote-helpers.sh',
   't5802-connect-helper.sh',
   't5810-proto-disable-local.sh',
diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh
new file mode 100755
index 00000000000000..f67ba209466f86
--- /dev/null
+++ b/t/t5799-gvfs-helper.sh
@@ -0,0 +1,974 @@
+#!/bin/sh
+
+test_description='test gvfs-helper and GVFS Protocol'
+
+. ./test-lib.sh
+
+# Set the port for t/helper/test-gvfs-protocol.exe from either the
+# environment or from the test number of this shell script.
+#
+test_set_port GIT_TEST_GVFS_PROTOCOL_PORT
+
+# Setup the following repos:
+#
+#    repo_src:
+#        A normal, no-magic, fully-populated clone of something.
+#        No GVFS (aka VFS4G).  No Scalar.  No partial-clone.
+#        This will be used by "t/helper/test-gvfs-protocol.exe"
+#        to serve objects.
+#
+#    repo_t1:
+#        An empty repo with no contents nor commits.  That is,
+#        everything is missing.  For the tests based on this repo,
+#        we don't care why it is missing objects (or if we could
+#        actually use it).  We are only testing explicit object
+#        fetching using gvfs-helper.exe in isolation.
+#
+REPO_SRC="$PWD"/repo_src
+REPO_T1="$PWD"/repo_t1
+
+# Setup some loopback URLs where test-gvfs-protocol.exe will be
+# listening.  We will spawn it directly inside the repo_src directory,
+# so we don't need any of the directory mapping or configuration
+# machinery found in "git-daemon.exe" or "git-http-backend.exe".
+#
+# This lets us use the "uri-base" part of the URL (prior to the REST
+# API "/gvfs/<token>") to control how our mock server responds.  For
+# example, only the origin (main Git) server supports "/gvfs/config".
+#
+# This also means that if we add a remote containing $ORIGIN_URL,
+# it will work with gvfs-helper, but not for fetch (without some mapping
+# tricks).
+#
+HOST_PORT=127.0.0.1:$GIT_TEST_GVFS_PROTOCOL_PORT
+ORIGIN_URL=http://$HOST_PORT/servertype/origin
+CACHE_URL=http://$HOST_PORT/servertype/cache
+
+SHARED_CACHE_T1="$PWD"/shared_cache_t1
+
+# The pid-file is created by test-gvfs-protocol.exe when it starts.
+# The server will shut down if/when we delete it.  (This is a little
+# easier than killing it by PID.)
+#
+PID_FILE="$PWD"/pid-file.pid
+SERVER_LOG="$PWD"/OUT.server.log
+
+PATH="$GIT_BUILD_DIR/t/helper/:$PATH" && export PATH
+
+OIDS_FILE="$PWD"/oid_list.txt
+OIDS_CT_FILE="$PWD"/oid_ct_list.txt
+OIDS_BLOBS_FILE="$PWD"/oids_blobs_file.txt
+OID_ONE_BLOB_FILE="$PWD"/oid_one_blob_file.txt
+
+# Get a list of available OIDs in repo_src so that we can try to fetch
+# them and so that we don't have to hard-code a list of known OIDs.
+# This doesn't need to be a complete list -- just enough to drive some
+# representative tests.
+#
+# Optionally require that we find a minimum number of OIDs.
+#
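+# Usage: get_list_of_oids [<minimum_nr>]
+#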
+get_list_of_oids () {
+	git -C "$REPO_SRC" rev-list --objects HEAD | sed 's/ .*//' | sort >"$OIDS_FILE"
+
+	if test $# -eq 1
+	then
+		actual_nr=$(( $(wc -l <"$OIDS_FILE") ))
+		if test $actual_nr -lt $1
+		then
+			echo "get_list_of_oids: insufficient data.  Need $1 OIDs."
+			return 1
+		fi
+	fi
+	return 0
+}
+
+get_list_of_blobs_oids () {
+	git -C "$REPO_SRC" ls-tree HEAD | grep ' blob ' | awk "{print \$3}" | sort >"$OIDS_BLOBS_FILE"
+	head -1 <"$OIDS_BLOBS_FILE" >"$OID_ONE_BLOB_FILE"
+}
+
+get_list_of_commit_and_tree_oids () {
+	git -C "$REPO_SRC" cat-file --batch-check --batch-all-objects | awk "/commit|tree/ {print \$1}" | sort >"$OIDS_CT_FILE"
+
+	if test $# -eq 1
+	then
+		actual_nr=$(( $(wc -l <"$OIDS_CT_FILE") ))
+		if test $actual_nr -lt $1
+		then
+			echo "get_list_of_commit_and_tree_oids: insufficient data.  Need $1 OIDs."
+			return 1
+		fi
+	fi
+	return 0
+}
+
+test_expect_success 'setup repos' '
+	test_create_repo "$REPO_SRC" &&
+	git -C "$REPO_SRC" branch -M main &&
+	#
+	# test_commit_bulk() does magic to create a packfile containing
+	# the new commits.
+	#
+	test_commit_bulk -C "$REPO_SRC" --filename="batch_a.%s.t" 9 &&
+	git -C "$REPO_SRC" rev-parse refs/heads/main >m1.branch &&
+	test_commit_bulk -C "$REPO_SRC" --filename="batch_b.%s.t" 9 &&
+	git -C "$REPO_SRC" rev-parse refs/heads/main >m2.branch &&
+	#
+	# test_commit() creates commits, trees, tags, and blobs and leaves
+	# them loose.
+	#
+	test_commit -C "$REPO_SRC" file1.txt &&
+	test_commit -C "$REPO_SRC" file2.txt &&
+	test_commit -C "$REPO_SRC" file3.txt &&
+	test_commit -C "$REPO_SRC" file4.txt &&
+	test_commit -C "$REPO_SRC" file5.txt &&
+	test_commit -C "$REPO_SRC" file6.txt &&
+	test_commit -C "$REPO_SRC" file7.txt &&
+	test_commit -C "$REPO_SRC" file8.txt &&
+	test_commit -C "$REPO_SRC" file9.txt &&
+	git -C "$REPO_SRC" rev-parse refs/heads/main >m3.branch &&
+	#
+	# gvfs-helper.exe writes downloaded objects to a shared-cache directory
+	# rather than the ODB inside the .git directory.
+	#
+	mkdir "$SHARED_CACHE_T1" &&
+	mkdir "$SHARED_CACHE_T1/pack" &&
+	mkdir "$SHARED_CACHE_T1/info" &&
+	#
+	# setup repo_t1 and point all of the gvfs.* values to repo_src.
+	#
+	test_create_repo "$REPO_T1" &&
+	git -C "$REPO_T1" branch -M main &&
+	git -C "$REPO_T1" remote add origin $ORIGIN_URL &&
+	git -C "$REPO_T1" config --local gvfs.cache-server $CACHE_URL &&
+	git -C "$REPO_T1" config --local gvfs.sharedCache "$SHARED_CACHE_T1" &&
+	echo "$SHARED_CACHE_T1" >> "$REPO_T1"/.git/objects/info/alternates &&
+	#
+	# Set up a trivial credential helper so requests that require
+	# authentication can succeed.
+	#
+	cat <<-EOF >creds.txt &&
+		username=x
+		password=y
+	EOF
+	cat <<-EOF >creds.sh &&
+		#!/bin/sh
+		cat "$PWD"/creds.txt
+	EOF
+	chmod 755 creds.sh &&
+	git -C "$REPO_T1" config --local credential.helper "!f() { cat \"$PWD\"/creds.txt; }; f" &&
+	#
+	# Create some test data sets.
+	#
+	get_list_of_oids 30 &&
+	get_list_of_commit_and_tree_oids 30 &&
+	get_list_of_blobs_oids
+'
+
+stop_gvfs_protocol_server () {
+	if ! test -f "$PID_FILE"
+	then
+		return 0
+	fi
+	#
+	# The server will shutdown automatically when we delete the pid-file.
+	#
+	rm -f "$PID_FILE"
+	#
+	# Give it a few seconds to shut down (mainly so the port is
+	# completely released before the next test starts another instance
+	# and tries to bind to it).
+	#
+	for k in 0 1 2 3 4
+	do
+		if grep -q "Starting graceful shutdown" "$SERVER_LOG"
+		then
+			return 0
+		fi
+		sleep 1
+	done
+
+	echo "stop_gvfs_protocol_server: timeout waiting for server shutdown"
+	return 1
+}
+
+start_gvfs_protocol_server () {
+	#
+	# Launch our server into the background in repo_src.
+	#
+	(
+		cd "$REPO_SRC"
+		test-gvfs-protocol --verbose \
+			--listen=127.0.0.1 \
+			--port=$GIT_TEST_GVFS_PROTOCOL_PORT \
+			--reuseaddr \
+			--pid-file="$PID_FILE" \
+			2>"$SERVER_LOG" &
+	)
+	#
+	# Give it a few seconds to get started.
+	#
+	for k in 0 1 2 3 4
+	do
+		if test -f "$PID_FILE"
+		then
+			return 0
+		fi
+		sleep 1
+	done
+
+	echo "start_gvfs_protocol_server: timeout waiting for server startup"
+	return 1
+}
+
+start_gvfs_protocol_server_with_mayhem () {
+	if test $# -lt 1
+	then
+		echo "start_gvfs_protocol_server_with_mayhem: need mayhem args"
+		return 1
+	fi
+
+	mayhem=""
+	for k in $*
+	do
+		mayhem="$mayhem --mayhem=$k"
+	done
+	#
+	# Launch our server into the background in repo_src.
+	#
+	(
+		cd "$REPO_SRC"
+		test-gvfs-protocol --verbose \
+			--listen=127.0.0.1 \
+			--port=$GIT_TEST_GVFS_PROTOCOL_PORT \
+			--reuseaddr \
+			--pid-file="$PID_FILE" \
+			$mayhem \
+			2>"$SERVER_LOG" &
+	)
+	#
+	# Give it a few seconds to get started.
+	#
+	for k in 0 1 2 3 4
+	do
+		if test -f "$PID_FILE"
+		then
+			return 0
+		fi
+		sleep 1
+	done
+
+	echo "start_gvfs_protocol_server_with_mayhem: timeout waiting for server startup"
+	return 1
+}
+
+# Verify the number of connections from the client.
+#
+# If keep-alive is working, a series of successful sequential requests to the
+# same server should use the same TCP connection, so a simple multi-get would
+# only have one connection.
+#
+# On the other hand, an auto-retry after a network error (mayhem) will have
+# more than one for a single object request.
+#
+# TODO This may generate false alarms when we get to more complicated tests, so
+# TODO we might only want to use it for basic tests.
+#
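+# Usage: verify_connection_count [<expected_nr>]   (defaults to 1)
+#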
+verify_connection_count () {
+	if test $# -eq 1
+	then
+		expected_nr=$1
+	else
+		expected_nr=1
+	fi
+
+	actual_nr=$(( $(grep "Connection from" "$SERVER_LOG" | wc -l) ))
+
+	if test $actual_nr -ne $expected_nr
+	then
+		echo "verify_connection_count: expected $expected_nr; actual $actual_nr"
+		return 1
+	fi
+	return 0
+}
+
+# Verify that the set of requested objects are present in
+# the shared-cache and that there is no corruption.  We use
+# cat-file to hide whether the object is packed or loose in
+# the test repo.
+#
+# Usage: <pathname_to_file_of_oids>
+#
+verify_objects_in_shared_cache () {
+	#
+	# See if any of the objects are missing from repo_t1.
+	#
+	git -C "$REPO_T1" cat-file --batch-check <"$1" >OUT.bc_actual || return 1
+	grep -q " missing" OUT.bc_actual && return 1
+	#
+	# See if any of the objects have different sizes or types than repo_src.
+	#
+	git -C "$REPO_SRC" cat-file --batch-check <"$1" >OUT.bc_expect || return 1
+	test_cmp OUT.bc_expect OUT.bc_actual || return 1
+	#
+	# See if any of the objects are corrupt in repo_t1.  This fully
+	# reconstructs the objects and verifies the hash and therefore
+	# detects corruption not found by the earlier "batch-check" step.
+	#
+	git -C "$REPO_T1" cat-file --batch <"$1" >OUT.b_actual || return 1
+	#
+	# TODO move the shared-cache directory (and/or the
+	# TODO .git/objects/info/alternates and temporarily unset
+	# TODO gvfs.sharedCache) and repeat the first "batch-check"
+	# TODO and make sure that they are ALL missing.
+	#
+	return 0
+}
+
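+# Verify that gvfs-helper reported the expected number of received
+# packfiles.  gvfs-helper prints a "packfile <path>" line (captured in
+# OUT.output) for each packfile it receives.
+#
+# Usage: verify_received_packfile_count [<expected_nr>]   (defaults to 1)
+#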
+verify_received_packfile_count () {
+	if test $# -eq 1
+	then
+		expected_nr=$1
+	else
+		expected_nr=1
+	fi
+
+	actual_nr=$(( $(grep "packfile " OUT.output | wc -l) ))
+
+	if test $actual_nr -ne $expected_nr
+	then
+		echo "verify_received_packfile_count: expected $expected_nr; actual $actual_nr"
+		return 1
+	fi
+	return 0
+}
+
+per_test_cleanup () {
+	stop_gvfs_protocol_server
+
+	rm -rf "$SHARED_CACHE_T1"/[0-9a-f][0-9a-f]/
+	rm -rf "$SHARED_CACHE_T1"/info/*
+	rm -rf "$SHARED_CACHE_T1"/pack/*
+
+	rm -rf OUT.*
+	return 0
+}
+
+#################################################################
+# Basic tests to confirm the happy path works.
+#################################################################
+
+test_expect_success 'basic: GET origin multi-get no-auth' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Connect to the origin server (w/o auth) and make a series of
+	# single-object GET requests.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		<"$OIDS_FILE" >OUT.output &&
+
+	# Stop the server to prevent the verification steps from faulting-in
+	# any missing objects.
+	#
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OIDS_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OIDS_FILE" &&
+	verify_connection_count 1
+'
+
+test_expect_success 'basic: GET cache-server multi-get trust-mode' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Connect to the cache-server and make a series of
+	# single-object GET requests.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=trust \
+		--remote=origin \
+		get \
+		<"$OIDS_FILE" >OUT.output &&
+
+	# Stop the server to prevent the verification steps from faulting-in
+	# any missing objects.
+	#
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OIDS_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OIDS_FILE" &&
+	verify_connection_count 1
+'
+
+test_expect_success 'basic: GET gvfs/config' '
+#	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Connect to the origin server (w/o auth) and request the
+	# "gvfs/config" data.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		config \
+		<"$OIDS_FILE" >OUT.output &&
+
+	# Stop the server to prevent the verification steps from faulting-in
+	# any missing objects.
+	#
+	stop_gvfs_protocol_server &&
+
+	# The cache-server URL should be listed in the gvfs/config output.
+	# We confirm this before assuming error-mode will work.
+	#
+	grep -q "$CACHE_URL" OUT.output
+'
+
+test_expect_success 'basic: GET cache-server multi-get error-mode' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Connect to the cache-server and make a series of
+	# single-object GET requests.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=error \
+		--remote=origin \
+		get \
+		<"$OIDS_FILE" >OUT.output &&
+
+	# Stop the server to prevent the verification steps from faulting-in
+	# any missing objects.
+	#
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OIDS_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OIDS_FILE" &&
+
+	# Technically, we have 1 connection to the origin server
+	# for the "gvfs/config" request and 1 to the cache server to
+	# get the objects, but because we are using the same port
+	# for both, keep-alive will handle it.  So 1 connection.
+	#
+	verify_connection_count 1
+'
+
+# The GVFS Protocol POST verb behaves like GET for non-commit objects
+# (in that it just returns the requested object), but for commit
+# objects POST *also* returns all trees referenced by the commit.
+#
+# The goal of this test is to confirm that gvfs-helper can send us
+# a packfile at all.  So, this test only passes blobs to not blur
+# the issue.
+#
+test_expect_success 'basic: POST origin blobs' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Connect to the origin server (w/o auth) and make
+	# multi-object POST request.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		post \
+		<"$OIDS_BLOBS_FILE" >OUT.output &&
+
+	# Stop the server to prevent the verification steps from faulting-in
+	# any missing objects.
+	#
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "packfile <path>" message for each received
+	# packfile.  We verify the number of expected packfile(s) and we
+	# individually verify that each requested object is present in the
+	# shared cache (and index-pack already verified the integrity of
+	# the packfile), so we do not bother to run "git verify-pack -v"
+	# and do an exact matchup here.
+	#
+	verify_received_packfile_count 1 &&
+
+	verify_objects_in_shared_cache "$OIDS_BLOBS_FILE" &&
+	verify_connection_count 1
+'
+
+#################################################################
+# Tests to see how gvfs-helper responds to network problems.
+#
+# We use small --max-retry value because of exponential backoff.
+#
+# These mayhem tests are interested in how gvfs-helper gracefully
+# retries when there is a network error.  And verify that it gives
+# up gracefully too.
+#################################################################
+
+mayhem_observed__close__connections () {
+	if grep -q "transient" OUT.stderr
+	then
+		# Transient errors should retry.
+		# 1 for initial request + 2 retries.
+		#
+		verify_connection_count 3
+		return $?
+	elif grep -q "hard_fail" OUT.stderr
+	then
+		# Hard errors should not retry.
+		#
+		verify_connection_count 1
+		return $?
+	else
+		echo "mayhem_observed__close__connections: unexpected mayhem-induced error type"
+		return 1
+	fi
+}
+
+mayhem_observed__close () {
+	# Expected error codes for mayhem events:
+	#     close_read
+	#     close_write
+	#     close_no_write
+	#
+	# CURLE_PARTIAL_FILE 18
+	# CURLE_GOT_NOTHING 52
+	# CURLE_SEND_ERROR 55
+	# CURLE_RECV_ERROR 56
+	#
+	# I don't want to pin it down to an exact error for each because there
+	# may be races here due to network buffering.
+	#
+	# Also, it is unclear which of these network errors should be transient
+	# (with retry) and which should be a hard-fail (without retry).  I'm only
+	# going to verify the connection counts based upon the type of error
+	# gvfs-helper claimed it was.
+	#
+	if      grep -q "error: get: (curl:18)" OUT.stderr ||
+		grep -q "error: get: (curl:52)" OUT.stderr ||
+		grep -q "error: get: (curl:55)" OUT.stderr ||
+		grep -q "error: get: (curl:56)" OUT.stderr
+	then
+		mayhem_observed__close__connections
+		return $?
+	else
+		echo "mayhem_observed__close: unexpected mayhem-induced error"
+		return 1
+	fi
+}
+
+test_expect_success 'curl-error: no server' '
+	test_when_finished "per_test_cleanup" &&
+
+	# Try to do a multi-get without a server.
+	#
+	# Use small max-retry value because of exponential backoff,
+	# but yet do exercise retry some.
+	#
+	test_must_fail \
+		git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		--max-retries=2 \
+		<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	# CURLE_COULDNT_CONNECT 7
+	grep -q "error: get: (curl:7)" OUT.stderr
+'
+
+test_expect_success 'curl-error: close socket while reading request' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem close_read &&
+
+	test_must_fail \
+		git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		--max-retries=2 \
+		<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	mayhem_observed__close
+'
+
+test_expect_success 'curl-error: close socket while writing response' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem close_write &&
+
+	test_must_fail \
+		git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		--max-retries=2 \
+		<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	mayhem_observed__close
+'
+
+test_expect_success 'curl-error: close socket before writing response' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem close_no_write &&
+
+	test_must_fail \
+		git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		--max-retries=2 \
+		<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	mayhem_observed__close
+'
+
+#################################################################
+# Tests to confirm that gvfs-helper does silently recover when
+# a retry succeeds.
+#
+# Note: I'm only going to do this for 1 of the close_* mayhem events.
+#################################################################
+
+test_expect_success 'successful retry after curl-error: origin get' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem close_read_1 &&
+
+	# Connect to the origin server (w/o auth).
+	# Make a single-object GET request.
+	# Confirm that it succeeds without error.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		--max-retries=2 \
+		<"$OID_ONE_BLOB_FILE" >OUT.output &&
+
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OID_ONE_BLOB_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OID_ONE_BLOB_FILE" &&
+	verify_connection_count 2
+'
+
+#################################################################
+# Tests to see how gvfs-helper responds to HTTP errors/problems.
+#
+#################################################################
+
+# See "enum gh__error_code" in gvfs-helper.c
+#
+GH__ERROR_CODE__HTTP_404=4
+GH__ERROR_CODE__HTTP_429=5
+GH__ERROR_CODE__HTTP_503=6
+
+test_expect_success 'http-error: 503 Service Unavailable (with retry)' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_503 &&
+
+	test_expect_code $GH__ERROR_CODE__HTTP_503 \
+		git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		--max-retries=2 \
+		<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	grep -q "error: get: (http:503)" OUT.stderr &&
+	verify_connection_count 3
+'
+
+test_expect_success 'http-error: 429 Too Many Requests (with retry)' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_429 &&
+
+	test_expect_code $GH__ERROR_CODE__HTTP_429 \
+		git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		--max-retries=2 \
+		<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	grep -q "error: get: (http:429)" OUT.stderr &&
+	verify_connection_count 3
+'
+
+test_expect_success 'http-error: 404 Not Found (no retry)' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_404 &&
+
+	test_expect_code $GH__ERROR_CODE__HTTP_404 \
+		git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		--max-retries=2 \
+		<"$OID_ONE_BLOB_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	grep -q "error: get: (http:404)" OUT.stderr &&
+	verify_connection_count 1
+'
+
+#################################################################
+# Tests to confirm that gvfs-helper does silently recover when an
+# HTTP request succeeds after a failure.
+#
+#################################################################
+
+test_expect_success 'successful retry after http-error: origin get' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_429_1 &&
+
+	# Connect to the origin server (w/o auth).
+	# Make a single-object GET request.
+	# Confirm that it succeeds without error.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		--max-retries=2 \
+		<"$OID_ONE_BLOB_FILE" >OUT.output &&
+
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OID_ONE_BLOB_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OID_ONE_BLOB_FILE" &&
+	verify_connection_count 2
+'
+
+#################################################################
+# Test HTTP Auth
+#
+#################################################################
+
+test_expect_success 'HTTP GET Auth on Origin Server' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_401 &&
+
+	# Force server to require auth.
+	# Connect to the origin server without auth.
+	# Make a single-object GET request.
+	# Confirm that it gets a 401 and then retries with auth.
+	#
+	GIT_CONFIG_NOSYSTEM=1 \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=disable \
+			--remote=origin \
+			get \
+			--max-retries=2 \
+			<"$OID_ONE_BLOB_FILE" >OUT.output &&
+
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OID_ONE_BLOB_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OID_ONE_BLOB_FILE" &&
+	verify_connection_count 2
+'
+
+test_expect_success 'HTTP POST Auth on Origin Server' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_401 &&
+
+	# Connect to the origin server, make a multi-object POST
+	# request, and verify that it automatically handles the 401.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		post \
+		<"$OIDS_BLOBS_FILE" >OUT.output &&
+
+	# Stop the server to prevent the verification steps from faulting-in
+	# any missing objects.
+	#
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "packfile <path>" message for each received
+	# packfile.  We verify the number of expected packfile(s) and we
+	# individually verify that each requested object is present in the
+	# shared cache (and index-pack already verified the integrity of
+	# the packfile), so we do not bother to run "git verify-pack -v"
+	# and do an exact matchup here.
+	#
+	verify_received_packfile_count 1 &&
+
+	verify_objects_in_shared_cache "$OIDS_BLOBS_FILE" &&
+	verify_connection_count 2
+'
+
+test_expect_success 'HTTP GET Auth on Cache Server' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_401 &&
+
+	# Try auth to cache-server.  Note that gvfs-helper *ALWAYS* sends
+	# creds to cache-servers, so we will never see the "401 Unauthorized"
+	# response.  And we are using "trust" mode, so we only expect 1
+	# connection to the server.
+	#
+	GIT_CONFIG_NOSYSTEM=1 \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=trust \
+			--remote=origin \
+			get \
+			--max-retries=2 \
+			<"$OID_ONE_BLOB_FILE" >OUT.output &&
+
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received object.
+	# Verify that gvfs-helper received each of the requested objects.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OID_ONE_BLOB_FILE" OUT.actual &&
+
+	verify_objects_in_shared_cache "$OID_ONE_BLOB_FILE" &&
+	verify_connection_count 1
+'
+
+#################################################################
+# Integration tests with Git.exe
+#
+# Now that we have confirmed that gvfs-helper works in isolation,
+# run a series of tests using random Git commands that fault-in
+# objects as needed.
+#
+# At this point, I'm going to stop verifying the shape of the ODB
+# (loose vs packfiles) and the number of connections required to
+# get them.  The tests from here on are to verify that objects are
+# magically fetched whenever required.
+#################################################################
+
+test_expect_success 'integration: explicit commit/trees, implicit blobs: log file' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# We have a very empty repo.  Seed it with all of the commits
+	# and trees.  The purpose of this test is to demand-load the
+	# needed blobs only, so we prefetch the commits and trees.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		<"$OIDS_CT_FILE" >OUT.output &&
+
+	# Confirm that we do not have the blobs locally.
+	# With gvfs-helper turned off, we should fail.
+	#
+	test_must_fail \
+		git -C "$REPO_T1" -c core.useGVFSHelper=false \
+			log $(cat m3.branch) -- file9.txt \
+			>OUT.output 2>OUT.stderr &&
+
+	# Turn on gvfs-helper and retry.  This should implicitly fetch
+	# any needed blobs.
+	#
+	git -C "$REPO_T1" -c core.useGVFSHelper=true \
+		log $(cat m3.branch) -- file9.txt \
+		>OUT.output 2>OUT.stderr &&
+
+	# Verify that gvfs-helper wrote the fetched blobs to the
+	# local ODB, such that a second attempt with gvfs-helper
+	# turned off should succeed.
+	#
+	git -C "$REPO_T1" -c core.useGVFSHelper=false \
+		log $(cat m3.branch) -- file9.txt \
+		>OUT.output 2>OUT.stderr
+'
+
+test_expect_success 'integration: explicit commit/trees, implicit blobs: diff 2 commits' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# We have a very empty repo.  Seed it with all of the commits
+	# and trees.  The purpose of this test is to demand-load the
+	# needed blobs only, so we prefetch the commits and trees.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		get \
+		<"$OIDS_CT_FILE" >OUT.output &&
+
+	# Confirm that we do not have the blobs locally.
+	# With gvfs-helper turned off, we should fail.
+	#
+	test_must_fail \
+		git -C "$REPO_T1" -c core.useGVFSHelper=false \
+			diff $(cat m1.branch)..$(cat m3.branch) \
+			>OUT.output 2>OUT.stderr &&
+
+	# Turn on gvfs-helper and retry.  This should implicitly fetch
+	# any needed blobs.
+	#
+	git -C "$REPO_T1" -c core.useGVFSHelper=true \
+		diff $(cat m1.branch)..$(cat m3.branch) \
+		>OUT.output 2>OUT.stderr &&
+
+	# Verify that gvfs-helper wrote the fetched blobs to the
+	# local ODB, such that a second attempt with gvfs-helper
+	# turned off should succeed.
+	#
+	git -C "$REPO_T1" -c core.useGVFSHelper=false \
+		diff $(cat m1.branch)..$(cat m3.branch) \
+		>OUT.output 2>OUT.stderr
+'
+
+test_expect_success 'integration: fully implicit: diff 2 commits' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Implicitly demand-load everything without any pre-seeding.
+	#
+	git -C "$REPO_T1" -c core.useGVFSHelper=true \
+		diff $(cat m1.branch)..$(cat m3.branch) \
+		>OUT.output 2>OUT.stderr
+'
+
+test_done

From acdaf94b54eb7f2f131a95a0b60e1712ade48111 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Wed, 13 Nov 2019 14:19:44 -0500
Subject: [PATCH 104/207] gvfs-helper: move result-list construction into
 install functions

gvfs-helper prints a "loose <oid>" or "packfile <name>" message for each
item after it is received, to help invokers update their in-memory caches.
Move the code that accumulates these messages in the result_list into
the install_* functions rather than waiting until the end of the request.

A POST request containing 1 object may return either a loose object or a
packfile, depending on whether the object is a commit or a non-commit.
Delaying the message generation just complicated the caller.
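
For reference, after this change an invoker of a single-object POST may
receive either of these result messages (placeholders, not literal
values):

    packfile <filename.pack>
    loose <oid>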

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 gvfs-helper.c | 58 ++++++++++++++++++++++++++++-----------------------
 1 file changed, 32 insertions(+), 26 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index 00518cd170c071..23e26d5360f1da 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -450,6 +450,8 @@ struct gh__request_params {
 	struct progress *progress;
 
 	struct strbuf e2eid;
+
+	struct string_list *result_list; /* we do not own this */
 };
 
 #define GH__REQUEST_PARAMS_INIT { \
@@ -478,6 +480,7 @@ struct gh__request_params {
 	.progress_msg = STRBUF_INIT, \
 	.progress = NULL, \
 	.e2eid = STRBUF_INIT, \
+	.result_list = NULL, \
 	}
 
 static void gh__request_params__release(struct gh__request_params *params)
@@ -510,6 +513,8 @@ static void gh__request_params__release(struct gh__request_params *params)
 	params->progress = NULL;
 
 	strbuf_release(&params->e2eid);
+
+	params->result_list = NULL; /* we do not own this */
 }
 
 /*
@@ -1870,6 +1875,16 @@ static void install_packfile(struct gh__request_params *params,
 		goto cleanup;
 	}
 
+
+	if (params->result_list) {
+		struct strbuf result_msg = STRBUF_INIT;
+
+		strbuf_addf(&result_msg, "packfile %s",
+			    params->final_packfile_filename.buf);
+		string_list_append(params->result_list, result_msg.buf);
+		strbuf_release(&result_msg);
+	}
+
 cleanup:
 	child_process_clear(&ip);
 }
@@ -1926,8 +1941,19 @@ static void install_loose(struct gh__request_params *params,
 			    "could not install loose object '%s'",
 			    params->loose_path.buf);
 		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE;
+		goto cleanup;
+	}
+
+	if (params->result_list) {
+		struct strbuf result_msg = STRBUF_INIT;
+
+		strbuf_addf(&result_msg, "loose %s",
+			    oid_to_hex(&params->loose_oid));
+		string_list_append(params->result_list, result_msg.buf);
+		strbuf_release(&result_msg);
 	}
 
+cleanup:
 	strbuf_release(&tmp_path);
 }
 
@@ -2583,7 +2609,7 @@ static void setup_gvfs_objects_progress(struct gh__request_params *params,
 	if (!gh__cmd_opts.show_progress)
 		return;
 
-	if (params->b_is_post && params->object_count > 1) {
+	if (params->b_is_post) {
 		strbuf_addf(&params->progress_base_phase3_msg,
 			    "Receiving packfile %ld/%ld with %ld objects",
 			    num, den, params->object_count);
@@ -2615,6 +2641,8 @@ static void do__http_get__gvfs_object(struct gh__response_status *status,
 
 	params.object_count = 1;
 
+	params.result_list = result_list;
+
 	params.headers = http_copy_default_headers();
 	params.headers = curl_slist_append(params.headers,
 					   "X-TFS-FedAuthRedirect: Suppress");
@@ -2627,16 +2655,6 @@ static void do__http_get__gvfs_object(struct gh__response_status *status,
 
 	do_req__with_fallback(component_url.buf, &params, status);
 
-	if (status->ec == GH__ERROR_CODE__OK) {
-		struct strbuf msg = STRBUF_INIT;
-
-		strbuf_addf(&msg, "loose %s",
-			    oid_to_hex(&params.loose_oid));
-
-		string_list_append(result_list, msg.buf);
-		strbuf_release(&msg);
-	}
-
 	gh__request_params__release(&params);
 	strbuf_release(&component_url);
 }
@@ -2648,7 +2666,7 @@ static void do__http_get__gvfs_object(struct gh__response_status *status,
  * consumed (along with the filename of the resulting packfile).
  *
  * However, if we only have 1 oid (remaining) in the OIDSET, the
- * server will respond to our POST with a loose object rather than
+ * server *MAY* respond to our POST with a loose object rather than
  * a packfile with 1 object.
  *
  * Append a message to the result_list describing the result.
@@ -2679,6 +2697,8 @@ static void do__http_post__gvfs_objects(struct gh__response_status *status,
 
 	params.post_payload = &jw_req.json;
 
+	params.result_list = result_list;
+
 	params.headers = http_copy_default_headers();
 	params.headers = curl_slist_append(params.headers,
 					   "X-TFS-FedAuthRedirect: Suppress");
@@ -2706,20 +2726,6 @@ static void do__http_post__gvfs_objects(struct gh__response_status *status,
 
 	do_req__with_fallback("gvfs/objects", &params, status);
 
-	if (status->ec == GH__ERROR_CODE__OK) {
-		struct strbuf msg = STRBUF_INIT;
-
-		if (params.object_count > 1)
-			strbuf_addf(&msg, "packfile %s",
-				    params.final_packfile_filename.buf);
-		else
-			strbuf_addf(&msg, "loose %s",
-				    oid_to_hex(&params.loose_oid));
-
-		string_list_append(result_list, msg.buf);
-		strbuf_release(&msg);
-	}
-
 	gh__request_params__release(&params);
 	jw_release(&jw_req);
 }

From b2626ef20f9381d6e369e917135013970aeecc52 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Mon, 11 Nov 2019 15:09:31 -0500
Subject: [PATCH 105/207] gvfs-helper: add prefetch support

Teach gvfs-helper to support "/gvfs/prefetch" REST API.
This includes a new `gvfs-helper prefetch --since=<t>` command line
option and a new `objects.prefetch` verb in `gvfs-helper server` mode.

If the `since` argument is omitted, `gvfs-helper` will search the local
shared-cache for the most recent prefetch packfile and start from
there.

The <t> is usually a seconds-since-epoch value, but may also be a
"friendly" date -- such as "midnight" or "yesterday" -- using the
existing date selection mechanism.
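
For illustration, either form below should work (example invocations
only; the option spellings follow the usage text added to gvfs-helper.c):

    git gvfs-helper --remote=origin prefetch --since=<seconds-since-epoch>
    git gvfs-helper --remote=origin prefetch --since=midnight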

Add a `gh_client__prefetch()` API to allow `git.exe` to easily call
prefetch (using the same long-running process as the immediate and
queued object fetches).

Expanded t5799 unit tests to include prefetch tests.  Test setup now
also builds some commits-and-trees packfiles for testing purposes with
well-known timestamps.

Expanded t/helper/test-gvfs-protocol.exe to support "/gvfs/prefetch"
REST API.

Massive refactor of the existing packfile handling in gvfs-helper.c to
reuse more code between "/gvfs/objects POST" and "/gvfs/prefetch".
With this we now properly name packfiles with their SHA1 checksum
rather than a date string.
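
For example, installed packfiles now end up named like this
(placeholders; the "vfs" and "prefetch" prefixes come from
create_final_packfile_pathnames() in this patch):

    <odb>/pack/vfs-<checksum>.pack                    # "/gvfs/objects" POST
    <odb>/pack/prefetch-<timestamp>-<checksum>.pack   # "/gvfs/prefetch"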

The refactor also addresses some of the confusing tempfile setup and
install_<result> code processing (introduced to handle the ambiguity
of how POST works with commit objects).

Update 2023-05-22 (v2.41.0): add '--no-rev-index' to 'index-pack' to avoid
writing the extra (unused) file.

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 gvfs-helper-client.c          |  129 +++-
 gvfs-helper-client.h          |   18 +
 gvfs-helper.c                 | 1179 +++++++++++++++++++++++++--------
 t/helper/test-gvfs-protocol.c |  318 ++++++++-
 t/t5799-gvfs-helper.sh        |  204 ++++--
 5 files changed, 1509 insertions(+), 339 deletions(-)

diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c
index ce437a64db3b74..4363b29c108859 100644
--- a/gvfs-helper-client.c
+++ b/gvfs-helper-client.c
@@ -28,13 +28,14 @@ static struct hashmap gh_server__subprocess_map;
 static struct object_directory *gh_client__chosen_odb;
 
 /*
- * The "objects" capability has 2 verbs: "get" and "post".
+ * The "objects" capability has verbs: "get" and "post" and "prefetch".
  */
 #define CAP_OBJECTS      (1u<<1)
 #define CAP_OBJECTS_NAME "objects"
 
 #define CAP_OBJECTS__VERB_GET1_NAME "get"
 #define CAP_OBJECTS__VERB_POST_NAME "post"
+#define CAP_OBJECTS__VERB_PREFETCH_NAME "prefetch"
 
 static int gh_client__start_fn(struct subprocess_entry *subprocess)
 {
@@ -133,6 +134,44 @@ static int gh_client__send__objects_get(struct child_process *process,
 	return 0;
 }
 
+/*
+ * Send a request to gvfs-helper to prefetch packfiles from either the
+ * cache-server or the main Git server using "/gvfs/prefetch".
+ *
+ *     objects.prefetch LF
+ *     [<seconds-since-epoch> LF]
+ *     <flush>
+ */
+static int gh_client__send__objects_prefetch(struct child_process *process,
+					     timestamp_t seconds_since_epoch)
+{
+	int err;
+
+	/*
+	 * We assume that all of the packet_ routines call error()
+	 * so that we don't have to.
+	 */
+
+	err = packet_write_fmt_gently(
+		process->in,
+		(CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_PREFETCH_NAME "\n"));
+	if (err)
+		return err;
+
+	if (seconds_since_epoch) {
+		err = packet_write_fmt_gently(process->in, "%" PRItime "\n",
+					      seconds_since_epoch);
+		if (err)
+			return err;
+	}
+
+	err = packet_flush_gently(process->in);
+	if (err)
+		return err;
+
+	return 0;
+}
+
 /*
  * Update the loose object cache to include the newly created
  * object.
@@ -181,7 +220,7 @@ static void gh_client__update_packed_git(const char *line)
 }
 
 /*
- * Both CAP_OBJECTS verbs return the same format response:
+ * CAP_OBJECTS verbs return the same format response:
  *
  *    <odb>
  *    <data>*
@@ -221,6 +260,8 @@ static int gh_client__objects__receive_response(
 	const char *v1;
 	char *line;
 	int len;
+	int nr_loose = 0;
+	int nr_packfile = 0;
 	int err = 0;
 
 	while (1) {
@@ -239,13 +280,13 @@ static int gh_client__objects__receive_response(
 		else if (starts_with(line, "packfile")) {
 			gh_client__update_packed_git(line);
 			ghc |= GHC__CREATED__PACKFILE;
-			*p_nr_packfile += 1;
+			nr_packfile++;
 		}
 
 		else if (starts_with(line, "loose")) {
 			gh_client__update_loose_cache(line);
 			ghc |= GHC__CREATED__LOOSE;
-			*p_nr_loose += 1;
+			nr_loose++;
 		}
 
 		else if (starts_with(line, "ok"))
@@ -259,6 +300,8 @@ static int gh_client__objects__receive_response(
 	}
 
 	*p_ghc = ghc;
+	*p_nr_loose = nr_loose;
+	*p_nr_packfile = nr_packfile;
 
 	return err;
 }
@@ -315,7 +358,7 @@ static struct gh_server__process *gh_client__find_long_running_process(
 	/*
 	 * Find an existing long-running process with the above command
 	 * line -or- create a new long-running process for this and
-	 * subsequent 'get' requests.
+	 * subsequent requests.
 	 */
 	if (!gh_server__subprocess_map_initialized) {
 		gh_server__subprocess_map_initialized = 1;
@@ -352,10 +395,14 @@ static struct gh_server__process *gh_client__find_long_running_process(
 
 void gh_client__queue_oid(const struct object_id *oid)
 {
-	// TODO consider removing this trace2.  it is useful for interactive
-	// TODO debugging, but may generate way too much noise for a data
-	// TODO event.
-	trace2_printf("gh_client__queue_oid: %s", oid_to_hex(oid));
+	/*
+	 * Keep this trace as a printf only, so that it goes to the
+	 * perf log, but not the event log.  It is useful for interactive
+	 * debugging, but generates way too much (unuseful) noise for the
+	 * database.
+	 */
+	if (trace2_is_enabled())
+		trace2_printf("gh_client__queue_oid: %s", oid_to_hex(oid));
 
 	if (!oidset_insert(&gh_client__oidset_queued, oid))
 		gh_client__oidset_count++;
@@ -436,10 +483,14 @@ int gh_client__get_immediate(const struct object_id *oid,
 	int nr_packfile = 0;
 	int err = 0;
 
-	// TODO consider removing this trace2.  it is useful for interactive
-	// TODO debugging, but may generate way too much noise for a data
-	// TODO event.
-	trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid));
+	/*
+	 * Keep this trace as a printf only, so that it goes to the
+	 * perf log, but not the event log.  It is useful for interactive
+	 * debugging, but generates way too much (unuseful) noise for the
+	 * database.
+	 */
+	if (trace2_is_enabled())
+		trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid));
 
 	entry = gh_client__find_long_running_process(CAP_OBJECTS);
 	if (!entry)
@@ -468,3 +519,55 @@ int gh_client__get_immediate(const struct object_id *oid,
 
 	return err;
 }
+
+/*
+ * Ask gvfs-helper to prefetch commits-and-trees packfiles since a
+ * given timestamp.
+ *
+ * If seconds_since_epoch is zero, gvfs-helper will scan the ODB for
+ * the last received prefetch and ask for ones newer than that.
+ */
+int gh_client__prefetch(timestamp_t seconds_since_epoch,
+			int *nr_packfiles_received)
+{
+	struct gh_server__process *entry;
+	struct child_process *process;
+	enum gh_client__created ghc;
+	int nr_loose = 0;
+	int nr_packfile = 0;
+	int err = 0;
+
+	entry = gh_client__find_long_running_process(CAP_OBJECTS);
+	if (!entry)
+		return -1;
+
+	trace2_region_enter("gh-client", "objects/prefetch", the_repository);
+	trace2_data_intmax("gh-client", the_repository, "prefetch/since",
+			   seconds_since_epoch);
+
+	process = &entry->subprocess.process;
+
+	sigchain_push(SIGPIPE, SIG_IGN);
+
+	err = gh_client__send__objects_prefetch(process, seconds_since_epoch);
+	if (!err)
+		err = gh_client__objects__receive_response(
+			process, &ghc, &nr_loose, &nr_packfile);
+
+	sigchain_pop(SIGPIPE);
+
+	if (err) {
+		subprocess_stop(&gh_server__subprocess_map,
+				(struct subprocess_entry *)entry);
+		FREE_AND_NULL(entry);
+	}
+
+	trace2_data_intmax("gh-client", the_repository,
+			   "prefetch/packfile_count", nr_packfile);
+	trace2_region_leave("gh-client", "objects/prefetch", the_repository);
+
+	if (nr_packfiles_received)
+		*nr_packfiles_received = nr_packfile;
+
+	return err;
+}
diff --git a/gvfs-helper-client.h b/gvfs-helper-client.h
index c1e38fad75f841..7692534ecda54c 100644
--- a/gvfs-helper-client.h
+++ b/gvfs-helper-client.h
@@ -66,4 +66,22 @@ void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr);
  */
 int gh_client__drain_queue(enum gh_client__created *p_ghc);
 
+/*
+ * Ask `gvfs-helper server` to fetch any "prefetch packs"
+ * available on the server more recent than the requested time.
+ *
+ * If seconds_since_epoch is zero, gvfs-helper will scan the ODB for
+ * the last received prefetch and ask for ones newer than that.
+ *
+ * A long-running background process is used to make subsequent requests
+ * (either prefetch or regular immediate/queued requests) more efficient.
+ *
+ * One or more packfiles will be created in the shared-cache ODB.
+ *
+ * Returns 0 on success, -1 on error.  Optionally also returns the
+ * number of prefetch packs received.
+ */
+int gh_client__prefetch(timestamp_t seconds_since_epoch,
+			int *nr_packfiles_received);
+
 #endif /* GVFS_HELPER_CLIENT_H */
diff --git a/gvfs-helper.c b/gvfs-helper.c
index e0e55b6b1d74d8..38429a35433d9f 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -22,7 +22,7 @@
 //
 //            error    := verify cache-server and abort if not well-known.
 //
-//            trust    := do not verify cache-server.  just use it.
+//            trust    := do not verify cache-server.  just use it, if set.
 //
 //            disable  := disable the cache-server and always use the main
 //                        Git server.
@@ -87,6 +87,24 @@
 //                       Number of retries after transient network errors.
 //                       Set to zero to disable such retries.
 //
+//     prefetch
+//
+//            Use "/gvfs/prefetch" REST API to fetch 1 or more commits-and-trees
+//            prefetch packs from the server.
+//
+//            <prefetch-options>:
+//
+//                 --since=<t>           // defaults to "0"
+//
+//                       Time in seconds since the epoch.  If omitted or
+//                       zero, the timestamp from the newest prefetch
+//                       packfile found in the shared-cache ODB is used.
+//                       (This is based upon the packfile name, not the
+//                       mtime.)
+//
+//                       The GVFS Protocol defines this value as a way to
+//                       request cached packfiles NEWER THAN this timestamp.
+//
 //     server
 //
 //            Interactive/sub-process mode.  Listen for a series of commands
@@ -116,20 +134,36 @@
 //
 //                 Each object will be created as a loose object in the ODB.
 //
+//                 Create 1 or more loose objects in the shared-cache ODB.
+//                 (The pathname of the selected ODB is reported at the
+//                 beginning of the response; this should match the pathname
+//                 given on the command line).
+//
+//                 git> objects.get
+//                 git> <oid>
+//                 git> <oid>
+//                 git> ...
+//                 git> <oid>
+//                 git> 0000
+//
+//                 git< odb <directory>
+//                 git< loose <oid>
+//                 git< loose <oid>
+//                 git< ...
+//                 git< loose <oid>
+//                 git< ok | partial | error <message>
+//                 git< 0000
+//
 //            Interactive verb: objects.post
 //
 //                 Fetch 1 or more objects, in bulk, using one or more
 //                 "/gvfs/objects" POST requests.
 //
-//            For both verbs, if a cache-server is configured, try it first.
-//            Optionally fallback to the main Git server.
-//
 //                 Create 1 or more loose objects and/or packfiles in the
-//                 shared-cache ODB.  (The pathname of the selected ODB is
-//                 reported at the beginning of the response; this should
-//                 match the pathname given on the command line).
+//                 shared-cache ODB.  A POST is allowed to respond with
+//                 either loose or packed objects.
 //
-//                 git> objects.get | objects.post
+//                 git> objects.post
 //                 git> <oid>
 //                 git> <oid>
 //                 git> ...
@@ -139,11 +173,31 @@
 //                 git< odb <directory>
 //                 git< loose <oid> | packfile <filename.pack>
 //                 git< loose <oid> | packfile <filename.pack>
-//                 gid< ...
+//                 git< ...
 //                 git< loose <oid> | packfile <filename.pack>
 //                 git< ok | partial | error <message>
 //                 git< 0000
 //
+//            Interactive verb: objects.prefetch
+//
+//                 Fetch 1 or more prefetch packs using a "/gvfs/prefetch"
+//                 request.
+//
+//                 git> objects.prefetch
+//                 git> <timestamp>            // optional
+//                 git> 0000
+//
+//                 git< odb <directory>
+//                 git< packfile <filename.pack>
+//                 git< packfile <filename.pack>
+//                 git< ...
+//                 git< packfile <filename.pack>
+//                 git< ok | error <message>
+//                 git< 0000
+//
+//            If a cache-server is configured, try it first.
+//            Optionally fallback to the main Git server.
+//
 //            [1] Documentation/technical/protocol-common.txt
 //            [2] Documentation/technical/long-running-process-protocol.txt
 //            [3] See GIT_TRACE_PACKET
@@ -185,11 +239,17 @@
 #include "abspath.h"
 #include "progress.h"
 #include "trace2.h"
+#include "wrapper.h"
+#include "packfile.h"
+#include "date.h"
+
+#define TR2_CAT "gvfs-helper"
 
 static const char * const main_usage[] = {
 	N_("git gvfs-helper [<main_options>] config      [<options>]"),
 	N_("git gvfs-helper [<main_options>] get         [<options>]"),
 	N_("git gvfs-helper [<main_options>] post        [<options>]"),
+	N_("git gvfs-helper [<main_options>] prefetch    [<options>]"),
 	N_("git gvfs-helper [<main_options>] server      [<options>]"),
 	NULL
 };
@@ -204,6 +264,11 @@ static const char *const objects_post_usage[] = {
 	NULL
 };
 
+static const char *const prefetch_usage[] = {
+	N_("git gvfs-helper [<main_options>] prefetch [<options>]"),
+	NULL
+};
+
 static const char *const server_usage[] = {
 	N_("git gvfs-helper [<main_options>] server [<options>]"),
 	NULL
@@ -248,6 +313,7 @@ enum gh__error_code {
 	GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE = 11,
 	GH__ERROR_CODE__SUBPROCESS_SYNTAX = 12,
 	GH__ERROR_CODE__INDEX_PACK_FAILED = 13,
+	GH__ERROR_CODE__COULD_NOT_INSTALL_PREFETCH = 14,
 };
 
 enum gh__cache_server_mode {
@@ -312,6 +378,8 @@ static const char *gh__server_type_label[GH__SERVER_TYPE__NR] = {
 };
 
 enum gh__objects_mode {
+	GH__OBJECTS_MODE__NONE = 0,
+
 	/*
 	 * Bulk fetch objects.
 	 *
@@ -331,6 +399,12 @@ enum gh__objects_mode {
 	 * object treatment).
 	 */
 	GH__OBJECTS_MODE__GET,
+
+	/*
+	 * Fetch one or more pre-computed "prefetch packs" containing
+	 * commits and trees.
+	 */
+	GH__OBJECTS_MODE__PREFETCH,
 };
 
 struct gh__azure_throttle
@@ -405,6 +479,7 @@ struct gh__request_params {
 	int b_write_to_file;      /* write to file=1 or strbuf=0 */
 	int b_permit_cache_server_if_defined;
 
+	enum gh__objects_mode objects_mode;
 	enum gh__server_type server_type;
 
 	int k_attempt; /* robust retry attempt */
@@ -419,15 +494,8 @@ struct gh__request_params {
 	struct strbuf *buffer;     /* for response content when strbuf */
 	struct strbuf tr2_label;   /* for trace2 regions */
 
-	struct strbuf loose_path;
 	struct object_id loose_oid;
 
-	struct strbuf temp_path_pack;
-	struct strbuf temp_path_idx;
-	struct strbuf final_path_pack;
-	struct strbuf final_path_idx;
-	struct strbuf final_packfile_filename;
-
 	/*
 	 * Note that I am putting all of the progress-related instance data
 	 * inside the request-params in the hope that we can eventually
@@ -467,13 +535,7 @@ struct gh__request_params {
 	.tempfile = NULL, \
 	.buffer = NULL, \
 	.tr2_label = STRBUF_INIT, \
-	.loose_path = STRBUF_INIT, \
 	.loose_oid = {{0}}, \
-	.temp_path_pack = STRBUF_INIT, \
-	.temp_path_idx = STRBUF_INIT, \
-	.final_path_pack = STRBUF_INIT, \
-	.final_path_idx = STRBUF_INIT, \
-	.final_packfile_filename = STRBUF_INIT, \
 	.progress_state = GH__PROGRESS_STATE__START, \
 	.progress_base_phase2_msg = STRBUF_INIT, \
 	.progress_base_phase3_msg = STRBUF_INIT, \
@@ -498,12 +560,6 @@ static void gh__request_params__release(struct gh__request_params *params)
 	params->buffer = NULL; /* we do not own this */
 
 	strbuf_release(&params->tr2_label);
-	strbuf_release(&params->loose_path);
-	strbuf_release(&params->temp_path_pack);
-	strbuf_release(&params->temp_path_idx);
-	strbuf_release(&params->final_path_pack);
-	strbuf_release(&params->final_path_idx);
-	strbuf_release(&params->final_packfile_filename);
 
 	strbuf_release(&params->progress_base_phase2_msg);
 	strbuf_release(&params->progress_base_phase3_msg);
@@ -594,9 +650,7 @@ static void gh__response_status__zero(struct gh__response_status *s)
 	s->azure = NULL;
 }
 
-static void install_packfile(struct gh__request_params *params,
-			     struct gh__response_status *status);
-static void install_loose(struct gh__request_params *params,
+static void install_result(struct gh__request_params *params,
 			  struct gh__response_status *status);
 
 /*
@@ -633,7 +687,7 @@ static void log_e2eid(struct gh__request_params *params,
 		strbuf_addstr(&key, "e2eid");
 		strbuf_addstr(&key, gh__server_type_label[params->server_type]);
 
-		trace2_data_string("gvfs-helper", NULL, key.buf,
+		trace2_data_string(TR2_CAT, NULL, key.buf,
 				   params->e2eid.buf);
 
 		strbuf_release(&key);
@@ -733,7 +787,7 @@ static void compute_retry_mode_from_http_response(
 		status->retry = GH__RETRY_MODE__HTTP_429;
 		status->ec = GH__ERROR_CODE__HTTP_429;
 
-		trace2_data_string("gvfs-helper", NULL, "error/http",
+		trace2_data_string(TR2_CAT, NULL, "error/http",
 				   status->error_message.buf);
 		return;
 
@@ -746,7 +800,7 @@ static void compute_retry_mode_from_http_response(
 		status->retry = GH__RETRY_MODE__HTTP_503;
 		status->ec = GH__ERROR_CODE__HTTP_503;
 
-		trace2_data_string("gvfs-helper", NULL, "error/http",
+		trace2_data_string(TR2_CAT, NULL, "error/http",
 				   status->error_message.buf);
 		return;
 
@@ -760,7 +814,7 @@ static void compute_retry_mode_from_http_response(
 	status->retry = GH__RETRY_MODE__HARD_FAIL;
 	status->ec = GH__ERROR_CODE__HTTP_OTHER;
 
-	trace2_data_string("gvfs-helper", NULL, "error/http",
+	trace2_data_string(TR2_CAT, NULL, "error/http",
 			   status->error_message.buf);
 	return;
 }
@@ -894,7 +948,7 @@ static void compute_retry_mode_from_curl_error(
 	status->retry = GH__RETRY_MODE__HARD_FAIL;
 	status->ec = GH__ERROR_CODE__CURL_ERROR;
 
-	trace2_data_string("gvfs-helper", NULL, "error/curl",
+	trace2_data_string(TR2_CAT, NULL, "error/curl",
 			   status->error_message.buf);
 	return;
 
@@ -904,7 +958,7 @@ static void compute_retry_mode_from_curl_error(
 	status->retry = GH__RETRY_MODE__TRANSIENT;
 	status->ec = GH__ERROR_CODE__CURL_ERROR;
 
-	trace2_data_string("gvfs-helper", NULL, "error/curl",
+	trace2_data_string(TR2_CAT, NULL, "error/curl",
 			   status->error_message.buf);
 	return;
 }
@@ -1098,7 +1152,7 @@ static void gh__run_one_slot(struct active_request_slot *slot,
 	params->progress_state = GH__PROGRESS_STATE__START;
 	strbuf_setlen(&params->e2eid, 0);
 
-	trace2_region_enter("gvfs-helper", key.buf, NULL);
+	trace2_region_enter(TR2_CAT, key.buf, NULL);
 
 	if (!start_active_slot(slot)) {
 		compute_retry_mode_from_curl_error(status,
@@ -1122,7 +1176,7 @@ static void gh__run_one_slot(struct active_request_slot *slot,
 			 * (such as when we request a commit).
 			 */
 			strbuf_addstr(&key, "/nr_bytes");
-			trace2_data_intmax("gvfs-helper", NULL,
+			trace2_data_intmax(TR2_CAT, NULL,
 					   key.buf,
 					   status->bytes_received);
 			strbuf_setlen(&key, old_len);
@@ -1132,16 +1186,10 @@ static void gh__run_one_slot(struct active_request_slot *slot,
 	if (params->progress)
 		stop_progress(&params->progress);
 
-	if (status->ec == GH__ERROR_CODE__OK && params->b_write_to_file) {
-		if (params->b_is_post &&
-		    !strcmp(status->content_type.buf,
-			    "application/x-git-packfile"))
-			install_packfile(params, status);
-		else
-			install_loose(params, status);
-	}
+	if (status->ec == GH__ERROR_CODE__OK && params->b_write_to_file)
+		install_result(params, status);
 
-	trace2_region_leave("gvfs-helper", key.buf, NULL);
+	trace2_region_leave(TR2_CAT, key.buf, NULL);
 
 	strbuf_release(&key);
 }
@@ -1288,7 +1336,7 @@ static void lookup_main_url(void)
 	 */
 	gh__global.main_url = transport_anonymize_url(gh__global.remote->url.v[0]);
 
-	trace2_data_string("gvfs-helper", NULL, "remote/url", gh__global.main_url);
+	trace2_data_string(TR2_CAT, NULL, "remote/url", gh__global.main_url);
 }
 
 static void do__http_get__gvfs_config(struct gh__response_status *status,
@@ -1315,10 +1363,23 @@ static void select_cache_server(void)
 	gh__global.cache_server_url = NULL;
 
 	if (gh__cmd_opts.cache_server_mode == GH__CACHE_SERVER_MODE__DISABLE) {
-		trace2_data_string("gvfs-helper", NULL, "cache/url", "disabled");
+		trace2_data_string(TR2_CAT, NULL, "cache/url", "disabled");
 		return;
 	}
 
+	if (!gvfs_cache_server_url || !*gvfs_cache_server_url) {
+		switch (gh__cmd_opts.cache_server_mode) {
+		default:
+		case GH__CACHE_SERVER_MODE__TRUST_WITHOUT_VERIFY:
+		case GH__CACHE_SERVER_MODE__VERIFY_DISABLE:
+			trace2_data_string(TR2_CAT, NULL, "cache/url", "unset");
+			return;
+
+		case GH__CACHE_SERVER_MODE__VERIFY_ERROR:
+			die("cache-server not set");
+		}
+	}
+
 	/*
 	 * If the cache-server and main Git server have the same URL, we
 	 * can silently disable the cache-server (by NOT setting the field
@@ -1326,14 +1387,14 @@ static void select_cache_server(void)
 	 */
 	if (!strcmp(gvfs_cache_server_url, gh__global.main_url)) {
 		gh__cmd_opts.try_fallback = 0;
-		trace2_data_string("gvfs-helper", NULL, "cache/url", "same");
+		trace2_data_string(TR2_CAT, NULL, "cache/url", "same");
 		return;
 	}
 
 	if (gh__cmd_opts.cache_server_mode ==
 	    GH__CACHE_SERVER_MODE__TRUST_WITHOUT_VERIFY) {
 		gh__global.cache_server_url = gvfs_cache_server_url;
-		trace2_data_string("gvfs-helper", NULL, "cache/url",
+		trace2_data_string(TR2_CAT, NULL, "cache/url",
 				   gvfs_cache_server_url);
 		return;
 	}
@@ -1374,7 +1435,7 @@ static void select_cache_server(void)
 
 	if (match) {
 		gh__global.cache_server_url = gvfs_cache_server_url;
-		trace2_data_string("gvfs-helper", NULL, "cache/url",
+		trace2_data_string(TR2_CAT, NULL, "cache/url",
 				   gvfs_cache_server_url);
 	}
 
@@ -1398,7 +1459,7 @@ static void select_cache_server(void)
 		else
 			warning("could not verify cache-server '%s'",
 				gvfs_cache_server_url);
-		trace2_data_string("gvfs-helper", NULL, "cache/url",
+		trace2_data_string(TR2_CAT, NULL, "cache/url",
 				   "disabled");
 	}
 
@@ -1587,27 +1648,14 @@ static void select_odb(void)
 }
 
 /*
- * Create a tempfile to stream the packfile into.
- *
- * We create a tempfile in the chosen ODB directory and let CURL
- * automatically stream data to the file.  If successful, we can
- * later rename it to a proper .pack and run "git index-pack" on
- * it to create the corresponding .idx file.
- *
- * TODO I would rather to just stream the packfile directly into
- * TODO "git index-pack --stdin" (and save some I/O) because it
- * TODO will automatically take care of the rename of both files
- * TODO and any other cleanup.  BUT INDEX-PACK WILL ONLY WRITE
- * TODO TO THE PRIMARY ODB -- it will not write into the alternates
- * TODO (this is considered bad form).  So we would need to add
- * TODO an option to index-pack to handle this.  I don't want to
- * TODO deal with this issue right now.
- *
- * TODO Consider using lockfile for this rather than naked tempfile.
+ * Create a unique tempfile or tempfile-pair inside the
+ * tempPacks directory.
  */
-static void create_tempfile_for_packfile(
-	struct gh__request_params *params,
-	struct gh__response_status *status)
+static void my_create_tempfile(
+	struct gh__response_status *status,
+	int b_fdopen,
+	const char *suffix1, struct tempfile **t1,
+	const char *suffix2, struct tempfile **t2)
 {
 	static unsigned int nth = 0;
 	static struct timeval tv = {0};
@@ -1617,15 +1665,15 @@ static void create_tempfile_for_packfile(
 
 	struct strbuf basename = STRBUF_INIT;
 	struct strbuf buf = STRBUF_INIT;
-	int len_p;
+	int len_tp;
 	enum scld_error scld;
 
 	gh__response_status__zero(status);
 
 	if (!nth) {
 		/*
-		 * Create a <date> string to use in the name of all packfiles
-		 * created by this process.
+		 * Create a unique <date> string to use in the name of all
+		 * tempfiles created by this process.
 		 */
 		gettimeofday(&tv, NULL);
 		secs = tv.tv_sec;
@@ -1638,84 +1686,114 @@ static void create_tempfile_for_packfile(
 	}
 
 	/*
-	 * Create a <basename> for this packfile using a series number <n>,
-	 * so that all of the chunks we download will group together.
+	 * Create a <basename> for this instance/pair using a series
+	 * number <n>.
 	 */
-	strbuf_addf(&basename, "vfs-%s-%04d", date, nth++);
+	strbuf_addf(&basename, "t-%s-%04d", date, nth++);
+
+	if (!suffix1 || !*suffix1)
+		suffix1 = "temp";
 
 	/*
-	 * We will stream the data into a managed tempfile() in:
+	 * Create full pathname as:
 	 *
-	 *     "<odb>/pack/tempPacks/vfs-<date>-<n>.temp"
+	 *     "<odb>/pack/tempPacks/<basename>.<suffix1>"
 	 */
 	strbuf_setlen(&buf, 0);
 	strbuf_addbuf(&buf, &gh__global.buf_odb_path);
 	strbuf_complete(&buf, '/');
-	strbuf_addstr(&buf, "pack/");
-	len_p = buf.len;
-	strbuf_addstr(&buf, "tempPacks/");
-	strbuf_addbuf(&buf, &basename);
-	strbuf_addstr(&buf, ".temp");
+	strbuf_addstr(&buf, "pack/tempPacks/");
+	len_tp = buf.len;
+	strbuf_addf(  &buf, "%s.%s", basename.buf, suffix1);
 
 	scld = safe_create_leading_directories(buf.buf);
 	if (scld != SCLD_OK && scld != SCLD_EXISTS) {
 		strbuf_addf(&status->error_message,
-			    "could not create directory for packfile: '%s'",
+			    "could not create directory for tempfile: '%s'",
 			    buf.buf);
 		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
 		goto cleanup;
 	}
 
-	params->tempfile = create_tempfile(buf.buf);
-	if (!params->tempfile) {
+	*t1 = create_tempfile(buf.buf);
+	if (!*t1) {
 		strbuf_addf(&status->error_message,
-			    "could not create tempfile for packfile: '%s'",
+			    "could not create tempfile: '%s'",
 			    buf.buf);
 		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
 		goto cleanup;
 	}
-
-	fdopen_tempfile(params->tempfile, "w");
-
-	/*
-	 * After the download is complete, we will need to steal the file
-	 * from the tempfile() class (so that it doesn't magically delete
-	 * it when we close the file handle) and then index it.
-	 *
-	 * We do this into the tempPacks directory to avoid contaminating
-	 * the real pack directory until we know there is no corruption.
-	 *
-	 *     "<odb>/pack/tempPacks/vfs-<date>-<n>.temp.pack"
-	 *     "<odb>/pack/tempPacks/vfs-<date>-<n>.temp.idx"
-	 */
-	strbuf_setlen(&params->temp_path_pack, 0);
-	strbuf_addf(&params->temp_path_pack, "%s.pack", buf.buf);
-
-	strbuf_setlen(&params->temp_path_idx, 0);
-	strbuf_addf(&params->temp_path_idx, "%s.idx", buf.buf);
+	if (b_fdopen)
+		fdopen_tempfile(*t1, "w");
 
 	/*
-	 * Later, if all goes well, we will install them as:
+	 * Optionally create a peer tempfile with the same basename.
+	 * (This is useful for prefetching .pack and .idx pairs.)
 	 *
-	 *     "<odb>/pack/vfs-<date>-<n>.pack"
-	 *     "<odb>/pack/vfs-<date>-<n>.idx"
+	 *     "<odb>/pack/tempPacks/<basename>.<suffix2>"
 	 */
-	strbuf_setlen(&buf, len_p);
-	strbuf_setlen(&params->final_path_pack, 0);
-	strbuf_addf(&params->final_path_pack, "%s%s.pack",
-		    buf.buf, basename.buf);
-	strbuf_setlen(&params->final_path_idx, 0);
-	strbuf_addf(&params->final_path_idx, "%s%s.idx",
-		    buf.buf, basename.buf);
-	strbuf_setlen(&params->final_packfile_filename, 0);
-	strbuf_addf(&params->final_packfile_filename, "%s.pack",
-		    basename.buf);
+	if (suffix2 && *suffix2 && t2) {
+		strbuf_setlen(&buf, len_tp);
+		strbuf_addf(  &buf, "%s.%s", basename.buf, suffix2);
+
+		*t2 = create_tempfile(buf.buf);
+		if (!*t2) {
+			strbuf_addf(&status->error_message,
+				    "could not create tempfile: '%s'",
+				    buf.buf);
+			status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
+			goto cleanup;
+		}
+		if (b_fdopen)
+			fdopen_tempfile(*t2, "w");
+	}
 
 cleanup:
 	strbuf_release(&buf);
 	strbuf_release(&basename);
 }
 
+/*
+ * Create pathnames to the final location of the .pack and .idx
+ * files in the ODB.  These are of the form:
+ *
+ *    "<odb>/pack/<term_1>-<term_2>[-<term_3>].<suffix>"
+ *
+ * For example, for prefetch packs, <term_2> will be the epoch
+ * timestamp and <term_3> will be the packfile hash.
+ */
+static void create_final_packfile_pathnames(
+	const char *term_1, const char *term_2, const char *term_3,
+	struct strbuf *pack_path, struct strbuf *idx_path,
+	struct strbuf *pack_filename)
+{
+	struct strbuf base = STRBUF_INIT;
+	struct strbuf path = STRBUF_INIT;
+
+	if (term_3 && *term_3)
+		strbuf_addf(&base, "%s-%s-%s", term_1, term_2, term_3);
+	else
+		strbuf_addf(&base, "%s-%s", term_1, term_2);
+
+	strbuf_setlen(pack_filename, 0);
+	strbuf_addf(  pack_filename, "%s.pack", base.buf);
+
+	strbuf_addbuf(&path, &gh__global.buf_odb_path);
+	strbuf_complete(&path, '/');
+	strbuf_addstr(&path, "pack/");
+
+	strbuf_setlen(pack_path, 0);
+	strbuf_addbuf(pack_path, &path);
+	strbuf_addf(  pack_path, "%s.pack", base.buf);
+
+	strbuf_setlen(idx_path, 0);
+	strbuf_addbuf(idx_path, &path);
+	strbuf_addf(  idx_path, "%s.idx", base.buf);
+
+	strbuf_release(&base);
+	strbuf_release(&path);
+}
+
 /*
  * Create a pathname to the loose object in the shared-cache ODB
  * with the given OID.  Try to "mkdir -p" to ensure the parent
@@ -1743,54 +1821,93 @@ static int create_loose_pathname_in_odb(struct strbuf *buf_path,
 	return 0;
 }
 
-/*
- * Create a tempfile to stream a loose object into.
- *
- * We create a tempfile in the chosen ODB directory and let CURL
- * automatically stream data to the file.
- *
- * We put it directly in the "<odb>/xx/" directory.
- */
-static void create_tempfile_for_loose(
-	struct gh__request_params *params,
-	struct gh__response_status *status)
+static void my_run_index_pack(struct gh__request_params *params UNUSED,
+			      struct gh__response_status *status,
+			      const struct strbuf *temp_path_pack,
+			      const struct strbuf *temp_path_idx,
+			      struct strbuf *packfile_checksum)
 {
-	static int nth = 0;
-	struct strbuf buf_path = STRBUF_INIT;
+	struct child_process ip = CHILD_PROCESS_INIT;
+	struct strbuf ip_stdout = STRBUF_INIT;
 
-	gh__response_status__zero(status);
+	strvec_push(&ip.args, "git");
+	strvec_push(&ip.args, "index-pack");
+
+	ip.err = -1;
+	ip.no_stderr = 1;
+
+	/* Skip generating the rev index, we don't need it. */
+	strvec_push(&ip.args, "--no-rev-index");
+
+	strvec_pushl(&ip.args, "-o", temp_path_idx->buf, NULL);
+	strvec_push(&ip.args, temp_path_pack->buf);
+	ip.no_stdin = 1;
+	ip.out = -1;
 
-	if (create_loose_pathname_in_odb(&buf_path, &params->loose_oid)) {
+	if (pipe_command(&ip, NULL, 0, &ip_stdout, 0, NULL, 0)) {
+		unlink(temp_path_pack->buf);
+		unlink(temp_path_idx->buf);
 		strbuf_addf(&status->error_message,
-			    "cannot create directory for loose object '%s'",
-			    buf_path.buf);
-		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
+			    "index-pack failed on '%s'",
+			    temp_path_pack->buf);
+		/*
+		 * Lets assume that index-pack failed because the
+		 * downloaded file is corrupt (truncated).
+		 *
+		 * Retry it as if the network had dropped.
+		 */
+		status->retry = GH__RETRY_MODE__TRANSIENT;
+		status->ec = GH__ERROR_CODE__INDEX_PACK_FAILED;
 		goto cleanup;
 	}
 
-	/* Remember the full path of the final destination. */
-	strbuf_setlen(&params->loose_path, 0);
-	strbuf_addbuf(&params->loose_path, &buf_path);
+	if (packfile_checksum) {
+		/*
+		 * stdout from index-pack should have the packfile hash.
+		 * Extract it and use it in the final packfile name.
+		 *
+		 * TODO What kind of validation should we do on the
+		 * TODO string and is there ever any other output besides
+		 * TODO just the checksum ?
+		 */
+		strbuf_trim_trailing_newline(&ip_stdout);
 
-	/*
-	 * Build a unique tempfile pathname based upon it.  We avoid
-	 * using lockfiles to avoid issues with stale locks after
-	 * crashes.
-	 */
-	strbuf_addf(&buf_path, ".%08u.%.06u.temp", getpid(), nth++);
+		strbuf_addbuf(packfile_checksum, &ip_stdout);
+	}
 
-	params->tempfile = create_tempfile(buf_path.buf);
-	if (!params->tempfile) {
-		strbuf_addstr(&status->error_message,
-			      "could not create tempfile for loose object");
-		status->ec = GH__ERROR_CODE__COULD_NOT_CREATE_TEMPFILE;
-		goto cleanup;
+cleanup:
+	strbuf_release(&ip_stdout);
+	child_process_clear(&ip);
+}
+
+static void my_finalize_packfile(struct gh__request_params *params,
+				 struct gh__response_status *status,
+				 const struct strbuf *temp_path_pack,
+				 const struct strbuf *temp_path_idx,
+				 struct strbuf *final_path_pack,
+				 struct strbuf *final_path_idx,
+				 struct strbuf *final_filename)
+{
+	if (finalize_object_file(temp_path_pack->buf, final_path_pack->buf) ||
+	    finalize_object_file(temp_path_idx->buf, final_path_idx->buf)) {
+		unlink(temp_path_pack->buf);
+		unlink(temp_path_idx->buf);
+		unlink(final_path_pack->buf);
+		unlink(final_path_idx->buf);
+		strbuf_addf(&status->error_message,
+			    "could not install packfile '%s'",
+			    final_path_pack->buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
+		return;
 	}
 
-	fdopen_tempfile(params->tempfile, "w");
+	if (params->result_list) {
+		struct strbuf result_msg = STRBUF_INIT;
 
-cleanup:
-	strbuf_release(&buf_path);
+		strbuf_addf(&result_msg, "packfile %s", final_filename->buf);
+		string_list_append(params->result_list, result_msg.buf);
+		strbuf_release(&result_msg);
+	}
 }
 
 /*
@@ -1800,93 +1917,343 @@ static void create_tempfile_for_loose(
 static void install_packfile(struct gh__request_params *params,
 			     struct gh__response_status *status)
 {
-	struct child_process ip = CHILD_PROCESS_INIT;
+	struct strbuf temp_path_pack = STRBUF_INIT;
+	struct strbuf temp_path_idx = STRBUF_INIT;
+	struct strbuf packfile_checksum = STRBUF_INIT;
+	struct strbuf final_path_pack = STRBUF_INIT;
+	struct strbuf final_path_idx = STRBUF_INIT;
+	struct strbuf final_filename = STRBUF_INIT;
+
+	gh__response_status__zero(status);
 
 	/*
-	 * When we request more than 1 object, the server should always
-	 * send us a packfile.
+	 * After the download is complete, we will need to steal the file
+	 * from the tempfile() class (so that it doesn't magically delete
+	 * it when we close the file handle) and then index it.
 	 */
-	if (strcmp(status->content_type.buf,
-		   "application/x-git-packfile")) {
-		strbuf_addf(&status->error_message,
-			    "install_packfile: received unknown content-type '%s'",
-			    status->content_type.buf);
-		status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE;
-		goto cleanup;
-	}
-
-	gh__response_status__zero(status);
+	strbuf_addf(&temp_path_pack, "%s.pack",
+		    get_tempfile_path(params->tempfile));
+	strbuf_addf(&temp_path_idx, "%s.idx",
+		    get_tempfile_path(params->tempfile));
 
 	if (rename_tempfile(&params->tempfile,
-			    params->temp_path_pack.buf) == -1) {
+			    temp_path_pack.buf) == -1) {
 		strbuf_addf(&status->error_message,
 			    "could not rename packfile to '%s'",
-			    params->temp_path_pack.buf);
+			    temp_path_pack.buf);
 		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
 		goto cleanup;
 	}
 
-	strvec_push(&ip.args, "index-pack");
-	if (gh__cmd_opts.show_progress)
-		strvec_push(&ip.args, "-v");
-	strvec_pushl(&ip.args, "-o", params->temp_path_idx.buf, NULL);
-	strvec_push(&ip.args, params->temp_path_pack.buf);
-	ip.git_cmd = 1;
-	ip.no_stdin = 1;
-	ip.no_stdout = 1;
+	my_run_index_pack(params, status, &temp_path_pack, &temp_path_idx,
+			  &packfile_checksum);
+	if (status->ec != GH__ERROR_CODE__OK)
+		goto cleanup;
+
+	create_final_packfile_pathnames("vfs", packfile_checksum.buf, NULL,
+					&final_path_pack, &final_path_idx,
+					&final_filename);
+	my_finalize_packfile(params, status,
+			     &temp_path_pack, &temp_path_idx,
+			     &final_path_pack, &final_path_idx,
+			     &final_filename);
+
+cleanup:
+	strbuf_release(&temp_path_pack);
+	strbuf_release(&temp_path_idx);
+	strbuf_release(&packfile_checksum);
+	strbuf_release(&final_path_pack);
+	strbuf_release(&final_path_idx);
+	strbuf_release(&final_filename);
+}
+
+/*
+ * bswap.h only defines big endian functions.
+ * The GVFS Protocol defines fields in little endian.
+ */
+static inline uint64_t my_get_le64(uint64_t le_val)
+{
+#if GIT_BYTE_ORDER == GIT_LITTLE_ENDIAN
+	return le_val;
+#else
+	return default_bswap64(le_val);
+#endif
+}
+
+#define MY_MIN(x,y) (((x) < (y)) ? (x) : (y))
+#define MY_MAX(x,y) (((x) > (y)) ? (x) : (y))
+
+/*
+ * Copy the `nr_bytes_total` from `fd_in` to `fd_out`.
+ *
+ * This could be used to extract a single packfile from
+ * a multipart file, for example.
+ */
+static int my_copy_fd_len(int fd_in, int fd_out, ssize_t nr_bytes_total)
+{
+	char buffer[8192];
+
+	while (nr_bytes_total > 0) {
+		ssize_t len_to_read = MY_MIN(nr_bytes_total, (ssize_t)sizeof(buffer));
+		ssize_t nr_read = xread(fd_in, buffer, len_to_read);
+
+		if (!nr_read)
+			break;
+		if (nr_read < 0)
+			return -1;
+
+		if (write_in_full(fd_out, buffer, nr_read) < 0)
+			return -1;
+
+		nr_bytes_total -= nr_read;
+	}
+
+	return 0;
+}
+
+/*
+ * Copy the `nr_bytes_total` from `fd_in` to `fd_out` AND save the
+ * final `tail_len` bytes in the given buffer.
+ *
+ * This could be used to extract a single packfile from
+ * a multipart file and read the final SHA into the buffer.
+ */
+static int my_copy_fd_len_tail(int fd_in, int fd_out, ssize_t nr_bytes_total,
+			       unsigned char *buf_tail, ssize_t tail_len)
+{
+	memset(buf_tail, 0, tail_len);
+
+	if (nr_bytes_total < tail_len)
+		return my_copy_fd_len(fd_in, fd_out, nr_bytes_total);
+
+	if (my_copy_fd_len(fd_in, fd_out, (nr_bytes_total - tail_len)) < 0)
+		return -1;
+
+	if (xread(fd_in, (char *)buf_tail, tail_len) != tail_len)
+		return -1;
+
+	if (write_in_full(fd_out, buf_tail, tail_len) < 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * See the protocol document for the per-packfile header.
+ */
+struct ph {
+	uint64_t timestamp;
+	uint64_t pack_len;
+	uint64_t idx_len;
+};
+
+/*
+ * Extract the next packfile from the multipack.
+ */
+static void extract_packfile_from_multipack(
+	struct gh__request_params *params,
+	struct gh__response_status *status,
+	int fd_multipack,
+	unsigned short k)
+{
+	struct ph ph;
+	struct tempfile *tempfile_pack = NULL;
+	struct tempfile *tempfile_idx = NULL;
+	int result = -1;
+	int b_no_idx_in_multipack;
+	struct object_id packfile_checksum;
+	char hex_checksum[GIT_MAX_HEXSZ + 1];
+	struct strbuf buf_timestamp = STRBUF_INIT;
+	struct strbuf temp_path_pack = STRBUF_INIT;
+	struct strbuf temp_path_idx = STRBUF_INIT;
+	struct strbuf final_path_pack = STRBUF_INIT;
+	struct strbuf final_path_idx = STRBUF_INIT;
+	struct strbuf final_filename = STRBUF_INIT;
+
+	if (xread(fd_multipack, &ph, sizeof(ph)) != sizeof(ph)) {
+		strbuf_addf(&status->error_message,
+			    "could not read header for packfile[%d] in multipack",
+			    k);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PREFETCH;
+		goto done;
+	}
+
+	ph.timestamp = my_get_le64(ph.timestamp);
+	ph.pack_len = my_get_le64(ph.pack_len);
+	ph.idx_len = my_get_le64(ph.idx_len);
+
+	if (!ph.pack_len) {
+		strbuf_addf(&status->error_message,
+			    "packfile[%d]: zero length packfile?", k);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PREFETCH;
+		goto done;
+	}
+
+	b_no_idx_in_multipack = (ph.idx_len == maximum_unsigned_value_of_type(uint64_t) ||
+				 ph.idx_len == 0);
+
+	if (b_no_idx_in_multipack) {
+		my_create_tempfile(status, 0, "pack", &tempfile_pack, NULL, NULL);
+		if (!tempfile_pack)
+			goto done;
+	} else {
+		/* create a pair of tempfiles with the same basename */
+		my_create_tempfile(status, 0, "pack", &tempfile_pack, "idx", &tempfile_idx);
+		if (!tempfile_pack || !tempfile_idx)
+			goto done;
+	}
 
 	/*
-	 * Note that I DO NOT have a trace2 region around the
-	 * index-pack process by itself.  Currently, we are inside the
-	 * trace2 region for running the request and that's fine.
-	 * Later, if/when we stream the download directly to
-	 * index-pack, it will be inside under the same region anyway.
-	 * So, I'm not going to introduce it here.
+	 * Copy the current packfile from the open stream and capture
+	 * the checksum.
+	 *
+	 * TODO This assumes that the checksum is SHA1.  Fix this if/when
+	 * TODO Git converts to SHA256.
 	 */
-	if (run_command(&ip)) {
-		unlink(params->temp_path_pack.buf);
-		unlink(params->temp_path_idx.buf);
+	result = my_copy_fd_len_tail(fd_multipack,
+				     get_tempfile_fd(tempfile_pack),
+				     ph.pack_len,
+				     packfile_checksum.hash,
+				     GIT_SHA1_RAWSZ);
+	packfile_checksum.algo = GIT_HASH_SHA1;
+
+	if (result < 0) {
 		strbuf_addf(&status->error_message,
-			    "index-pack failed on '%s'",
-			    params->temp_path_pack.buf);
+			    "could not extract packfile[%d] from multipack",
+			    k);
+		goto done;
+	}
+	strbuf_addstr(&temp_path_pack, get_tempfile_path(tempfile_pack));
+	close_tempfile_gently(tempfile_pack);
+
+	oid_to_hex_r(hex_checksum, &packfile_checksum);
+
+	if (b_no_idx_in_multipack) {
 		/*
-		 * Lets assume that index-pack failed because the
-		 * downloaded file is corrupt (truncated).
-		 *
-		 * Retry it as if the network had dropped.
+		 * The server did not send the corresponding .idx, so
+		 * we have to compute it ourselves.
 		 */
-		status->retry = GH__RETRY_MODE__TRANSIENT;
-		status->ec = GH__ERROR_CODE__INDEX_PACK_FAILED;
+		strbuf_addbuf(&temp_path_idx, &temp_path_pack);
+		strbuf_strip_suffix(&temp_path_idx, ".pack");
+		strbuf_addstr(&temp_path_idx, ".idx");
+
+		my_run_index_pack(params, status,
+				  &temp_path_pack, &temp_path_idx,
+				  NULL);
+		if (status->ec != GH__ERROR_CODE__OK)
+			goto done;
+
+	} else {
+		/*
+		 * Server send the .idx immediately after the .pack in the
+		 * data stream.  I'm tempted to verify it, but that defeats
+		 * the purpose of having it cached...
+		 */
+		if (my_copy_fd_len(fd_multipack, get_tempfile_fd(tempfile_idx),
+				   ph.idx_len) < 0) {
+			strbuf_addf(&status->error_message,
+				    "could not extract index[%d] in multipack",
+				    k);
+			status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PREFETCH;
+			goto done;
+		}
+
+		strbuf_addstr(&temp_path_idx, get_tempfile_path(tempfile_idx));
+		close_tempfile_gently(tempfile_idx);
+	}
+
+	strbuf_addf(&buf_timestamp, "%u", (unsigned int)ph.timestamp);
+	create_final_packfile_pathnames("prefetch", buf_timestamp.buf, hex_checksum,
+					&final_path_pack, &final_path_idx,
+					&final_filename);
+	strbuf_release(&buf_timestamp);
+
+	my_finalize_packfile(params, status,
+			     &temp_path_pack, &temp_path_idx,
+			     &final_path_pack, &final_path_idx,
+			     &final_filename);
+
+done:
+	delete_tempfile(&tempfile_pack);
+	delete_tempfile(&tempfile_idx);
+	strbuf_release(&temp_path_pack);
+	strbuf_release(&temp_path_idx);
+	strbuf_release(&final_path_pack);
+	strbuf_release(&final_path_idx);
+	strbuf_release(&final_filename);
+}
+
+/*
+ * Cut apart the received multipart response into individual packfiles
+ * and install each one.
+ */
+static void install_prefetch(struct gh__request_params *params,
+			     struct gh__response_status *status)
+{
+	static unsigned char v1_h[6] = { 'G', 'P', 'R', 'E', ' ', 0x01 };
+
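+	/*
+	 * Multipack header: a 6-byte magic+version ("GPRE " 0x01) followed
+	 * by a 2-byte little-endian count of packfiles in the stream.
+	 */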
+	struct mh {
+		unsigned char h[6];
+		unsigned char np[2];
+	};
+
+	struct mh mh;
+	unsigned short np;
+	unsigned short k;
+	int fd = -1;
+
+	struct strbuf temp_path_mp = STRBUF_INIT;
+
+	/*
+	 * Steal the multi-part file from the tempfile class.
+	 */
+	strbuf_addf(&temp_path_mp, "%s.mp", get_tempfile_path(params->tempfile));
+	if (rename_tempfile(&params->tempfile, temp_path_mp.buf) == -1) {
+		strbuf_addf(&status->error_message,
+			    "could not rename prefetch tempfile to '%s'",
+			    temp_path_mp.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PREFETCH;
 		goto cleanup;
 	}
 
-	if (finalize_object_file(params->temp_path_pack.buf,
-				 params->final_path_pack.buf) ||
-	    finalize_object_file(params->temp_path_idx.buf,
-				 params->final_path_idx.buf)) {
-		unlink(params->temp_path_pack.buf);
-		unlink(params->temp_path_idx.buf);
-		unlink(params->final_path_pack.buf);
-		unlink(params->final_path_idx.buf);
+	fd = git_open_cloexec(temp_path_mp.buf, O_RDONLY);
+	if (fd == -1) {
 		strbuf_addf(&status->error_message,
-			    "could not install packfile '%s'",
-			    params->final_path_pack.buf);
-		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PACKFILE;
+			    "could not reopen prefetch tempfile '%s'",
+			    temp_path_mp.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PREFETCH;
 		goto cleanup;
 	}
 
+	if ((xread(fd, &mh, sizeof(mh)) != sizeof(mh)) ||
+	    (memcmp(mh.h, &v1_h, sizeof(mh.h)))) {
+		strbuf_addstr(&status->error_message,
+			      "invalid prefetch multipart header");
+		goto cleanup;
+	}
 
-	if (params->result_list) {
-		struct strbuf result_msg = STRBUF_INIT;
+	np = (unsigned short)mh.np[0] + ((unsigned short)mh.np[1] << 8);
+	if (np)
+		trace2_data_intmax(TR2_CAT, NULL,
+				   "prefetch/packfile_count", np);
 
-		strbuf_addf(&result_msg, "packfile %s",
-			    params->final_packfile_filename.buf);
-		string_list_append(params->result_list, result_msg.buf);
-		strbuf_release(&result_msg);
+	if (gh__cmd_opts.show_progress)
+		params->progress = start_progress("Installing prefetch packfiles", np);
+
+	for (k = 0; k < np; k++) {
+		extract_packfile_from_multipack(params, status, fd, k);
+		display_progress(params->progress, k + 1);
+		if (status->ec != GH__ERROR_CODE__OK)
+			break;
 	}
+	stop_progress(&params->progress);
 
 cleanup:
-	child_process_clear(&ip);
+	if (fd != -1)
+		close(fd);
+
+	unlink(temp_path_mp.buf);
+	strbuf_release(&temp_path_mp);
 }
 
 /*
@@ -1923,21 +2290,7 @@ static void install_loose(struct gh__request_params *params,
 			  struct gh__response_status *status)
 {
 	struct strbuf tmp_path = STRBUF_INIT;
-
-	/*
-	 * We expect a loose object when we do a GET -or- when we
-	 * do a POST with only 1 object.
-	 *
-	 * Note that this content type is singular, not plural.
-	 */
-	if (strcmp(status->content_type.buf,
-		   "application/x-git-loose-object")) {
-		strbuf_addf(&status->error_message,
-			    "install_loose: received unknown content-type '%s'",
-			    status->content_type.buf);
-		status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE;
-		return;
-	}
+	struct strbuf loose_path = STRBUF_INIT;
 
 	gh__response_status__zero(status);
 
@@ -1975,11 +2328,19 @@ static void install_loose(struct gh__request_params *params,
 	 * collision we have to assume something else is happening in
 	 * parallel and we lost the race.  And that's OK.
 	 */
-	if (finalize_object_file(tmp_path.buf, params->loose_path.buf)) {
+	if (create_loose_pathname_in_odb(&loose_path, &params->loose_oid)) {
+		strbuf_addf(&status->error_message,
+			    "cannot create directory for loose object '%s'",
+			    loose_path.buf);
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE;
+		goto cleanup;
+	}
+
+	if (finalize_object_file(tmp_path.buf, loose_path.buf)) {
 		unlink(tmp_path.buf);
 		strbuf_addf(&status->error_message,
 			    "could not install loose object '%s'",
-			    params->loose_path.buf);
+			    loose_path.buf);
 		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE;
 		goto cleanup;
 	}
@@ -1995,6 +2356,57 @@ static void install_loose(struct gh__request_params *params,
 
 cleanup:
 	strbuf_release(&tmp_path);
+	strbuf_release(&loose_path);
+}
+
+static void install_result(struct gh__request_params *params,
+			   struct gh__response_status *status)
+{
+	if (params->objects_mode == GH__OBJECTS_MODE__PREFETCH) {
+		/*
+		 * The "gvfs/prefetch" API is the only thing that sends
+		 * these multi-part packfiles.  According to the protocol
+		 * documentation, they will have this x- content type.
+		 *
+		 * However, it appears that there is a BUG in the origin
+		 * server causing it to sometimes send "text/html" instead.
+		 * So, we silently handle both.
+		 */
+		if (!strcmp(status->content_type.buf,
+			    "application/x-gvfs-timestamped-packfiles-indexes")) {
+			install_prefetch(params, status);
+			return;
+		}
+
+		if (!strcmp(status->content_type.buf, "text/html")) {
+			install_prefetch(params, status);
+			return;
+		}
+	}
+
+	if (!strcmp(status->content_type.buf, "application/x-git-packfile")) {
+		assert(params->b_is_post);
+		assert(params->objects_mode == GH__OBJECTS_MODE__POST);
+
+		install_packfile(params, status);
+		return;
+	}
+
+	if (!strcmp(status->content_type.buf,
+		    "application/x-git-loose-object")) {
+		/*
+		 * We get these for "gvfs/objects" GET and POST requests.
+		 *
+		 * Note that this content type is singular, not plural.
+		 */
+		install_loose(params, status);
+		return;
+	}
+
+	strbuf_addf(&status->error_message,
+		    "install_result: received unknown content-type '%s'",
+		    status->content_type.buf);
+	status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE;
 }
 
 /*
@@ -2060,7 +2472,7 @@ static size_t parse_resp_hdr(char *buffer, size_t size, size_t nitems,
 		 * Other servers have similar sets of values, but I haven't
 		 * compared them in depth.
 		 */
-		// trace2_printf("Throttle: %s %s", key.buf, val.buf);
+		// trace2_printf("%s: Throttle: %s %s", TR2_CAT, key.buf, val.buf);
 
 		if (!strcmp(key.buf, "X-RateLimit-Resource")) {
 			/*
@@ -2071,7 +2483,7 @@ static size_t parse_resp_hdr(char *buffer, size_t size, size_t nitems,
 			strbuf_addstr(&key, "ratelimit/resource");
 			strbuf_addstr(&key, gh__server_type_label[params->server_type]);
 
-			trace2_data_string("gvfs-helper", NULL, key.buf, val.buf);
+			trace2_data_string(TR2_CAT, NULL, key.buf, val.buf);
 		}
 
 		else if (!strcmp(key.buf, "X-RateLimit-Delay")) {
@@ -2087,7 +2499,7 @@ static size_t parse_resp_hdr(char *buffer, size_t size, size_t nitems,
 
 			git_parse_ulong(val.buf, &tarpit_delay_ms);
 
-			trace2_data_intmax("gvfs-helper", NULL, key.buf, tarpit_delay_ms);
+			trace2_data_intmax(TR2_CAT, NULL, key.buf, tarpit_delay_ms);
 		}
 
 		else if (!strcmp(key.buf, "X-RateLimit-Limit")) {
@@ -2185,7 +2597,7 @@ static void do_throttle_spin(struct gh__request_params *params,
 
 	strbuf_addstr(&region, tr2_label);
 	strbuf_addstr(&region, gh__server_type_label[params->server_type]);
-	trace2_region_enter("gvfs-helper", region.buf, NULL);
+	trace2_region_enter(TR2_CAT, region.buf, NULL);
 
 	if (gh__cmd_opts.show_progress)
 		progress = start_progress(progress_msg, duration);
@@ -2201,7 +2613,7 @@ static void do_throttle_spin(struct gh__request_params *params,
 	display_progress(progress, duration);
 	stop_progress(&progress);
 
-	trace2_region_leave("gvfs-helper", region.buf, NULL);
+	trace2_region_leave(TR2_CAT, region.buf, NULL);
 	strbuf_release(&region);
 }
 
@@ -2363,11 +2775,7 @@ static void do_req(const char *url_base,
 		if (params->tempfile)
 			delete_tempfile(&params->tempfile);
 
-		if (params->b_is_post)
-			create_tempfile_for_packfile(params, status);
-
-		create_tempfile_for_loose(params, status);
-
+		my_create_tempfile(status, 1, NULL, &params->tempfile, NULL, NULL);
 		if (!params->tempfile || status->ec != GH__ERROR_CODE__OK)
 			return;
 	} else {
@@ -2614,6 +3022,7 @@ static void do__http_get__gvfs_config(struct gh__response_status *status,
 	/* cache-servers do not handle gvfs/config REST calls */
 	params.b_permit_cache_server_if_defined = 0;
 	params.buffer = config_data;
+	params.objects_mode = GH__OBJECTS_MODE__NONE;
 
 	params.object_count = 1; /* a bit of a lie */
 
@@ -2678,6 +3087,7 @@ static void do__http_get__gvfs_object(struct gh__response_status *status,
 	params.b_is_post = 0;
 	params.b_write_to_file = 1;
 	params.b_permit_cache_server_if_defined = 1;
+	params.objects_mode = GH__OBJECTS_MODE__GET;
 
 	params.object_count = 1;
 
@@ -2734,6 +3144,7 @@ static void do__http_post__gvfs_objects(struct gh__response_status *status,
 	params.b_is_post = 1;
 	params.b_write_to_file = 1;
 	params.b_permit_cache_server_if_defined = 1;
+	params.objects_mode = GH__OBJECTS_MODE__POST;
 
 	params.post_payload = &jw_req.json;
 
@@ -2770,6 +3181,126 @@ static void do__http_post__gvfs_objects(struct gh__response_status *status,
 	jw_release(&jw_req);
 }
 
+struct find_last_data {
+	timestamp_t timestamp;
+	int nr_files;
+};
+
+static void cb_find_last(const char *full_path UNUSED, size_t full_path_len UNUSED,
+			 const char *file_path, void *void_data)
+{
+	struct find_last_data *data = void_data;
+	const char *val;
+	timestamp_t t;
+
+	if (!skip_prefix(file_path, "prefetch-", &val))
+		return;
+	if (!ends_with(val, ".pack"))
+		return;
+
+	data->nr_files++;
+
+	/*
+	 * We expect prefetch packfiles named like:
+	 *
+	 *     prefetch-<seconds>-<checksum>.pack
+	 */
+	t = strtol(val, NULL, 10);
+
+	data->timestamp = MY_MAX(t, data->timestamp);
+}
+
+/*
+ * Find the server timestamp on the last prefetch packfile that
+ * we have in the ODB.
+ *
+ * TODO I'm going to assume that all prefetch packs are created
+ * TODO equal and take the one with the largest t value.
+ * TODO
+ * TODO Or should we look for one marked with .keep ?
+ *
+ * TODO Alternatively, should we maybe get the 2nd largest?
+ * TODO (Or maybe subtract an hour delta from the largest?)
+ * TODO
+ * TODO Since each cache-server maintains its own set of prefetch
+ * TODO packs (such that 2 requests may hit 2 different
+ * TODO load-balanced servers and get different answers (with or
+ * TODO without clock-skew issues)), is it possible for us to miss
+ * TODO the absolute fringe of new commits and trees?
+ * TODO
+ * TODO That is, since the cache-server generates hourly prefetch
+ * TODO packs, we could do a prefetch and be up-to-date, but then
+ * TODO do the main fetch and hit a different cache/main server
+ * TODO and be behind by as much as an hour and have to demand-
+ * TODO load the commits/trees.
+ *
+ * TODO Alternatively, should we compare the last timestamp found
+ * TODO with "now" and silently do nothing if within an epsilon?
+ */
+static void find_last_prefetch_timestamp(timestamp_t *last)
+{
+	struct find_last_data data;
+
+	memset(&data, 0, sizeof(data));
+
+	for_each_file_in_pack_dir(gh__global.buf_odb_path.buf, cb_find_last, &data);
+
+	*last = data.timestamp;
+}
+
+/*
+ * Call "gvfs/prefetch[?lastPackTimestamp=<secondsSinceEpoch>]" REST API to
+ * fetch a series of packfiles and write them to the ODB.
+ *
+ * Return a list of packfile names.
+ */
+static void do__http_get__gvfs_prefetch(struct gh__response_status *status,
+					timestamp_t seconds_since_epoch,
+					struct string_list *result_list)
+{
+	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
+	struct strbuf component_url = STRBUF_INIT;
+
+	gh__response_status__zero(status);
+
+	strbuf_addstr(&component_url, "gvfs/prefetch");
+
+	if (!seconds_since_epoch)
+		find_last_prefetch_timestamp(&seconds_since_epoch);
+	if (seconds_since_epoch)
+		strbuf_addf(&component_url, "?lastPackTimestamp=%"PRItime,
+			    seconds_since_epoch);
+
+	params.b_is_post = 0;
+	params.b_write_to_file = 1;
+	params.b_permit_cache_server_if_defined = 1;
+	params.objects_mode = GH__OBJECTS_MODE__PREFETCH;
+
+	params.object_count = -1;
+
+	params.result_list = result_list;
+
+	params.headers = http_copy_default_headers();
+	params.headers = curl_slist_append(params.headers,
+					   "X-TFS-FedAuthRedirect: Suppress");
+	params.headers = curl_slist_append(params.headers,
+					   "Pragma: no-cache");
+	params.headers = curl_slist_append(params.headers,
+					   "Accept: application/x-gvfs-timestamped-packfiles-indexes");
+
+	if (gh__cmd_opts.show_progress)
+		strbuf_addf(&params.progress_base_phase3_msg,
+			    "Prefetch %"PRItime" (%s)",
+			    seconds_since_epoch,
+			    show_date(seconds_since_epoch, 0,
+				      DATE_MODE(ISO8601)));
+
+	do_req__with_fallback(component_url.buf, &params, status);
+
+	gh__request_params__release(&params);
+	strbuf_release(&component_url);
+}
+
 /*
  * Drive one or more HTTP GET requests to fetch the objects
  * in the given OIDSET.  These are received into loose objects.
@@ -3070,7 +3601,83 @@ static enum gh__error_code do_sub_cmd__post(int argc, const char **argv)
 }
 
 /*
- * Handle the 'objects.get' and 'objects.post' verbs in "server mode".
+ * Interpret the given string as a timestamp and compute an absolute
+ * UTC-seconds-since-epoch value (and without TZ).
+ *
+ * Note that the gvfs/prefetch API only accepts seconds since epoch,
+ * so that is all we really need here. But there is a tradition of
+ * various Git commands allowing a variety of formats for args like
+ * this.  For example, see the `--date` arg in `git commit`.  We allow
+ * these other forms mainly for testing purposes.
+ */
+static int my_parse_since(const char *since, timestamp_t *p_timestamp)
+{
+	int offset = 0;
+	int errors = 0;
+	unsigned long t;
+
+	if (!parse_date_basic(since, p_timestamp, &offset))
+		return 0;
+
+	t = approxidate_careful(since, &errors);
+	if (!errors) {
+		*p_timestamp = t;
+		return 0;
+	}
+
+	return -1;
+}
+
+/*
+ * Ask the server for all available packfiles -or- all available since
+ * the given timestamp.
+ */
+static enum gh__error_code do_sub_cmd__prefetch(int argc, const char **argv)
+{
+	static const char *since_str;
+	static struct option prefetch_options[] = {
+		OPT_STRING(0, "since", &since_str, N_("since"), N_("seconds since epoch")),
+		OPT_END(),
+	};
+
+	struct gh__response_status status = GH__RESPONSE_STATUS_INIT;
+	struct string_list result_list = STRING_LIST_INIT_DUP;
+	enum gh__error_code ec = GH__ERROR_CODE__OK;
+	timestamp_t seconds_since_epoch = 0;
+	size_t k;
+
+	trace2_cmd_mode("prefetch");
+
+	if (argc > 1 && !strcmp(argv[1], "-h"))
+		usage_with_options(prefetch_usage, prefetch_options);
+
+	argc = parse_options(argc, argv, NULL, prefetch_options, prefetch_usage, 0);
+	if (since_str && *since_str) {
+		if (my_parse_since(since_str, &seconds_since_epoch))
+			die("could not parse 'since' field");
+	}
+
+	finish_init(1);
+
+	do__http_get__gvfs_prefetch(&status, seconds_since_epoch, &result_list);
+
+	ec = status.ec;
+
+	for (k = 0; k < result_list.nr; k++)
+		printf("%s\n", result_list.items[k].string);
+
+	if (ec != GH__ERROR_CODE__OK)
+		error("prefetch: %s", status.error_message.buf);
+
+	gh__response_status__release(&status);
+	string_list_clear(&result_list, 0);
+
+	return ec;
+}
+
+/*
+ * Handle the 'objects.get' and 'objects.post' and 'objects.prefetch'
+ * verbs in "server mode".
  *
  * Only call error() and set ec for hard errors where we cannot
  * communicate correctly with the foreground client process.  Pass any
@@ -3090,45 +3697,73 @@ static enum gh__error_code do_server_subprocess__objects(const char *verb_line)
 	size_t k;
 	enum gh__objects_mode objects_mode;
 	unsigned long nr_oid_total = 0;
+	timestamp_t seconds_since_epoch = 0;
 
 	if (!strcmp(verb_line, "objects.get"))
 		objects_mode = GH__OBJECTS_MODE__GET;
 	else if (!strcmp(verb_line, "objects.post"))
 		objects_mode = GH__OBJECTS_MODE__POST;
+	else if (!strcmp(verb_line, "objects.prefetch"))
+		objects_mode = GH__OBJECTS_MODE__PREFETCH;
 	else {
 		error("server: unexpected objects-mode verb '%s'", verb_line);
 		ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
 		goto cleanup;
 	}
 
-	while (1) {
-		len = packet_read_line_gently(0, NULL, &line);
-		if (len < 0 || !line)
-			break;
+	switch (objects_mode) {
+	case GH__OBJECTS_MODE__GET:
+	case GH__OBJECTS_MODE__POST:
+		while (1) {
+			len = packet_read_line_gently(0, NULL, &line);
+			if (len < 0 || !line)
+				break;
 
-		if (get_oid_hex(line, &oid)) {
-			error("server: invalid oid syntax '%s'", line);
-			ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
+			if (get_oid_hex(line, &oid)) {
+				error("server: invalid oid syntax '%s'", line);
+				ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
+				goto cleanup;
+			}
+
+			if (!oidset_insert(&oids, &oid))
+				nr_oid_total++;
+		}
+
+		if (!nr_oid_total) {
+			/* if zero objects requested, trivial OK. */
+			if (packet_write_fmt_gently(1, "ok\n")) {
+				error("server: cannot write 'get' result to client");
+				ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
+			} else
+				ec = GH__ERROR_CODE__OK;
 			goto cleanup;
 		}
 
-		if (!oidset_insert(&oids, &oid))
-			nr_oid_total++;
-	}
+		if (objects_mode == GH__OBJECTS_MODE__GET)
+			do__http_get__fetch_oidset(&status, &oids,
+						   nr_oid_total, &result_list);
+		else
+			do__http_post__fetch_oidset(&status, &oids,
+						    nr_oid_total, &result_list);
+		break;
 
-	if (!nr_oid_total) {
-		if (packet_write_fmt_gently(1, "ok\n")) {
-			error("server: cannot write 'get' result to client");
-			ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX;
-		} else
-			ec = GH__ERROR_CODE__OK;
-		goto cleanup;
-	}
+	case GH__OBJECTS_MODE__PREFETCH:
+		/* get optional timestamp line */
+		while (1) {
+			len = packet_read_line_gently(0, NULL, &line);
+			if (len < 0 || !line)
+				break;
 
-	if (objects_mode == GH__OBJECTS_MODE__GET)
-		do__http_get__fetch_oidset(&status, &oids, nr_oid_total, &result_list);
-	else
-		do__http_post__fetch_oidset(&status, &oids, nr_oid_total, &result_list);
+			seconds_since_epoch = strtoul(line, NULL, 10);
+		}
+
+		do__http_get__gvfs_prefetch(&status, seconds_since_epoch,
+					    &result_list);
+		break;
+
+	default:
+		BUG("unexpected object_mode in switch '%d'", objects_mode);
+	}
 
 	/*
 	 * Write pathname of the ODB where we wrote all of the objects
@@ -3353,12 +3988,16 @@ static enum gh__error_code do_sub_cmd(int argc, const char **argv)
 	if (!strcmp(argv[0], "config"))
 		return do_sub_cmd__config(argc, argv);
 
+	if (!strcmp(argv[0], "prefetch"))
+		return do_sub_cmd__prefetch(argc, argv);
+
+	/*
+	 * server mode is for talking with git.exe via the "gh_client_" API
+	 * using packet-line format.
+	 */
 	if (!strcmp(argv[0], "server"))
 		return do_sub_cmd__server(argc, argv);
 
-	// TODO have "test" mode that could be used to drive
-	// TODO unit testing.
-
 	return GH__ERROR_CODE__USAGE;
 }
 
diff --git a/t/helper/test-gvfs-protocol.c b/t/helper/test-gvfs-protocol.c
index 99cb5b7e0dd515..8a83b5e27c6a1b 100644
--- a/t/helper/test-gvfs-protocol.c
+++ b/t/helper/test-gvfs-protocol.c
@@ -11,7 +11,9 @@
 #include "strbuf.h"
 #include "string-list.h"
 #include "trace2.h"
+#include "copy.h"
 #include "object.h"
+#include "object-file.h"
 #include "object-store.h"
 #include "replace-object.h"
 #include "repository.h"
@@ -22,6 +24,7 @@
 #include "date.h"
 #include "wrapper.h"
 #include "git-zlib.h"
+#include "packfile.h"
 
 #define TR2_CAT "test-gvfs-protocol"
 
@@ -551,9 +554,6 @@ static enum worker_result send_loose_object(const struct object_id *oid,
 		return send_http_error(1, 404, "Not Found", -1, WR_MAYHEM);
 	}
 
-	trace2_printf("%s: OBJECT type=%d len=%ld '%.40s'", TR2_CAT,
-		      type, size, (const char *)content);
-
 	/*
 	 * We are blending several somewhat independent concepts here:
 	 *
@@ -872,7 +872,6 @@ static enum worker_result get_packfile_from_oids(
 		goto done;
 	}
 
-	trace2_printf("%s: pack-objects returned %d bytes", TR2_CAT, buf_packfile->len);
 	wr = WR_OK;
 
 done:
@@ -1020,6 +1019,305 @@ static enum worker_result do__gvfs_objects__post(struct req *req)
 	return wr;
 }
 
+/*
+ * bswap.h only defines big endian functions.
+ * The GVFS Protocol defines fields in little endian.
+ */
+static inline uint64_t my_get_le64(uint64_t le_val)
+{
+#if GIT_BYTE_ORDER == GIT_LITTLE_ENDIAN
+	return le_val;
+#else
+	return default_bswap64(le_val);
+#endif
+}
+
+static inline uint16_t my_get_le16(uint16_t le_val)
+{
+#if GIT_BYTE_ORDER == GIT_LITTLE_ENDIAN
+	return le_val;
+#else
+	return default_bswap16(le_val);
+#endif
+}
+
+/*
+ * GVFS Protocol headers for the multipack format
+ * All integer values are little-endian on the wire.
+ *
+ * Note: technically, the protocol defines the `ph` fields as signed, but
+ * that makes a mess of the bswap routines and we're not going to overflow
+ * them for a very long time.
+ */
+
+static unsigned char v1_h[6] = { 'G', 'P', 'R', 'E', ' ', 0x01 };
+
+struct ph {
+	uint64_t timestamp;
+	uint64_t len_pack;
+	uint64_t len_idx;
+};
+
+/*
+ * Accumulate a list of commits-and-trees packfiles we have in the local ODB.
+ * The test script should have pre-created a set of "ct-<epoch>.pack" and .idx
+ * files for us.  We serve these as is and DO NOT try to dynamically create
+ * new commits/trees packfiles (like the cache-server does).  We are only
+ * testing if/whether gvfs-helper.exe can receive one or more packfiles and
+ * idx files over the protocol.
+ */
+struct ct_pack_item {
+	struct ph ph;
+	struct strbuf path_pack;
+	struct strbuf path_idx;
+};
+
+static void ct_pack_item__free(struct ct_pack_item *item)
+{
+	if (!item)
+		return;
+	strbuf_release(&item->path_pack);
+	strbuf_release(&item->path_idx);
+	free(item);
+}
+
+struct ct_pack_data {
+	struct ct_pack_item **items;
+	size_t nr, alloc;
+};
+
+static void ct_pack_data__release(struct ct_pack_data *data)
+{
+	size_t k;
+
+	if (!data)
+		return;
+
+	for (k = 0; k < data->nr; k++)
+		ct_pack_item__free(data->items[k]);
+
+	FREE_AND_NULL(data->items);
+	data->nr = 0;
+	data->alloc = 0;
+}
+
+static void cb_ct_pack(const char *full_path, size_t full_path_len UNUSED,
+		       const char *file_path, void *void_data)
+{
+	struct ct_pack_data *data = void_data;
+	struct ct_pack_item *item = NULL;
+	struct stat st;
+	const char *v;
+
+	/*
+	 * We only want "ct-<epoch>.pack" files.  The test script creates
+	 * cached commits-and-trees packfiles with this prefix to avoid
+	 * confusion with prefetch packfiles received by gvfs-helper.
+	 */
+	if (!ends_with(file_path, ".pack"))
+		return;
+	if (!skip_prefix(file_path, "ct-", &v))
+		return;
+
+	item = (struct ct_pack_item *)xcalloc(1, sizeof(*item));
+	strbuf_init(&item->path_pack, 0);
+	strbuf_addstr(&item->path_pack, full_path);
+
+	strbuf_init(&item->path_idx, 0);
+	strbuf_addstr(&item->path_idx, full_path);
+	strbuf_strip_suffix(&item->path_idx, ".pack");
+	strbuf_addstr(&item->path_idx, ".idx");
+
+	item->ph.timestamp = (uint64_t)strtoul(v, NULL, 10);
+
+	lstat(item->path_pack.buf, &st);
+	item->ph.len_pack = (uint64_t)st.st_size;
+
+	if (string_list_has_string(&mayhem_list, "no_prefetch_idx"))
+		item->ph.len_idx = maximum_unsigned_value_of_type(uint64_t);
+	else if (lstat(item->path_idx.buf, &st) < 0)
+		item->ph.len_idx = maximum_unsigned_value_of_type(uint64_t);
+	else
+		item->ph.len_idx = (uint64_t)st.st_size;
+
+	ALLOC_GROW(data->items, data->nr + 1, data->alloc);
+	data->items[data->nr++] = item;
+}
+
+/*
+ * Sort by increasing EPOCH time.
+ */
+static int ct_pack_sort_compare(const void *_a, const void *_b)
+{
+	const struct ct_pack_item *a = *(const struct ct_pack_item **)_a;
+	const struct ct_pack_item *b = *(const struct ct_pack_item **)_b;
+	return (a->ph.timestamp < b->ph.timestamp) ? -1 : (a->ph.timestamp != b->ph.timestamp);
+}
+
+static enum worker_result send_ct_item(const struct ct_pack_item *item)
+{
+	struct ph ph_le;
+	int fd_pack = -1;
+	int fd_idx = -1;
+	enum worker_result wr = WR_OK;
+
+	/* send per-packfile header. all fields are little-endian on the wire. */
+	ph_le.timestamp = my_get_le64(item->ph.timestamp);
+	ph_le.len_pack = my_get_le64(item->ph.len_pack);
+	ph_le.len_idx = my_get_le64(item->ph.len_idx);
+
+	if (write_in_full(1, &ph_le, sizeof(ph_le)) < 0) {
+		logerror("unable to write ph_le");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	trace2_printf("%s: sending prefetch pack '%s'", TR2_CAT, item->path_pack.buf);
+
+	fd_pack = git_open_cloexec(item->path_pack.buf, O_RDONLY);
+	if (fd_pack == -1 || copy_fd(fd_pack, 1)) {
+		logerror("could not send packfile");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	if (item->ph.len_idx != maximum_unsigned_value_of_type(uint64_t)) {
+		trace2_printf("%s: sending prefetch idx '%s'", TR2_CAT, item->path_idx.buf);
+
+		fd_idx = git_open_cloexec(item->path_idx.buf, O_RDONLY);
+		if (fd_idx == -1 || copy_fd(fd_idx, 1)) {
+			logerror("could not send idx");
+			wr = WR_IO_ERROR;
+			goto done;
+		}
+	}
+
+done:
+	if (fd_pack != -1)
+		close(fd_pack);
+	if (fd_idx != -1)
+		close(fd_idx);
+	return wr;
+}
+
+/*
+ * The GVFS Protocol defines the lastTimeStamp parameter as the value
+ * of the last prefetch pack that the client has.  Therefore, we only
+ * want to send newer ones.
+ */
+static int want_ct_pack(const struct ct_pack_item *item, timestamp_t last_timestamp)
+{
+	return item->ph.timestamp > last_timestamp;
+}
+
+static enum worker_result send_multipack(struct ct_pack_data *data,
+					 timestamp_t last_timestamp)
+{
+	struct strbuf response_header = STRBUF_INIT;
+	struct strbuf uuid = STRBUF_INIT;
+	enum worker_result wr;
+	size_t content_len = 0;
+	unsigned short np = 0;
+	unsigned short np_le;
+	size_t k;
+
+	/*
+	 * Precompute the content-length so that we don't have to deal with
+	 * chunking it.
+	 */
+	content_len += sizeof(v1_h) + sizeof(np);
+	for (k = 0; k < data->nr; k++) {
+		struct ct_pack_item *item = data->items[k];
+
+		if (!want_ct_pack(item, last_timestamp))
+			continue;
+
+		np++;
+		content_len += sizeof(struct ph);
+		content_len += item->ph.len_pack;
+		if (item->ph.len_idx != maximum_unsigned_value_of_type(uint64_t))
+			content_len += item->ph.len_idx;
+	}
+
+	strbuf_addstr(&response_header, "HTTP/1.1 200 OK\r\n");
+	strbuf_addstr(&response_header, "Cache-Control: private\r\n");
+	strbuf_addstr(&response_header,
+		      "Content-Type: application/x-gvfs-timestamped-packfiles-indexes\r\n");
+	strbuf_addf(  &response_header,	"Content-Length: %d\r\n", (int)content_len);
+	strbuf_addf(  &response_header,	"Server: test-gvfs-protocol/%s\r\n", git_version_string);
+	strbuf_addf(  &response_header, "Date: %s\r\n", show_date(time(NULL), 0, DATE_MODE(RFC2822)));
+	gen_fake_uuid(&uuid);
+	strbuf_addf(  &response_header, "X-VSS-E2EID: %s\r\n", uuid.buf);
+	strbuf_addstr(&response_header, "\r\n");
+
+	if (write_in_full(1, response_header.buf, response_header.len) < 0) {
+		logerror("unable to write response header");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	/* send protocol version header */
+	if (write_in_full(1, v1_h, sizeof(v1_h)) < 0) {
+		logerror("unabled to write v1_h");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	/* send number of packfiles */
+	np_le = my_get_le16(np);
+	if (write_in_full(1, &np_le, sizeof(np_le)) < 0) {
+		logerror("unable to write np");
+		wr = WR_IO_ERROR;
+		goto done;
+	}
+
+	for (k = 0; k < data->nr; k++) {
+		if (!want_ct_pack(data->items[k], last_timestamp))
+			continue;
+
+		wr = send_ct_item(data->items[k]);
+		if (wr != WR_OK)
+			goto done;
+	}
+
+	wr = WR_OK;
+
+done:
+	strbuf_release(&uuid);
+	strbuf_release(&response_header);
+
+	return wr;
+}
+
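+/*
+ * Handle a "gvfs/prefetch" GET request: stream all cached "ct-<epoch>.pack"
+ * packfiles newer than the optional lastPackTimestamp argument back to the
+ * client as a single multipack response.
+ */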
+static enum worker_result do__gvfs_prefetch__get(struct req *req)
+{
+	struct ct_pack_data data;
+	timestamp_t last_timestamp = 0;
+	enum worker_result wr;
+
+	memset(&data, 0, sizeof(data));
+
+	if (req->quest_args.len) {
+		const char *key = strstr(req->quest_args.buf, "lastPackTimestamp=");
+		if (key) {
+			const char *val;
+			if (skip_prefix(key, "lastPackTimestamp=", &val)) {
+				last_timestamp = strtol(val, NULL, 10);
+			}
+		}
+	}
+	trace2_printf("%s: prefetch/since %"PRItime, TR2_CAT, last_timestamp);
+
+	for_each_file_in_pack_dir(repo_get_object_directory(the_repository), cb_ct_pack, &data);
+	QSORT(data.items, data.nr, ct_pack_sort_compare);
+
+	wr = send_multipack(&data, last_timestamp);
+
+	ct_pack_data__release(&data);
+
+	return wr;
+}
+
 /*
  * Read the HTTP request up to the start of the optional message-body.
  * We do this byte-by-byte because we have keep-alive turned on and
@@ -1174,6 +1472,11 @@ static enum worker_result req__read(struct req *req, int fd)
 	 * We let our caller read/chunk it in as appropriate.
 	 */
 done:
+
+#if 0
+	/*
+	 * This is useful for debugging the request, but very noisy.
+	 */
 	if (trace2_is_enabled()) {
 		struct string_list_item *item;
 		trace2_printf("%s: %s", TR2_CAT, req->start_line.buf);
@@ -1188,6 +1491,7 @@ static enum worker_result req__read(struct req *req, int fd)
 		for_each_string_list_item(item, &req->header_list)
 			trace2_printf("%s: Hdrs: %s", TR2_CAT, item->string);
 	}
+#endif
 
 	strbuf_release(&h);
 
@@ -1238,6 +1542,12 @@ static enum worker_result dispatch(struct req *req)
 			return do__gvfs_config__get(req);
 	}
 
+	if (!strcmp(req->gvfs_api.buf, "gvfs/prefetch")) {
+
+		if (!strcmp(method, "GET"))
+			return do__gvfs_prefetch__get(req);
+	}
+
 	return send_http_error(1, 501, "Not Implemented", -1,
 			       WR_OK | WR_HANGUP);
 }
diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh
index 3cb1459ea32476..196a5513b1661a 100755
--- a/t/t5799-gvfs-helper.sh
+++ b/t/t5799-gvfs-helper.sh
@@ -24,8 +24,8 @@ test_set_port GIT_TEST_GVFS_PROTOCOL_PORT
 #        actually use it).  We are only testing explicit object
 #        fetching using gvfs-helper.exe in isolation.
 #
-REPO_SRC="$PWD"/repo_src
-REPO_T1="$PWD"/repo_t1
+REPO_SRC="$(pwd)"/repo_src
+REPO_T1="$(pwd)"/repo_t1
 
 # Setup some loopback URLs where test-gvfs-protocol.exe will be
 # listening.  We will spawn it directly inside the repo_src directory,
@@ -44,22 +44,22 @@ HOST_PORT=127.0.0.1:$GIT_TEST_GVFS_PROTOCOL_PORT
 ORIGIN_URL=http://$HOST_PORT/servertype/origin
 CACHE_URL=http://$HOST_PORT/servertype/cache
 
-SHARED_CACHE_T1="$PWD"/shared_cache_t1
+SHARED_CACHE_T1="$(pwd)"/shared_cache_t1
 
 # The pid-file is created by test-gvfs-protocol.exe when it starts.
 # The server will shut down if/when we delete it.  (This is a little
 # easier than killing it by PID.)
 #
-PID_FILE="$PWD"/pid-file.pid
-SERVER_LOG="$PWD"/OUT.server.log
+PID_FILE="$(pwd)"/pid-file.pid
+SERVER_LOG="$(pwd)"/OUT.server.log
 
 PATH="$GIT_BUILD_DIR/t/helper/:$PATH" && export PATH
 
-OIDS_FILE="$PWD"/oid_list.txt
-OIDS_CT_FILE="$PWD"/oid_ct_list.txt
-OIDS_BLOBS_FILE="$PWD"/oids_blobs_file.txt
-OID_ONE_BLOB_FILE="$PWD"/oid_one_blob_file.txt
-OID_ONE_COMMIT_FILE="$PWD"/oid_one_commit_file.txt
+OIDS_FILE="$(pwd)"/oid_list.txt
+OIDS_CT_FILE="$(pwd)"/oid_ct_list.txt
+OIDS_BLOBS_FILE="$(pwd)"/oids_blobs_file.txt
+OID_ONE_BLOB_FILE="$(pwd)"/oid_one_blob_file.txt
+OID_ONE_COMMIT_FILE="$(pwd)"/oid_one_commit_file.txt
 
 # Get a list of available OIDs in repo_src so that we can try to fetch
 # them and so that we don't have to hard-code a list of known OIDs.
@@ -108,6 +108,30 @@ get_one_commit_oid () {
 	return 0
 }
 
+# Create a commits-and-trees packfile for use with "prefetch"
+# using the given range of commits.
+#
+create_commits_and_trees_packfile () {
+	if test $# -eq 2
+	then
+		epoch=$1
+		revs=$2
+	else
+		echo "create_commits_and_trees_packfile: Need 2 args"
+		return 1
+	fi
+
+	pack_file="$REPO_SRC"/.git/objects/pack/ct-$epoch.pack
+	idx_file="$REPO_SRC"/.git/objects/pack/ct-$epoch.idx
+
+	git -C "$REPO_SRC" pack-objects --stdout --revs --filter=blob:none \
+		>"$pack_file" <<-EOF
+		$revs
+	EOF
+	git -C "$REPO_SRC" index-pack -o "$idx_file" "$pack_file"
+	return 0
+}
+
 test_expect_success 'setup repos' '
 	test_create_repo "$REPO_SRC" &&
 	git -C "$REPO_SRC" branch -M main &&
@@ -115,9 +139,16 @@ test_expect_success 'setup repos' '
 	# test_commit_bulk() does magic to create a packfile containing
 	# the new commits.
 	#
+	# We create branches in repo_src, but also remember the branch OIDs
+	# in files so that we can refer to them in repo_t1, which will not
+	# have the commits locally (because we do not clone or fetch).
+	#
 	test_commit_bulk -C "$REPO_SRC" --filename="batch_a.%s.t" 9 &&
+	git -C "$REPO_SRC" branch B1 &&
 	git -C "$REPO_SRC" rev-parse refs/heads/main >m1.branch &&
+	#
 	test_commit_bulk -C "$REPO_SRC" --filename="batch_b.%s.t" 9 &&
+	git -C "$REPO_SRC" branch B2 &&
 	git -C "$REPO_SRC" rev-parse refs/heads/main >m2.branch &&
 	#
 	# test_commit() creates commits, trees, tags, and blobs and leave
@@ -134,8 +165,16 @@ test_expect_success 'setup repos' '
 	test_commit -C "$REPO_SRC" file7.txt &&
 	test_commit -C "$REPO_SRC" file8.txt &&
 	test_commit -C "$REPO_SRC" file9.txt &&
+	git -C "$REPO_SRC" branch B3 &&
 	git -C "$REPO_SRC" rev-parse refs/heads/main >m3.branch &&
 	#
+	# Create some commits-and-trees-only packfiles for testing prefetch.
+	# Set arbitrary EPOCH times to make it easier to test fetch-since.
+	#
+	create_commits_and_trees_packfile 1000000000 B1 &&
+	create_commits_and_trees_packfile 1100000000 B1..B2 &&
+	create_commits_and_trees_packfile 1200000000 B2..B3 &&
+	#
 	# gvfs-helper.exe writes downloaded objects to a shared-cache directory
 	# rather than the ODB inside the .git directory.
 	#
@@ -160,10 +199,10 @@ test_expect_success 'setup repos' '
 	EOF
 	cat <<-EOF >creds.sh &&
 		#!/bin/sh
-		cat "$PWD"/creds.txt
+		cat "$(pwd)"/creds.txt
 	EOF
 	chmod 755 creds.sh &&
-	git -C "$REPO_T1" config --local credential.helper "!f() { cat \"$PWD\"/creds.txt; }; f" &&
+	git -C "$REPO_T1" config --local credential.helper "!f() { cat \"$(pwd)\"/creds.txt; }; f" &&
 	#
 	# Create some test data sets.
 	#
@@ -554,8 +593,8 @@ test_expect_success 'basic: POST-request a single blob' '
 # Request a single commit via POST.  Per the GVFS Protocol, the server
 # should implicitly send us a packfile containing the commit and the
 # trees it references.  Confirm that we properly handled the receipt of
-# the packfile.  (Here, we are testing that asking for a single object
-# yields a packfile rather than a loose object.)
+# the packfile.  (Here, we are testing that asking for a single commit
+# via POST yields a packfile rather than a loose object.)
 #
 # We DO NOT verify that the packfile contains commits/trees and no blobs
 # because our test helper doesn't implement the filtering.
@@ -587,6 +626,105 @@ test_expect_success 'basic: POST-request a single commit' '
 	verify_connection_count 1
 '
 
+test_expect_success 'basic: PREFETCH w/o arg gets all' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Without a "since" argument gives us all "ct-*.pack" since the EPOCH
+	# because we do not have any prefetch packs locally.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		prefetch >OUT.output &&
+
+	# gvfs-helper prints a "packfile <path>" message for each received
+	# packfile.
+	#
+	verify_received_packfile_count 3 &&
+
+	stop_gvfs_protocol_server &&
+	verify_connection_count 1
+'
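+/*
+ * Dispatch on the received content-type and install the body as a
+ * prefetch multipack, a packfile, or a loose object.
+ */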
+
+test_expect_success 'basic: PREFETCH w/ arg' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Ask for cached packfiles NEWER THAN the given time.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		prefetch --since="1000000000" >OUT.output &&
+
+	# gvfs-helper prints a "packfile <path>" message for each received
+	# packfile.
+	#
+	verify_received_packfile_count 2 &&
+
+	stop_gvfs_protocol_server &&
+	verify_connection_count 1
+'
+
+test_expect_success 'basic: PREFETCH mayhem no_prefetch_idx' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem no_prefetch_idx &&
+
+	# Request prefetch packs, but tell the server not to send any
+	# idx files and force gvfs-helper to compute them.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		prefetch --since="1000000000" >OUT.output &&
+
+	# gvfs-helper prints a "packfile <path>" message for each received
+	# packfile.
+	#
+	verify_received_packfile_count 2 &&
+
+	stop_gvfs_protocol_server &&
+	verify_connection_count 1
+'
+
+test_expect_success 'basic: PREFETCH up-to-date' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Ask for cached packfiles NEWER THAN the given time.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		prefetch --since="1000000000" >OUT.output &&
+
+	# gvfs-helper prints a "packfile <path>" message for each received
+	# packfile.
+	#
+	verify_received_packfile_count 2 &&
+
+	# Ask again for any packfiles newer than what we have cached locally.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		prefetch >OUT.output &&
+
+	# gvfs-helper prints a "packfile <path>" message for each received
+	# packfile.
+	#
+	verify_received_packfile_count 0 &&
+
+	stop_gvfs_protocol_server &&
+	verify_connection_count 2
+'
+
 #################################################################
 # Tests to see how gvfs-helper responds to network problems.
 #
@@ -960,44 +1098,6 @@ test_expect_success 'HTTP GET Auth on Cache Server' '
 # magically fetched whenever required.
 #################################################################
 
-test_expect_success 'integration: explicit commit/trees, implicit blobs: log file' '
-	test_when_finished "per_test_cleanup" &&
-	start_gvfs_protocol_server &&
-
-	# We have a very empty repo.  Seed it with all of the commits
-	# and trees.  The purpose of this test is to demand-load the
-	# needed blobs only, so we prefetch the commits and trees.
-	#
-	git -C "$REPO_T1" gvfs-helper \
-		--cache-server=disable \
-		--remote=origin \
-		get \
-		<"$OIDS_CT_FILE" >OUT.output &&
-
-	# Confirm that we do not have the blobs locally.
-	# With gvfs-helper turned off, we should fail.
-	#
-	test_must_fail \
-		git -C "$REPO_T1" -c core.useGVFSHelper=false \
-			log $(cat m3.brach) -- file9.txt \
-			>OUT.output 2>OUT.stderr &&
-
-	# Turn on gvfs-helper and retry.  This should implicitly fetch
-	# any needed blobs.
-	#
-	git -C "$REPO_T1" -c core.useGVFSHelper=true \
-		log $(cat m3.branch) -- file9.txt \
-		>OUT.output 2>OUT.stderr &&
-
-	# Verify that gvfs-helper wrote the fetched the blobs to the
-	# local ODB, such that a second attempt with gvfs-helper
-	# turned off should succeed.
-	#
-	git -C "$REPO_T1" -c core.useGVFSHelper=false \
-		log $(cat m3.branch) -- file9.txt \
-		>OUT.output 2>OUT.stderr
-'
-
 test_expect_success 'integration: explicit commit/trees, implicit blobs: diff 2 commits' '
 	test_when_finished "per_test_cleanup" &&
 	start_gvfs_protocol_server &&

From cbb7d837fc857b4eb4bd8b16371068db1258e325 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Wed, 13 Nov 2019 14:24:34 -0500
Subject: [PATCH 106/207] t5799: add support for POST to return either a loose
 object or packfile

Earlier versions of the test always returned a packfile in response to a POST.
Now we look at the number of objects in the POST request.

If > 1, always send a packfile.

If = 1 and it is a commit, send a packfile.
Otherwise, send a loose object.

This is to better model the behavior of the GVFS server/protocol which
treats commits differently.
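
A rough sketch of that rule (illustrative only; the function name below is
hypothetical, and the real implementation is 'classify_oids_in_post()' in
the diff):

	static int post_wants_packfile(struct oidset *oids, int nr_oids)
	{
		struct oidset_iter iter;
		struct object_id *oid;
		enum object_type type;
		struct object_info oi = OBJECT_INFO_INIT;

		if (nr_oids > 1)
			return 1;	/* more than one object: packfile */

		oi.typep = &type;
		oidset_iter_init(oids, &iter);
		while ((oid = oidset_iter_next(&iter)))
			if (!oid_object_info_extended(the_repository, oid,
						      &oi, 0) &&
			    type == OBJ_COMMIT)
				return 1;	/* single commit: packfile */

		return 0;	/* single non-commit: loose object */
	}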

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 t/helper/test-gvfs-protocol.c | 193 +++++++++++++++++++++++-----------
 t/t5799-gvfs-helper.sh        |  76 ++++++++++++-
 2 files changed, 209 insertions(+), 60 deletions(-)

diff --git a/t/helper/test-gvfs-protocol.c b/t/helper/test-gvfs-protocol.c
index ba3faf35cdc2e2..d89aa24d73dca7 100644
--- a/t/helper/test-gvfs-protocol.c
+++ b/t/helper/test-gvfs-protocol.c
@@ -500,8 +500,7 @@ static enum worker_result do__gvfs_config__get(struct req *req)
  *     write_object_file_prepare()
  *     write_loose_object()
  */
-static enum worker_result send_loose_object(const struct object_info *oi,
-					    const struct object_id *oid,
+static enum worker_result send_loose_object(const struct object_id *oid,
 					    int fd)
 {
 #define MAX_HEADER_LEN 32
@@ -514,6 +513,44 @@ static enum worker_result send_loose_object(const struct object_info *oi,
 	git_hash_ctx c;
 	int object_header_len;
 	int ret;
+	unsigned flags = 0;
+	void *content;
+	unsigned long size;
+	enum object_type type;
+	struct object_info oi = OBJECT_INFO_INIT;
+
+	/*
+	 * Since `test-gvfs-protocol` is mocking a real GVFS server (cache or
+	 * main), we don't want a request for a missing object to cause the
+	 * implicit dynamic fetch mechanism to try to fault-it-in (and cause
+	 * our call to oid_object_info_extended() to launch another instance
+	 * of `gvfs-helper` to magically fetch it (which would connect to a
+	 * new instance of `test-gvfs-protocol`)).
+	 *
+	 * Rather, we want a missing object to fail, so we can respond with
+	 * a 404, for example.
+	 */
+	flags |= OBJECT_INFO_FOR_PREFETCH;
+	flags |= OBJECT_INFO_LOOKUP_REPLACE;
+
+	oi.typep = &type;
+	oi.sizep = &size;
+	oi.contentp = &content;
+
+	if (oid_object_info_extended(the_repository, oid, &oi, flags)) {
+		logerror("Could not find OID: '%s'", oid_to_hex(oid));
+		free(content);
+		return send_http_error(1, 404, "Not Found", -1, WR_OK);
+	}
+
+	if (string_list_has_string(&mayhem_list, "http_404")) {
+		logmayhem("http_404");
+		free(content);
+		return send_http_error(1, 404, "Not Found", -1, WR_MAYHEM);
+	}
+
+	trace2_printf("%s: OBJECT type=%d len=%ld '%.40s'", TR2_CAT,
+		      type, size, (const char *)content);
 
 	/*
 	 * We are blending several somewhat independent concepts here:
@@ -554,6 +591,7 @@ static enum worker_result send_loose_object(const struct object_info *oi,
 
 	if (write_in_full(fd, response_header.buf, response_header.len) < 0) {
 		logerror("unable to write response header");
+		free(content);
 		return WR_IO_ERROR;
 	}
 
@@ -562,20 +600,21 @@ static enum worker_result send_loose_object(const struct object_info *oi,
 
 	if (string_list_has_string(&mayhem_list, "close_write")) {
 		logmayhem("close_write");
+		free(content);
 		return WR_MAYHEM | WR_HANGUP;
 	}
 
 	/* [1a] */
 	object_header_len = 1 + xsnprintf(object_header, MAX_HEADER_LEN,
 					  "%s %"PRIuMAX,
-					  type_name(*oi->typep),
-					  (uintmax_t)*oi->sizep);
+					  type_name(*oi.typep),
+					  (uintmax_t)*oi.sizep);
 
 	/* [2] */
 	memset(&oid_check, 0, sizeof(oid_check));
 	the_hash_algo->init_fn(&c);
 	the_hash_algo->update_fn(&c, object_header, object_header_len);
-	the_hash_algo->update_fn(&c, *oi->contentp, *oi->sizep);
+	the_hash_algo->update_fn(&c, *oi.contentp, *oi.sizep);
 	the_hash_algo->final_fn(oid_check.hash, &c);
 	if (!oideq(oid, &oid_check))
 		BUG("send_loose_object[2]: invalid construction '%s' '%s'",
@@ -595,8 +634,8 @@ static enum worker_result send_loose_object(const struct object_info *oi,
 	the_hash_algo->update_fn(&c, object_header, object_header_len);
 
 	/* [3, 1b, 5, 6] */
-	stream.next_in = *oi->contentp;
-	stream.avail_in = *oi->sizep;
+	stream.next_in = *oi.contentp;
+	stream.avail_in = *oi.sizep;
 	do {
 		enum worker_result wr;
 		unsigned char *in0 = stream.next_in;
@@ -606,6 +645,7 @@ static enum worker_result send_loose_object(const struct object_info *oi,
 		/* [5] */
 		wr = send_chunk(fd, compressed, stream.next_out - compressed);
 		if (wr & WR_STOP_THE_MUSIC) {
+			free(content);
 			return wr;
 		}
 
@@ -628,6 +668,7 @@ static enum worker_result send_loose_object(const struct object_info *oi,
 		    oid_to_hex(oid), oid_to_hex(&oid_check));
 
 	/* [5] */
+	free(content);
 	return send_final_chunk(fd);
 }
 
@@ -642,25 +683,6 @@ static enum worker_result send_loose_object(const struct object_info *oi,
 static enum worker_result do__gvfs_objects__get(struct req *req)
 {
 	struct object_id oid;
-	void *content;
-	unsigned long size;
-	enum object_type type;
-	struct object_info oi = OBJECT_INFO_INIT;
-	unsigned flags = 0;
-
-	/*
-	 * Since `test-gvfs-protocol` is mocking a real GVFS server (cache or
-	 * main), we don't want a request for a missing object to cause the
-	 * implicit dynamic fetch mechanism to try to fault-it-in (and cause
-	 * our call to oid_object_info_extended() to launch another instance
-	 * of `gvfs-helper` to magically fetch it (which would connect to a
-	 * new instance of `test-gvfs-protocol`)).
-	 *
-	 * Rather, we want a missing object to fail, so we can respond with
-	 * a 404, for example.
-	 */
-	flags |= OBJECT_INFO_FOR_PREFETCH;
-	flags |= OBJECT_INFO_LOOKUP_REPLACE;
 
 	if (!req->slash_args.len ||
 	    get_oid_hex(req->slash_args.buf, &oid)) {
@@ -671,29 +693,13 @@ static enum worker_result do__gvfs_objects__get(struct req *req)
 
 	trace2_printf("%s: GET %s", TR2_CAT, oid_to_hex(&oid));
 
-	oi.typep = &type;
-	oi.sizep = &size;
-	oi.contentp = &content;
-
-	if (oid_object_info_extended(the_repository, &oid, &oi, flags)) {
-		logerror("Could not find OID: '%s'", oid_to_hex(&oid));
-		return send_http_error(1, 404, "Not Found", -1, WR_OK);
-	}
-
-	if (string_list_has_string(&mayhem_list, "http_404")) {
-		logmayhem("http_404");
-		return send_http_error(1, 404, "Not Found", -1, WR_MAYHEM);
-	}
-
-	trace2_printf("%s: OBJECT type=%d len=%ld '%.40s'", TR2_CAT,
-		      type, size, (const char *)content);
-
-	return send_loose_object(&oi, &oid, 1);
+	return send_loose_object(&oid, 1);
 }
 
 static enum worker_result read_json_post_body(
 	struct req *req,
-	struct oidset *oids)
+	struct oidset *oids,
+	int *nr_oids)
 {
 	struct object_id oid;
 	struct string_list_item *item;
@@ -762,7 +768,8 @@ static enum worker_result read_json_post_body(
 
 		if (get_oid_hex(pstart, &oid))
 			goto could_not_parse_json;
-		oidset_insert(oids, &oid);
+		if (!oidset_insert(oids, &oid))
+			*nr_oids += 1;
 		trace2_printf("%s: POST %s", TR2_CAT, oid_to_hex(&oid));
 
 		/* Eat trailing whitespace after trailing DQUOTE */
@@ -806,16 +813,6 @@ static enum worker_result read_json_post_body(
  *
  * My assumption here is that we're not testing with GBs
  * of data....
- *
- * Note: The GVFS Protocol POST verb behaves like GET for
- * Note: non-commit objects (in that it just returns the
- * Note: requested object), but for commit objects POST
- * Note: *also* returns all trees referenced by the commit.
- * Note:
- * Note: Since the goal of this test is to confirm that
- * Note: gvfs-helper can request and receive a packfile
- * Note: *at all*, I'm not going to blur the issue and
- * Note: support the extra semantics for commit objects.
  */
 static enum worker_result get_packfile_from_oids(
 	struct oidset *oids,
@@ -905,21 +902,99 @@ static enum worker_result send_packfile_from_buffer(const struct strbuf *packfil
 	return wr;
 }
 
+/*
+ * The GVFS Protocol POST verb behaves like GET for non-commit objects
+ * (in that it just returns the requested object), but for commit
+ * objects POST *also* returns all trees referenced by the commit.
+ *
+ * The goal of this test is to confirm that:
+ * [] `gvfs-helper post` can request and receive a packfile at all.
+ * [] `gvfs-helper post` can handle getting either a packfile or a
+ *                       loose object.
+ *
+ * Therefore, I'm not going to blur the issue and support the custom
+ * semantics for commit objects.
+ *
+ * If one of the OIDs is a commit, `git pack-objects` will completely
+ * walk the trees and blobs for it and we get that for free.  This is
+ * good enough for our testing.
+ *
+ * TODO A proper solution would separate the commit objects and do a
+ * TODO `rev-list --filter=blob:none` for them (or use the internal
+ * TODO list-objects API) and a regular enumeration for the non-commit
+ * TODO objects.  And build a new oidset with the union of those and then
+ * TODO call pack-objects on it instead.
+ * TODO
+ * TODO But that's too much trouble for now.
+ *
+ * For now, when the post asks for a single object, we just need to know
+ * whether it is a commit or a non-commit.  That is sufficient to know
+ * whether we should send a packfile or a loose object.
+ */
+static enum worker_result classify_oids_in_post(
+	struct oidset *oids, int nr_oids, int *need_packfile)
+{
+	struct oidset_iter iter;
+	struct object_id *oid;
+	enum object_type type;
+	struct object_info oi = OBJECT_INFO_INIT;
+	unsigned flags = 0;
+
+	if (nr_oids > 1) {
+		*need_packfile = 1;
+		return WR_OK;
+	}
+
+	/* disable missing-object faulting */
+	flags |= OBJECT_INFO_FOR_PREFETCH;
+	flags |= OBJECT_INFO_LOOKUP_REPLACE;
+
+	oi.typep = &type;
+
+	oidset_iter_init(oids, &iter);
+	while ((oid = oidset_iter_next(&iter))) {
+		if (!oid_object_info_extended(the_repository, oid, &oi, flags) &&
+		    type == OBJ_COMMIT) {
+			*need_packfile = 1;
+			return WR_OK;
+		}
+	}
+
+	*need_packfile = 0;
+	return WR_OK;
+}
+
 static enum worker_result do__gvfs_objects__post(struct req *req)
 {
 	struct oidset oids = OIDSET_INIT;
 	struct strbuf packfile = STRBUF_INIT;
 	enum worker_result wr;
+	int nr_oids = 0;
+	int need_packfile = 0;
 
-	wr = read_json_post_body(req, &oids);
+	wr = read_json_post_body(req, &oids, &nr_oids);
 	if (wr & WR_STOP_THE_MUSIC)
 		goto done;
 
-	wr = get_packfile_from_oids(&oids, &packfile);
+	wr = classify_oids_in_post(&oids, nr_oids, &need_packfile);
 	if (wr & WR_STOP_THE_MUSIC)
 		goto done;
 
-	wr = send_packfile_from_buffer(&packfile);
+	if (!need_packfile) {
+		struct oidset_iter iter;
+		struct object_id *oid;
+
+		oidset_iter_init(&oids, &iter);
+		oid = oidset_iter_next(&iter);
+
+		wr = send_loose_object(oid, 1);
+	} else {
+		wr = get_packfile_from_oids(&oids, &packfile);
+		if (wr & WR_STOP_THE_MUSIC)
+			goto done;
+
+		wr = send_packfile_from_buffer(&packfile);
+	}
 
 done:
 	oidset_clear(&oids);
diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh
index f67ba209466f86..afdacfe7134fe6 100755
--- a/t/t5799-gvfs-helper.sh
+++ b/t/t5799-gvfs-helper.sh
@@ -59,6 +59,7 @@ OIDS_FILE="$PWD"/oid_list.txt
 OIDS_CT_FILE="$PWD"/oid_ct_list.txt
 OIDS_BLOBS_FILE="$PWD"/oids_blobs_file.txt
 OID_ONE_BLOB_FILE="$PWD"/oid_one_blob_file.txt
+OID_ONE_COMMIT_FILE="$PWD"/oid_one_commit_file.txt
 
 # Get a list of available OIDs in repo_src so that we can try to fetch
 # them and so that we don't have to hard-code a list of known OIDs.
@@ -102,6 +103,11 @@ get_list_of_commit_and_tree_oids () {
 	return 0
 }
 
+get_one_commit_oid () {
+	git -C "$REPO_SRC" rev-parse HEAD >"$OID_ONE_COMMIT_FILE"
+	return 0
+}
+
 test_expect_success 'setup repos' '
 	test_create_repo "$REPO_SRC" &&
 	git -C "$REPO_SRC" branch -M main &&
@@ -161,7 +167,8 @@ test_expect_success 'setup repos' '
 	#
 	get_list_of_oids 30 &&
 	get_list_of_commit_and_tree_oids 30 &&
-	get_list_of_blobs_oids
+	get_list_of_blobs_oids &&
+	get_one_commit_oid
 '
 
 stop_gvfs_protocol_server () {
@@ -511,6 +518,73 @@ test_expect_success 'basic: POST origin blobs' '
 	verify_connection_count 1
 '
 
+# Request a single blob via POST.  Per the GVFS Protocol, the server
+# should implicitly send a loose object for it.  Confirm that.
+#
+test_expect_success 'basic: POST-request a single blob' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Connect to the origin server (w/o auth) and request a single
+	# blob via POST.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		post \
+		<"$OID_ONE_BLOB_FILE" >OUT.output &&
+
+	# Stop the server to prevent the verification steps from faulting-in
+	# any missing objects.
+	#
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "loose <oid>" message for each received
+	# loose object.
+	#
+	sed "s/loose //" <OUT.output | sort >OUT.actual &&
+	test_cmp "$OID_ONE_BLOB_FILE" OUT.actual &&
+
+	verify_connection_count 1
+'
+
+# Request a single commit via POST.  Per the GVFS Protocol, the server
+# should implicitly send us a packfile containing the commit and the
+# trees it references.  Confirm that we properly handled the receipt of
+# the packfile.  (Here, we are testing that asking for a single object
+# yields a packfile rather than a loose object.)
+#
+# We DO NOT verify that the packfile contains commits/trees and no blobs
+# because our test helper doesn't implement the filtering.
+#
+test_expect_success 'basic: POST-request a single commit' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	# Connect to the origin server (w/o auth) and request a single
+	# commit via POST.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		post \
+		<"$OID_ONE_COMMIT_FILE" >OUT.output &&
+
+	# Stop the server to prevent the verification steps from faulting-in
+	# any missing objects.
+	#
+	stop_gvfs_protocol_server &&
+
+	# gvfs-helper prints a "packfile <path>" message for each received
+	# packfile.
+	#
+	verify_received_packfile_count 1 &&
+
+	verify_connection_count 1
+'
+
 #################################################################
 # Tests to see how gvfs-helper responds to network problems.
 #

From 0288b1109d90fa792b040fb4d4618d17af76ee5e Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Tue, 26 Nov 2019 14:13:57 -0500
Subject: [PATCH 107/207] gvfs-helper: add prefetch .keep file for last
 packfile

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 gvfs-helper.c          | 83 ++++++++++++++++++++++++++++++++++++++++--
 t/t5799-gvfs-helper.sh | 29 +++++++++++++++
 2 files changed, 109 insertions(+), 3 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index 38429a35433d9f..dae73bcd1a06e5 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -1882,6 +1882,7 @@ static void my_run_index_pack(struct gh__request_params *params UNUSED,
 
 static void my_finalize_packfile(struct gh__request_params *params,
 				 struct gh__response_status *status,
+				 int b_keep,
 				 const struct strbuf *temp_path_pack,
 				 const struct strbuf *temp_path_idx,
 				 struct strbuf *final_path_pack,
@@ -1901,6 +1902,21 @@ static void my_finalize_packfile(struct gh__request_params *params,
 		return;
 	}
 
+	if (b_keep) {
+		struct strbuf keep = STRBUF_INIT;
+		int fd_keep;
+
+		strbuf_addbuf(&keep, final_path_pack);
+		strbuf_strip_suffix(&keep, ".pack");
+		strbuf_addstr(&keep, ".keep");
+
+		fd_keep = xopen(keep.buf, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+		if (fd_keep >= 0)
+			close(fd_keep);
+
+		strbuf_release(&keep);
+	}
+
 	if (params->result_list) {
 		struct strbuf result_msg = STRBUF_INIT;
 
@@ -1953,7 +1969,7 @@ static void install_packfile(struct gh__request_params *params,
 	create_final_packfile_pathnames("vfs", packfile_checksum.buf, NULL,
 					&final_path_pack, &final_path_idx,
 					&final_filename);
-	my_finalize_packfile(params, status,
+	my_finalize_packfile(params, status, 0,
 			     &temp_path_pack, &temp_path_idx,
 			     &final_path_pack, &final_path_idx,
 			     &final_filename);
@@ -2049,6 +2065,12 @@ struct ph {
 
 /*
  * Extract the next packfile from the multipack.
+ * Install {.pack, .idx, .keep} set.
+ *
+ * Mark each successfully installed prefetch pack with a .keep file
+ * in case we have errors decoding/indexing later packs within the received
+ * multipart file.  (A later pass can delete the unnecessary .keep files
+ * from this and any previous invocations.)
  */
 static void extract_packfile_from_multipack(
 	struct gh__request_params *params,
@@ -2145,7 +2167,7 @@ static void extract_packfile_from_multipack(
 
 	} else {
 		/*
-		 * Server send the .idx immediately after the .pack in the
+		 * Server sent the .idx immediately after the .pack in the
 		 * data stream.  I'm tempted to verify it, but that defeats
 		 * the purpose of having it cached...
 		 */
@@ -2168,7 +2190,7 @@ static void extract_packfile_from_multipack(
 					&final_filename);
 	strbuf_release(&buf_timestamp);
 
-	my_finalize_packfile(params, status,
+	my_finalize_packfile(params, status, 1,
 			     &temp_path_pack, &temp_path_idx,
 			     &final_path_pack, &final_path_idx,
 			     &final_filename);
@@ -2183,6 +2205,56 @@ static void extract_packfile_from_multipack(
 	strbuf_release(&final_filename);
 }
 
+struct keep_files_data {
+	timestamp_t max_timestamp;
+	int pos_of_max;
+	struct string_list *keep_files;
+};
+
+static void cb_keep_files(const char *full_path, size_t full_path_len UNUSED,
+			  const char *file_path, void *void_data)
+{
+	struct keep_files_data *data = void_data;
+	const char *val;
+	timestamp_t t;
+
+	/*
+	 * We expect prefetch packfiles named like:
+	 *
+	 *     prefetch-<seconds>-<checksum>.keep
+	 */
+	if (!skip_prefix(file_path, "prefetch-", &val))
+		return;
+	if (!ends_with(val, ".keep"))
+		return;
+
+	t = strtol(val, NULL, 10);
+	if (t > data->max_timestamp) {
+		data->pos_of_max = data->keep_files->nr;
+		data->max_timestamp = t;
+	}
+
+	string_list_append(data->keep_files, full_path);
+}
+
+static void delete_stale_keep_files(
+	struct gh__request_params *params UNUSED,
+	struct gh__response_status *status UNUSED)
+{
+	struct string_list keep_files = STRING_LIST_INIT_DUP;
+	struct keep_files_data data = { 0, 0, &keep_files };
+	size_t k;
+
+	for_each_file_in_pack_dir(gh__global.buf_odb_path.buf,
+				  cb_keep_files, &data);
+	for (k = 0; k < keep_files.nr; k++) {
+		if ((ssize_t)k != data.pos_of_max)
+			unlink(keep_files.items[k].string);
+	}
+
+	string_list_clear(&keep_files, 0);
+}
+
 /*
  * Cut apart the received multipart response into individual packfiles
  * and install each one.
@@ -2201,6 +2273,7 @@ static void install_prefetch(struct gh__request_params *params,
 	unsigned short np;
 	unsigned short k;
 	int fd = -1;
+	int nr_installed = 0;
 
 	struct strbuf temp_path_mp = STRBUF_INIT;
 
@@ -2245,9 +2318,13 @@ static void install_prefetch(struct gh__request_params *params,
 		display_progress(params->progress, k + 1);
 		if (status->ec != GH__ERROR_CODE__OK)
 			break;
+		nr_installed++;
 	}
 	stop_progress(&params->progress);
 
+	if (nr_installed)
+		delete_stale_keep_files(params, status);
+
 cleanup:
 	if (fd != -1)
 		close(fd);
diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh
index 196a5513b1661a..29b5aa47b2f6aa 100755
--- a/t/t5799-gvfs-helper.sh
+++ b/t/t5799-gvfs-helper.sh
@@ -390,6 +390,30 @@ verify_received_packfile_count () {
 	return 0
 }
 
+# Verify that we have exactly 1 prefetch .keep file.
+# Optionally, verify that it has the given timestamp.
+#
+verify_prefetch_keeps () {
+	count=$(( $(ls -1 "$SHARED_CACHE_T1"/pack/prefetch-*.keep | wc -l) ))
+	if test $count -ne 1
+	then
+		echo "verify_prefetch_keeps: found $count, expected 1."
+		return 1
+	fi
+
+	if test $# -eq 1
+	then
+		count=$(( $(ls -1 "$SHARED_CACHE_T1"/pack/prefetch-$1-*.keep | wc -l) ))
+		if test $count -ne 1
+		then
+			echo "verify_prefetch_keeps: did not find expected keep file."
+			return 1
+		fi
+	fi
+
+	return 0
+}
+
 per_test_cleanup () {
 	stop_gvfs_protocol_server
 
@@ -643,6 +667,7 @@ test_expect_success 'basic: PREFETCH w/o arg gets all' '
 	# packfile.
 	#
 	verify_received_packfile_count 3 &&
+	verify_prefetch_keeps 1200000000 &&
 
 	stop_gvfs_protocol_server &&
 	verify_connection_count 1
@@ -664,6 +689,7 @@ test_expect_success 'basic: PREFETCH w/ arg' '
 	# packfile.
 	#
 	verify_received_packfile_count 2 &&
+	verify_prefetch_keeps 1200000000 &&
 
 	stop_gvfs_protocol_server &&
 	verify_connection_count 1
@@ -686,6 +712,7 @@ test_expect_success 'basic: PREFETCH mayhem no_prefetch_idx' '
 	# packfile.
 	#
 	verify_received_packfile_count 2 &&
+	verify_prefetch_keeps 1200000000 &&
 
 	stop_gvfs_protocol_server &&
 	verify_connection_count 1
@@ -707,6 +734,7 @@ test_expect_success 'basic: PREFETCH up-to-date' '
 	# packfile.
 	#
 	verify_received_packfile_count 2 &&
+	verify_prefetch_keeps 1200000000 &&
 
 	# Ask again for any packfiles newer than what we have cached locally.
 	#
@@ -720,6 +748,7 @@ test_expect_success 'basic: PREFETCH up-to-date' '
 	# packfile.
 	#
 	verify_received_packfile_count 0 &&
+	verify_prefetch_keeps 1200000000 &&
 
 	stop_gvfs_protocol_server &&
 	verify_connection_count 2

From 9cd5daad08c9fa0bf9e8be4cd1c2dc9b6896f979 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Wed, 13 Nov 2019 15:39:40 -0500
Subject: [PATCH 108/207] t5799: cleanup wc-l and grep-c lines

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 t/t5799-gvfs-helper.sh | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh
index afdacfe7134fe6..cedfea71246710 100755
--- a/t/t5799-gvfs-helper.sh
+++ b/t/t5799-gvfs-helper.sh
@@ -73,7 +73,7 @@ get_list_of_oids () {
 
 	if test $# -eq 1
 	then
-		actual_nr=$(( $(wc -l <"$OIDS_FILE") ))
+		actual_nr=$(wc -l <"$OIDS_FILE")
 		if test $actual_nr -lt $1
 		then
 			echo "get_list_of_oids: insufficient data.  Need $1 OIDs."
@@ -93,7 +93,7 @@ get_list_of_commit_and_tree_oids () {
 
 	if test $# -eq 1
 	then
-		actual_nr=$(( $(wc -l <"$OIDS_CT_FILE") ))
+		actual_nr=$(wc -l <"$OIDS_CT_FILE")
 		if test $actual_nr -lt $1
 		then
 			echo "get_list_of_commit_and_tree_oids: insufficient data.  Need $1 OIDs."
@@ -123,6 +123,8 @@ test_expect_success 'setup repos' '
 	# test_commit() creates commits, trees, tags, and blobs and leave
 	# them loose.
 	#
+	test_config gc.auto 0 &&
+	#
 	test_commit -C "$REPO_SRC" file1.txt &&
 	test_commit -C "$REPO_SRC" file2.txt &&
 	test_commit -C "$REPO_SRC" file3.txt &&
@@ -288,7 +290,7 @@ verify_connection_count () {
 		expected_nr=1
 	fi
 
-	actual_nr=$(( $(grep "Connection from" "$SERVER_LOG" | wc -l) ))
+	actual_nr=$(grep -c "Connection from" "$SERVER_LOG")
 
 	if test $actual_nr -ne $expected_nr
 	then
@@ -339,7 +341,7 @@ verify_received_packfile_count () {
 		expected_nr=1
 	fi
 
-	actual_nr=$(( $(grep "packfile " OUT.output | wc -l) ))
+	actual_nr=$(grep -c "packfile " <OUT.output)
 
 	if test $actual_nr -ne $expected_nr
 	then

From d535853798358f122dbd21ac99647873b1ca0300 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Mon, 16 Dec 2019 13:12:32 -0500
Subject: [PATCH 109/207] gvfs-helper: do one read in my_copy_fd_len_tail()

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 gvfs-helper.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index dae73bcd1a06e5..9b2f4bda8e781c 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -2039,18 +2039,18 @@ static int my_copy_fd_len_tail(int fd_in, int fd_out, ssize_t nr_bytes_total,
 {
 	memset(buf_tail, 0, tail_len);
 
+	if (my_copy_fd_len(fd_in, fd_out, nr_bytes_total) < 0)
+		return -1;
+
 	if (nr_bytes_total < tail_len)
-		return my_copy_fd_len(fd_in, fd_out, nr_bytes_total);
+		return 0;
 
-	if (my_copy_fd_len(fd_in, fd_out, (nr_bytes_total - tail_len)) < 0)
-		return -1;
+	/* Reset the position to read the tail */
+	lseek(fd_in, -tail_len, SEEK_CUR);
 
 	if (xread(fd_in, (char *)buf_tail, tail_len) != tail_len)
 		return -1;
 
-	if (write_in_full(fd_out, buf_tail, tail_len) < 0)
-		return -1;
-
 	return 0;
 }
 

From e086b2acd826334cf5b62e0b083c3c2aaee40987 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Fri, 18 Sep 2020 14:30:46 -0400
Subject: [PATCH 110/207] gvfs-helper: verify loose objects after write

It is possible that a loose object that is written from a GVFS protocol
"get object" request does not match the expected hash. Error out in this
case.

2021-10-30: The prototype for read_loose_object() changed in 31deb28 (fsck:
don't hard die on invalid object types, 2021-10-01) and 96e41f5 (fsck:
report invalid object type-path combinations, 2021-10-01).

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 gvfs-helper.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index 23e26d5360f1da..e0e55b6b1d74d8 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -1889,6 +1889,33 @@ static void install_packfile(struct gh__request_params *params,
 	child_process_clear(&ip);
 }
 
+/*
+ * Wrapper for read_loose_object() to read and verify the hash of a
+ * loose object, and discard the contents buffer.
+ *
+ * Returns 0 on success, negative on error (details may be written to stderr).
+ */
+static int verify_loose_object(const char *path,
+			       const struct object_id *expected_oid)
+{
+	enum object_type type;
+	void *contents = NULL;
+	unsigned long size;
+	struct strbuf type_name = STRBUF_INIT;
+	int ret;
+	struct object_info oi = OBJECT_INFO_INIT;
+	struct object_id real_oid = *null_oid();
+	oi.typep = &type;
+	oi.sizep = &size;
+	oi.type_name = &type_name;
+
+	ret = read_loose_object(path, expected_oid, &real_oid, &contents, &oi);
+	free(contents);
+	strbuf_release(&type_name);
+
+	return ret;
+}
+
 /*
  * Convert the tempfile into a permanent loose object in the ODB.
  */
@@ -1920,6 +1947,19 @@ static void install_loose(struct gh__request_params *params,
 	strbuf_addstr(&tmp_path, get_tempfile_path(params->tempfile));
 	close_tempfile_gently(params->tempfile);
 
+	/*
+	 * Compute the hash of the received content (while it is still
+	 * in a temp file) and verify that it matches the OID that we
+	 * requested and was not corrupted.
+	 */
+	if (verify_loose_object(tmp_path.buf, &params->loose_oid)) {
+		strbuf_addf(&status->error_message,
+			    "hash failed for received loose object '%s'",
+			    oid_to_hex(&params->loose_oid));
+		status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_LOOSE;
+		goto cleanup;
+	}
+
 	/*
 	 * Try to install the tempfile as the actual loose object.
 	 *

From 0733f30b6ae082d00bd9f6452ced9b749a730da7 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Mon, 16 Dec 2019 16:26:33 -0500
Subject: [PATCH 111/207] gvfs-helper: move content-type warning for prefetch
 packs

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 gvfs-helper.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index 9b2f4bda8e781c..d39aa9de2f0e8d 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -2459,25 +2459,25 @@ static void install_result(struct gh__request_params *params,
 			install_prefetch(params, status);
 			return;
 		}
-	}
-
-	if (!strcmp(status->content_type.buf, "application/x-git-packfile")) {
-		assert(params->b_is_post);
-		assert(params->objects_mode == GH__OBJECTS_MODE__POST);
+	} else {
+		if (!strcmp(status->content_type.buf, "application/x-git-packfile")) {
+			assert(params->b_is_post);
+			assert(params->objects_mode == GH__OBJECTS_MODE__POST);
 
-		install_packfile(params, status);
-		return;
-	}
+			install_packfile(params, status);
+			return;
+		}
 
-	if (!strcmp(status->content_type.buf,
-		    "application/x-git-loose-object")) {
-		/*
-		 * We get these for "gvfs/objects" GET and POST requests.
-		 *
-		 * Note that this content type is singular, not plural.
-		 */
-		install_loose(params, status);
-		return;
+		if (!strcmp(status->content_type.buf,
+			"application/x-git-loose-object")) {
+			/*
+			* We get these for "gvfs/objects" GET and POST requests.
+			*
+			* Note that this content type is singular, not plural.
+			*/
+			install_loose(params, status);
+			return;
+		}
 	}
 
 	strbuf_addf(&status->error_message,

From 17cb4286b8266fbc368bf7c1f9c4879e0e6a3759 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Fri, 18 Sep 2020 17:29:18 -0400
Subject: [PATCH 112/207] t5799: create corrupt blob test

Teach helper/test-gvfs-protocol to be able to send corrupted
loose blobs.

Add unit test for gvfs-helper to detect receipt of a corrupted loose blob.

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 t/helper/test-gvfs-protocol.c | 17 +++++++++++++++++
 t/t5799-gvfs-helper.sh        | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 49 insertions(+)

diff --git a/t/helper/test-gvfs-protocol.c b/t/helper/test-gvfs-protocol.c
index d89aa24d73dca7..99cb5b7e0dd515 100644
--- a/t/helper/test-gvfs-protocol.c
+++ b/t/helper/test-gvfs-protocol.c
@@ -518,6 +518,8 @@ static enum worker_result send_loose_object(const struct object_id *oid,
 	unsigned long size;
 	enum object_type type;
 	struct object_info oi = OBJECT_INFO_INIT;
+	int mayhem__corrupt_loose = string_list_has_string(&mayhem_list,
+							   "corrupt_loose");
 
 	/*
 	 * Since `test-gvfs-protocol` is mocking a real GVFS server (cache or
@@ -639,7 +641,22 @@ static enum worker_result send_loose_object(const struct object_id *oid,
 	do {
 		enum worker_result wr;
 		unsigned char *in0 = stream.next_in;
+
+		/*
+		 * Corrupt a byte in the buffer we compress, but undo it
+		 * before we compute the SHA on the portion of the raw
+		 * buffer included in the chunk we compressed.
+		 */
+		if (mayhem__corrupt_loose) {
+			logmayhem("corrupt_loose");
+			*in0 = *in0 ^ 0xff;
+		}
+
 		ret = git_deflate(&stream, Z_FINISH);
+
+		if (mayhem__corrupt_loose)
+			*in0 = *in0 ^ 0xff;
+
 		the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
 
 		/* [5] */
diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh
index cedfea71246710..3cb1459ea32476 100755
--- a/t/t5799-gvfs-helper.sh
+++ b/t/t5799-gvfs-helper.sh
@@ -1047,4 +1047,36 @@ test_expect_success 'integration: fully implicit: diff 2 commits' '
 		>OUT.output 2>OUT.stderr
 '
 
+#################################################################
+# Ensure that the SHA of the blob we received matches the SHA of
+# the blob we requested.
+#################################################################
+
+# Request a loose blob from the server.  Verify that the received
+# content matches the requested SHA.
+#
+test_expect_success 'catch corrupted loose object' '
+#	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem corrupt_loose &&
+
+	test_must_fail \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=trust \
+			--remote=origin \
+			get \
+			<"$OID_ONE_BLOB_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	# Verify corruption detected.
+	# Verify valid blob not included in response to client.
+
+	grep "hash failed for received loose object" OUT.stderr &&
+
+	# Verify that we did not write the corrupted blob to the ODB.
+
+	! verify_objects_in_shared_cache "$OID_ONE_BLOB_FILE" &&
+	git -C "$REPO_T1" fsck
+'
+
 test_done

From 809e8e3166fb72e6a24ce538904962cfd22108e6 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Tue, 17 Dec 2019 07:25:40 -0500
Subject: [PATCH 113/207] fetch: use gvfs-helper prefetch under config

The gvfs-helper allows us to download prefetch packs using a simple
subprocess call. The method declared in gvfs-helper-client.h will
automatically compute the timestamp when it is passed 0, and passing
NULL for the number of downloaded packs is valid.
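
For illustration, a GVFS-enabled enlistment (one that already has the
usual gvfs.* and core.useGvfsHelper settings) could opt into this
behavior roughly as follows.  This is only a sketch that assumes no
other core.gvfs bits are needed; real setups typically combine several
of the bit values documented in Documentation/config/core.txt:

	# enable the prefetch-during-fetch bit (value 128), then fetch
	git config core.gvfs 128
	git fetch origin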

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 Documentation/config/core.txt | 4 ++++
 builtin/fetch.c               | 5 +++++
 gvfs.h                        | 1 +
 3 files changed, 10 insertions(+)

diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index 6f9cba3b4c7ed7..716fc9dccd53a1 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -791,6 +791,10 @@ core.gvfs::
 		is first accessed and brought down to the client. Git.exe can't
 		currently tell the first access vs subsequent accesses so this
 		flag just blocks them from occurring at all.
+	GVFS_PREFETCH_DURING_FETCH::
+		Bit value 128
+		While performing a `git fetch` command, use the gvfs-helper to
+		perform a "prefetch" of commits and trees.
 --
 
 core.useGvfsHelper::
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 2d37a378ba7650..e97c8eccc5e555 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -21,6 +21,8 @@
 #include "string-list.h"
 #include "remote.h"
 #include "transport.h"
+#include "gvfs.h"
+#include "gvfs-helper-client.h"
 #include "run-command.h"
 #include "parse-options.h"
 #include "sigchain.h"
@@ -2549,6 +2551,9 @@ int cmd_fetch(int argc,
 	}
 	string_list_remove_duplicates(&list, 0);
 
+	if (core_gvfs & GVFS_PREFETCH_DURING_FETCH)
+		gh_client__prefetch(0, NULL);
+
 	if (negotiate_only) {
 		struct oidset acked_commits = OIDSET_INIT;
 		struct oidset_iter iter;
diff --git a/gvfs.h b/gvfs.h
index a8e58a6ebc88b8..99c5205aa043d7 100644
--- a/gvfs.h
+++ b/gvfs.h
@@ -28,6 +28,7 @@
 
 #define GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK (1 << 4)
 #define GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS      (1 << 6)
+#define GVFS_PREFETCH_DURING_FETCH		    (1 << 7)
 
 void gvfs_load_config_value(const char *value);
 int gvfs_config_is_set(int mask);

From 37848f1d4ec93bed05f6182fcd54dad71df2d787 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhost@microsoft.com>
Date: Wed, 18 Dec 2019 12:13:46 -0500
Subject: [PATCH 114/207] gvfs-helper: better support for concurrent packfile
 fetches

Teach gvfs-helper to better support the concurrent fetching of the
same packfile by multiple instances.

If 2 instances of gvfs-helper did a POST and requested the same set of
OIDs, they might receive the exact same packfile (same checksum SHA).
Both processes would then race to install their copy of the .pack and
.idx files into the ODB/pack directory.

This is not a problem on Unix (because of filesystem semantics).

On Windows, this can cause an EBUSY/EPERM problem for the loser while
the winner is holding a handle to the target files.  (The existing
packfile code already handled simple the existence and/or replacement
case.)

The solution presented here is to silently let the loser claim
victory iff the .pack and .idx are already present in the ODB.
(We can't check this in advance because we don't know the packfile
SHA checksum until after we receive it and run index-pack.)

We avoid using a per-packfile lockfile (or a single lockfile for
the `vfs-` prefix) to avoid the usual issues with stale lockfiles.
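
The new t5799 tests added below exercise this by POSTing the same set
of blobs twice and confirming that only one vfs-*.pack remains
installed.  Roughly (a sketch reusing the helpers defined in the test
script):

	git -C "$REPO_T1" gvfs-helper --cache-server=disable --remote=origin \
		--no-progress post <"$OIDS_BLOBS_FILE" &&
	verify_vfs_packfile_count 1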

Signed-off-by: Jeff Hostetler <jeffhost@microsoft.com>
---
 gvfs-helper.c          |  29 ++++++++++-
 t/t5799-gvfs-helper.sh | 114 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 141 insertions(+), 2 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index d39aa9de2f0e8d..d3cb28fd3a8090 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -1889,12 +1889,36 @@ static void my_finalize_packfile(struct gh__request_params *params,
 				 struct strbuf *final_path_idx,
 				 struct strbuf *final_filename)
 {
+	/*
+	 * Install the .pack and .idx into the ODB pack directory.
+	 *
+	 * We might be racing with other instances of gvfs-helper if
+	 * we, in parallel, both downloaded the exact same packfile
+	 * (with the same checksum SHA) and try to install it at the
+	 * same time.  This might happen on Windows where the loser
+	 * can get an EBUSY or EPERM trying to move/rename the
+	 * tempfile into the pack dir, for example.
+	 *
+	 * So, we always install the .pack before the .idx for
+	 * consistency.  And only if *WE* created the .pack and .idx
+	 * files, do we create the matching .keep (when requested).
+	 *
+	 * If we get an error and the target files already exist, we
+	 * silently eat the error.  Note that finalize_object_file()
+	 * has already munged errno (and it has various creation
+	 * strategies), so we don't bother looking at it.
+	 */
 	if (finalize_object_file(temp_path_pack->buf, final_path_pack->buf) ||
 	    finalize_object_file(temp_path_idx->buf, final_path_idx->buf)) {
 		unlink(temp_path_pack->buf);
 		unlink(temp_path_idx->buf);
-		unlink(final_path_pack->buf);
-		unlink(final_path_idx->buf);
+
+		if (file_exists(final_path_pack->buf) &&
+		    file_exists(final_path_idx->buf)) {
+			trace2_printf("%s: assuming ok for %s", TR2_CAT, final_path_pack->buf);
+			goto assume_ok;
+		}
+
 		strbuf_addf(&status->error_message,
 			    "could not install packfile '%s'",
 			    final_path_pack->buf);
@@ -1917,6 +1941,7 @@ static void my_finalize_packfile(struct gh__request_params *params,
 		strbuf_release(&keep);
 	}
 
+assume_ok:
 	if (params->result_list) {
 		struct strbuf result_msg = STRBUF_INIT;
 
diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh
index 29b5aa47b2f6aa..5c79654a63f0fa 100755
--- a/t/t5799-gvfs-helper.sh
+++ b/t/t5799-gvfs-helper.sh
@@ -372,6 +372,10 @@ verify_objects_in_shared_cache () {
 	return 0
 }
 
+# gvfs-helper prints a "packfile <path>" message for each received
+# packfile to stdout.  Verify that we received the expected number
+# of packfiles.
+#
 verify_received_packfile_count () {
 	if test $# -eq 1
 	then
@@ -414,6 +418,19 @@ verify_prefetch_keeps () {
 	return 0
 }
 
+# Verify that the number of vfs- packfiles present in the shared-cache
+# matches our expectations.
+#
+verify_vfs_packfile_count () {
+	count=$(( $(ls -1 "$SHARED_CACHE_T1"/pack/vfs-*.pack | wc -l) ))
+	if test $count -ne $1
+	then
+		echo "verify_vfs_packfile_count: expected $1; actual $count"
+		return 1
+	fi
+	return 0
+}
+
 per_test_cleanup () {
 	stop_gvfs_protocol_server
 
@@ -1176,6 +1193,103 @@ test_expect_success 'integration: fully implicit: diff 2 commits' '
 		>OUT.output 2>OUT.stderr
 '
 
+#################################################################
+# Duplicate packfile tests.
+#
+# If we request a fixed set of blobs, we should get a unique packfile
+# of the form "vfs-<sha>.{pack,idx}".  If we request that same set
+# again, the server should create and send the exact same packfile.
+# True web servers might build the custom packfile in random order,
+# but our test web server should give us consistent results.
+#
+# Verify that we can handle the duplicate pack and idx file properly.
+#################################################################
+
+test_expect_success 'duplicate: vfs- packfile' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		post \
+		<"$OIDS_BLOBS_FILE" >OUT.output 2>OUT.stderr &&
+	verify_received_packfile_count 1 &&
+	verify_vfs_packfile_count 1 &&
+
+	# Re-fetch the same packfile.  We do not care if it replaces
+	# the first one or if it silently fails to overwrite the existing
+	# one.  We just confirm that afterwards we only have 1 packfile.
+	#
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		post \
+		<"$OIDS_BLOBS_FILE" >OUT.output 2>OUT.stderr &&
+	verify_received_packfile_count 1 &&
+	verify_vfs_packfile_count 1 &&
+
+	stop_gvfs_protocol_server
+'
+
+# Return the absolute pathname of the first received packfile.
+#
+first_received_packfile_pathname () {
+	fn=$(sed -n '/^packfile/p' <OUT.output | head -1 | sed -n 's/^packfile \(.*\)/\1/p')
+	echo "$SHARED_CACHE_T1"/pack/"$fn"
+	return 0
+}
+
+test_expect_success 'duplicate and busy: vfs- packfile' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server &&
+
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		post \
+		<"$OIDS_BLOBS_FILE" \
+		>OUT.output \
+		2>OUT.stderr &&
+	verify_received_packfile_count 1 &&
+	verify_vfs_packfile_count 1 &&
+
+	# Re-fetch the same packfile, but hold the existing packfile
+	# open for writing on an obscure (and randomly-chosen) file
+	# descriptor.
+	#
+	# This should cause the replacement-install to fail (at least
+	# on Windows) with an EBUSY or EPERM or something.
+	#
+	# Verify that that error is eaten.  We do not care if the
+	# replacement is retried or if gvfs-helper simply discards the
+	# second instance.  We just confirm that afterwards we only
+	# have 1 packfile on disk and that the command "lies" and reports
+	# that it created the existing packfile.  (We want the lie because
+	# in normal usage, gh-client has already built the packed-git list
+	# in memory and is using gvfs-helper to fetch missing objects;
+	# gh-client does not care who does the fetch, but it needs to
+	# update its packed-git list and restart the object lookup.)
+	#
+	PACK=$(first_received_packfile_pathname) &&
+	git -C "$REPO_T1" gvfs-helper \
+		--cache-server=disable \
+		--remote=origin \
+		--no-progress \
+		post \
+		<"$OIDS_BLOBS_FILE" \
+		>OUT.output \
+		2>OUT.stderr \
+		9>>"$PACK" &&
+	verify_received_packfile_count 1 &&
+	verify_vfs_packfile_count 1 &&
+
+	stop_gvfs_protocol_server
+'
+
 #################################################################
 # Ensure that the SHA of the blob we received matches the SHA of
 # the blob we requested.

From 4aa479d1820ca79cee9f18d437f18c7b0dd6e5bc Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Mon, 3 Feb 2020 15:33:26 -0500
Subject: [PATCH 115/207] remote-curl: do not call fetch-pack when using
 gvfs-helper

When using the GVFS protocol, we should _never_ call "git fetch-pack"
to attempt downloading a pack-file via the regular Git protocol. It
appears that the mechanism that prevented this in the VFS for Git
world is due to the read-object hook populating the commits at the
new ref tips in a different way than the gvfs-helper does.

By acting as if the fetch-pack succeeds here in remote-curl, we
prevent a failed fetch.

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 remote-curl.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/remote-curl.c b/remote-curl.c
index a24e3a8b9abcc9..b2898809bd9f6a 100644
--- a/remote-curl.c
+++ b/remote-curl.c
@@ -1196,6 +1196,9 @@ static int fetch_git(struct discovery *heads,
 	struct strvec args = STRVEC_INIT;
 	struct strbuf rpc_result = STRBUF_INIT;
 
+	if (core_use_gvfs_helper)
+		return 0;
+
 	strvec_pushl(&args, "fetch-pack", "--stateless-rpc",
 		     "--stdin", "--lock-pack", NULL);
 	if (options.followtags)

From 53c764a3bc30d55bcfccae470fe3c488c0bce9b3 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Fri, 28 Jun 2024 10:53:38 -0400
Subject: [PATCH 116/207] t5799: explicitly test gvfs-helper --fallback and
 --no-fallback

Construct 2 new unit tests to explicitly verify the use of
`--fallback` and `--no-fallback` arguments to `gvfs-helper`.

When a cache-server is enabled, `gvfs-helper` will try to fetch
objects from it rather than the origin server.  If the cache-server
fails (and all cache-server retry attempts have been exhausted),
`gvfs-helper` can optionally "fallback" and try to fetch the objects
from the origin server.  (The retry logic is also applied to the
origin server, if the origin server fails on the first request.)

Add new unit tests to verify that `gvfs-helper` respects both the
`--max-retries` and `--[no-]fallback` arguments.

We use the "http_503" mayhem feature of the `test-gvfs-protocol`
server to force a 503 response on all requests to the cache-server and
the origin server end-points.  We can then count the number of connection
requests that `gvfs-helper` makes to the server and confirm both the
per-server retries and whether fallback was attempted.
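
For reference, the two tests drive the helper roughly like this (a
sketch; the repo and OID-list variables follow the existing t5799
conventions):

	# with fallback: 3 cache-server attempts, then 3 origin attempts
	git -C "$REPO_T1" gvfs-helper --cache-server=trust --remote=origin \
		--fallback get --max-retries=2 <"$OIDS_FILE"

	# without fallback: only the 3 cache-server attempts
	git -C "$REPO_T1" gvfs-helper --cache-server=trust --remote=origin \
		--no-fallback get --max-retries=2 <"$OIDS_FILE"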

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 t/t5799-gvfs-helper.sh | 64 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)

diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh
index 0a2d6180430b50..8fb81d1fb32c45 100755
--- a/t/t5799-gvfs-helper.sh
+++ b/t/t5799-gvfs-helper.sh
@@ -1036,6 +1036,70 @@ test_expect_success 'successful retry after http-error: origin get' '
 	verify_connection_count 2
 '
 
+#################################################################
+# So far we have confirmed that gvfs-helper can recover from a network
+# error (with retries, since the cache-server was disabled in all of
+# the above tests).  Try again with fallback turned on.
+#
+# With mayhem "http_503" turned on both the cache and origin server
+# will always throw a 503 error.
+#
+# Confirm that we tried to make six connections: we should hit the
+# cache-server 3 times (one initial attempt and two retries) and then
+# try the origin server 3 times.
+#
+#################################################################
+
+test_expect_success 'http-error: 503 Service Unavailable (with retry and fallback)' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_503 &&
+
+	test_expect_code $GH__ERROR_CODE__HTTP_503 \
+		git -C "$REPO_T1" gvfs-helper \
+		--cache-server=trust \
+		--remote=origin \
+		--fallback \
+		get \
+		--max-retries=2 \
+		<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	grep -q "error: get: (http:503)" OUT.stderr &&
+	verify_connection_count 6
+'
+
+#################################################################
+# Now repeat the above, but explicitly turn off fallback.
+#
+# Again, we use mayhem "http_503".  However, with fallback turned
+# off, we will only attempt the 3 connections to the cache server.
+# We will not try to hit the origin server.
+#
+# So we should only see a total of 3 connections rather than the
+# six in the previous test.
+#
+#################################################################
+
+test_expect_success 'http-error: 503 Service Unavailable (with retry and no-fallback)' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem http_503 &&
+
+	test_expect_code $GH__ERROR_CODE__HTTP_503 \
+		git -C "$REPO_T1" gvfs-helper \
+		--cache-server=trust \
+		--remote=origin \
+		--no-fallback \
+		get \
+		--max-retries=2 \
+		<"$OIDS_FILE" >OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	grep -q "error: get: (http:503)" OUT.stderr &&
+	verify_connection_count 3
+'
+
 #################################################################
 # Test HTTP Auth
 #

From ef7be83c68920121d106f9c1b31ae4595b848b44 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Thu, 12 Mar 2020 12:48:49 +0000
Subject: [PATCH 117/207] fetch: reprepare packs before checking connectivity

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 builtin/fetch.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/builtin/fetch.c b/builtin/fetch.c
index e97c8eccc5e555..52235b510c10de 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -23,6 +23,7 @@
 #include "transport.h"
 #include "gvfs.h"
 #include "gvfs-helper-client.h"
+#include "packfile.h"
 #include "run-command.h"
 #include "parse-options.h"
 #include "sigchain.h"
@@ -1162,6 +1163,13 @@ static int store_updated_refs(struct display_state *display_state,
 
 		opt.exclude_hidden_refs_section = "fetch";
 		rm = ref_map;
+
+		/*
+		 * Before checking connectivity, be really sure we have the
+		 * latest pack-files loaded into memory.
+		 */
+		reprepare_packed_git(the_repository);
+
 		if (check_connected(iterate_ref_map, &rm, &opt)) {
 			rc = error(_("%s did not send all necessary objects"),
 				   display_state->url);

From 7f9bc3c708a3ceee072df79e6994da6eecb654d2 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Wed, 12 Apr 2023 10:52:22 -0400
Subject: [PATCH 118/207] gvfs-helper: add --max-retries to prefetch verb

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 gvfs-helper.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index f03be891f061d0..9f569df0290c16 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -105,6 +105,11 @@
 //                       The GVFS Protocol defines this value as a way to
 //                       request cached packfiles NEWER THAN this timestamp.
 //
+//                 --max-retries=<n>     // defaults to "6"
+//
+//                       Number of retries after transient network errors.
+//                       Set to zero to disable such retries.
+//
 //     server
 //
 //            Interactive/sub-process mode.  Listen for a series of commands
@@ -3755,6 +3760,8 @@ static enum gh__error_code do_sub_cmd__prefetch(int argc, const char **argv)
 	static const char *since_str;
 	static struct option prefetch_options[] = {
 		OPT_STRING(0, "since", &since_str, N_("since"), N_("seconds since epoch")),
+		OPT_INTEGER('r', "max-retries", &gh__cmd_opts.max_retries,
+			    N_("retries for transient network errors")),
 		OPT_END(),
 	};
 
@@ -3774,6 +3781,8 @@ static enum gh__error_code do_sub_cmd__prefetch(int argc, const char **argv)
 		if (my_parse_since(since_str, &seconds_since_epoch))
 			die("could not parse 'since' field");
 	}
+	if (gh__cmd_opts.max_retries < 0)
+		gh__cmd_opts.max_retries = 0;
 
 	finish_init(1);
 

From 76af555fa768e40e0860d9591bb21086b7d0beb0 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Thu, 27 Jun 2024 11:02:36 -0400
Subject: [PATCH 119/207] gvfs-helper: don't fallback with new config

By default, GVFS Protocol-enabled Scalar clones will fall back to the
origin server if there is a network issue with the cache servers.
However (and especially for the prefetch endpoint) this may be a very
expensive operation for the origin server, leading to the user being
throttled. This shows up later in cases such as 'git push' or other web
operations.

To avoid this, create a new config option, 'gvfs.fallback', which
defaults to true. When set to 'false', pass '--no-fallback' from the
gvfs-helper client to the child gvfs-helper server process.
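
For example, a user (or an engineering system) that wants to protect
the origin server can disable fallback in a given repository with:

	git config gvfs.fallback false

after which gvfs-helper-client.c spawns the helper with `--no-fallback`
instead of `--fallback`.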

This will allow users who have hit this problem to avoid it in the
future. In case this becomes a more widespread problem, engineering
systems can enable the config option more broadly.

Enabling the config will of course lead to immediate failures for users,
but at least that will help diagnose the problem when it occurs, rather
than later, when the throttling shows up, the server load has already
passed, and the damage is done.

Signed-off-by: Derrick Stolee <stolee@gmail.com>
---
 Documentation/config/gvfs.txt |  5 +++++
 gvfs-helper-client.c          | 11 ++++++++++-
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/Documentation/config/gvfs.txt b/Documentation/config/gvfs.txt
index 6ab221ded36c91..7224939ac0b270 100644
--- a/Documentation/config/gvfs.txt
+++ b/Documentation/config/gvfs.txt
@@ -3,3 +3,8 @@ gvfs.cache-server::
 
 gvfs.sharedcache::
 	TODO
+
+gvfs.fallback::
+	If set to `false`, then never fall back to the origin server when the cache
+	server fails to connect. This will alert users to failures with the cache
+	server, but avoid causing throttling on the origin server.
diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c
index 4363b29c108859..c8ab947f5f9cc4 100644
--- a/gvfs-helper-client.c
+++ b/gvfs-helper-client.c
@@ -14,6 +14,7 @@
 #include "quote.h"
 #include "packfile.h"
 #include "hex.h"
+#include "config.h"
 
 static struct oidset gh_client__oidset_queued = OIDSET_INIT;
 static unsigned long gh_client__oidset_count;
@@ -340,6 +341,7 @@ static struct gh_server__process *gh_client__find_long_running_process(
 	struct gh_server__process *entry;
 	struct strvec argv = STRVEC_INIT;
 	struct strbuf quoted = STRBUF_INIT;
+	int fallback;
 
 	gh_client__choose_odb();
 
@@ -347,10 +349,17 @@ static struct gh_server__process *gh_client__find_long_running_process(
 	 * TODO decide what defaults we want.
 	 */
 	strvec_push(&argv, "gvfs-helper");
-	strvec_push(&argv, "--fallback");
 	strvec_push(&argv, "--cache-server=trust");
 	strvec_pushf(&argv, "--shared-cache=%s",
 			 gh_client__chosen_odb->path);
+
+	/* If gvfs.fallback=false, then don't add --fallback. */
+	if (!git_config_get_bool("gvfs.fallback", &fallback) &&
+	    !fallback)
+		strvec_push(&argv, "--no-fallback");
+	else
+		strvec_push(&argv, "--fallback");
+
 	strvec_push(&argv, "server");
 
 	sq_quote_argv_pretty(&quoted, argv.v);

From 904aa60b4efa620922eee7c5085d9df836938a57 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Thu, 26 Dec 2019 10:20:24 -0500
Subject: [PATCH 120/207] gvfs-helper: retry when creating temp files

When we create temp files for downloading packs, we use a name
based on the current timestamp. There is no randomness in the
name, so we can have collisions in the same second.

Retry the temp pack names using a new "-<retry>" suffix to the
name before the ".temp".

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 gvfs-helper.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index d3cb28fd3a8090..f03be891f061d0 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -1667,6 +1667,7 @@ static void my_create_tempfile(
 	struct strbuf buf = STRBUF_INIT;
 	int len_tp;
 	enum scld_error scld;
+	int retries;
 
 	gh__response_status__zero(status);
 
@@ -1715,7 +1716,15 @@ static void my_create_tempfile(
 		goto cleanup;
 	}
 
+	retries = 0;
 	*t1 = create_tempfile(buf.buf);
+	while (!*t1 && retries < 5) {
+		retries++;
+		strbuf_setlen(&buf, len_tp);
+		strbuf_addf(&buf, "%s-%d.%s", basename.buf, retries, suffix1);
+		*t1 = create_tempfile(buf.buf);
+	}
+
 	if (!*t1) {
 		strbuf_addf(&status->error_message,
 			    "could not create tempfile: '%s'",
@@ -1737,6 +1746,13 @@ static void my_create_tempfile(
 		strbuf_addf(  &buf, "%s.%s", basename.buf, suffix2);
 
 		*t2 = create_tempfile(buf.buf);
+		while (!*t2 && retries < 5) {
+			retries++;
+			strbuf_setlen(&buf, len_tp);
+			strbuf_addf(&buf, "%s-%d.%s", basename.buf, retries, suffix2);
+			*t2 = create_tempfile(buf.buf);
+		}
+
 		if (!*t2) {
 			strbuf_addf(&status->error_message,
 				    "could not create tempfile: '%s'",

From b282ae728d235fde4ea807eede596396a816e63e Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Thu, 13 Apr 2023 14:16:01 -0400
Subject: [PATCH 121/207] t5799: add tests to detect corrupt pack/idx files in
 prefetch

Add "mayhem" keys to generate corrupt packfiles and/or corrupt idx
files in prefetch by trashing the trailing checksum SHA.

Add unit tests to t5799 to verify that `gvfs-helper` detects these
corrupt pack/idx files.

Currently, only the (bad-pack, no-idx) case is correctly detected,
because `gvfs-helper` needs to locally compute the idx file itself.

A test for the (bad-pack, any-idx) case was also added (as a known
breakage) because `gvfs-helper` assumes that when the cache server
provides both, it doesn't need to verify them.  We will fix that
assumption in the next commit.

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 t/helper/test-gvfs-protocol.c | 82 ++++++++++++++++++++++++++++++++++-
 t/t5799-gvfs-helper.sh        | 68 ++++++++++++++++++++++++++++-
 2 files changed, 147 insertions(+), 3 deletions(-)

diff --git a/t/helper/test-gvfs-protocol.c b/t/helper/test-gvfs-protocol.c
index 8a83b5e27c6a1b..05fd627da33f66 100644
--- a/t/helper/test-gvfs-protocol.c
+++ b/t/helper/test-gvfs-protocol.c
@@ -1154,6 +1154,82 @@ static int ct_pack_sort_compare(const void *_a, const void *_b)
 	return (a->ph.timestamp < b->ph.timestamp) ? -1 : (a->ph.timestamp != b->ph.timestamp);
 }
 
+#define MY_MIN(a, b) ((a) < (b) ? (a) : (b))
+
+/*
+ * Like copy.c:copy_fd(), but corrupt part of the trailing SHA (if the
+ * given mayhem key is defined) as we copy it to the destination file.
+ *
+ * We don't know (or care) if the input file is a pack file or idx
+ * file, just that the final bytes are part of a SHA that we can
+ * corrupt.
+ */
+static int copy_fd_with_checksum_mayhem(int ifd, int ofd,
+					const char *mayhem_key,
+					ssize_t nr_wrong_bytes)
+{
+	off_t in_cur, in_len;
+	ssize_t bytes_to_copy;
+	ssize_t bytes_remaining_to_copy;
+	char buffer[8192];
+
+	if (!mayhem_key || !*mayhem_key || !nr_wrong_bytes ||
+	    !string_list_has_string(&mayhem_list, mayhem_key))
+		return copy_fd(ifd, ofd);
+
+	in_cur = lseek(ifd, 0, SEEK_CUR);
+	if (in_cur < 0)
+		return in_cur;
+
+	in_len = lseek(ifd, 0, SEEK_END);
+	if (in_len < 0)
+		return in_len;
+
+	if (lseek(ifd, in_cur, SEEK_SET) < 0)
+		return -1;
+
+	/* Copy the entire file except for the last few bytes. */
+
+	bytes_to_copy = (ssize_t)in_len - nr_wrong_bytes;
+	bytes_remaining_to_copy = bytes_to_copy;
+	while (bytes_remaining_to_copy) {
+		ssize_t to_read = MY_MIN((ssize_t)sizeof(buffer), bytes_remaining_to_copy);
+		ssize_t len = xread(ifd, buffer, to_read);
+
+		if (!len)
+			return -1; /* error on unexpected EOF */
+		if (len < 0)
+			return -1;
+		if (write_in_full(ofd, buffer, len) < 0)
+			return -1;
+
+		bytes_remaining_to_copy -= len;
+	}
+
+	/* Read the trailing bytes so that we can alter them before copying. */
+
+	while (nr_wrong_bytes) {
+		ssize_t to_read = MY_MIN((ssize_t)sizeof(buffer), nr_wrong_bytes);
+		ssize_t len = xread(ifd, buffer, to_read);
+		ssize_t k;
+
+		if (!len)
+			return -1; /* error on unexpected EOF */
+		if (len < 0)
+			return -1;
+
+		for (k = 0; k < len; k++)
+			buffer[k] ^= 0xff;
+
+		if (write_in_full(ofd, buffer, len) < 0)
+			return -1;
+
+		nr_wrong_bytes -= len;
+	}
+
+	return 0;
+}
+
 static enum worker_result send_ct_item(const struct ct_pack_item *item)
 {
 	struct ph ph_le;
@@ -1175,7 +1251,8 @@ static enum worker_result send_ct_item(const struct ct_pack_item *item)
 	trace2_printf("%s: sending prefetch pack '%s'", TR2_CAT, item->path_pack.buf);
 
 	fd_pack = git_open_cloexec(item->path_pack.buf, O_RDONLY);
-	if (fd_pack == -1 || copy_fd(fd_pack, 1)) {
+	if (fd_pack == -1 ||
+	    copy_fd_with_checksum_mayhem(fd_pack, 1, "bad_prefetch_pack_sha", 4)) {
 		logerror("could not send packfile");
 		wr = WR_IO_ERROR;
 		goto done;
@@ -1185,7 +1262,8 @@ static enum worker_result send_ct_item(const struct ct_pack_item *item)
 		trace2_printf("%s: sending prefetch idx '%s'", TR2_CAT, item->path_idx.buf);
 
 		fd_idx = git_open_cloexec(item->path_idx.buf, O_RDONLY);
-		if (fd_idx == -1 || copy_fd(fd_idx, 1)) {
+		if (fd_idx == -1 ||
+		    copy_fd_with_checksum_mayhem(fd_idx, 1, "bad_prefetch_idx_sha", 4)) {
 			logerror("could not send idx");
 			wr = WR_IO_ERROR;
 			goto done;
diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh
index 5c79654a63f0fa..dc91ee57ea3e67 100755
--- a/t/t5799-gvfs-helper.sh
+++ b/t/t5799-gvfs-helper.sh
@@ -1299,7 +1299,7 @@ test_expect_success 'duplicate and busy: vfs- packfile' '
 # content matches the requested SHA.
 #
 test_expect_success 'catch corrupted loose object' '
-#	test_when_finished "per_test_cleanup" &&
+	test_when_finished "per_test_cleanup" &&
 	start_gvfs_protocol_server_with_mayhem corrupt_loose &&
 
 	test_must_fail \
@@ -1322,4 +1322,70 @@ test_expect_success 'catch corrupted loose object' '
 	git -C "$REPO_T1" fsck
 '
 
+#################################################################
+# Ensure that we can detect when we receive a corrupted packfile
+# from the server.  This is not concerned with network IO errors,
+# but rather cases when the cache or origin server generates or
+# sends an invalid packfile.
+#
+# For example, if the server throws an exception and writes the
+# stack trace to the socket rather than or in addition to the
+# packfile content.
+#
+# Or for example, if the packfile on the server's disk is corrupt
+# and it sends it correctly, but the original data was already
+# garbage, so the client still has garbage (and retrying won't
+# help).
+#################################################################
+
+# Send corrupt PACK files w/o IDX files (so that `gvfs-helper`
+# must use `index-pack` to create it).  (And as a side-effect,
+# validate the PACK file is not corrupt.)
+test_expect_success 'prefetch corrupt pack without idx' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem \
+		bad_prefetch_pack_sha \
+		no_prefetch_idx &&
+
+	test_must_fail \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=disable \
+			--remote=origin \
+			--no-progress \
+			prefetch \
+			--max-retries=0 \
+			--since="1000000000" \
+			>OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server &&
+
+	# Verify corruption detected in pack when building
+	# local idx file for it.
+
+	grep -q "error: .* index-pack failed" <OUT.stderr
+'
+
+# Send corrupt PACK files with IDX files.  Since the cache server
+# sends both, `gvfs-helper` might fail to verify both of them.
+test_expect_failure 'prefetch corrupt pack with corrupt idx' '
+	test_when_finished "per_test_cleanup" &&
+	start_gvfs_protocol_server_with_mayhem \
+		bad_prefetch_pack_sha &&
+
+	# TODO This is a false-positive since `gvfs-helper`
+	# TODO does not verify either of them when a pair
+	# TODO is sent.
+	test_must_fail \
+		git -C "$REPO_T1" gvfs-helper \
+			--cache-server=disable \
+			--remote=origin \
+			--no-progress \
+			prefetch \
+			--max-retries=0 \
+			--since="1000000000" \
+			>OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server
+'
+
 test_done

From b668c69058e57028cd948673d8da09ab2912d892 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Fri, 28 Jun 2024 13:07:31 -0400
Subject: [PATCH 122/207] test-gvfs-protocol: add cache_http_503 to mayhem

Create a new `cache_http_503` mayhem method where only the cache server
sends a 503.  The normal `http_503` directs both the cache and origin
servers to send 503s.  This will be used to help test fallback.
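
A test can then break only the cache server, for example (a sketch
using the existing t5799 helper functions):

	start_gvfs_protocol_server_with_mayhem cache_http_503 &&
	# ...exercise gvfs-helper; the cache server now always answers
	# 503 while the origin server still answers normally...
	stop_gvfs_protocol_server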

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 t/helper/test-gvfs-protocol.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/t/helper/test-gvfs-protocol.c b/t/helper/test-gvfs-protocol.c
index 05fd627da33f66..6a26f786113f84 100644
--- a/t/helper/test-gvfs-protocol.c
+++ b/t/helper/test-gvfs-protocol.c
@@ -1581,6 +1581,14 @@ static enum worker_result dispatch(struct req *req)
 	const char *method;
 	enum worker_result wr;
 
+	if (strstr(req->uri_base.buf, MY_SERVER_TYPE__CACHE)) {
+		if (string_list_has_string(&mayhem_list, "cache_http_503")) {
+			logmayhem("cache_http_503");
+			return send_http_error(1, 503, "Service Unavailable", 2,
+					       WR_MAYHEM | WR_HANGUP);
+		}
+	}
+
 	if (string_list_has_string(&mayhem_list, "close_no_write")) {
 		logmayhem("close_no_write");
 		return WR_MAYHEM | WR_HANGUP;

From 806116ad7e5f2743f4627fda601297ea2cb8a7ca Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Tue, 3 Aug 2021 17:08:26 -0400
Subject: [PATCH 123/207] sparse: avoid warnings about known cURL issues in
 gvfs-helper.c

`sparse` complains with an error message like this:

	gvfs-helper.c:2912:17: error: expression using sizeof on a
	function

The culprit is this line:

	curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite);

Similar lines exist in `http-push.c` and other files that are in
upstream Git, and to avoid these bogus warnings, they are already
exempted from `sparse`'s tender, loving care. We simply add
`gvfs-helper.c` to that list.

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 748a9f4eb9dd2a..bd1034b6f0a027 100644
--- a/Makefile
+++ b/Makefile
@@ -2885,7 +2885,7 @@ gettext.sp gettext.s gettext.o: GIT-PREFIX
 gettext.sp gettext.s gettext.o: EXTRA_CPPFLAGS = \
 	-DGIT_LOCALE_PATH='"$(localedir_relative_SQ)"'
 
-http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp: SP_EXTRA_FLAGS += \
+http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp gvfs-helper.sp: SP_EXTRA_FLAGS += \
 	-DCURL_DISABLE_TYPECHECK
 
 pack-revindex.sp: SP_EXTRA_FLAGS += -Wno-memcpy-max-count

From aa86eeb1d8833c61489f2bfcc44ee1489d56b954 Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Thu, 13 Apr 2023 16:35:16 -0400
Subject: [PATCH 124/207] gvfs-helper: ignore .idx files in prefetch multi-part
 responses

The GVFS cache server can return multiple pairs of (.pack, .idx)
files.  If both are provided, `gvfs-helper` assumes that they are
valid without any validation.  This might cause problems if the
.pack file is corrupt inside the data stream.  (This might happen
if the cache server sends extra unexpected STDERR data or if the
.pack file is corrupt on the cache server's disk.)

All of the .pack file verification logic is already contained
within `git index-pack`, so let's ignore the .idx from the data
stream and always compute it ourselves.

This defeats the purpose of some of the data caching on the cache
server, but safety is more important.

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 gvfs-helper.c          | 57 +++++++++++++++++-------------------------
 t/t5799-gvfs-helper.sh |  5 +---
 2 files changed, 24 insertions(+), 38 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index 9f569df0290c16..403b2e9bd224d0 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -2126,7 +2126,6 @@ static void extract_packfile_from_multipack(
 {
 	struct ph ph;
 	struct tempfile *tempfile_pack = NULL;
-	struct tempfile *tempfile_idx = NULL;
 	int result = -1;
 	int b_no_idx_in_multipack;
 	struct object_id packfile_checksum;
@@ -2160,16 +2159,14 @@ static void extract_packfile_from_multipack(
 	b_no_idx_in_multipack = (ph.idx_len == maximum_unsigned_value_of_type(uint64_t) ||
 				 ph.idx_len == 0);
 
-	if (b_no_idx_in_multipack) {
-		my_create_tempfile(status, 0, "pack", &tempfile_pack, NULL, NULL);
-		if (!tempfile_pack)
-			goto done;
-	} else {
-		/* create a pair of tempfiles with the same basename */
-		my_create_tempfile(status, 0, "pack", &tempfile_pack, "idx", &tempfile_idx);
-		if (!tempfile_pack || !tempfile_idx)
-			goto done;
-	}
+	/*
+	 * We are going to harden `gvfs-helper` here and ignore the .idx file
+	 * if it is provided and always compute it locally so that we get the
+	 * added verification that `git index-pack` provides.
+	 */
+	my_create_tempfile(status, 0, "pack", &tempfile_pack, NULL, NULL);
+	if (!tempfile_pack)
+		goto done;
 
 	/*
 	 * Copy the current packfile from the open stream and capture
@@ -2196,38 +2193,31 @@ static void extract_packfile_from_multipack(
 
 	oid_to_hex_r(hex_checksum, &packfile_checksum);
 
-	if (b_no_idx_in_multipack) {
-		/*
-		 * The server did not send the corresponding .idx, so
-		 * we have to compute it ourselves.
-		 */
-		strbuf_addbuf(&temp_path_idx, &temp_path_pack);
-		strbuf_strip_suffix(&temp_path_idx, ".pack");
-		strbuf_addstr(&temp_path_idx, ".idx");
+	/*
+	 * Always compute the .idx file from the .pack file.
+	 */
+	strbuf_addbuf(&temp_path_idx, &temp_path_pack);
+	strbuf_strip_suffix(&temp_path_idx, ".pack");
+	strbuf_addstr(&temp_path_idx, ".idx");
 
-		my_run_index_pack(params, status,
-				  &temp_path_pack, &temp_path_idx,
-				  NULL);
-		if (status->ec != GH__ERROR_CODE__OK)
-			goto done;
+	my_run_index_pack(params, status,
+			  &temp_path_pack, &temp_path_idx,
+			  NULL);
+	if (status->ec != GH__ERROR_CODE__OK)
+		goto done;
 
-	} else {
+	if (!b_no_idx_in_multipack) {
 		/*
 		 * Server sent the .idx immediately after the .pack in the
-		 * data stream.  I'm tempted to verify it, but that defeats
-		 * the purpose of having it cached...
+		 * data stream.  Skip over it.
 		 */
-		if (my_copy_fd_len(fd_multipack, get_tempfile_fd(tempfile_idx),
-				   ph.idx_len) < 0) {
+		if (lseek(fd_multipack, ph.idx_len, SEEK_CUR) < 0) {
 			strbuf_addf(&status->error_message,
-				    "could not extract index[%d] in multipack",
+				    "could not skip index[%d] in multipack",
 				    k);
 			status->ec = GH__ERROR_CODE__COULD_NOT_INSTALL_PREFETCH;
 			goto done;
 		}
-
-		strbuf_addstr(&temp_path_idx, get_tempfile_path(tempfile_idx));
-		close_tempfile_gently(tempfile_idx);
 	}
 
 	strbuf_addf(&buf_timestamp, "%u", (unsigned int)ph.timestamp);
@@ -2243,7 +2233,6 @@ static void extract_packfile_from_multipack(
 
 done:
 	delete_tempfile(&tempfile_pack);
-	delete_tempfile(&tempfile_idx);
 	strbuf_release(&temp_path_pack);
 	strbuf_release(&temp_path_idx);
 	strbuf_release(&final_path_pack);
diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh
index dc91ee57ea3e67..0a2d6180430b50 100755
--- a/t/t5799-gvfs-helper.sh
+++ b/t/t5799-gvfs-helper.sh
@@ -1367,14 +1367,11 @@ test_expect_success 'prefetch corrupt pack without idx' '
 
 # Send corrupt PACK files with IDX files.  Since the cache server
 # sends both, `gvfs-helper` might fail to verify both of them.
-test_expect_failure 'prefetch corrupt pack with corrupt idx' '
+test_expect_success 'prefetch corrupt pack with corrupt idx' '
 	test_when_finished "per_test_cleanup" &&
 	start_gvfs_protocol_server_with_mayhem \
 		bad_prefetch_pack_sha &&
 
-	# TODO This is a false-positive since `gvfs-helper`
-	# TODO does not verify either of them when a pair
-	# TODO is sent.
 	test_must_fail \
 		git -C "$REPO_T1" gvfs-helper \
 			--cache-server=disable \

From aba1c2feb65b9a5eea2f59a8c37fbab75f6bf4ee Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Fri, 28 Jun 2024 13:09:35 -0400
Subject: [PATCH 125/207] t5799: add unit tests for new `gvfs.fallback` config
 setting

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
---
 t/t5799-gvfs-helper.sh | 98 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 98 insertions(+)

diff --git a/t/t5799-gvfs-helper.sh b/t/t5799-gvfs-helper.sh
index 8fb81d1fb32c45..5a118e522c8b07 100755
--- a/t/t5799-gvfs-helper.sh
+++ b/t/t5799-gvfs-helper.sh
@@ -24,8 +24,12 @@ test_set_port GIT_TEST_GVFS_PROTOCOL_PORT
 #        actually use it).  We are only testing explicit object
 #        fetching using gvfs-helper.exe in isolation.
 #
+#    repo_t2:
+#        Another empty repo to use after we contaminate t1.
+#
 REPO_SRC="$(pwd)"/repo_src
 REPO_T1="$(pwd)"/repo_t1
+REPO_T2="$(pwd)"/repo_t2
 
 # Setup some loopback URLs where test-gvfs-protocol.exe will be
 # listening.  We will spawn it directly inside the repo_src directory,
@@ -45,6 +49,7 @@ ORIGIN_URL=http://$HOST_PORT/servertype/origin
 CACHE_URL=http://$HOST_PORT/servertype/cache
 
 SHARED_CACHE_T1="$(pwd)"/shared_cache_t1
+SHARED_CACHE_T2="$(pwd)"/shared_cache_t2
 
 # The pid-file is created by test-gvfs-protocol.exe when it starts.
 # The server will shut down if/when we delete it.  (This is a little
@@ -182,6 +187,10 @@ test_expect_success 'setup repos' '
 	mkdir "$SHARED_CACHE_T1/pack" &&
 	mkdir "$SHARED_CACHE_T1/info" &&
 	#
+	mkdir "$SHARED_CACHE_T2" &&
+	mkdir "$SHARED_CACHE_T2/pack" &&
+	mkdir "$SHARED_CACHE_T2/info" &&
+	#
 	# setup repo_t1 and point all of the gvfs.* values to repo_src.
 	#
 	test_create_repo "$REPO_T1" &&
@@ -191,6 +200,13 @@ test_expect_success 'setup repos' '
 	git -C "$REPO_T1" config --local gvfs.sharedCache "$SHARED_CACHE_T1" &&
 	echo "$SHARED_CACHE_T1" >> "$REPO_T1"/.git/objects/info/alternates &&
 	#
+	test_create_repo "$REPO_T2" &&
+	git -C "$REPO_T2" branch -M main &&
+	git -C "$REPO_T2" remote add origin $ORIGIN_URL &&
+	git -C "$REPO_T2" config --local gvfs.cache-server $CACHE_URL &&
+	git -C "$REPO_T2" config --local gvfs.sharedCache "$SHARED_CACHE_T2" &&
+	echo "$SHARED_CACHE_T2" >> "$REPO_T2"/.git/objects/info/alternates &&
+	#
 	#
 	#
 	cat <<-EOF >creds.txt &&
@@ -203,6 +219,7 @@ test_expect_success 'setup repos' '
 	EOF
 	chmod 755 creds.sh &&
 	git -C "$REPO_T1" config --local credential.helper "!f() { cat \"$(pwd)\"/creds.txt; }; f" &&
+	git -C "$REPO_T2" config --local credential.helper "!f() { cat \"$(pwd)\"/creds.txt; }; f" &&
 	#
 	# Create some test data sets.
 	#
@@ -1257,6 +1274,87 @@ test_expect_success 'integration: fully implicit: diff 2 commits' '
 		>OUT.output 2>OUT.stderr
 '
 
+# T1 should be considered contaminated at this point.
+
+#################################################################
+# gvfs-helper.exe defaults to no fallback.
+# gvfs-helper-client.c defaults to adding `--fallback` to child process.
+#
+# `gvfs.fallback` was added to change the default behavior in the
+# gvfs-helper-client.c code to add either `--fallback` or `--no-fallback`
+# (for origin server load reasons).
+#
+# When `gvfs.fallback` is unset, we default to TRUE and pass `--fallback`.
+# Otherwise, we use the boolean value to decide.
+#
+# NOTE: We DO NOT attempt to count connection requests in the
+# following tests.  Since we are using a normal `git` command to drive
+# the `gvfs-helper-client.c` code (and spawn `git-gvfs-helper.exe`) we
+# cannot make assumptions on the number of child processes or
+# reqeusts.  The "promisor" logic may drive one or more single-item
+# GETs or a series of bulk POST attempts.  Therefore, we must rely
+# only on the result of the command and (implicitly) whether all
+# missing objects were resolved. We use mayhem features to selectively
+# break the cache and origin servers.
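+#
+# For illustration only (not exercised directly by these tests), the
+# fallback behavior could also be selected persistently with:
+#
+#     git config gvfs.fallback false
+#
+# The tests below instead pass the setting per-command with
+# `-c gvfs.fallback=<bool>`.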
+#################################################################
+
+test_expect_success 'integration: implicit-get: http_503: diff 2 commits' '
+	test_when_finished "per_test_cleanup" &&
+
+	# Tell both servers to always send 503.
+	start_gvfs_protocol_server_with_mayhem http_503 &&
+
+	# Implicitly demand-load everything without any pre-seeding.
+	# (We cannot tell whether fallback was used or not in this
+	# limited test.)
+	#
+	test_must_fail \
+		git -C "$REPO_T2" -c core.useGVFSHelper=true \
+			diff $(cat m1.branch)..$(cat m3.branch) \
+			>OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server
+'
+
+test_expect_success 'integration: implicit-get: cache_http_503,no-fallback: diff 2 commits' '
+	test_when_finished "per_test_cleanup" &&
+
+	# Tell cache server to send 503 and origin server to send 200.
+	start_gvfs_protocol_server_with_mayhem cache_http_503 &&
+
+	# Implicitly demand-load everything without any pre-seeding.
+	# This should fail because we do not allow fallback.
+	#
+	test_must_fail \
+		git -C "$REPO_T2" \
+			-c core.useGVFSHelper=true \
+			-c gvfs.fallback=false \
+			diff $(cat m1.branch)..$(cat m3.branch) \
+			>OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server
+'
+
+test_expect_success 'integration: implicit-get: cache_http_503,with-fallback: diff 2 commits' '
+	test_when_finished "per_test_cleanup" &&
+
+	# Tell cache server to send 503 and origin server to send 200.
+	start_gvfs_protocol_server_with_mayhem cache_http_503 &&
+
+	# Implicitly demand-load everything without any pre-seeding.
+	#
+	git -C "$REPO_T2" \
+		-c core.useGVFSHelper=true \
+		-c gvfs.fallback=true \
+		diff $(cat m1.branch)..$(cat m3.branch) \
+		>OUT.output 2>OUT.stderr &&
+
+	stop_gvfs_protocol_server
+'
+
+# T2 should be considered contaminated at this point.
+
+
 #################################################################
 # Duplicate packfile tests.
 #

From 9a6ebd7b6e10a03d44c9a05ae365aad6a4522e0a Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Mon, 14 Dec 2020 22:27:13 -0500
Subject: [PATCH 126/207] maintenance: care about gvfs.sharedCache config

For Scalar and VFS for Git, we use an alternate as a shared object
cache. We need to enable the maintenance builtin to work on that
shared object cache, especially in the background.

'scalar run <task>' would set GIT_OBJECT_DIRECTORY to handle this.

We set GIT_OBJECT_DIRECTORY based on the gvfs.sharedCache config,
but we also need the checks in pack_loose() to look at that object
directory instead of the current ODB's.
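
A minimal usage sketch (the cache path is only an example): with

    git config gvfs.sharedCache /path/to/shared/cache
    git maintenance run --task=loose-objects

the loose-objects task should now scan and pack loose objects in the
shared cache rather than in the repository's own object directory.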

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 builtin/gc.c | 23 ++++++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/builtin/gc.c b/builtin/gc.c
index e60dc90a96b4de..5d8f1561c12f88 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -1154,6 +1154,8 @@ static int write_loose_object_to_stdin(const struct object_id *oid,
 	return ++(d->count) > d->batch_size;
 }
 
+static const char *object_dir = NULL;
+
 static int pack_loose(struct maintenance_run_opts *opts)
 {
 	struct repository *r = the_repository;
@@ -1161,11 +1163,14 @@ static int pack_loose(struct maintenance_run_opts *opts)
 	struct write_loose_object_data data;
 	struct child_process pack_proc = CHILD_PROCESS_INIT;
 
+	if (!object_dir)
+		object_dir = r->objects->odb->path;
+
 	/*
 	 * Do not start pack-objects process
 	 * if there are no loose objects.
 	 */
-	if (!for_each_loose_file_in_objdir(r->objects->odb->path,
+	if (!for_each_loose_file_in_objdir(object_dir,
 					   bail_on_loose,
 					   NULL, NULL, NULL))
 		return 0;
@@ -1175,7 +1180,7 @@ static int pack_loose(struct maintenance_run_opts *opts)
 	strvec_push(&pack_proc.args, "pack-objects");
 	if (opts->quiet)
 		strvec_push(&pack_proc.args, "--quiet");
-	strvec_pushf(&pack_proc.args, "%s/pack/loose", r->objects->odb->path);
+	strvec_pushf(&pack_proc.args, "%s/pack/loose", object_dir);
 
 	pack_proc.in = -1;
 
@@ -1194,7 +1199,7 @@ static int pack_loose(struct maintenance_run_opts *opts)
 	data.count = 0;
 	data.batch_size = 50000;
 
-	for_each_loose_file_in_objdir(r->objects->odb->path,
+	for_each_loose_file_in_objdir(object_dir,
 				      write_loose_object_to_stdin,
 				      NULL,
 				      NULL,
@@ -1584,6 +1589,7 @@ static int maintenance_run(int argc, const char **argv, const char *prefix,
 	int i;
 	struct maintenance_run_opts opts = MAINTENANCE_RUN_OPTS_INIT;
 	struct gc_config cfg = GC_CONFIG_INIT;
+	const char *tmp_obj_dir = NULL;
 	struct option builtin_maintenance_run_options[] = {
 		OPT_BOOL(0, "auto", &opts.auto_flag,
 			 N_("run tasks based on the state of the repository")),
@@ -1621,6 +1627,17 @@ static int maintenance_run(int argc, const char **argv, const char *prefix,
 		usage_with_options(builtin_maintenance_run_usage,
 				   builtin_maintenance_run_options);
 
+	/*
+	 * To enable the VFS for Git/Scalar shared object cache, use
+	 * the gvfs.sharedcache config option to redirect the
+	 * maintenance to that location.
+	 */
+	if (!git_config_get_value("gvfs.sharedcache", &tmp_obj_dir) &&
+	    tmp_obj_dir) {
+		object_dir = xstrdup(tmp_obj_dir);
+		setenv(DB_ENVIRONMENT, object_dir, 1);
+	}
+
 	ret = maintenance_run_tasks(&opts, &cfg);
 	gc_config_release(&cfg);
 	return ret;

From 32ab78765816f8b73bb401e5feffe69d3dfcf388 Mon Sep 17 00:00:00 2001
From: Neeraj Singh <neerajsi@ntdev.microsoft.com>
Date: Fri, 5 Feb 2021 18:28:33 -0800
Subject: [PATCH 127/207] unpack-trees:virtualfilesystem: Improve efficiency of
 clear_ce_flags

When the virtualfilesystem is enabled, the previous implementation of
clear_ce_flags would iterate all of the cache entries and query whether
each one is in the virtual filesystem to determine whether to clear one
of the SKIP_WORKTREE bits. For each cache entry, we would do a hash
lookup for each parent directory in the is_included_in_virtualfilesystem
function.

The previous approach is slow for a typical Windows OS enlistment with
3 million files where only a small percentage is in the virtual
filesystem. The cost is
O(n_index_entries * n_chars_per_path * n_parent_directories_per_path).

In this change, we use the same approach as apply_virtualfilesystem,
which iterates the set of entries in the virtualfilesystem and searches
in the cache for the corresponding entries in order to clear their
flags. This approach has a cost of
O(n_virtual_filesystem_entries * n_chars_per_path * log(n_index_entries)).

The apply_virtualfilesystem code was refactored a bit and modified to
clear flags for all names that 'alias' a given virtual filesystem name
when ignore_case is set.

n_virtual_filesystem_entries is typically much less than
n_index_entries, in which case the new approach is much faster. We wind
up building the name hash for the index, but this occurs quickly thanks
to the multi-threading.

Signed-off-by: Neeraj Singh <neerajsi@ntdev.microsoft.com>
---
 name-hash.c         |  20 +++++++
 name-hash.h         |   1 +
 unpack-trees.c      |  27 ++++-----
 virtualfilesystem.c | 142 ++++++++++++++++++++++++++++----------------
 virtualfilesystem.h |   7 +++
 5 files changed, 132 insertions(+), 65 deletions(-)

diff --git a/name-hash.c b/name-hash.c
index d66de1cdfd5633..f57649de6dde52 100644
--- a/name-hash.c
+++ b/name-hash.c
@@ -747,6 +747,26 @@ struct cache_entry *index_file_exists(struct index_state *istate, const char *na
 	return NULL;
 }
 
+/*
+ * Return the next entry in the name hash (after 'ce') with the same name
+ * under case-insensitive comparison, or NULL if there is none.  Only
+ * meaningful when 'igncase' is set; returns NULL otherwise.
+ */
+struct cache_entry *index_file_next_match(struct index_state *istate, struct cache_entry *ce, int igncase)
+{
+	struct cache_entry *next;
+
+	if (!igncase || !ce) {
+		return NULL;
+	}
+
+	next = hashmap_get_next_entry(&istate->name_hash, ce, ent);
+	if (!next)
+		return NULL;
+
+	hashmap_for_each_entry_from(&istate->name_hash, next, ent) {
+		if (same_name(next, ce->name, ce_namelen(ce), igncase))
+			return next;
+	}
+
+	return NULL;
+}
+
 void free_name_hash(struct index_state *istate)
 {
 	if (!istate->name_hash_initialized)
diff --git a/name-hash.h b/name-hash.h
index 0cbfc4286316b2..d808eba3e3b672 100644
--- a/name-hash.h
+++ b/name-hash.h
@@ -12,6 +12,7 @@ int index_dir_find(struct index_state *istate, const char *name, int namelen,
 
 void adjust_dirname_case(struct index_state *istate, char *name);
 struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int igncase);
+struct cache_entry *index_file_next_match(struct index_state *istate, struct cache_entry *ce, int igncase);
 
 int test_lazy_init_name_hash(struct index_state *istate, int try_threaded);
 void add_name_hash(struct index_state *istate, struct cache_entry *ce);
diff --git a/unpack-trees.c b/unpack-trees.c
index 00d47b319b9349..4fa8ae800b6ed5 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -1722,14 +1722,6 @@ static int clear_ce_flags_1(struct index_state *istate,
 			continue;
 		}
 
-		/* if it's not in the virtual file system, exit early */
-		if (core_virtualfilesystem) {
-			if (is_included_in_virtualfilesystem(ce->name, ce->ce_namelen) > 0)
-				ce->ce_flags &= ~clear_mask;
-			cache++;
-			continue;
-		}
-
 		if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
 			break;
 
@@ -1806,12 +1798,19 @@ static int clear_ce_flags(struct index_state *istate,
 	xsnprintf(label, sizeof(label), "clear_ce_flags/0x%08lx_0x%08lx",
 		  (unsigned long)select_mask, (unsigned long)clear_mask);
 	trace2_region_enter("unpack_trees", label, the_repository);
-	rval = clear_ce_flags_1(istate,
-				istate->cache,
-				istate->cache_nr,
-				&prefix,
-				select_mask, clear_mask,
-				pl, 0, 0);
+	if (core_virtualfilesystem) {
+		rval = clear_ce_flags_virtualfilesystem(istate,
+							select_mask,
+							clear_mask);
+	} else {
+		rval = clear_ce_flags_1(istate,
+					istate->cache,
+					istate->cache_nr,
+					&prefix,
+					select_mask, clear_mask,
+					pl, 0, 0);
+	}
+
 	trace2_region_leave("unpack_trees", label, the_repository);
 
 	stop_progress(&istate->progress);
diff --git a/virtualfilesystem.c b/virtualfilesystem.c
index 66117dc8447757..269af2de1d969d 100644
--- a/virtualfilesystem.c
+++ b/virtualfilesystem.c
@@ -252,93 +252,133 @@ int is_excluded_from_virtualfilesystem(const char *pathname, int pathlen, int dt
 	return -1;
 }
 
-/*
- * Update the CE_SKIP_WORKTREE bits based on the virtual file system.
- */
-void apply_virtualfilesystem(struct index_state *istate)
+struct apply_virtual_filesystem_stats {
+	int nr_unknown;
+	int nr_vfs_dirs;
+	int nr_vfs_rows;
+	int nr_bulk_skip;
+	int nr_explicit_skip;
+};
+
+static void clear_ce_flags_virtualfilesystem_1(struct index_state *istate, int select_mask, int clear_mask,
+					       struct apply_virtual_filesystem_stats *stats)
 {
 	char *buf, *entry;
 	size_t i;
-	int nr_unknown = 0;
-	int nr_vfs_dirs = 0;
-	int nr_vfs_rows = 0;
-	int nr_bulk_skip = 0;
-	int nr_explicit_skip = 0;
-
-	if (!repo_config_get_virtualfilesystem(istate->repo))
-		return;
-
-	trace2_region_enter("vfs", "apply", the_repository);
 
 	if (!virtual_filesystem_data.len)
 		get_virtual_filesystem_data(istate->repo, &virtual_filesystem_data);
 
-	/* set CE_SKIP_WORKTREE bit on all entries */
-	for (i = 0; i < istate->cache_nr; i++)
-		istate->cache[i]->ce_flags |= CE_SKIP_WORKTREE;
-
-	/* clear CE_SKIP_WORKTREE bit for everything in the virtual file system */
+	/* clear specified flag bits for everything in the virtual file system */
 	entry = buf = virtual_filesystem_data.buf;
 	for (i = 0; i < virtual_filesystem_data.len; i++) {
 		if (buf[i] == '\0') {
+			struct cache_entry *ce;
 			ssize_t pos, len;
 
-			nr_vfs_rows++;
+			stats->nr_vfs_rows++;
 
 			len = buf + i - entry;
 
 			/* look for a directory wild card (ie "dir1/") */
 			if (buf[i - 1] == '/') {
-				nr_vfs_dirs++;
+				stats->nr_vfs_dirs++;
 				if (ignore_case)
 					adjust_dirname_case(istate, entry);
 				pos = index_name_pos(istate, entry, len);
 				if (pos < 0) {
-					pos = -pos - 1;
-					while ((size_t)pos < istate->cache_nr && !fspathncmp(istate->cache[pos]->name, entry, len)) {
-						if (istate->cache[pos]->ce_flags & CE_SKIP_WORKTREE)
-							nr_bulk_skip++;
-						istate->cache[pos]->ce_flags &= ~CE_SKIP_WORKTREE;
-						pos++;
+					for (pos = -pos - 1; (size_t)pos < istate->cache_nr; pos++) {
+						ce = istate->cache[pos];
+						if (fspathncmp(ce->name, entry, len))
+							break;
+
+						if (select_mask && !(ce->ce_flags & select_mask))
+							continue;
+
+						if (ce->ce_flags & clear_mask)
+							stats->nr_bulk_skip++;
+						ce->ce_flags &= ~clear_mask;
 					}
 				}
 			} else {
 				if (ignore_case) {
-					struct cache_entry *ce = index_file_exists(istate, entry, len, ignore_case);
-					if (ce) {
-						if (ce->ce_flags & CE_SKIP_WORKTREE)
-							nr_explicit_skip++;
-						ce->ce_flags &= ~CE_SKIP_WORKTREE;
-					}
-					else {
-						nr_unknown++;
-					}
+					ce = index_file_exists(istate, entry, len, ignore_case);
 				} else {
 					int pos = index_name_pos(istate, entry, len);
-					if (pos >= 0) {
-						if (istate->cache[pos]->ce_flags & CE_SKIP_WORKTREE)
-							nr_explicit_skip++;
-						istate->cache[pos]->ce_flags &= ~CE_SKIP_WORKTREE;
-					}
-					else {
-						nr_unknown++;
-					}
+
+					ce = NULL;
+					if (pos >= 0)
+						ce = istate->cache[pos];
+				}
+
+				if (ce) {
+					do {
+						if (!select_mask || (ce->ce_flags & select_mask)) {
+							if (ce->ce_flags & clear_mask)
+								stats->nr_explicit_skip++;
+							ce->ce_flags &= ~clear_mask;
+						}
+
+						/*
+						 * There may be aliases with different cases of the same
+						 * name that also need to be modified.
+						 */
+						if (ignore_case)
+							ce = index_file_next_match(istate, ce, ignore_case);
+						else
+							break;
+
+					} while (ce);
+				} else {
+					stats->nr_unknown++;
 				}
 			}
 
 			entry += len + 1;
 		}
 	}
+}
+
+/*
+ * Clear the specified flags for all entries in the virtual file system
+ * that match the specified select mask. Returns the number of entries
+ * processed.
+ */
+int clear_ce_flags_virtualfilesystem(struct index_state *istate, int select_mask, int clear_mask)
+{
+	struct apply_virtual_filesystem_stats stats = {0};
+
+	clear_ce_flags_virtualfilesystem_1(istate, select_mask, clear_mask, &stats);
+	return istate->cache_nr;
+}
+
+/*
+ * Update the CE_SKIP_WORKTREE bits based on the virtual file system.
+ */
+void apply_virtualfilesystem(struct index_state *istate)
+{
+	size_t i;
+	struct apply_virtual_filesystem_stats stats = {0};
+
+	if (!repo_config_get_virtualfilesystem(istate->repo))
+		return;
+
+	trace2_region_enter("vfs", "apply", the_repository);
+
+	/* set CE_SKIP_WORKTREE bit on all entries */
+	for (i = 0; i < istate->cache_nr; i++)
+		istate->cache[i]->ce_flags |= CE_SKIP_WORKTREE;
 
-	if (nr_vfs_rows > 0) {
-		trace2_data_intmax("vfs", the_repository, "apply/tracked", nr_bulk_skip + nr_explicit_skip);
+	clear_ce_flags_virtualfilesystem_1(istate, 0, CE_SKIP_WORKTREE, &stats);
+	if (stats.nr_vfs_rows > 0) {
+		trace2_data_intmax("vfs", the_repository, "apply/tracked", stats.nr_bulk_skip + stats.nr_explicit_skip);
 
-		trace2_data_intmax("vfs", the_repository, "apply/vfs_rows", nr_vfs_rows);
-		trace2_data_intmax("vfs", the_repository, "apply/vfs_dirs", nr_vfs_dirs);
+		trace2_data_intmax("vfs", the_repository, "apply/vfs_rows", stats.nr_vfs_rows);
+		trace2_data_intmax("vfs", the_repository, "apply/vfs_dirs", stats.nr_vfs_dirs);
 
-		trace2_data_intmax("vfs", the_repository, "apply/nr_unknown", nr_unknown);
-		trace2_data_intmax("vfs", the_repository, "apply/nr_bulk_skip", nr_bulk_skip);
-		trace2_data_intmax("vfs", the_repository, "apply/nr_explicit_skip", nr_explicit_skip);
+		trace2_data_intmax("vfs", the_repository, "apply/nr_unknown", stats.nr_unknown);
+		trace2_data_intmax("vfs", the_repository, "apply/nr_bulk_skip", stats.nr_bulk_skip);
+		trace2_data_intmax("vfs", the_repository, "apply/nr_explicit_skip", stats.nr_explicit_skip);
 	}
 
 	trace2_region_leave("vfs", "apply", the_repository);
diff --git a/virtualfilesystem.h b/virtualfilesystem.h
index 5e8c5b096df09a..7a31126ab9ea8c 100644
--- a/virtualfilesystem.h
+++ b/virtualfilesystem.h
@@ -6,6 +6,13 @@
  */
 void apply_virtualfilesystem(struct index_state *istate);
 
+/*
+ * Clear the specified flags for all entries in the virtual file system
+ * that match the specified select mask. Returns the number of entries
+ * processed.
+ */
+int clear_ce_flags_virtualfilesystem(struct index_state *istate, int select_mask, int clear_mask);
+
 /*
  * Return 1 if the requested item is found in the virtual file system,
  * 0 for not found and -1 for undecided.

From 3a82d290c9f323a6c23fc594a679d13bace9e818 Mon Sep 17 00:00:00 2001
From: Victoria Dye <vdye@github.com>
Date: Fri, 16 Jul 2021 10:35:04 -0400
Subject: [PATCH 128/207] release: create initial Windows installer build
 workflow

- trigger on tag matching basic "vfs" version pattern
- validate tag is annotated & matches stricter checks
- include `scalar`
- build x86_64 & portable git installers, upload artifacts to workflow

Update Apr 18, 2022: these builds run explicitly on 'windows-2019'
agents (rather than 'windows-latest') to ensure the correct version of
Visual Studio is used (verified in the pipeline via 'type -p mspdb140.dll').
Additionally, due to a known (but not-yet-fixed) issue downloading the
'build-installers' flavor of the Git for Windows SDK with the
'git-for-windows/setup-git-for-windows-sdk' Action, the SDK used is the
'full' flavor.
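
As a sketch of how a release build would be triggered (the version
number is only an example), pushing an annotated tag that matches the
pattern checked by the new workflow starts the installer builds:

    git tag -a -m "Git for Windows/VFS v2.39.0.vfs.0.0" v2.39.0.vfs.0.0
    git push origin v2.39.0.vfs.0.0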

Signed-off-by: Victoria Dye <vdye@github.com>
Signed-off-by: Johannes Schindelin <Johannes.Schindelin@gmx.de>
---
 .github/workflows/build-git-installers.yml | 315 +++++++++++++++++++++
 1 file changed, 315 insertions(+)
 create mode 100644 .github/workflows/build-git-installers.yml

diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
new file mode 100644
index 00000000000000..df4eb9e908015f
--- /dev/null
+++ b/.github/workflows/build-git-installers.yml
@@ -0,0 +1,315 @@
+name: build-git-installers
+
+on:
+  push:
+    tags:
+      - 'v[0-9]*vfs*' # matches "v<number><any characters>vfs<any characters>"
+
+jobs:
+  # Check prerequisites for the workflow
+  prereqs:
+    runs-on: ubuntu-latest
+    environment: release
+    outputs:
+      tag_name: ${{ steps.tag.outputs.name }}           # The full name of the tag, e.g. v2.32.0.vfs.0.0
+      tag_version: ${{ steps.tag.outputs.version }}     # The version number (without preceding "v"), e.g. 2.32.0.vfs.0.0
+    steps:
+      - name: Validate tag
+        run: |
+          echo "$GITHUB_REF" |
+          grep -E '^refs/tags/v2\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.vfs\.0\.(0|[1-9][0-9]*)(\.rc[0-9])?$' || {
+            echo "::error::${GITHUB_REF#refs/tags/} is not of the form v2.<X>.<Y>.vfs.0.<W>[.rc<N>]" >&2
+            exit 1
+          }
+      - name: Determine tag to build
+        run: |
+          echo "name=${GITHUB_REF#refs/tags/}" >>$GITHUB_OUTPUT
+          echo "version=${GITHUB_REF#refs/tags/v}" >>$GITHUB_OUTPUT
+        id: tag
+      - name: Clone git
+        uses: actions/checkout@v4
+      - name: Validate the tag identified with trigger
+        run: |
+          die () {
+            echo "::error::$*" >&2
+            exit 1
+          }
+
+          # `actions/checkout` only downloads the peeled tag (i.e. the commit)
+          git fetch origin +$GITHUB_REF:$GITHUB_REF
+
+          # Verify that the tag is annotated
+          test $(git cat-file -t "$GITHUB_REF") == "tag" || die "Tag ${{ steps.tag.outputs.name }} is not annotated"
+
+          # Verify tag follows rules in GIT-VERSION-GEN (i.e., matches the specified "DEF_VER" in
+          # GIT-VERSION-FILE) and matches tag determined from trigger
+          make GIT-VERSION-FILE
+          test "${{ steps.tag.outputs.version }}" == "$(sed -n 's/^GIT_VERSION = //p'< GIT-VERSION-FILE)" || die "GIT-VERSION-FILE tag does not match ${{ steps.tag.outputs.name }}"
+  # End check prerequisites for the workflow
+
+  # Build Windows installers (x86_64 & aarch64; installer & portable)
+  windows_pkg:
+    environment: release
+    needs: prereqs
+    strategy:
+      fail-fast: false
+      matrix:
+        arch:
+          - name: x86_64
+            artifact: pkg-x86_64
+            toolchain: x86_64
+            mingwprefix: mingw64
+            runner: windows-2019
+          - name: aarch64
+            artifact: pkg-aarch64
+            toolchain: clang-aarch64
+            mingwprefix: clangarm64
+            runner: ['self-hosted', '1ES.Pool=github-arm64-pool']
+    runs-on: ${{ matrix.arch.runner }}
+    env:
+      GPG_OPTIONS: "--batch --yes --no-tty --list-options no-show-photos --verify-options no-show-photos --pinentry-mode loopback"
+      HOME: "${{github.workspace}}\\home"
+      USERPROFILE: "${{github.workspace}}\\home"
+    steps:
+      - name: Configure user
+        shell: bash
+        run:
+          USER_NAME="${{github.actor}}" &&
+          USER_EMAIL="${{github.actor}}@users.noreply.github.com" &&
+          mkdir -p "$HOME" &&
+          git config --global user.name "$USER_NAME" &&
+          git config --global user.email "$USER_EMAIL" &&
+          echo "PACKAGER=$USER_NAME <$USER_EMAIL>" >>$GITHUB_ENV
+      - uses: git-for-windows/setup-git-for-windows-sdk@v1
+        with:
+          flavor: build-installers
+          architecture: ${{ matrix.arch.name }}
+      - name: Clone build-extra
+        shell: bash
+        run: |
+          git clone --filter=blob:none --single-branch -b main https://github.com/git-for-windows/build-extra /usr/src/build-extra
+      - name: Clone git
+        shell: bash
+        run: |
+          # Since we cannot directly clone a specified tag (as we would a branch with `git clone -b <branch name>`),
+          # this clone has to be done manually (via init->fetch->reset).
+
+          tag_name="${{ needs.prereqs.outputs.tag_name }}" &&
+          git -c init.defaultBranch=main init &&
+          git remote add -f origin https://github.com/git-for-windows/git &&
+          git fetch "https://github.com/${{github.repository}}" refs/tags/${tag_name}:refs/tags/${tag_name} &&
+          git reset --hard ${tag_name}
+      - name: Prepare home directory for code-signing
+        env:
+          CODESIGN_P12: ${{secrets.CODESIGN_P12}}
+          CODESIGN_PASS: ${{secrets.CODESIGN_PASS}}
+        if: env.CODESIGN_P12 != '' && env.CODESIGN_PASS != ''
+        shell: bash
+        run: |
+          cd home &&
+          mkdir -p .sig &&
+          echo -n "$CODESIGN_P12" | tr % '\n' | base64 -d >.sig/codesign.p12 &&
+          echo -n "$CODESIGN_PASS" >.sig/codesign.pass
+          git config --global alias.signtool '!sh "/usr/src/build-extra/signtool.sh"'
+      - name: Prepare home directory for GPG signing
+        if: env.GPGKEY != ''
+        shell: bash
+        run: |
+          # This section ensures that the identity for the GPG key matches the git user identity, otherwise
+          # signing will fail
+
+          echo '${{secrets.PRIVGPGKEY}}' | tr % '\n' | gpg $GPG_OPTIONS --import &&
+          info="$(gpg --list-keys --with-colons "${GPGKEY%% *}" | cut -d : -f 1,10 | sed -n '/^uid/{s|uid:||p;q}')" &&
+          git config --global user.name "${info% <*}" &&
+          git config --global user.email "<${info#*<}"
+        env:
+          GPGKEY: ${{secrets.GPGKEY}}
+      - name: Build mingw-w64-${{matrix.arch.toolchain}}-git
+        env:
+          GPGKEY: "${{secrets.GPGKEY}}"
+        shell: bash
+        run: |
+          set -x
+
+          # Make sure that there is a `/usr/bin/git` that can be used by `makepkg-mingw`
+          printf '#!/bin/sh\n\nexec /${{matrix.arch.mingwprefix}}/bin/git.exe "$@"\n' >/usr/bin/git &&
+
+          sh -x /usr/src/build-extra/please.sh build-mingw-w64-git --only-${{matrix.arch.name}} --build-src-pkg -o artifacts HEAD &&
+          if test -n "$GPGKEY"
+          then
+            for tar in artifacts/*.tar*
+            do
+              /usr/src/build-extra/gnupg-with-gpgkey.sh --detach-sign --no-armor $tar
+            done
+          fi &&
+
+          b=$PWD/artifacts &&
+          version=${{ needs.prereqs.outputs.tag_name }} &&
+          (cd /usr/src/MINGW-packages/mingw-w64-git &&
+          cp PKGBUILD.$version PKGBUILD &&
+          git commit -s -m "mingw-w64-git: new version ($version)" PKGBUILD &&
+          git bundle create "$b"/MINGW-packages.bundle origin/main..main)
+      - name: Publish mingw-w64-${{matrix.arch.toolchain}}-git
+        uses: actions/upload-artifact@v4
+        with:
+          name: "${{ matrix.arch.artifact }}"
+          path: artifacts
+  windows_artifacts:
+    environment: release
+    needs: [prereqs, windows_pkg]
+    env:
+      HOME: "${{github.workspace}}\\home"
+    strategy:
+      fail-fast: false
+      matrix:
+        arch:
+          - name: x86_64
+            artifact: pkg-x86_64
+            toolchain: x86_64
+            mingwprefix: mingw64
+            runner: windows-2019
+          - name: aarch64
+            artifact: pkg-aarch64
+            toolchain: clang-aarch64
+            mingwprefix: clangarm64
+            runner: ['self-hosted', '1ES.Pool=github-arm64-pool']
+        type:
+          - name: installer
+            fileprefix: Git
+          - name: portable
+            fileprefix: PortableGit
+    runs-on: ${{ matrix.arch.runner }}
+    steps:
+      - name: Download ${{ matrix.arch.artifact }}
+        uses: actions/download-artifact@v4
+        with:
+          name: ${{ matrix.arch.artifact }}
+          path: ${{ matrix.arch.artifact }}
+      - uses: git-for-windows/setup-git-for-windows-sdk@v1
+        with:
+          flavor: build-installers
+          architecture: ${{ matrix.arch.name }}
+      - name: Clone build-extra
+        shell: bash
+        run: |
+          git clone --filter=blob:none --single-branch -b main https://github.com/git-for-windows/build-extra /usr/src/build-extra
+      - name: Prepare home directory for code-signing
+        env:
+          CODESIGN_P12: ${{secrets.CODESIGN_P12}}
+          CODESIGN_PASS: ${{secrets.CODESIGN_PASS}}
+        if: env.CODESIGN_P12 != '' && env.CODESIGN_PASS != ''
+        shell: bash
+        run: |
+          mkdir -p home/.sig &&
+          echo -n "$CODESIGN_P12" | tr % '\n' | base64 -d >home/.sig/codesign.p12 &&
+          echo -n "$CODESIGN_PASS" >home/.sig/codesign.pass &&
+          git config --global alias.signtool '!sh "/usr/src/build-extra/signtool.sh"'
+      - name: Retarget auto-update to microsoft/git
+        shell: bash
+        run: |
+          set -x
+
+          b=/usr/src/build-extra &&
+
+          filename=$b/git-update-git-for-windows.config
+          tr % '\t' >$filename <<-\EOF &&
+          [update]
+          %fromFork = microsoft/git
+          EOF
+
+          sed -i -e '/^#include "file-list.iss"/a\
+          Source: {#SourcePath}\\..\\git-update-git-for-windows.config; DestDir: {app}\\${{matrix.arch.mingwprefix}}\\bin; Flags: replacesameversion; AfterInstall: DeleteFromVirtualStore' \
+            -e '/^Type: dirifempty; Name: {app}\\{#MINGW_BITNESS}$/i\
+          Type: files; Name: {app}\\{#MINGW_BITNESS}\\bin\\git-update-git-for-windows.config\
+          Type: dirifempty; Name: {app}\\{#MINGW_BITNESS}\\bin' \
+            $b/installer/install.iss
+      - name: Set the installer Publisher to the Git Client team
+        shell: bash
+        run: |
+          b=/usr/src/build-extra &&
+          sed -i -e 's/^\(AppPublisher=\).*/\1The Git Client Team at Microsoft/' $b/installer/install.iss
+      - name: Let the installer configure Visual Studio to use the installed Git
+        shell: bash
+        run: |
+          set -x
+
+          b=/usr/src/build-extra &&
+
+          sed -i -e '/^ *InstallAutoUpdater();$/a\
+              CustomPostInstall();' \
+            -e '/^ *UninstallAutoUpdater();$/a\
+              CustomPostUninstall();' \
+            $b/installer/install.iss &&
+
+          cat >>$b/installer/helpers.inc.iss <<\EOF
+
+          procedure CustomPostInstall();
+          begin
+              if not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\15.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+                not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\16.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+                not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\17.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+                not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\18.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+                not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\19.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+                not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\20.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) then
+                  LogError('Could not register TeamFoundation\GitSourceControl');
+          end;
+
+          procedure CustomPostUninstall();
+          begin
+              if not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\15.0\TeamFoundation\GitSourceControl','GitPath') or
+                not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\16.0\TeamFoundation\GitSourceControl','GitPath') or
+                not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\17.0\TeamFoundation\GitSourceControl','GitPath') or
+                not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\18.0\TeamFoundation\GitSourceControl','GitPath') or
+                not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\19.0\TeamFoundation\GitSourceControl','GitPath') or
+                not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\20.0\TeamFoundation\GitSourceControl','GitPath') then
+                  LogError('Could not unregister TeamFoundation\GitSourceControl');
+          end;
+          EOF
+      - name: Enable Scalar/C and the auto-updater in the installer by default
+        shell: bash
+        run: |
+          set -x
+
+          b=/usr/src/build-extra &&
+
+          sed -i -e "/ChosenOptions:=''/a\\
+              if (ExpandConstant('{param:components|/}')='/') then begin\n\
+                  WizardSelectComponents('autoupdate');\n\
+          #ifdef WITH_SCALAR\n\
+                  WizardSelectComponents('scalar');\n\
+          #endif\n\
+              end;" $b/installer/install.iss
+      - name: Build ${{matrix.type.name}} (${{matrix.arch.name}})
+        shell: bash
+        run: |
+          set -x
+
+          # Copy the PDB archive to the directory where `--include-pdbs` expects it
+          b=/usr/src/build-extra &&
+          mkdir -p $b/cached-source-packages &&
+          cp ${{matrix.arch.artifact}}/*-pdb* $b/cached-source-packages/ &&
+
+          # Build the installer, embedding PDBs
+          eval $b/please.sh make_installers_from_mingw_w64_git --include-pdbs \
+              --version=${{ needs.prereqs.outputs.tag_version }} \
+              -o artifacts --${{matrix.type.name}} \
+              --pkg=${{matrix.arch.artifact}}/mingw-w64-${{matrix.arch.toolchain}}-git-[0-9]*.tar.xz \
+              --pkg=${{matrix.arch.artifact}}/mingw-w64-${{matrix.arch.toolchain}}-git-doc-html-[0-9]*.tar.xz &&
+
+          if test portable = '${{matrix.type.name}}' && test -n "$(git config alias.signtool)"
+          then
+            git signtool artifacts/PortableGit-*.exe
+          fi &&
+          openssl dgst -sha256 artifacts/${{matrix.type.fileprefix}}-*.exe | sed "s/.* //" >artifacts/sha-256.txt
+      - name: Verify that .exe files are code-signed
+        if: env.CODESIGN_P12 != '' && env.CODESIGN_PASS != ''
+        shell: bash
+        run: |
+          PATH=$PATH:"/c/Program Files (x86)/Windows Kits/10/App Certification Kit/" \
+          signtool verify //pa artifacts/${{matrix.type.fileprefix}}-*.exe
+      - name: Publish ${{matrix.type.name}}-${{matrix.arch.name}}
+        uses: actions/upload-artifact@v4
+        with:
+          name: win-${{matrix.type.name}}-${{matrix.arch.name}}
+          path: artifacts
+  # End build Windows installers

From 4758384f08d3b4ace54841f60374fa77269a9f7b Mon Sep 17 00:00:00 2001
From: Jeff Hostetler <jeffhostetler@github.com>
Date: Tue, 17 Oct 2023 17:43:51 -0400
Subject: [PATCH 129/207] help: special-case HOST_CPU `universal`

When building Git as a universal binary on macOS, the binary supports more than
one target architecture. This is a bit of a problem for the `HOST_CPU`
setting that is woefully unprepared for such a situation, as it wants to
show the architecture that was hard-coded at build time.

In preparation for releasing universal builds, work around this by
special-casing `universal` and replacing it at run-time with the known
values `x86_64` or `arm64`.
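
For example, a universal build running on Apple Silicon should then
report the actual architecture (sample output, assuming an arm64 host):

    $ git version --build-options | grep cpu:
    cpu: arm64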

Signed-off-by: Jeff Hostetler <jeffhostetler@github.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 help.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/help.c b/help.c
index 5483ea8fd293fc..849e7da89f6e82 100644
--- a/help.c
+++ b/help.c
@@ -745,6 +745,22 @@ char *help_unknown_cmd(const char *cmd)
 	exit(1);
 }
 
+#if defined(__APPLE__)
+static const char *git_host_cpu(void) {
+	if (!strcmp(GIT_HOST_CPU, "universal")) {
+#if defined(__x86_64__)
+		return "x86_64";
+#elif defined(__aarch64__)
+		return "arm64";
+#endif
+	}
+
+	return GIT_HOST_CPU;
+}
+#undef GIT_HOST_CPU
+#define GIT_HOST_CPU git_host_cpu()
+#endif
+
 void get_version_info(struct strbuf *buf, int show_build_options)
 {
 	/*

From 35f2aa58e3ca3e5e66bdfb8656354aedf31446c7 Mon Sep 17 00:00:00 2001
From: Victoria Dye <vdye@github.com>
Date: Fri, 16 Jul 2021 10:38:13 -0400
Subject: [PATCH 130/207] release: add Mac OSX installer build

- include `scalar`
- build signed .dmg & .pkg for target OS version 10.6
- upload artifacts to workflow

Co-authored-by: Lessley Dennington <ldennington@github.com>
---
 .github/macos-installer/Makefile              | 157 +++++++++++++++++
 .../assets/etc/gitconfig.osxkeychain          |   2 +
 .../assets/git-components.plist               |  18 ++
 .../assets/scripts/postinstall                |  62 +++++++
 .github/macos-installer/assets/uninstall.sh   |  34 ++++
 .github/macos-installer/entitlements.xml      |  12 ++
 .github/scripts/codesign.sh                   |  65 +++++++
 .github/scripts/notarize.sh                   |  35 ++++
 .github/scripts/symlink-git-hardlinks.rb      |  19 +++
 .github/workflows/build-git-installers.yml    | 161 ++++++++++++++++++
 10 files changed, 565 insertions(+)
 create mode 100644 .github/macos-installer/Makefile
 create mode 100644 .github/macos-installer/assets/etc/gitconfig.osxkeychain
 create mode 100644 .github/macos-installer/assets/git-components.plist
 create mode 100755 .github/macos-installer/assets/scripts/postinstall
 create mode 100755 .github/macos-installer/assets/uninstall.sh
 create mode 100644 .github/macos-installer/entitlements.xml
 create mode 100755 .github/scripts/codesign.sh
 create mode 100755 .github/scripts/notarize.sh
 create mode 100644 .github/scripts/symlink-git-hardlinks.rb

diff --git a/.github/macos-installer/Makefile b/.github/macos-installer/Makefile
new file mode 100644
index 00000000000000..3e1d60dcbeb2ef
--- /dev/null
+++ b/.github/macos-installer/Makefile
@@ -0,0 +1,157 @@
+SHELL := /bin/bash
+SUDO := sudo
+C_INCLUDE_PATH := /usr/include
+CPLUS_INCLUDE_PATH := /usr/include
+LD_LIBRARY_PATH := /usr/lib
+
+OSX_VERSION := $(shell sw_vers -productVersion)
+TARGET_FLAGS := -mmacosx-version-min=$(OSX_VERSION) -DMACOSX_DEPLOYMENT_TARGET=$(OSX_VERSION)
+
+uname_M := $(shell sh -c 'uname -m 2>/dev/null || echo not')
+
+ARCH_UNIV := universal
+ARCH_FLAGS := -arch x86_64 -arch arm64
+
+CFLAGS := $(TARGET_FLAGS) $(ARCH_FLAGS)
+LDFLAGS := $(TARGET_FLAGS) $(ARCH_FLAGS)
+
+PREFIX := /usr/local
+GIT_PREFIX := $(PREFIX)/git
+
+BUILD_DIR := $(GITHUB_WORKSPACE)/payload
+DESTDIR := $(PWD)/stage/git-$(ARCH_UNIV)-$(VERSION)
+ARTIFACTDIR := build-artifacts
+SUBMAKE := $(MAKE) C_INCLUDE_PATH="$(C_INCLUDE_PATH)" CPLUS_INCLUDE_PATH="$(CPLUS_INCLUDE_PATH)" LD_LIBRARY_PATH="$(LD_LIBRARY_PATH)" TARGET_FLAGS="$(TARGET_FLAGS)" CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" NO_GETTEXT=1 NO_DARWIN_PORTS=1 prefix=$(GIT_PREFIX) DESTDIR=$(DESTDIR)
+CORES := $(shell bash -c "sysctl hw.ncpu | awk '{print \$$2}'")
+
+# Guard against environment variables
+APPLE_APP_IDENTITY =
+APPLE_INSTALLER_IDENTITY =
+APPLE_KEYCHAIN_PROFILE =
+
+.PHONY: image pkg payload codesign notarize
+
+.SECONDARY:
+
+$(DESTDIR)$(GIT_PREFIX)/VERSION-$(VERSION)-$(ARCH_UNIV):
+	rm -f $(BUILD_DIR)/git-$(VERSION)/osx-installed*
+	mkdir -p $(DESTDIR)$(GIT_PREFIX)
+	touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-built-keychain:
+	cd $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain; $(SUBMAKE) CFLAGS="$(CFLAGS) -g -O2 -Wall"
+	touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-built:
+	[ -d $(DESTDIR)$(GIT_PREFIX) ] && $(SUDO) rm -rf $(DESTDIR) || echo ok
+	cd $(BUILD_DIR)/git-$(VERSION); $(SUBMAKE) -j $(CORES) all strip
+	echo "================"
+	echo "Dumping Linkage"
+	cd $(BUILD_DIR)/git-$(VERSION); ./git version
+	echo "===="
+	cd $(BUILD_DIR)/git-$(VERSION); /usr/bin/otool -L ./git
+	echo "===="
+	cd $(BUILD_DIR)/git-$(VERSION); /usr/bin/otool -L ./git-http-fetch
+	echo "===="
+	cd $(BUILD_DIR)/git-$(VERSION); /usr/bin/otool -L ./git-http-push
+	echo "===="
+	cd $(BUILD_DIR)/git-$(VERSION); /usr/bin/otool -L ./git-remote-http
+	echo "===="
+	cd $(BUILD_DIR)/git-$(VERSION); /usr/bin/otool -L ./git-gvfs-helper
+	echo "================"
+	touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed-bin: $(BUILD_DIR)/git-$(VERSION)/osx-built $(BUILD_DIR)/git-$(VERSION)/osx-built-keychain
+	cd $(BUILD_DIR)/git-$(VERSION); $(SUBMAKE) install
+	cp $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain/git-credential-osxkeychain $(DESTDIR)$(GIT_PREFIX)/bin/git-credential-osxkeychain
+	mkdir -p $(DESTDIR)$(GIT_PREFIX)/contrib/completion
+	cp $(BUILD_DIR)/git-$(VERSION)/contrib/completion/git-completion.bash $(DESTDIR)$(GIT_PREFIX)/contrib/completion/
+	cp $(BUILD_DIR)/git-$(VERSION)/contrib/completion/git-completion.zsh $(DESTDIR)$(GIT_PREFIX)/contrib/completion/
+	cp $(BUILD_DIR)/git-$(VERSION)/contrib/completion/git-prompt.sh $(DESTDIR)$(GIT_PREFIX)/contrib/completion/
+	# This is needed for Git-Gui, GitK
+	mkdir -p $(DESTDIR)$(GIT_PREFIX)/lib/perl5/site_perl
+	[ ! -f $(DESTDIR)$(GIT_PREFIX)/lib/perl5/site_perl/Error.pm ] && cp $(BUILD_DIR)/git-$(VERSION)/perl/private-Error.pm $(DESTDIR)$(GIT_PREFIX)/lib/perl5/site_perl/Error.pm || echo done
+	touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed-man: $(BUILD_DIR)/git-$(VERSION)/osx-installed-bin
+	mkdir -p $(DESTDIR)$(GIT_PREFIX)/share/man
+	cp -R $(GITHUB_WORKSPACE)/manpages/ $(DESTDIR)$(GIT_PREFIX)/share/man
+	touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-built-subtree:
+	cd $(BUILD_DIR)/git-$(VERSION)/contrib/subtree; $(SUBMAKE) XML_CATALOG_FILES="$(XML_CATALOG_FILES)" all git-subtree.1
+	touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed-subtree: $(BUILD_DIR)/git-$(VERSION)/osx-built-subtree
+	mkdir -p $(DESTDIR)
+	cd $(BUILD_DIR)/git-$(VERSION)/contrib/subtree; $(SUBMAKE) XML_CATALOG_FILES="$(XML_CATALOG_FILES)" install install-man
+	touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed-assets: $(BUILD_DIR)/git-$(VERSION)/osx-installed-bin
+	mkdir -p $(DESTDIR)$(GIT_PREFIX)/etc
+	cat assets/etc/gitconfig.osxkeychain >> $(DESTDIR)$(GIT_PREFIX)/etc/gitconfig
+	cp assets/uninstall.sh $(DESTDIR)$(GIT_PREFIX)/uninstall.sh
+	sh -c "echo .DS_Store >> $(DESTDIR)$(GIT_PREFIX)/share/git-core/templates/info/exclude"
+
+symlinks:
+	mkdir -p $(ARTIFACTDIR)$(PREFIX)/bin
+	cd $(ARTIFACTDIR)$(PREFIX)/bin; find ../git/bin -type f -exec ln -sf {} \;
+	for man in man1 man3 man5 man7; do mkdir -p $(ARTIFACTDIR)$(PREFIX)/share/man/$$man; (cd $(ARTIFACTDIR)$(PREFIX)/share/man/$$man; ln -sf ../../../git/share/man/$$man/* ./); done
+	ruby ../scripts/symlink-git-hardlinks.rb $(ARTIFACTDIR)
+	touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed: $(DESTDIR)$(GIT_PREFIX)/VERSION-$(VERSION)-$(ARCH_UNIV) $(BUILD_DIR)/git-$(VERSION)/osx-installed-man $(BUILD_DIR)/git-$(VERSION)/osx-installed-assets $(BUILD_DIR)/git-$(VERSION)/osx-installed-subtree
+	find $(DESTDIR)$(GIT_PREFIX) -type d -exec chmod ugo+rx {} \;
+	find $(DESTDIR)$(GIT_PREFIX) -type f -exec chmod ugo+r {} \;
+	touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-built-assert-$(ARCH_UNIV): $(BUILD_DIR)/git-$(VERSION)/osx-built
+	File $(BUILD_DIR)/git-$(VERSION)/git
+	File $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain/git-credential-osxkeychain
+	touch $@
+
+disk-image/VERSION-$(VERSION)-$(ARCH_UNIV):
+	rm -f disk-image/*.pkg disk-image/VERSION-* disk-image/.DS_Store
+	mkdir disk-image
+	touch "$@"
+
+pkg_cmd := pkgbuild --identifier com.git.pkg --version $(VERSION) \
+	--root $(ARTIFACTDIR)$(PREFIX) --scripts assets/scripts \
+	--install-location $(PREFIX) --component-plist ./assets/git-components.plist
+
+ifdef APPLE_INSTALLER_IDENTITY
+	pkg_cmd += --sign "$(APPLE_INSTALLER_IDENTITY)"
+endif
+
+pkg_cmd += disk-image/git-$(VERSION)-$(ARCH_UNIV).pkg
+disk-image/git-$(VERSION)-$(ARCH_UNIV).pkg: disk-image/VERSION-$(VERSION)-$(ARCH_UNIV) symlinks
+	$(pkg_cmd)
+
+git-%-$(ARCH_UNIV).dmg:
+	hdiutil create git-$(VERSION)-$(ARCH_UNIV).uncompressed.dmg -fs HFS+ -srcfolder disk-image -volname "Git $(VERSION) $(ARCH_UNIV)" -ov 2>&1 | tee err || { \
+		grep "Resource busy" err && \
+		sleep 5 && \
+		hdiutil create git-$(VERSION)-$(ARCH_UNIV).uncompressed.dmg -fs HFS+ -srcfolder disk-image -volname "Git $(VERSION) $(ARCH_UNIV)" -ov; }
+	hdiutil convert -format UDZO -o $@ git-$(VERSION)-$(ARCH_UNIV).uncompressed.dmg
+	rm -f git-$(VERSION)-$(ARCH_UNIV).uncompressed.dmg
+
+payload: $(BUILD_DIR)/git-$(VERSION)/osx-installed $(BUILD_DIR)/git-$(VERSION)/osx-built-assert-$(ARCH_UNIV)
+
+pkg: disk-image/git-$(VERSION)-$(ARCH_UNIV).pkg
+
+image: git-$(VERSION)-$(ARCH_UNIV).dmg
+
+ifdef APPLE_APP_IDENTITY
+codesign:
+	@$(CURDIR)/../scripts/codesign.sh --payload="build-artifacts/usr/local/git" \
+		--identity="$(APPLE_APP_IDENTITY)" \
+		--entitlements="$(CURDIR)/entitlements.xml"
+endif
+
+# Notarization can only happen if the package is fully signed
+ifdef APPLE_KEYCHAIN_PROFILE
+notarize:
+	@$(CURDIR)/../scripts/notarize.sh \
+		--package="disk-image/git-$(VERSION)-$(ARCH_UNIV).pkg" \
+		--keychain-profile="$(APPLE_KEYCHAIN_PROFILE)"
+endif
diff --git a/.github/macos-installer/assets/etc/gitconfig.osxkeychain b/.github/macos-installer/assets/etc/gitconfig.osxkeychain
new file mode 100644
index 00000000000000..788266b3a40a9d
--- /dev/null
+++ b/.github/macos-installer/assets/etc/gitconfig.osxkeychain
@@ -0,0 +1,2 @@
+[credential]
+	helper = osxkeychain
diff --git a/.github/macos-installer/assets/git-components.plist b/.github/macos-installer/assets/git-components.plist
new file mode 100644
index 00000000000000..78db36777df3ed
--- /dev/null
+++ b/.github/macos-installer/assets/git-components.plist
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<array>
+	<dict>
+		<key>BundleHasStrictIdentifier</key>
+		<true/>
+		<key>BundleIsRelocatable</key>
+		<false/>
+		<key>BundleIsVersionChecked</key>
+		<true/>
+		<key>BundleOverwriteAction</key>
+		<string>upgrade</string>
+		<key>RootRelativeBundlePath</key>
+		<string>git/share/git-gui/lib/Git Gui.app</string>
+	</dict>
+</array>
+</plist>
diff --git a/.github/macos-installer/assets/scripts/postinstall b/.github/macos-installer/assets/scripts/postinstall
new file mode 100755
index 00000000000000..94056db9b7b864
--- /dev/null
+++ b/.github/macos-installer/assets/scripts/postinstall
@@ -0,0 +1,62 @@
+#!/bin/bash
+INSTALL_DST="$2"
+SCALAR_C_CMD="$INSTALL_DST/git/bin/scalar"
+SCALAR_DOTNET_CMD="/usr/local/scalar/scalar"
+SCALAR_UNINSTALL_SCRIPT="/usr/local/scalar/uninstall_scalar.sh"
+
+function cleanupScalar()
+{
+    echo "checking whether Scalar was installed"
+    if [ ! -f "$SCALAR_C_CMD" ]; then
+        echo "Scalar not installed; exiting..."
+        return 0
+    fi
+    echo "Scalar is installed!"
+
+    echo "looking for Scalar.NET"
+    if [ ! -f "$SCALAR_DOTNET_CMD" ]; then
+        echo "Scalar.NET not found; exiting..."
+        return 0
+    fi
+    echo "Scalar.NET found!"
+
+    currentUser=$(echo "show State:/Users/ConsoleUser" | scutil | awk '/Name :/ { print $3 }')
+
+    # Re-register Scalar.NET repositories with the newly-installed Scalar
+    for repo in $($SCALAR_DOTNET_CMD list); do
+        (
+            PATH="$INSTALL_DST/git/bin:$PATH"
+            sudo -u "$currentUser" scalar register $repo || \
+                echo "warning: skipping re-registration of $repo"
+        )
+    done
+
+    # Uninstall Scalar.NET
+    echo "removing Scalar.NET"
+
+    # Add /usr/local/bin to path - default install location of Homebrew
+    PATH="/usr/local/bin:$PATH"
+    if (sudo -u "$currentUser" brew list --cask scalar); then
+        # Remove from Homebrew
+        sudo -u "$currentUser" brew remove --cask scalar || echo "warning: Scalar.NET uninstall via Homebrew completed with code $?"
+        echo "Scalar.NET uninstalled via Homebrew!"
+    elif (sudo -u "$currentUser" brew list --cask scalar-azrepos); then
+        sudo -u "$currentUser" brew remove --cask scalar-azrepos || echo "warning: Scalar.NET with GVFS uninstall via Homebrew completed with code $?"
+        echo "Scalar.NET with GVFS uninstalled via Homebrew!"
+    elif [ -f $SCALAR_UNINSTALL_SCRIPT ]; then
+        # If not installed with Homebrew, manually remove package
+        sudo -S sh $SCALAR_UNINSTALL_SCRIPT || echo "warning: Scalar.NET uninstall completed with code $?"
+        echo "Scalar.NET uninstalled!"
+    else
+        echo "warning: Scalar.NET uninstall script not found"
+    fi
+
+    # Re-create the Scalar symlink, in case it was removed by the Scalar.NET uninstall operation
+    mkdir -p $INSTALL_DST/bin
+    /bin/ln -Fs "$SCALAR_C_CMD" "$INSTALL_DST/bin/scalar"
+}
+
+# Run Scalar cleanup (will exit if not applicable)
+cleanupScalar
+
+exit 0
\ No newline at end of file
diff --git a/.github/macos-installer/assets/uninstall.sh b/.github/macos-installer/assets/uninstall.sh
new file mode 100755
index 00000000000000..4fc79fbaa2e652
--- /dev/null
+++ b/.github/macos-installer/assets/uninstall.sh
@@ -0,0 +1,34 @@
+#!/bin/bash -e
+if [ ! -r "/usr/local/git" ]; then
+  echo "Git doesn't appear to be installed via this installer.  Aborting"
+  exit 1
+fi
+
+if [ "$1" != "--yes" ]; then
+  echo "This will uninstall git by removing /usr/local/git/, and symlinks"
+  printf "Type 'yes' if you are sure you wish to continue: "
+  read response
+else
+  response="yes"
+fi
+
+if [ "$response" == "yes" ]; then
+  # remove all of the symlinks we've created
+  pkgutil --files com.git.pkg | grep bin | while read f; do
+    if [ -L /usr/local/$f ]; then
+      sudo rm /usr/local/$f
+    fi
+  done
+
+  # forget receipts.
+  pkgutil --packages | grep com.git.pkg | xargs -I {} sudo pkgutil --forget {}
+  echo "Uninstalled"
+
+  # The guts all go here.
+  sudo rm -rf /usr/local/git/
+else
+  echo "Aborted"
+  exit 1
+fi
+
+exit 0
diff --git a/.github/macos-installer/entitlements.xml b/.github/macos-installer/entitlements.xml
new file mode 100644
index 00000000000000..46f675661149b6
--- /dev/null
+++ b/.github/macos-installer/entitlements.xml
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>com.apple.security.cs.allow-jit</key>
+    <true/>
+    <key>com.apple.security.cs.allow-unsigned-executable-memory</key>
+    <true/>
+    <key>com.apple.security.cs.disable-library-validation</key>
+    <true/>
+</dict>
+</plist>
diff --git a/.github/scripts/codesign.sh b/.github/scripts/codesign.sh
new file mode 100755
index 00000000000000..076b29f93be45e
--- /dev/null
+++ b/.github/scripts/codesign.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+sign_directory () {
+	(
+	cd "$1"
+	for f in *
+	do
+		macho=$(file --mime $f | grep mach)
+		# Runtime sign dylibs and Mach-O binaries
+		if [[ $f == *.dylib ]] || [ ! -z "$macho" ];
+		then
+			echo "Runtime Signing $f"
+			codesign -s "$IDENTITY" $f --timestamp --force --options=runtime --entitlements $ENTITLEMENTS_FILE
+		elif [ -d "$f" ];
+		then
+			echo "Signing files in subdirectory $f"
+			sign_directory "$f"
+
+		else
+			echo "Signing $f"
+			codesign -s "$IDENTITY" $f  --timestamp --force
+		fi
+	done
+	)
+}
+
+for i in "$@"
+do
+case "$i" in
+	--payload=*)
+	SIGN_DIR="${i#*=}"
+	shift # past argument=value
+	;;
+	--identity=*)
+	IDENTITY="${i#*=}"
+	shift # past argument=value
+	;;
+	--entitlements=*)
+	ENTITLEMENTS_FILE="${i#*=}"
+	shift # past argument=value
+	;;
+	*)
+	die "unknown option '$i'"
+	;;
+esac
+done
+
+if [ -z "$SIGN_DIR" ]; then
+    echo "error: missing directory argument"
+    exit 1
+elif [ -z "$IDENTITY" ]; then
+    echo "error: missing signing identity argument"
+    exit 1
+elif [ -z "$ENTITLEMENTS_FILE" ]; then
+    echo "error: missing entitlements file argument"
+    exit 1
+fi
+
+echo "======== INPUTS ========"
+echo "Directory: $SIGN_DIR"
+echo "Signing identity: $IDENTITY"
+echo "Entitlements: $ENTITLEMENTS_FILE"
+echo "======== END INPUTS ========"
+
+sign_directory "$SIGN_DIR"
diff --git a/.github/scripts/notarize.sh b/.github/scripts/notarize.sh
new file mode 100755
index 00000000000000..9315d688afbd49
--- /dev/null
+++ b/.github/scripts/notarize.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+for i in "$@"
+do
+case "$i" in
+	--package=*)
+	PACKAGE="${i#*=}"
+	shift # past argument=value
+	;;
+	--keychain-profile=*)
+	KEYCHAIN_PROFILE="${i#*=}"
+	shift # past argument=value
+	;;
+	*)
+	die "unknown option '$i'"
+	;;
+esac
+done
+
+if [ -z "$PACKAGE" ]; then
+    echo "error: missing package argument"
+    exit 1
+elif [ -z "$KEYCHAIN_PROFILE" ]; then
+    echo "error: missing keychain profile argument"
+    exit 1
+fi
+
+# Exit as soon as any line fails
+set -e
+
+# Send the notarization request
+xcrun notarytool submit -v "$PACKAGE" -p "$KEYCHAIN_PROFILE" --wait
+
+# Staple the notarization ticket (to allow offline installation)
+xcrun stapler staple -v "$PACKAGE"
diff --git a/.github/scripts/symlink-git-hardlinks.rb b/.github/scripts/symlink-git-hardlinks.rb
new file mode 100644
index 00000000000000..174802ccc85d93
--- /dev/null
+++ b/.github/scripts/symlink-git-hardlinks.rb
@@ -0,0 +1,19 @@
+#!/usr/bin/env ruby
+
+install_prefix = ARGV[0]
+puts install_prefix
+git_binary = File.join(install_prefix, '/usr/local/git/bin/git')
+
+[
+  ['git'          , File.join(install_prefix, '/usr/local/git/bin')],
+  ['../../bin/git', File.join(install_prefix, '/usr/local/git/libexec/git-core')]
+].each do |link, path|
+  Dir.glob(File.join(path, '*')).each do |file|
+    next if file == git_binary
+		puts "#{file} #{File.size(file)} == #{File.size(git_binary)}"
+    next unless File.size(file) == File.size(git_binary)
+    puts "Symlinking #{file}"
+    puts `ln -sf #{link} #{file}`
+    exit $?.exitstatus if $?.exitstatus != 0
+  end
+end
\ No newline at end of file
diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
index df4eb9e908015f..46d65850a5a381 100644
--- a/.github/workflows/build-git-installers.yml
+++ b/.github/workflows/build-git-installers.yml
@@ -313,3 +313,164 @@ jobs:
           name: win-${{matrix.type.name}}-${{matrix.arch.name}}
           path: artifacts
   # End build Windows installers
+
+  # Build and sign Mac OSX installers & upload artifacts
+  create-macos-artifacts:
+    runs-on: macos-latest-xl-arm64
+    needs: prereqs
+    env:
+      VERSION: "${{ needs.prereqs.outputs.tag_version }}"
+    environment: release
+    steps:
+      - name: Check out repository
+        uses: actions/checkout@v4
+        with:
+          path: 'git'
+
+      - name: Install Git dependencies
+        run: |
+          set -ex
+
+          # Install x86_64 packages
+          arch -x86_64 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+          arch -x86_64 /usr/local/bin/brew install gettext
+
+          # Install arm64 packages
+          brew install automake asciidoc xmlto docbook
+          brew link --force gettext
+
+          # Make universal gettext library
+          lipo -create -output libintl.a /usr/local/opt/gettext/lib/libintl.a /opt/homebrew/opt/gettext/lib/libintl.a
+
+      - name: Set up signing/notarization infrastructure
+        env:
+          A1: ${{ secrets.APPLICATION_CERTIFICATE_BASE64 }}
+          A2: ${{ secrets.APPLICATION_CERTIFICATE_PASSWORD }}
+          I1: ${{ secrets.INSTALLER_CERTIFICATE_BASE64 }}
+          I2: ${{ secrets.INSTALLER_CERTIFICATE_PASSWORD }}
+          N1: ${{ secrets.APPLE_TEAM_ID }}
+          N2: ${{ secrets.APPLE_DEVELOPER_ID }}
+          N3: ${{ secrets.APPLE_DEVELOPER_PASSWORD }}
+          N4: ${{ secrets.APPLE_KEYCHAIN_PROFILE }}
+        run: |
+          echo "Setting up signing certificates"
+          security create-keychain -p pwd $RUNNER_TEMP/buildagent.keychain
+          security default-keychain -s $RUNNER_TEMP/buildagent.keychain
+          security unlock-keychain -p pwd $RUNNER_TEMP/buildagent.keychain
+          # Prevent re-locking
+          security set-keychain-settings $RUNNER_TEMP/buildagent.keychain
+
+          echo "$A1" | base64 -D > $RUNNER_TEMP/cert.p12
+          security import $RUNNER_TEMP/cert.p12 \
+            -k $RUNNER_TEMP/buildagent.keychain \
+            -P "$A2" \
+            -T /usr/bin/codesign
+          security set-key-partition-list \
+            -S apple-tool:,apple:,codesign: \
+            -s -k pwd \
+            $RUNNER_TEMP/buildagent.keychain
+
+          echo "$I1" | base64 -D > $RUNNER_TEMP/cert.p12
+          security import $RUNNER_TEMP/cert.p12 \
+            -k $RUNNER_TEMP/buildagent.keychain \
+            -P "$I2" \
+            -T /usr/bin/pkgbuild
+          security set-key-partition-list \
+            -S apple-tool:,apple:,pkgbuild: \
+            -s -k pwd \
+            $RUNNER_TEMP/buildagent.keychain
+
+          echo "Setting up notarytool"
+          xcrun notarytool store-credentials \
+            --team-id "$N1" \
+            --apple-id "$N2" \
+            --password "$N3" \
+            "$N4"
+
+      - name: Build, sign, and notarize artifacts
+        env:
+          A3: ${{ secrets.APPLE_APPLICATION_SIGNING_IDENTITY }}
+          I3: ${{ secrets.APPLE_INSTALLER_SIGNING_IDENTITY }}
+          N4: ${{ secrets.APPLE_KEYCHAIN_PROFILE }}
+        run: |
+          die () {
+            echo "$*" >&2
+            exit 1
+          }
+
+          # Trace execution, stop on error
+          set -ex
+
+          # Write to "version" file to force match with trigger payload version
+          echo "${{ needs.prereqs.outputs.tag_version }}" >>git/version
+
+          # Configure universal build
+          cat >git/config.mak <<EOF
+          # Create universal binaries. HOST_CPU is a bit of a lie and only
+          # used in 'git version --build-options'.  We'll fix that in code.
+          HOST_CPU = universal
+          BASIC_CFLAGS += -arch arm64 -arch x86_64
+          EOF
+
+          # Configure the Git build to pick up gettext
+          homebrew_prefix="$(brew --prefix)"
+          cat >>git/config.mak <<EOF
+          CFLAGS = -I$homebrew_prefix/include -I/usr/local/opt/gettext/include
+          LDFLAGS = -L"$(pwd)"
+          EOF
+
+          # Configure the Git build to use the OS-supplied libcurl.
+          cat >>git/config.mak <<EOF
+          CURL_LDFLAGS := -lcurl
+          CURL_CONFIG := /usr/bin/true
+          EOF
+
+          # Avoid even building the dashed built-ins; Those should be hard-linked
+          # copies of the `git` executable but would end up as actual copies instead,
+          # bloating the size of the `.dmg` indecently.
+          echo 'SKIP_DASHED_BUILT_INS = YabbaDabbaDoo' >>git/config.mak
+
+          # To make use of the catalogs...
+          export XML_CATALOG_FILES=$homebrew_prefix/etc/xml/catalog
+
+          make -C git -j$(sysctl -n hw.physicalcpu) GIT-VERSION-FILE dist dist-doc
+
+          # Extract tarballs
+          mkdir payload manpages
+          tar -xvf git/git-$VERSION.tar.gz -C payload
+          tar -xvf git/git-manpages-$VERSION.tar.gz -C manpages
+
+          # Lay out payload
+          cp git/config.mak payload/git-$VERSION/config.mak
+          make -C git/.github/macos-installer V=1 payload
+
+          # Codesign payload
+          cp -R stage/git-universal-$VERSION/ \
+            git/.github/macos-installer/build-artifacts
+          make -C git/.github/macos-installer V=1 codesign \
+            APPLE_APP_IDENTITY="$A3" || die "Creating signed payload failed"
+
+          # Build and sign pkg
+          make -C git/.github/macos-installer V=1 pkg \
+            APPLE_INSTALLER_IDENTITY="$I3" \
+            || die "Creating signed pkg failed"
+
+          # Notarize pkg
+          make -C git/.github/macos-installer V=1 notarize \
+            APPLE_INSTALLER_IDENTITY="$I3" APPLE_KEYCHAIN_PROFILE="$N4" \
+            || die "Creating signed and notarized pkg failed"
+
+          # Create DMG
+          make -C git/.github/macos-installer V=1 image || die "Creating DMG failed"
+
+          # Move all artifacts into top-level directory
+          mv git/.github/macos-installer/disk-image/*.pkg git/.github/macos-installer/
+
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: macos-artifacts
+          path: |
+            git/.github/macos-installer/*.dmg
+            git/.github/macos-installer/*.pkg
+  # End build and sign Mac OSX installers

From d682d3016d33763dfbdda2d6b05f93998724f07e Mon Sep 17 00:00:00 2001
From: Victoria Dye <vdye@github.com>
Date: Fri, 16 Jul 2021 10:44:26 -0400
Subject: [PATCH 131/207] release: build unsigned Ubuntu .deb package

- include `scalar`
- build & upload unsigned .deb package

Co-authored-by: Lessley Dennington <ldennington@github.com>
---
 .github/workflows/build-git-installers.yml | 80 ++++++++++++++++++++++
 1 file changed, 80 insertions(+)

diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
index 46d65850a5a381..5e947cdf9b7f6c 100644
--- a/.github/workflows/build-git-installers.yml
+++ b/.github/workflows/build-git-installers.yml
@@ -474,3 +474,83 @@ jobs:
             git/.github/macos-installer/*.dmg
             git/.github/macos-installer/*.pkg
   # End build and sign Mac OSX installers
+
+  # Build unsigned Ubuntu package
+  create-linux-artifacts:
+    runs-on: ubuntu-latest
+    needs: prereqs
+    steps:
+      - name: Install git dependencies
+        run: |
+          set -ex
+          sudo apt-get update -q
+          sudo apt-get install -y -q --no-install-recommends gettext libcurl4-gnutls-dev libpcre3-dev asciidoc xmlto
+
+      - name: Clone git
+        uses: actions/checkout@v4
+        with:
+          path: git
+
+      - name: Build and create Debian package
+        run: |
+          set -ex
+
+          die () {
+              echo "$*" >&2
+              exit 1
+          }
+
+          echo "${{ needs.prereqs.outputs.tag_version }}" >>git/version
+          make -C git GIT-VERSION-FILE
+
+          VERSION="${{ needs.prereqs.outputs.tag_version }}"
+
+          ARCH="$(dpkg-architecture -q DEB_HOST_ARCH)"
+          if test -z "$ARCH"; then
+            die "Could not determine host architecture!"
+          fi
+
+          PKGNAME="microsoft-git_$VERSION"
+          PKGDIR="$(dirname $(pwd))/$PKGNAME"
+
+          rm -rf "$PKGDIR"
+          mkdir -p "$PKGDIR"
+
+          DESTDIR="$PKGDIR" make -C git -j5 V=1 DEVELOPER=1 \
+            USE_LIBPCRE=1 \
+            NO_CROSS_DIRECTORY_HARDLINKS=1 \
+            ASCIIDOC8=1 ASCIIDOC_NO_ROFF=1 \
+            ASCIIDOC='TZ=UTC asciidoc' \
+            prefix=/usr/local \
+            gitexecdir=/usr/local/lib/git-core \
+            libexecdir=/usr/local/lib/git-core \
+            htmldir=/usr/local/share/doc/git/html \
+            install install-doc install-html
+
+          cd ..
+          mkdir "$PKGNAME/DEBIAN"
+
+          # Based on https://packages.ubuntu.com/xenial/vcs/git
+          cat >"$PKGNAME/DEBIAN/control" <<EOF
+          Package: microsoft-git
+          Version: $VERSION
+          Section: vcs
+          Priority: optional
+          Architecture: $ARCH
+          Depends: libcurl3-gnutls, liberror-perl, libexpat1, libpcre2-8-0, perl, perl-modules, zlib1g
+          Maintainer: GitClient <gitclient@microsoft.com>
+          Description: Git client built from the https://github.com/microsoft/git repository,
+            specialized in supporting monorepo scenarios. Includes the Scalar CLI.
+          EOF
+
+          dpkg-deb -Zxz --build "$PKGNAME"
+          # Move Debian package for later artifact upload
+          mv "$PKGNAME.deb" "$GITHUB_WORKSPACE"
+
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: linux-artifacts
+          path: |
+            *.deb
+  # End build unsigned Debian package

From 93a590156282d07c5b5e583fed2e9b6b356dba4f Mon Sep 17 00:00:00 2001
From: Victoria Dye <vdye@github.com>
Date: Fri, 16 Jul 2021 10:48:08 -0400
Subject: [PATCH 132/207] release: add signing step for .deb package

- sign using Azure-stored certificates & client
- sign on Windows agent via python script
- job skipped if credentials for accessing certificate aren't present

Co-authored-by: Lessley Dennington <ldennington@github.com>
---
 .github/workflows/build-git-installers.yml | 49 +++++++++++++++++++++-
 1 file changed, 47 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
index 5e947cdf9b7f6c..aacfdbb5e19999 100644
--- a/.github/workflows/build-git-installers.yml
+++ b/.github/workflows/build-git-installers.yml
@@ -5,6 +5,9 @@ on:
     tags:
       - 'v[0-9]*vfs*' # matches "v<number><any characters>vfs<any characters>"
 
+permissions:
+  id-token: write # required for Azure login via OIDC
+
 jobs:
   # Check prerequisites for the workflow
   prereqs:
@@ -475,10 +478,11 @@ jobs:
             git/.github/macos-installer/*.pkg
   # End build and sign Mac OSX installers
 
-  # Build unsigned Ubuntu package
+  # Build and sign Debian package
   create-linux-artifacts:
     runs-on: ubuntu-latest
     needs: prereqs
+    environment: release
     steps:
       - name: Install git dependencies
         run: |
@@ -547,10 +551,51 @@ jobs:
           # Move Debian package for later artifact upload
           mv "$PKGNAME.deb" "$GITHUB_WORKSPACE"
 
+      - name: Log into Azure
+        uses: azure/login@v2
+        with:
+          client-id: ${{ secrets.AZURE_CLIENT_ID }}
+          tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+          subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
+      - name: Prepare for GPG signing
+        env:
+          AZURE_VAULT: ${{ secrets.AZURE_VAULT }}
+          GPG_KEY_SECRET_NAME: ${{ secrets.GPG_KEY_SECRET_NAME }}
+          GPG_PASSPHRASE_SECRET_NAME: ${{ secrets.GPG_PASSPHRASE_SECRET_NAME }}
+          GPG_KEYGRIP_SECRET_NAME: ${{ secrets.GPG_KEYGRIP_SECRET_NAME }}
+        run: |
+          # Install debsigs
+          sudo apt install debsigs
+
+          # Download GPG key, passphrase, and keygrip from Azure Key Vault
+          key=$(az keyvault secret show --name $GPG_KEY_SECRET_NAME --vault-name $AZURE_VAULT --query "value")
+          passphrase=$(az keyvault secret show --name $GPG_PASSPHRASE_SECRET_NAME --vault-name $AZURE_VAULT --query "value")
+          keygrip=$(az keyvault secret show --name $GPG_KEYGRIP_SECRET_NAME --vault-name $AZURE_VAULT --query "value")
+
+          # Remove quotes from downloaded values
+          key=$(sed -e 's/^"//' -e 's/"$//' <<<"$key")
+          passphrase=$(sed -e 's/^"//' -e 's/"$//' <<<"$passphrase")
+          keygrip=$(sed -e 's/^"//' -e 's/"$//' <<<"$keygrip")
+
+          # Import GPG key
+          echo "$key" | base64 -d | gpg --import --no-tty --batch --yes
+
+          # Configure GPG
+          echo "allow-preset-passphrase" > ~/.gnupg/gpg-agent.conf
+          gpg-connect-agent RELOADAGENT /bye
+          /usr/lib/gnupg2/gpg-preset-passphrase --preset "$keygrip" <<<"$passphrase"
+
+      - name: Sign Debian package
+        run: |
+          # Sign Debian package
+          version="${{ needs.prereqs.outputs.tag_version }}"
+          debsigs --sign=origin --verify --check microsoft-git_"$version".deb
+
       - name: Upload artifacts
         uses: actions/upload-artifact@v4
         with:
           name: linux-artifacts
           path: |
             *.deb
-  # End build unsigned Debian package
+  # End build and sign Debian package

From 69ebf5f1d54a0e7228e19d31fab6c72c860b00c2 Mon Sep 17 00:00:00 2001
From: Victoria Dye <vdye@github.com>
Date: Fri, 16 Jul 2021 10:51:02 -0400
Subject: [PATCH 133/207] release: create draft GitHub release with packages &
 installers

- create release & upload artifacts using Octokit
- use job "if" condition to handle uploading signed *or* unsigned .deb

Co-authored-by: Lessley Dennington <ldennington@github.com>
---
 .github/workflows/build-git-installers.yml | 109 +++++++++++++++++++++
 1 file changed, 109 insertions(+)

diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
index aacfdbb5e19999..8837e2e0d6f0df 100644
--- a/.github/workflows/build-git-installers.yml
+++ b/.github/workflows/build-git-installers.yml
@@ -599,3 +599,112 @@ jobs:
           path: |
             *.deb
   # End build and sign Debian package
+
+  create-github-release:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+    needs:
+      - create-linux-artifacts
+      - create-macos-artifacts
+      - windows_artifacts
+      - prereqs
+    if: |
+      success() ||
+        (needs.create-linux-artifacts.result == 'skipped' &&
+        needs.create-macos-artifacts.result == 'success' &&
+        needs.windows_artifacts.result == 'success')
+    steps:
+      - name: Download Windows portable (x86_64)
+        uses: actions/download-artifact@v4
+        with:
+          name: win-portable-x86_64
+          path: win-portable-x86_64
+
+      - name: Download Windows portable (aarch64)
+        uses: actions/download-artifact@v4
+        with:
+          name: win-portable-aarch64
+          path: win-portable-aarch64
+
+      - name: Download Windows installer (x86_64)
+        uses: actions/download-artifact@v4
+        with:
+          name: win-installer-x86_64
+          path: win-installer-x86_64
+
+      - name: Download Windows installer (aarch64)
+        uses: actions/download-artifact@v4
+        with:
+          name: win-installer-aarch64
+          path: win-installer-aarch64
+
+      - name: Download macOS artifacts
+        uses: actions/download-artifact@v4
+        with:
+          name: macos-artifacts
+          path: macos-artifacts
+
+      - name: Download Debian package
+        uses: actions/download-artifact@v4
+        with:
+          name: linux-artifacts
+          path: deb-package
+
+      - uses: actions/github-script@v6
+        with:
+          script: |
+            const fs = require('fs');
+            const path = require('path');
+
+            var releaseMetadata = {
+              owner: context.repo.owner,
+              repo: context.repo.repo
+            };
+
+            // Create the release
+            var tagName = "${{ needs.prereqs.outputs.tag_name }}";
+            var createdRelease = await github.rest.repos.createRelease({
+              ...releaseMetadata,
+              draft: true,
+              tag_name: tagName,
+              name: tagName
+            });
+            releaseMetadata.release_id = createdRelease.data.id;
+
+            // Uploads contents of directory to the release created above
+            async function uploadDirectoryToRelease(directory, includeExtensions=[]) {
+              return fs.promises.readdir(directory)
+                .then(async(files) => Promise.all(
+                  files.filter(file => {
+                    return includeExtensions.length==0 || includeExtensions.includes(path.extname(file).toLowerCase());
+                  })
+                  .map(async (file) => {
+                    var filePath = path.join(directory, file);
+                    await github.rest.repos.uploadReleaseAsset({
+                      ...releaseMetadata,
+                      name: file,
+                      headers: {
+                        "content-length": (await fs.promises.stat(filePath)).size
+                      },
+                      data: fs.createReadStream(filePath)
+                    });
+                  }))
+                );
+            }
+
+            await Promise.all([
+              // Upload Windows x86_64 artifacts
+              uploadDirectoryToRelease('win-installer-x86_64', ['.exe']),
+              uploadDirectoryToRelease('win-portable-x86_64', ['.exe']),
+
+              // Upload Windows aarch64 artifacts
+              uploadDirectoryToRelease('win-installer-aarch64', ['.exe']),
+              uploadDirectoryToRelease('win-portable-aarch64', ['.exe']),
+
+              // Upload Mac artifacts
+              uploadDirectoryToRelease('macos-artifacts'),
+
+              // Upload Ubuntu artifacts
+              uploadDirectoryToRelease('deb-package')
+            ]);

From 6619583efdae337cc801864b695bfb18c16f096d Mon Sep 17 00:00:00 2001
From: Victoria Dye <vdye@github.com>
Date: Thu, 2 Dec 2021 10:11:41 -0500
Subject: [PATCH 134/207] dist: archive HEAD instead of HEAD^{tree}

Update the `git archive` tree-ish argument from `HEAD^{tree}` to `HEAD`. By
using a commit (rather than a tree) reference, the commit hash will be stored
as an extended pax header, extractable via `git get-tar-commit-id`.

The intended use case for this change is building `git` from the output of
`make dist`: in combination with the ability to specify a fallback
`GIT_BUILT_FROM_COMMIT`, a user can extract the commit ID used to build the
archive and set it as `GIT_BUILT_FROM_COMMIT`. The result is a fully
populated commit hash in `git version --build-options`.
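
For illustration, a consumer of the `make dist` tarball could wire the two
pieces together roughly as follows (the tarball name here is only a
placeholder):

    # Recover the commit recorded by 'git archive ... HEAD', then feed it
    # back into the build so 'git version --build-options' can report it.
    commit=$(gunzip -c git-2.47.0.vfs.0.0.tar.gz | git get-tar-commit-id) &&
    make GIT_BUILT_FROM_COMMIT="$commit"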

Signed-off-by: Victoria Dye <vdye@github.com>
---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index bd1034b6f0a027..e01b6180404e7b 100644
--- a/Makefile
+++ b/Makefile
@@ -3706,7 +3706,7 @@ dist: git-archive$(X) configure
 	@$(MAKE) -C git-gui TARDIR=../.dist-tmp-dir/git-gui dist-version
 	./git-archive --format=tar \
 		$(GIT_ARCHIVE_EXTRA_FILES) \
-		--prefix=$(GIT_TARNAME)/ HEAD^{tree} > $(GIT_TARNAME).tar
+		--prefix=$(GIT_TARNAME)/ HEAD > $(GIT_TARNAME).tar
 	@$(RM) -r .dist-tmp-dir
 	gzip -f -9 $(GIT_TARNAME).tar
 

From 254cf446d11022eacc25d0eb04eec67f1110e530 Mon Sep 17 00:00:00 2001
From: Matthew John Cheetham <mjcheetham@outlook.com>
Date: Fri, 24 Jul 2020 15:27:57 +0100
Subject: [PATCH 135/207] homebrew: add GitHub workflow to release Cask

Add a GitHub workflow that is triggered on the `release` event to
automatically update the `microsoft-git` Homebrew Cask on the
`microsoft/git` Tap.

A secret `HOMEBREW_TOKEN` with push permissions to the
`microsoft/homebrew-git` repository must exist. For the moment, a pull
request will be created to allow for last-minute manual verification.

Signed-off-by: Matthew John Cheetham <mjcheetham@outlook.com>
---
 .github/workflows/release-homebrew.yml | 51 ++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)
 create mode 100644 .github/workflows/release-homebrew.yml

diff --git a/.github/workflows/release-homebrew.yml b/.github/workflows/release-homebrew.yml
new file mode 100644
index 00000000000000..e00b90d8c07579
--- /dev/null
+++ b/.github/workflows/release-homebrew.yml
@@ -0,0 +1,51 @@
+name: Update Homebrew Tap
+on:
+  release:
+    types: [released]
+
+permissions:
+  id-token: write # required for Azure login via OIDC
+
+jobs:
+  release:
+    runs-on: ubuntu-latest
+    environment: release
+    steps:
+    - id: version
+      name: Compute version number
+      run: |
+        echo "result=$(echo $GITHUB_REF | sed -e "s/^refs\/tags\/v//")" >>$GITHUB_OUTPUT
+    - id: hash
+      name: Compute release asset hash
+      uses: mjcheetham/asset-hash@v1.1
+      with:
+        asset: /git-(.*)\.pkg/
+        hash: sha256
+        token: ${{ secrets.GITHUB_TOKEN }}
+    - name: Log into Azure
+      uses: azure/login@v2
+      with:
+        client-id: ${{ secrets.AZURE_CLIENT_ID }}
+        tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+        subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+    - name: Retrieve token
+      id: token
+      run: |
+        az keyvault secret show \
+          --name ${{ secrets.HOMEBREW_TOKEN_SECRET_NAME }} \
+          --vault-name ${{ secrets.AZURE_VAULT }} \
+          --query "value" -o tsv >token &&
+        # avoid outputting the token under `set -x` by using `sed` instead of `echo`
+        sed s/^/::add-mask::/ <token &&
+        sed s/^/result=/ <token >>$GITHUB_OUTPUT &&
+        rm token
+    - name: Update scalar Cask
+      uses: mjcheetham/update-homebrew@v1.4
+      with:
+        token: ${{ steps.token.outputs.result }}
+        tap: microsoft/git
+        name: microsoft-git
+        type: cask
+        version: ${{ steps.version.outputs.result }}
+        sha256: ${{ steps.hash.outputs.result }}
+        alwaysUsePullRequest: false

From a71743504db405cf5e0003fa6dc5ae112a5ec6e1 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Fri, 13 May 2022 23:55:32 +0200
Subject: [PATCH 136/207] Disable the `monitor-components` workflow in msft-git

It really does not make sense to run that workflow in any fork of
git-for-windows/git. Typically, it is enough to simply disable it (since
it is a scheduled workflow, it is disabled by default in any new fork).

However, in microsoft/git, we switch the default branch whenever we
rebase to a new upstream version, and every time we do so, this
scheduled workflow gets re-enabled.

Let's just delete it in microsoft/git and never be bothered by it again.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 .github/workflows/monitor-components.yml | 98 ------------------------
 1 file changed, 98 deletions(-)
 delete mode 100644 .github/workflows/monitor-components.yml

diff --git a/.github/workflows/monitor-components.yml b/.github/workflows/monitor-components.yml
deleted file mode 100644
index fedc6add69a9e0..00000000000000
--- a/.github/workflows/monitor-components.yml
+++ /dev/null
@@ -1,98 +0,0 @@
-name: Monitor component updates
-
-# Git for Windows is a slightly modified subset of MSYS2. Some of its
-# components are maintained by Git for Windows, others by MSYS2. To help
-# keeping the former up to date, this workflow monitors the Atom/RSS feeds
-# and opens new tickets for each new component version.
-
-on:
-  schedule:
-    - cron: "23 8,11,14,17 * * *"
-  workflow_dispatch:
-
-env:
-  CHARACTER_LIMIT: 5000
-  MAX_AGE: 7d
-
-jobs:
-  job:
-    # Only run this in Git for Windows' fork
-    if: github.event.repository.owner.login == 'git-for-windows'
-    runs-on: ubuntu-latest
-    permissions:
-      issues: write
-    strategy:
-      matrix:
-        component:
-          - label: git
-            feed: https://github.com/git/git/tags.atom
-          - label: git-lfs
-            feed: https://github.com/git-lfs/git-lfs/tags.atom
-          - label: git-credential-manager
-            feed: https://github.com/git-ecosystem/git-credential-manager/tags.atom
-          - label: tig
-            feed: https://github.com/jonas/tig/tags.atom
-          - label: cygwin
-            feed: https://github.com/cygwin/cygwin/releases.atom
-            title-pattern: ^(?!.*newlib)
-          - label: msys2-runtime-package
-            feed: https://github.com/msys2/MSYS2-packages/commits/master/msys2-runtime.atom
-          - label: msys2-runtime
-            feed: https://github.com/msys2/msys2-runtime/commits/HEAD.atom
-            aggregate: true
-          - label: openssh
-            feed: https://github.com/openssh/openssh-portable/tags.atom
-          - label: libfido2
-            feed: https://github.com/Yubico/libfido2/tags.atom
-          - label: libcbor
-            feed: https://github.com/PJK/libcbor/tags.atom
-          - label: openssl
-            feed: https://github.com/openssl/openssl/tags.atom
-            title-pattern: ^(?!.*alpha)
-          - label: gnutls
-            feed: https://gnutls.org/news.atom
-          - label: heimdal
-            feed: https://github.com/heimdal/heimdal/tags.atom
-          - label: git-sizer
-            feed: https://github.com/github/git-sizer/tags.atom
-          - label: gitflow
-            feed: https://github.com/petervanderdoes/gitflow-avh/tags.atom
-          - label: curl
-            feed: https://github.com/curl/curl/tags.atom
-          - label: libgpg-error
-            feed: https://github.com/gpg/libgpg-error/releases.atom
-            title-pattern: ^libgpg-error-[0-9\.]*$
-          - label: libgcrypt
-            feed: https://github.com/gpg/libgcrypt/releases.atom
-            title-pattern: ^libgcrypt-[0-9\.]*$
-          - label: gpg
-            feed: https://github.com/gpg/gnupg/releases.atom
-          - label: mintty
-            feed: https://github.com/mintty/mintty/releases.atom
-          - label: 7-zip
-            feed: https://sourceforge.net/projects/sevenzip/rss?path=/7-Zip
-            aggregate: true
-          - label: bash
-            feed: https://git.savannah.gnu.org/cgit/bash.git/atom/?h=master
-            aggregate: true
-          - label: perl
-            feed: https://github.com/Perl/perl5/tags.atom
-            title-pattern: ^(?!.*(5\.[0-9]+[13579]|RC))
-          - label: pcre2
-            feed: https://github.com/PCRE2Project/pcre2/tags.atom
-          - label: mingw-w64-llvm
-            feed: https://github.com/msys2/MINGW-packages/commits/master/mingw-w64-llvm.atom
-          - label: innosetup
-            feed: https://github.com/jrsoftware/issrc/tags.atom
-      fail-fast: false
-    steps:
-      - uses: git-for-windows/rss-to-issues@v0
-        with:
-          feed: ${{matrix.component.feed}}
-          prefix: "[New ${{matrix.component.label}} version]"
-          labels: component-update
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          character-limit: ${{ env.CHARACTER_LIMIT }}
-          max-age: ${{ env.MAX_AGE }}
-          aggregate: ${{matrix.component.aggregate}}
-          title-pattern: ${{matrix.component.title-pattern}}

From 191b4e25f7c4f490443b5280488d67de883083d9 Mon Sep 17 00:00:00 2001
From: Lessley <lessleydennington@gmail.com>
Date: Tue, 24 Oct 2023 15:45:05 -0600
Subject: [PATCH 137/207] build-git-installers: publish gpg public key

Update the build-git-installers workflow to publish `microsoft/git`'s GPG
public key as part of each release. Add an explanation to the README of how
to use this key to verify the Debian package's signature.
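
As a rough sketch of that verification flow (file names are placeholders for
the actual release assets, and `debsig-verify` additionally requires a local
policy to be configured for this key):

    # Import the public key published with the release, then check the
    # embedded 'origin' signature that debsigs added to the package.
    gpg --import msft-git-public.asc
    debsig-verify microsoft-git_2.47.0.vfs.0.0.deb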
---
 .github/workflows/build-git-installers.yml | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
index 8837e2e0d6f0df..ce24fc166bff0c 100644
--- a/.github/workflows/build-git-installers.yml
+++ b/.github/workflows/build-git-installers.yml
@@ -604,11 +604,16 @@ jobs:
     runs-on: ubuntu-latest
     permissions:
       contents: write
+      id-token: write # required for Azure login via OIDC
     needs:
       - create-linux-artifacts
       - create-macos-artifacts
       - windows_artifacts
       - prereqs
+    env:
+      AZURE_VAULT: ${{ secrets.AZURE_VAULT }}
+      GPG_PUBLIC_KEY_SECRET_NAME: ${{ secrets.GPG_PUBLIC_KEY_SECRET_NAME }}
+    environment: release
     if: |
       success() ||
         (needs.create-linux-artifacts.result == 'skipped' &&
@@ -651,6 +656,20 @@ jobs:
           name: linux-artifacts
           path: deb-package
 
+      - name: Log into Azure
+        uses: azure/login@v2
+        with:
+          client-id: ${{ secrets.AZURE_CLIENT_ID }}
+          tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+          subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
+      - name: Download GPG public key signature file
+        run: |
+          az keyvault secret show --name "$GPG_PUBLIC_KEY_SECRET_NAME" \
+            --vault-name "$AZURE_VAULT" --query "value" \
+            | sed -e 's/^"//' -e 's/"$//' | base64 -d >msft-git-public.asc
+          mv msft-git-public.asc deb-package
+
       - uses: actions/github-script@v6
         with:
           script: |

From 075aaa8c12c34d0b8884621cc62e0f3d764c2821 Mon Sep 17 00:00:00 2001
From: Victoria Dye <vdye@github.com>
Date: Thu, 2 Dec 2021 14:50:05 -0500
Subject: [PATCH 138/207] release: include GIT_BUILT_FROM_COMMIT in MacOS build

Set `GIT_BUILT_FROM_COMMIT` based on the commit ID stored in the `make dist`
output archive header. This ensures the commit hash is shown in
`git version --build-options`.
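
With that in place, the installed binaries should report the commit roughly
as follows (the version string and exact ordering are illustrative):

    $ git version --build-options
    git version 2.47.0.vfs.0.0
    cpu: universal
    built from commit: <commit the dist archive was created from>
    ...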

Signed-off-by: Victoria Dye <vdye@github.com>
---
 .github/macos-installer/Makefile           | 2 +-
 .github/workflows/build-git-installers.yml | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/.github/macos-installer/Makefile b/.github/macos-installer/Makefile
index 3e1d60dcbeb2ef..1a06f6200e62dc 100644
--- a/.github/macos-installer/Makefile
+++ b/.github/macos-installer/Makefile
@@ -21,7 +21,7 @@ GIT_PREFIX := $(PREFIX)/git
 BUILD_DIR := $(GITHUB_WORKSPACE)/payload
 DESTDIR := $(PWD)/stage/git-$(ARCH_UNIV)-$(VERSION)
 ARTIFACTDIR := build-artifacts
-SUBMAKE := $(MAKE) C_INCLUDE_PATH="$(C_INCLUDE_PATH)" CPLUS_INCLUDE_PATH="$(CPLUS_INCLUDE_PATH)" LD_LIBRARY_PATH="$(LD_LIBRARY_PATH)" TARGET_FLAGS="$(TARGET_FLAGS)" CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" NO_GETTEXT=1 NO_DARWIN_PORTS=1 prefix=$(GIT_PREFIX) DESTDIR=$(DESTDIR)
+SUBMAKE := $(MAKE) C_INCLUDE_PATH="$(C_INCLUDE_PATH)" CPLUS_INCLUDE_PATH="$(CPLUS_INCLUDE_PATH)" LD_LIBRARY_PATH="$(LD_LIBRARY_PATH)" TARGET_FLAGS="$(TARGET_FLAGS)" CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" NO_GETTEXT=1 NO_DARWIN_PORTS=1 prefix=$(GIT_PREFIX) GIT_BUILT_FROM_COMMIT="$(GIT_BUILT_FROM_COMMIT)" DESTDIR=$(DESTDIR)
 CORES := $(shell bash -c "sysctl hw.ncpu | awk '{print \$$2}'")
 
 # Guard against environment variables
diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
index 8c890effb39309..4ce1f368c41da7 100644
--- a/.github/workflows/build-git-installers.yml
+++ b/.github/workflows/build-git-installers.yml
@@ -447,6 +447,9 @@ jobs:
 
           make -C git -j$(sysctl -n hw.physicalcpu) GIT-VERSION-FILE dist dist-doc
 
+          export GIT_BUILT_FROM_COMMIT=$(gunzip -c git/git-$VERSION.tar.gz | git get-tar-commit-id) ||
+            die "Could not determine commit for build"
+
           # Extract tarballs
           mkdir payload manpages
           tar -xvf git/git-$VERSION.tar.gz -C payload

From 0d5753623542e63aaefefa8e87324a995523a307 Mon Sep 17 00:00:00 2001
From: Lessley Dennington <ledennin@microsoft.com>
Date: Thu, 29 Apr 2021 10:28:44 -0700
Subject: [PATCH 139/207] Adding winget workflows

---
 .github/workflows/release-winget.yml | 51 ++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)
 create mode 100644 .github/workflows/release-winget.yml

diff --git a/.github/workflows/release-winget.yml b/.github/workflows/release-winget.yml
new file mode 100644
index 00000000000000..d6edab844d05b5
--- /dev/null
+++ b/.github/workflows/release-winget.yml
@@ -0,0 +1,51 @@
+name: "release-winget"
+on:
+  release:
+    types: [released]
+
+  workflow_dispatch:
+    inputs:
+      release:
+        description: 'Release Id'
+        required: true
+        default: 'latest'
+
+permissions:
+  id-token: write # required for Azure login via OIDC
+
+jobs:
+  release:
+    runs-on: windows-latest
+    environment: release
+    steps:
+      - name: Log into Azure
+        uses: azure/login@v2
+        with:
+          client-id: ${{ secrets.AZURE_CLIENT_ID }}
+          tenant-id: ${{ secrets.AZURE_TENANT_ID }}
+          subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
+
+      - name: Publish manifest with winget-create
+        run: |
+          # Get correct release asset
+          $github = Get-Content '${{ github.event_path }}' | ConvertFrom-Json
+          $asset = $github.release.assets | Where-Object -Property name -match '64-bit.exe$'
+
+          # Remove 'v' and 'vfs' from the version
+          $github.release.tag_name -match '\d.*'
+          $version = $Matches[0] -replace ".vfs",""
+
+          # Download wingetcreate and create manifests
+          Invoke-WebRequest https://aka.ms/wingetcreate/latest -OutFile wingetcreate.exe
+          .\wingetcreate.exe update Microsoft.Git -u $asset.browser_download_url -v $version -o manifests
+
+          # Manually substitute the name of the default branch in the License
+          # and Copyright URLs since the tooling cannot do that for us.
+          $shortenedVersion = $version -replace ".{4}$"
+          $manifestPath = dir -Path ./manifests -Filter Microsoft.Git.locale.en-US.yaml -Recurse | %{$_.FullName}
+          sed -i "s/vfs-[.0-9]*/vfs-$shortenedVersion/g" "$manifestPath"
+
+          # Submit manifests
+          $manifestDirectory = Split-Path "$manifestPath"
+          .\wingetcreate.exe submit -t "(az keyvault secret show --name ${{ secrets.WINGET_TOKEN_SECRET_NAME }} --vault-name ${{ secrets.AZURE_VAULT }} --query "value")" $manifestDirectory
+        shell: powershell

From 02182c493ca040838eeb615c41e886a693c30495 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <derrickstolee@github.com>
Date: Wed, 8 Mar 2023 12:57:56 -0500
Subject: [PATCH 140/207] .github: enable windows builds on microsoft fork

This was disabled by a0da6deeec1 (ci: only run win+VS build & tests in
Git for Windows' fork, 2022-12-19) to avoid other forks doing too many
builds. But we want to keep these builds for the microsoft/git fork.

Signed-off-by: Derrick Stolee <derrickstolee@github.com>
---
 .github/workflows/main.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index b1ed3794e2b2c8..787343b3b88db9 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -164,7 +164,7 @@ jobs:
   vs-build:
     name: win+VS build
     needs: ci-config
-    if: github.event.repository.owner.login == 'git-for-windows' && needs.ci-config.outputs.enabled == 'yes'
+    if: github.event.repository.owner.login == 'microsoft' && needs.ci-config.outputs.enabled == 'yes'
     env:
       NO_PERL: 1
       GIT_CONFIG_PARAMETERS: "'user.name=CI' 'user.email=ci@git'"

From 85c997a6e1669b8412c1441f0a29b1de6cbbbbb1 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Wed, 6 Oct 2021 15:45:07 -0400
Subject: [PATCH 141/207] release: continue pestering until user upgrades

In 'git-update-git-for-windows', there is a recently_seen variable that
is loaded from Git config. This is intended to allow users to say "No, I
don't want that version of Git for Windows." If users say no, then they
are not reminded. Ever.

We want users of microsoft/git to be notified repeatedly until they
upgrade. The first notification might be dismissed because they don't
want to interrupt their work. They should get the picture within a few
reminders and upgrade in a timely fashion.

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 .github/workflows/build-git-installers.yml | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
index ce24fc166bff0c..8c890effb39309 100644
--- a/.github/workflows/build-git-installers.yml
+++ b/.github/workflows/build-git-installers.yml
@@ -226,6 +226,15 @@ jobs:
           Type: files; Name: {app}\\{#MINGW_BITNESS}\\bin\\git-update-git-for-windows.config\
           Type: dirifempty; Name: {app}\\{#MINGW_BITNESS}\\bin' \
             $b/installer/install.iss
+      - name: Set alerts to continue until upgrade is taken
+        shell: bash
+        run: |
+          set -x
+
+          b=/${{matrix.arch.mingwprefix}}/bin &&
+
+          sed -i -e '6 a use_recently_seen=no' \
+            $b/git-update-git-for-windows
       - name: Set the installer Publisher to the Git Client team
         shell: bash
         run: |

From 131fbe83596aa0ce63d85c14a77933fce5ec4024 Mon Sep 17 00:00:00 2001
From: Lessley Dennington <lessleydennington@gmail.com>
Date: Wed, 17 Aug 2022 10:58:25 -0700
Subject: [PATCH 142/207] release: add installer validation

Add basic installer validation to the release pipeline for Windows, macOS,
and Linux (Debian package only). Validation runs the installers (plus any
necessary setup) and checks that the installed version matches the expected
version.
---
 .github/workflows/build-git-installers.yml | 71 ++++++++++++++++++++++
 1 file changed, 71 insertions(+)

diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
index 4ce1f368c41da7..de13e7df7239eb 100644
--- a/.github/workflows/build-git-installers.yml
+++ b/.github/workflows/build-git-installers.yml
@@ -612,12 +612,83 @@ jobs:
             *.deb
   # End build and sign Debian package
 
+  # Validate installers
+  validate-installers:
+    name: Validate installers
+    strategy:
+      matrix:
+        component:
+          - os: ubuntu-latest
+            artifact: linux-artifacts
+            command: git
+          - os: macos-latest-xl-arm64
+            artifact: macos-artifacts
+            command: git
+          - os: macos-latest
+            artifact: macos-artifacts
+            command: git
+          - os: windows-latest
+            artifact: win-installer-x86_64
+            command: $PROGRAMFILES\Git\cmd\git.exe
+          - os: ['self-hosted', '1ES.Pool=github-arm64-pool']
+            artifact: win-installer-aarch64
+            command: $PROGRAMFILES\Git\cmd\git.exe
+    runs-on: ${{ matrix.component.os }}
+    needs: [prereqs, windows_artifacts, create-macos-artifacts, create-linux-artifacts]
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          name: ${{ matrix.component.artifact }}
+
+      - name: Install Windows
+        if: contains(matrix.component.artifact, 'win-installer')
+        shell: pwsh
+        run: |
+          $exePath = Get-ChildItem -Path ./*.exe | %{$_.FullName}
+          Start-Process -Wait -FilePath "$exePath" -ArgumentList "/SILENT /VERYSILENT /NORESTART /SUPPRESSMSGBOXES /ALLOWDOWNGRADE=1"
+
+      - name: Install Linux
+        if: contains(matrix.component.artifact, 'linux')
+        run: |
+          debpath=$(find ./*.deb)
+          sudo apt install $debpath
+
+      - name: Install macOS
+        if: contains(matrix.component.artifact, 'macos')
+        run: |
+          # avoid letting Homebrew's `git` in `/opt/homebrew/bin` override `/usr/local/bin/git`
+          arch="$(uname -m)"
+          test arm64 != "$arch" ||
+          brew uninstall git
+
+          pkgpath=$(find ./*universal*.pkg)
+          sudo installer -pkg $pkgpath -target /
+
+      - name: Validate
+        shell: bash
+        run: |
+          "${{ matrix.component.command }}" --version | sed 's/git version //' >actual
+          echo ${{ needs.prereqs.outputs.tag_version }} >expect
+          cmp expect actual || exit 1
+
+      - name: Validate universal binary CPU architecture
+        if: contains(matrix.component.os, 'macos')
+        shell: bash
+        run: |
+          set -ex
+          git version --build-options >actual
+          cat actual
+          grep "cpu: $(uname -m)" actual
+  # End validate installers
+
   create-github-release:
     runs-on: ubuntu-latest
     permissions:
       contents: write
       id-token: write # required for Azure login via OIDC
     needs:
+      - validate-installers
       - create-linux-artifacts
       - create-macos-artifacts
       - windows_artifacts

From 6e75105ebe4ff5614c7ecb6e053614b2b924578b Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Thu, 29 Apr 2021 10:58:39 -0400
Subject: [PATCH 143/207] update-microsoft-git: create barebones builtin

Just do the boilerplate stuff of making a new builtin, including
documentation and integration with git.c.

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 .gitignore                                 |  1 +
 Documentation/git-update-microsoft-git.txt | 24 ++++++++++++++++++++++
 Documentation/lint-manpages.sh             |  1 +
 Makefile                                   |  1 +
 builtin.h                                  |  1 +
 builtin/update-microsoft-git.c             | 20 ++++++++++++++++++
 git.c                                      |  1 +
 meson.build                                |  1 +
 8 files changed, 50 insertions(+)
 create mode 100644 Documentation/git-update-microsoft-git.txt
 create mode 100644 builtin/update-microsoft-git.c

diff --git a/.gitignore b/.gitignore
index a461bdd35b2985..3e0b4d016cb1ce 100644
--- a/.gitignore
+++ b/.gitignore
@@ -174,6 +174,7 @@
 /git-unpack-file
 /git-unpack-objects
 /git-update-index
+/git-update-microsoft-git
 /git-update-ref
 /git-update-server-info
 /git-upload-archive
diff --git a/Documentation/git-update-microsoft-git.txt b/Documentation/git-update-microsoft-git.txt
new file mode 100644
index 00000000000000..724bfc172f8ab7
--- /dev/null
+++ b/Documentation/git-update-microsoft-git.txt
@@ -0,0 +1,24 @@
+git-update-microsoft-git(1)
+===========================
+
+NAME
+----
+git-update-microsoft-git - Update the installed version of Git
+
+
+SYNOPSIS
+--------
+[verse]
+'git update-microsoft-git'
+
+DESCRIPTION
+-----------
+This version of Git is based on the Microsoft fork of Git, which
+has custom capabilities focused on supporting monorepos. This
+command checks for the latest release of that fork and installs
+it on your machine.
+
+
+GIT
+---
+Part of the linkgit:git[1] suite
diff --git a/Documentation/lint-manpages.sh b/Documentation/lint-manpages.sh
index 8bc316ba7646e3..2622a493566950 100755
--- a/Documentation/lint-manpages.sh
+++ b/Documentation/lint-manpages.sh
@@ -28,6 +28,7 @@ check_missing_docs () (
 		git-remote-*) continue;;
 		git-stage) continue;;
 		git-gvfs-helper) continue;;
+		git-update-microsoft-git) continue;;
 		git-legacy-*) continue;;
 		git-?*--?* ) continue ;;
 		esac
diff --git a/Makefile b/Makefile
index bd1034b6f0a027..a2e2a58f6bc198 100644
--- a/Makefile
+++ b/Makefile
@@ -1326,6 +1326,7 @@ BUILTIN_OBJS += builtin/tag.o
 BUILTIN_OBJS += builtin/unpack-file.o
 BUILTIN_OBJS += builtin/unpack-objects.o
 BUILTIN_OBJS += builtin/update-index.o
+BUILTIN_OBJS += builtin/update-microsoft-git.o
 BUILTIN_OBJS += builtin/update-ref.o
 BUILTIN_OBJS += builtin/update-server-info.o
 BUILTIN_OBJS += builtin/upload-archive.o
diff --git a/builtin.h b/builtin.h
index 5f64730cf0273d..f99519a65bcba6 100644
--- a/builtin.h
+++ b/builtin.h
@@ -239,6 +239,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix, struct repository *
 int cmd_unpack_file(int argc, const char **argv, const char *prefix, struct repository *repo);
 int cmd_unpack_objects(int argc, const char **argv, const char *prefix, struct repository *repo);
 int cmd_update_index(int argc, const char **argv, const char *prefix, struct repository *repo);
+int cmd_update_microsoft_git(int argc, const char **argv, const char *prefix, struct repository *repo);
 int cmd_update_ref(int argc, const char **argv, const char *prefix, struct repository *repo);
 int cmd_update_server_info(int argc, const char **argv, const char *prefix, struct repository *repo);
 int cmd_upload_archive(int argc, const char **argv, const char *prefix, struct repository *repo);
diff --git a/builtin/update-microsoft-git.c b/builtin/update-microsoft-git.c
new file mode 100644
index 00000000000000..2d555a1ece21e8
--- /dev/null
+++ b/builtin/update-microsoft-git.c
@@ -0,0 +1,20 @@
+#include "builtin.h"
+#include "repository.h"
+#include "parse-options.h"
+#include "run-command.h"
+
+static int platform_specific_upgrade(void)
+{
+	return 1;
+}
+
+static const char builtin_update_microsoft_git_usage[] =
+	N_("git update-microsoft-git");
+
+int cmd_update_microsoft_git(int argc, const char **argv, const char *prefix UNUSED, struct repository *repo UNUSED)
+{
+	if (argc == 2 && !strcmp(argv[1], "-h"))
+		usage(builtin_update_microsoft_git_usage);
+
+	return platform_specific_upgrade();
+}
diff --git a/git.c b/git.c
index 7e244b82f450eb..c24ba4924ee5c4 100644
--- a/git.c
+++ b/git.c
@@ -708,6 +708,7 @@ static struct cmd_struct commands[] = {
 	{ "unpack-file", cmd_unpack_file, RUN_SETUP | NO_PARSEOPT },
 	{ "unpack-objects", cmd_unpack_objects, RUN_SETUP | NO_PARSEOPT },
 	{ "update-index", cmd_update_index, RUN_SETUP },
+	{ "update-microsoft-git", cmd_update_microsoft_git },
 	{ "update-ref", cmd_update_ref, RUN_SETUP },
 	{ "update-server-info", cmd_update_server_info, RUN_SETUP },
 	{ "upload-archive", cmd_upload_archive, NO_PARSEOPT },
diff --git a/meson.build b/meson.build
index e977eda6e6a349..5c1672b89c40b8 100644
--- a/meson.build
+++ b/meson.build
@@ -603,6 +603,7 @@ builtin_sources = [
   'builtin/unpack-file.c',
   'builtin/unpack-objects.c',
   'builtin/update-index.c',
+  'builtin/update-microsoft-git.c',
   'builtin/update-ref.c',
   'builtin/update-server-info.c',
   'builtin/upload-archive.c',

From bf7295e3686c6131df68764d407161f3d2e5c8f3 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <derrickstolee@github.com>
Date: Wed, 16 Mar 2022 10:21:05 -0400
Subject: [PATCH 144/207] .github: update ISSUE_TEMPLATE.md for microsoft/git

We have been using the default issue template from git-for-windows/git,
but we should ask different questions than Git for Windows. Update the
issue template to ask these helpful questions.

Signed-off-by: Derrick Stolee <derrickstolee@github.com>
---
 .github/ISSUE_TEMPLATE.md | 34 +++++++++++++++++++---------------
 1 file changed, 19 insertions(+), 15 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 4017ed82ca4341..c19530b086311a 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,8 +1,10 @@
- - [ ] I was not able to find an [open](https://github.com/git-for-windows/git/issues?q=is%3Aopen) or [closed](https://github.com/git-for-windows/git/issues?q=is%3Aclosed) issue matching what I'm seeing
+ - [ ] I was not able to find an [open](https://github.com/microsoft/git/issues?q=is%3Aopen)
+        or [closed](https://github.com/microsoft/git/issues?q=is%3Aclosed) issue matching
+        what I'm seeing, including in [the `git-for-windows/git` tracker](https://github.com/git-for-windows/git/issues).
 
 ### Setup
 
- - Which version of Git for Windows are you using? Is it 32-bit or 64-bit?
+ - Which version of `microsoft/git` are you using? Is it 32-bit or 64-bit?
 
 ```
 $ git --version --build-options
@@ -10,24 +12,22 @@ $ git --version --build-options
 ** insert your machine's response here **
 ```
 
- - Which version of Windows are you running? Vista, 7, 8, 10? Is it 32-bit or 64-bit?
+Are you using Scalar or VFS for Git?
+
+** insert your answer here **
+
+If VFS for Git, then what version?
 
 ```
-$ cmd.exe /c ver
+$ gvfs version
 
 ** insert your machine's response here **
 ```
 
- - What options did you set as part of the installation? Or did you choose the
-   defaults?
+ - Which version of Windows are you running? Vista, 7, 8, 10? Is it 32-bit or 64-bit?
 
 ```
-# One of the following:
-> type "C:\Program Files\Git\etc\install-options.txt"
-> type "C:\Program Files (x86)\Git\etc\install-options.txt"
-> type "%USERPROFILE%\AppData\Local\Programs\Git\etc\install-options.txt"
-> type "$env:USERPROFILE\AppData\Local\Programs\Git\etc\install-options.txt"
-$ cat /etc/install-options.txt
+$ cmd.exe /c ver
 
 ** insert your machine's response here **
 ```
@@ -58,7 +58,11 @@ $ cat /etc/install-options.txt
 
 ** insert here **
 
- - If the problem was occurring with a specific repository, can you provide the
-   URL to that repository to help us with testing?
+ - If the problem was occurring with a specific repository, can you specify
+   the repository?
 
-** insert URL here **
+   * [ ] Public repo: **insert URL here**
+   * [ ] Windows monorepo
+   * [ ] Office monorepo
+   * [ ] Other Microsoft-internal repo: **insert name here**
+   * [ ] Other internal repo.

From 09c4420c8b41a796e5fd2b0552cab4e30d085f80 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Thu, 29 Apr 2021 11:02:07 -0400
Subject: [PATCH 145/207] update-microsoft-git: Windows implementation

On Windows, we have the 'git update-git-for-windows' command. It is
poorly named within the microsoft/git fork, because the script has been
updated to look at the GitHub releases of microsoft/git, not
git-for-windows/git.

Still, it handles all the complicated details about downloading,
verifying, and running the installer.

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 builtin/update-microsoft-git.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/builtin/update-microsoft-git.c b/builtin/update-microsoft-git.c
index 2d555a1ece21e8..6c426d66ea5f16 100644
--- a/builtin/update-microsoft-git.c
+++ b/builtin/update-microsoft-git.c
@@ -2,11 +2,28 @@
 #include "repository.h"
 #include "parse-options.h"
 #include "run-command.h"
+#include "strvec.h"
 
+#if defined(GIT_WINDOWS_NATIVE)
+/*
+ * On Windows, run 'git update-git-for-windows' which
+ * is installed by the installer, based on the script
+ * in git-for-windows/build-extra.
+ */
 static int platform_specific_upgrade(void)
 {
+	struct child_process cp = CHILD_PROCESS_INIT;
+
+	strvec_push(&cp.args, "git-update-git-for-windows");
+	return run_command(&cp);
+}
+#else
+static int platform_specific_upgrade(void)
+{
+	error(_("update-microsoft-git is not supported on this platform"));
 	return 1;
 }
+#endif
 
 static const char builtin_update_microsoft_git_usage[] =
 	N_("git update-microsoft-git");

From 99cb2d140110e8547434733384eac8c0caf12ca5 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <derrickstolee@github.com>
Date: Wed, 16 Mar 2022 10:29:15 -0400
Subject: [PATCH 146/207] .github: update PULL_REQUEST_TEMPLATE.md

We have long inherited the pull request template from
git-for-windows/git, but we should probably do a better job of
specifying the need for why a PR in microsoft/git exists instead of an
upstream contribution.

Signed-off-by: Derrick Stolee <derrickstolee@github.com>
---
 .github/PULL_REQUEST_TEMPLATE.md | 28 ++++++++--------------------
 1 file changed, 8 insertions(+), 20 deletions(-)

diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 7baf31f2c471ec..3cb48d8582f31c 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,22 +1,10 @@
 Thanks for taking the time to contribute to Git!
 
-Those seeking to contribute to the Git for Windows fork should see
-http://gitforwindows.org/#contribute on how to contribute Windows specific
-enhancements.
-
-If your contribution is for the core Git functions and documentation
-please be aware that the Git community does not use the github.com issues
-or pull request mechanism for their contributions.
-
-Instead, we use the Git mailing list (git@vger.kernel.org) for code and
-documentation submissions, code reviews, and bug reports. The
-mailing list is plain text only (anything with HTML is sent directly
-to the spam folder).
-
-Nevertheless, you can use GitGitGadget (https://gitgitgadget.github.io/)
-to conveniently send your Pull Requests commits to our mailing list.
-
-For a single-commit pull request, please *leave the pull request description
-empty*: your commit message itself should describe your changes.
-
-Please read the "guidelines for contributing" linked above!
+This fork contains changes specific to monorepo scenarios. If you are an
+external contributor, then please detail your reason for submitting to
+this fork:
+
+* [ ] This is an early version of work already under review upstream.
+* [ ] This change only applies to interactions with Azure DevOps and the
+      GVFS Protocol.
+* [ ] This change only applies to the virtualization hook and VFS for Git.

From 09d17b9162d754118f5b21a4dc97156f2b42fc23 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Thu, 29 Apr 2021 11:18:46 -0400
Subject: [PATCH 147/207] update-microsoft-git: use brew on macOS

The steps to update the microsoft-git cask are:

1. brew update
2. brew upgrade --cask microsoft-git

This is adapted from the UpgradeVerb within microsoft/scalar. There is
one important simplification: Scalar needed to check 'brew list --cask'
to find out if the 'scalar' cask or the 'scalar-azrepos' cask was
installed (which determined if the 'microsoft-git' cask was a necessary
dependency). We do not need that here, since we are already in the
microsoft-git cask.
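
For reference, the user-visible flow on macOS is expected to look roughly
like this (Homebrew's own output is abbreviated):

    $ git update-microsoft-git
    Updating Homebrew with 'brew update'
    ...
    Upgrading microsoft-git with 'brew upgrade --cask microsoft-git'
    ...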

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 builtin/update-microsoft-git.c | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/builtin/update-microsoft-git.c b/builtin/update-microsoft-git.c
index 6c426d66ea5f16..54e196b70116f2 100644
--- a/builtin/update-microsoft-git.c
+++ b/builtin/update-microsoft-git.c
@@ -17,6 +17,38 @@ static int platform_specific_upgrade(void)
 	strvec_push(&cp.args, "git-update-git-for-windows");
 	return run_command(&cp);
 }
+#elif defined(__APPLE__)
+/*
+ * On macOS, we expect the user to have the microsoft-git
+ * cask installed via Homebrew. We check using these
+ * commands:
+ *
+ * 1. 'brew update' to get latest versions.
+ * 2. 'brew upgrade --cask microsoft-git' to get the
+ *    latest version.
+ */
+static int platform_specific_upgrade(void)
+{
+	int res;
+	struct child_process update = CHILD_PROCESS_INIT;
+	struct child_process upgrade = CHILD_PROCESS_INIT;
+
+	printf("Updating Homebrew with 'brew update'\n");
+
+	strvec_pushl(&update.args, "brew", "update", NULL);
+	res = run_command(&update);
+
+	if (res) {
+		error(_("'brew update' failed; is brew installed?"));
+		return 1;
+	}
+
+	printf("Upgrading microsoft-git with 'brew upgrade --cask microsoft-git'\n");
+	strvec_pushl(&upgrade.args, "brew", "upgrade", "--cask", "microsoft-git", NULL);
+	res = run_command(&upgrade);
+
+	return res;
+}
 #else
 static int platform_specific_upgrade(void)
 {

From a37523631879d71ccd5b5733e8b2e2f4bf9099be Mon Sep 17 00:00:00 2001
From: Lessley Dennington <ledennin@microsoft.com>
Date: Tue, 4 May 2021 09:03:55 -0700
Subject: [PATCH 148/207] Adjust README.md for microsoft/git

Microsoft's fork of Git is not quite Git for Windows; therefore, we want
to tell the keen reader all about it. :-)

Co-authored-by: Kyle Rader <kyrader@microsoft.com>
Co-authored-by: Victoria Dye <vdye@github.com>
Co-authored-by: Jeff Hostetler <jeffhost@microsoft.com>
Co-authored-by: Matthew Cheetham <mattche@microsoft.com>
Co-authored-by: Derrick Stolee <dstolee@microsoft.com>
Co-authored-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 README.md | 369 +++++++++++++++++++++++++++++++++---------------------
 1 file changed, 223 insertions(+), 146 deletions(-)

diff --git a/README.md b/README.md
index 4eabce53d89e1f..b39764e9ad1dcd 100644
--- a/README.md
+++ b/README.md
@@ -1,148 +1,225 @@
-Git for Windows
-===============
-
-[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](CODE_OF_CONDUCT.md)
-[![Open in Visual Studio Code](https://img.shields.io/static/v1?logo=visualstudiocode&label=&message=Open%20in%20Visual%20Studio%20Code&labelColor=2c2c32&color=007acc&logoColor=007acc)](https://open.vscode.dev/git-for-windows/git)
-[![Build status](https://github.com/git-for-windows/git/workflows/CI/badge.svg)](https://github.com/git-for-windows/git/actions?query=branch%3Amain+event%3Apush)
-[![Join the chat at https://gitter.im/git-for-windows/git](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/git-for-windows/git?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
-This is [Git for Windows](http://git-for-windows.github.io/), the Windows port
-of [Git](http://git-scm.com/).
-
-The Git for Windows project is run using a [governance
-model](http://git-for-windows.github.io/governance-model.html). If you
-encounter problems, you can report them as [GitHub
-issues](https://github.com/git-for-windows/git/issues), discuss them on Git
-for Windows' [Google Group](http://groups.google.com/group/git-for-windows),
-and [contribute bug
-fixes](https://github.com/git-for-windows/git/wiki/How-to-participate).
-
-To build Git for Windows, please either install [Git for Windows'
-SDK](https://gitforwindows.org/#download-sdk), start its `git-bash.exe`, `cd`
-to your Git worktree and run `make`, or open the Git worktree as a folder in
-Visual Studio.
-
-To verify that your build works, use one of the following methods:
-
-- If you want to test the built executables within Git for Windows' SDK,
-  prepend `<worktree>/bin-wrappers` to the `PATH`.
-- Alternatively, run `make install` in the Git worktree.
-- If you need to test this in a full installer, run `sdk build
-  git-and-installer`.
-- You can also "install" Git into an existing portable Git via `make install
-  DESTDIR=<dir>` where `<dir>` refers to the top-level directory of the
-  portable Git. In this instance, you will want to prepend that portable Git's
-  `/cmd` directory to the `PATH`, or test by running that portable Git's
-  `git-bash.exe` or `git-cmd.exe`.
-- If you built using a recent Visual Studio, you can use the menu item
-  `Build>Install git` (you will want to click on `Project>CMake Settings for
-  Git` first, then click on `Edit JSON` and then point `installRoot` to the
-  `mingw64` directory of an already-unpacked portable Git).
-
-  As in the previous  bullet point, you will then prepend `/cmd` to the `PATH`
-  or run using the portable Git's `git-bash.exe` or `git-cmd.exe`.
-- If you want to run the built executables in-place, but in a CMD instead of
-  inside a Bash, you can run a snippet like this in the `git-bash.exe` window
-  where Git was built (ensure that the `EOF` line has no leading spaces), and
-  then paste into the CMD window what was put in the clipboard:
-
-  ```sh
-  clip.exe <<EOF
-  set GIT_EXEC_PATH=$(cygpath -aw .)
-  set PATH=$(cygpath -awp ".:contrib/scalar:/mingw64/bin:/usr/bin:$PATH")
-  set GIT_TEMPLATE_DIR=$(cygpath -aw templates/blt)
-  set GITPERLLIB=$(cygpath -aw perl/build/lib)
-  EOF
-  ```
-- If you want to run the built executables in-place, but outside of Git for
-  Windows' SDK, and without an option to set/override any environment
-  variables (e.g. in Visual Studio's debugger), you can call the Git executable
-  by its absolute path and use the `--exec-path` option, like so:
-
-  ```cmd
-  C:\git-sdk-64\usr\src\git\git.exe --exec-path=C:\git-sdk-64\usr\src\git help
-  ```
-
-  Note: for this to work, you have to hard-link (or copy) the `.dll` files from
-  the `/mingw64/bin` directory to the Git worktree, or add the `/mingw64/bin`
-  directory to the `PATH` somehow or other.
-
-To make sure that you are testing the correct binary, call `./git.exe version`
-in the Git worktree, and then call `git version` in a directory/window where
-you want to test Git, and verify that they refer to the same version (you may
-even want to pass the command-line option `--build-options` to look at the
-exact commit from which the Git version was built).
-
-Git - fast, scalable, distributed revision control system
+`microsoft/git` and the Scalar CLI
+==================================
+
+[![Open in Visual Studio Code](https://open.vscode.dev/badges/open-in-vscode.svg)](https://open.vscode.dev/microsoft/git)
+[![Build status](https://github.com/microsoft/git/workflows/CI/badge.svg)](https://github.com/microsoft/git/actions/workflows/main.yml)
+
+This is `microsoft/git`, a special Git distribution to support monorepo scenarios. If you are _not_
+working in a monorepo, you are likely searching for
+[Git for Windows](https://git-for-windows.github.io/) instead of this codebase.
+
+In addition to the Git command-line interface (CLI), `microsoft/git` includes the Scalar CLI to
+further enable working with extremely large repositories. Scalar is a tool to apply the latest
+recommendations and use the most advanced Git features. You can read
+[the Scalar CLI documentation](Documentation/scalar.txt) or read our
+[Scalar user guide](contrib/scalar/docs/index.md) including
+[the philosophy of Scalar](contrib/scalar/docs/philosophy.md).
+
+If you encounter problems with `microsoft/git`, please report them as
+[GitHub issues](https://github.com/microsoft/git/issues).
+
+Why is this fork needed?
+=========================================================
+
+Git is awesome - it's a fast, scalable, distributed version control system with an unusually rich
+command set that provides both high-level operations and full access to internals. What more could
+you ask for?
+
+Well, because Git is a distributed version control system, each Git repository has a copy of all
+files in the entire history. As large repositories, aka _monorepos_, grow, Git can struggle to
+manage all that data. As Git commands like `status` and `fetch` get slower, developers stop waiting
+and start switching context. And context switches harm developer productivity.
+
+`microsoft/git` is focused on addressing these performance woes and making the monorepo developer
+experience first-class. The Scalar CLI packages all of these recommendations into a simple set of
+commands.
+
+One major feature that Scalar recommends is [partial clone](https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/),
+which reduces the amount of data transferred in order to work with a Git repository. While several
+services such as GitHub support partial clone, Azure Repos instead has an older version of this
+functionality called
+[the GVFS protocol](https://github.com/microsoft/VFSForGit/blob/HEAD/Protocol.md).
+The integration with the GVFS protocol present in `microsoft/git` is not appropriate to include in
+the core Git client because partial clone is the official version of that functionality.
+
+Downloading and Installing
 =========================================================
 
-Git is a fast, scalable, distributed revision control system with an
-unusually rich command set that provides both high-level operations
-and full access to internals.
-
-Git is an Open Source project covered by the GNU General Public
-License version 2 (some parts of it are under different licenses,
-compatible with the GPLv2). It was originally written by Linus
-Torvalds with help of a group of hackers around the net.
-
-Please read the file [INSTALL][] for installation instructions.
-
-Many Git online resources are accessible from <https://git-scm.com/>
-including full documentation and Git related tools.
-
-See [Documentation/gittutorial.txt][] to get started, then see
-[Documentation/giteveryday.txt][] for a useful minimum set of commands, and
-`Documentation/git-<commandname>.txt` for documentation of each command.
-If git has been correctly installed, then the tutorial can also be
-read with `man gittutorial` or `git help tutorial`, and the
-documentation of each command with `man git-<commandname>` or `git help
-<commandname>`.
-
-CVS users may also want to read [Documentation/gitcvs-migration.txt][]
-(`man gitcvs-migration` or `git help cvs-migration` if git is
-installed).
-
-The user discussion and development of core Git take place on the Git
-mailing list -- everyone is welcome to post bug reports, feature
-requests, comments and patches to git@vger.kernel.org (read
-[Documentation/SubmittingPatches][] for instructions on patch submission
-and [Documentation/CodingGuidelines][]).
-
-Those wishing to help with error message, usage and informational message
-string translations (localization l10) should see [po/README.md][]
-(a `po` file is a Portable Object file that holds the translations).
-
-To subscribe to the list, send an email to <git+subscribe@vger.kernel.org>
-(see https://subspace.kernel.org/subscribing.html for details). The mailing
-list archives are available at <https://lore.kernel.org/git/>,
-<https://marc.info/?l=git> and other archival sites.
-The core git mailing list is plain text (no HTML!).
-
-Issues which are security relevant should be disclosed privately to
-the Git Security mailing list <git-security@googlegroups.com>.
-
-The maintainer frequently sends the "What's cooking" reports that
-list the current status of various development topics to the mailing
-list.  The discussion following them give a good reference for
-project status, development direction and remaining tasks.
-
-The name "git" was given by Linus Torvalds when he wrote the very
-first version. He described the tool as "the stupid content tracker"
-and the name as (depending on your mood):
-
- - random three-letter combination that is pronounceable, and not
-   actually used by any common UNIX command.  The fact that it is a
-   mispronunciation of "get" may or may not be relevant.
- - stupid. contemptible and despicable. simple. Take your pick from the
-   dictionary of slang.
- - "global information tracker": you're in a good mood, and it actually
-   works for you. Angels sing, and a light suddenly fills the room.
- - "goddamn idiotic truckload of sh*t": when it breaks
-
-[INSTALL]: INSTALL
-[Documentation/gittutorial.txt]: Documentation/gittutorial.txt
-[Documentation/giteveryday.txt]: Documentation/giteveryday.txt
-[Documentation/gitcvs-migration.txt]: Documentation/gitcvs-migration.txt
-[Documentation/SubmittingPatches]: Documentation/SubmittingPatches
-[Documentation/CodingGuidelines]: Documentation/CodingGuidelines
-[po/README.md]: po/README.md
+If you're working in a monorepo and want to take advantage of the performance boosts in
+`microsoft/git`, then you can download the latest version installer for your OS from the
+[Releases page](https://github.com/microsoft/git/releases). Alternatively, you can opt to install
+via the command line, using the instructions below for supported OSes:
+
+## Windows
+
+__Note:__ Winget is still in public preview, meaning you currently
+[need to take special installation steps](https://docs.microsoft.com/en-us/windows/package-manager/winget/#install-winget):
+either manually install the `.appxbundle` available at the
+[preview version of App Installer](https://www.microsoft.com/p/app-installer/9nblggh4nns1?ocid=9nblggh4nns1_ORSEARCH_Bing&rtc=1&activetab=pivot:overviewtab),
+or participate in the
+[Windows Insider flight ring](https://insider.windows.com/)
+since `winget` is available by default on preview versions of Windows.
+
+To install with Winget, run
+
+```shell
+winget install --id microsoft.git
+```
+
+Double-check that you have the right version by running these commands,
+which should have the same output:
+
+```shell
+git version
+scalar version
+```
+
+To upgrade `microsoft/git`, use the following Git command, which will download and install the latest
+release.
+
+```shell
+git update-microsoft-git
+```
+
+You may also be alerted with a notification to upgrade, which presents a single-click process for
+running `git update-microsoft-git`.
+
+## macOS
+
+To install `microsoft/git` on macOS, first [be sure that Homebrew is installed](https://brew.sh/) then
+install the `microsoft-git` cask with these steps:
+
+```shell
+brew tap microsoft/git
+brew install --cask microsoft-git
+```
+
+Double-check that you have the right version by running these commands,
+which should have the same output:
+
+```shell
+git version
+scalar version
+```
+
+To upgrade `microsoft/git`, you can run the necessary `brew` commands:
+
+```shell
+brew update
+brew upgrade --cask microsoft-git
+```
+
+Or you can run the `git update-microsoft-git` command, which will run those brew commands for you.
+
+## Linux
+### Ubuntu/Debian distributions
+
+On newer distributions*, you can install using the most recent Debian package.
+To download and validate the signature of this package, run the following:
+
+```shell
+# Install needed packages
+sudo apt-get install -y curl debsig-verify
+
+# Download public key signature file
+curl -s https://api.github.com/repos/microsoft/git/releases/latest \
+| grep -E 'browser_download_url.*msft-git-public.asc' \
+| cut -d : -f 2,3 \
+| tr -d \" \
+| xargs -I 'url' curl -L -o msft-git-public.asc 'url'
+
+# De-armor public key signature file
+gpg --output msft-git-public.gpg --dearmor msft-git-public.asc
+
+# Note that the fingerprint of this key is "B8F12E25441124E1", which you can
+# determine by running:
+gpg --show-keys msft-git-public.asc | head -n 2 | tail -n 1 | tail -c 17
+
+# Copy de-armored public key to debsig keyring folder
+sudo mkdir /usr/share/debsig/keyrings/B8F12E25441124E1
+sudo mv msft-git-public.gpg /usr/share/debsig/keyrings/B8F12E25441124E1/
+
+# Create an appropriate policy file
+sudo mkdir /etc/debsig/policies/B8F12E25441124E1
+cat > generic.pol << EOL
+<?xml version="1.0"?>
+<!DOCTYPE Policy SYSTEM "https://www.debian.org/debsig/1.0/policy.dtd">
+<Policy xmlns="https://www.debian.org/debsig/1.0/">
+  <Origin Name="Microsoft Git" id="B8F12E25441124E1" Description="Microsoft Git public key"/>
+  <Selection>
+    <Required Type="origin" File="msft-git-public.gpg" id="B8F12E25441124E1"/>
+  </Selection>
+  <Verification MinOptional="0">
+    <Required Type="origin" File="msft-git-public.gpg" id="B8F12E25441124E1"/>
+  </Verification>
+</Policy>
+EOL
+
+sudo mv generic.pol /etc/debsig/policies/B8F12E25441124E1/generic.pol
+
+# Download Debian package
+curl -s https://api.github.com/repos/microsoft/git/releases/latest \
+| grep "browser_download_url.*deb" \
+| cut -d : -f 2,3 \
+| tr -d \" \
+| xargs -I 'url' curl -L -o msft-git.deb 'url'
+
+# Verify
+debsig-verify msft-git.deb
+
+# Install
+sudo dpkg -i msft-git.deb
+```
+
+Double-check that you have the right version by running these commands,
+which should have the same output:
+
+```shell
+git version
+scalar version
+```
+
+To upgrade, you will need to repeat these steps to reinstall.
+
+*Older distributions are missing some required dependencies. Even
+though the package may appear to install successfully, `microsoft/git`
+will not function as expected. If you are running Ubuntu 20.04 or
+older, please follow the install-from-source instructions below
+instead of installing the Debian package.
+
+### Installing From Source
+
+On older or other distributions, you will need to compile and install `microsoft/git` from source:
+
+```shell
+git clone https://github.com/microsoft/git microsoft-git
+cd microsoft-git
+make -j12 prefix=/usr/local
+sudo make -j12 prefix=/usr/local install
+```
+
+For more assistance building Git from source, see
+[the INSTALL file in the core Git project](https://github.com/git/git/blob/master/INSTALL).
+
+#### Common Debian-based dependencies
+While the INSTALL file covers dependencies in detail, here is a short list of common required dependencies on older Debian/Ubuntu distributions:
+
+```shell
+sudo apt-get update
+sudo apt-get install libz-dev libssl-dev libcurl4-gnutls-dev libexpat1-dev gettext cmake gcc
+```
+
+Contributing
+=========================================================
+
+This project welcomes contributions and suggestions.  Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit <https://cla.microsoft.com>.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

From 7c042849132cc79b9c37ee7c2869f81aacd22195 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Mon, 26 Apr 2021 22:08:18 +0200
Subject: [PATCH 149/207] scalar: implement a minimal JSON parser

No grown-up C project comes without its own JSON parser.

Just kidding!

We need to parse a JSON result when determining which cache server to
use. While searching for the needles `"CacheServers":[`, `"Url":"` and
`"GlobalDefault":true` _happens_ to work right now, it is fragile: it
depends on there being no whitespace padding and on the order of the
fields remaining as-is.

Let's implement a super simple JSON parser (at the cost of being
slightly inefficient) for that purpose. To avoid allocating a ton of
memory, we make it callback-based. And to save on complexity, let's
not even bother validating the input properly; we simply rely on Azure
Repos to produce correct JSON.
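
To give a feel for the callback style, here is a minimal sketch of how
a caller might pull one string value out of a JSON document. It is not
part of this patch: the key name `.Url` and the helpers `grab_url()`
and `find_url()` are made up for illustration; only the `json_iterator`
API introduced below is real.

```c
#include "git-compat-util.h"
#include "strbuf.h"
#include "json-parser.h"

/*
 * Callback invoked once per parsed value; returning non-zero stops the
 * iteration. Object members are reported with dotted keys, so a
 * top-level member "Url" shows up as ".Url".
 */
static int grab_url(struct json_iterator *it)
{
	if (it->type == JSON_STRING && !strcmp(it->key.buf, ".Url")) {
		*(char **)it->fn_data = strbuf_detach(&it->string_value, NULL);
		return 1;
	}
	return 0;
}

static char *find_url(const char *json)
{
	char *url = NULL;
	struct json_iterator it = JSON_ITERATOR_INIT(json, grab_url, &url);

	iterate_json(&it);
	reset_iterator(&it); /* releases the iterator's internal buffers */
	return url; /* NULL if there was no ".Url" string in the input */
}
```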

Note: An alternative would have been to use an existing solution such
as JSON-C, CentiJSON or JSMN. However, they are all a lot larger than
the current solution; the smallest, JSMN, which does not even provide
parsed string values (something we actually need), weighs in at 471
lines, while we get away with 182 + 29 lines for the C file and the
header, respectively.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 Makefile                            |   3 +-
 contrib/buildsystems/CMakeLists.txt |   2 +-
 json-parser.c                       | 183 ++++++++++++++++++++++++++++
 json-parser.h                       |  31 +++++
 meson.build                         |   2 +-
 5 files changed, 218 insertions(+), 3 deletions(-)
 create mode 100644 json-parser.c
 create mode 100644 json-parser.h

diff --git a/Makefile b/Makefile
index a2e2a58f6bc198..92eb49002469ad 100644
--- a/Makefile
+++ b/Makefile
@@ -2786,6 +2786,7 @@ GIT_OBJS += git.o
 .PHONY: git-objs
 git-objs: $(GIT_OBJS)
 
+SCALAR_OBJS := json-parser.o
 SCALAR_OBJS += scalar.o
 .PHONY: scalar-objs
 scalar-objs: $(SCALAR_OBJS)
@@ -2937,7 +2938,7 @@ $(REMOTE_CURL_PRIMARY): remote-curl.o http.o http-walker.o $(LAZYLOAD_LIBCURL_OB
 	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
 		$(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS)
 
-scalar$X: scalar.o GIT-LDFLAGS $(GITLIBS)
+scalar$X: $(SCALAR_OBJS) GIT-LDFLAGS $(GITLIBS)
 	$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) \
 		$(filter %.o,$^) $(LIBS)
 
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index 6c562b6d633adf..b7d0ca62973267 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -804,7 +804,7 @@ target_link_libraries(git-sh-i18n--envsubst common-main)
 add_executable(git-shell ${CMAKE_SOURCE_DIR}/shell.c)
 target_link_libraries(git-shell common-main)
 
-add_executable(scalar ${CMAKE_SOURCE_DIR}/scalar.c)
+add_executable(scalar ${CMAKE_SOURCE_DIR}/scalar.c ${CMAKE_SOURCE_DIR}/json-parser.c)
 target_link_libraries(scalar common-main)
 
 if(CURL_FOUND)
diff --git a/json-parser.c b/json-parser.c
new file mode 100644
index 00000000000000..5f11ef9913fb50
--- /dev/null
+++ b/json-parser.c
@@ -0,0 +1,183 @@
+#include "git-compat-util.h"
+#include "hex.h"
+#include "json-parser.h"
+
+int reset_iterator(struct json_iterator *it)
+{
+	it->p = it->begin = it->json;
+	strbuf_release(&it->key);
+	strbuf_release(&it->string_value);
+	it->type = JSON_NULL;
+	return -1;
+}
+
+static int parse_json_string(struct json_iterator *it, struct strbuf *out)
+{
+	const char *begin = it->p;
+
+	if (*(it->p)++ != '"')
+		return error("expected double quote: '%.*s'", 5, begin),
+			reset_iterator(it);
+
+	strbuf_reset(&it->string_value);
+#define APPEND(c) strbuf_addch(out, c)
+	while (*it->p != '"') {
+		switch (*it->p) {
+		case '\0':
+			return error("incomplete string: '%s'", begin),
+				reset_iterator(it);
+		case '\\':
+			it->p++;
+			if (*it->p == '\\' || *it->p == '"')
+				APPEND(*it->p);
+			else if (*it->p == 'b')
+				APPEND(8);
+			else if (*it->p == 't')
+				APPEND(9);
+			else if (*it->p == 'n')
+				APPEND(10);
+			else if (*it->p == 'f')
+				APPEND(12);
+			else if (*it->p == 'r')
+				APPEND(13);
+			else if (*it->p == 'u') {
+				unsigned char binary[2];
+				int i;
+
+				if (hex_to_bytes(binary, it->p + 1, 2) < 0)
+					return error("invalid: '%.*s'",
+						     6, it->p - 1),
+						reset_iterator(it);
+				it->p += 4;
+
+				i = (binary[0] << 8) | binary[1];
+				if (i < 0x80)
+					APPEND(i);
+				else if (i < 0x0800) {
+					APPEND(0xc0 | ((i >> 6) & 0x1f));
+					APPEND(0x80 | (i & 0x3f));
+				} else if (i < 0x10000) {
+					APPEND(0xe0 | ((i >> 12) & 0x0f));
+					APPEND(0x80 | ((i >> 6) & 0x3f));
+					APPEND(0x80 | (i & 0x3f));
+				} else {
+					APPEND(0xf0 | ((i >> 18) & 0x07));
+					APPEND(0x80 | ((i >> 12) & 0x3f));
+					APPEND(0x80 | ((i >> 6) & 0x3f));
+					APPEND(0x80 | (i & 0x3f));
+				}
+			}
+			break;
+		default:
+			APPEND(*it->p);
+		}
+		it->p++;
+	}
+
+	it->end = it->p++;
+	return 0;
+}
+
+static void skip_whitespace(struct json_iterator *it)
+{
+	while (isspace(*it->p))
+		it->p++;
+}
+
+int iterate_json(struct json_iterator *it)
+{
+	skip_whitespace(it);
+	it->begin = it->p;
+
+	switch (*it->p) {
+	case '\0':
+		return reset_iterator(it), 0;
+	case 'n':
+		if (!starts_with(it->p, "null"))
+			return error("unexpected value: %.*s", 4, it->p),
+				reset_iterator(it);
+		it->type = JSON_NULL;
+		it->end = it->p = it->begin + 4;
+		break;
+	case 't':
+		if (!starts_with(it->p, "true"))
+			return error("unexpected value: %.*s", 4, it->p),
+				reset_iterator(it);
+		it->type = JSON_TRUE;
+		it->end = it->p = it->begin + 4;
+		break;
+	case 'f':
+		if (!starts_with(it->p, "false"))
+			return error("unexpected value: %.*s", 5, it->p),
+				reset_iterator(it);
+		it->type = JSON_FALSE;
+		it->end = it->p = it->begin + 5;
+		break;
+	case '-': case '.':
+	case '0': case '1': case '2': case '3': case '4':
+	case '5': case '6': case '7': case '8': case '9':
+		it->type = JSON_NUMBER;
+		it->end = it->p = it->begin + strspn(it->p, "-.0123456789");
+		break;
+	case '"':
+		it->type = JSON_STRING;
+		if (parse_json_string(it, &it->string_value) < 0)
+			return -1;
+		break;
+	case '[': {
+		const char *save = it->begin;
+		size_t key_offset = it->key.len;
+		int i = 0, res;
+
+		for (it->p++, skip_whitespace(it); *it->p != ']'; i++) {
+			strbuf_addf(&it->key, "[%d]", i);
+
+			if ((res = iterate_json(it)))
+				return reset_iterator(it), res;
+			strbuf_setlen(&it->key, key_offset);
+
+			skip_whitespace(it);
+			if (*it->p == ',')
+				it->p++;
+		}
+
+		it->type = JSON_ARRAY;
+		it->begin = save;
+		it->end = it->p;
+		it->p++;
+		break;
+	}
+	case '{': {
+		const char *save = it->begin;
+		size_t key_offset = it->key.len;
+		int res;
+
+		strbuf_addch(&it->key, '.');
+		for (it->p++, skip_whitespace(it); *it->p != '}'; ) {
+			strbuf_setlen(&it->key, key_offset + 1);
+			if (parse_json_string(it, &it->key) < 0)
+				return -1;
+			skip_whitespace(it);
+			if (*(it->p)++ != ':')
+				return error("expected colon: %.*s", 5, it->p),
+					reset_iterator(it);
+
+			if ((res = iterate_json(it)))
+				return res;
+
+			skip_whitespace(it);
+			if (*it->p == ',')
+				it->p++;
+		}
+		strbuf_setlen(&it->key, key_offset);
+
+		it->type = JSON_OBJECT;
+		it->begin = save;
+		it->end = it->p;
+		it->p++;
+		break;
+	}
+	}
+
+	return it->fn(it);
+}
diff --git a/json-parser.h b/json-parser.h
new file mode 100644
index 00000000000000..cb1f4832273e57
--- /dev/null
+++ b/json-parser.h
@@ -0,0 +1,31 @@
+#ifndef JSON_PARSER_H
+#define JSON_PARSER_H
+
+#include "strbuf.h"
+
+struct json_iterator {
+	const char *json, *p, *begin, *end;
+	struct strbuf key, string_value;
+	enum {
+		JSON_NULL = 0,
+		JSON_FALSE,
+		JSON_TRUE,
+		JSON_NUMBER,
+		JSON_STRING,
+		JSON_ARRAY,
+		JSON_OBJECT
+	} type;
+	int (*fn)(struct json_iterator *it);
+	void *fn_data;
+};
+#define JSON_ITERATOR_INIT(json_, fn_, fn_data_) { \
+	.json = json_, .p = json_, \
+	.key = STRBUF_INIT, .string_value = STRBUF_INIT, \
+	.fn = fn_, .fn_data = fn_data_ \
+}
+
+int iterate_json(struct json_iterator *it);
+/* Releases the iterator, always returns -1 */
+int reset_iterator(struct json_iterator *it);
+
+#endif
diff --git a/meson.build b/meson.build
index 5c1672b89c40b8..51e56e19a12802 100644
--- a/meson.build
+++ b/meson.build
@@ -1609,7 +1609,7 @@ test_dependencies += executable('git-http-backend',
 )
 
 bin_wrappers += executable('scalar',
-  sources: 'scalar.c',
+  sources: ['scalar.c', 'json-parser.c'],
   dependencies: [libgit, common_main],
   install: true,
   install_dir: get_option('libexecdir') / 'git-core',

From 864da7651aaa4ae6d2ef68cb5d8dc8af4349a4d9 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Tue, 24 Aug 2021 21:05:42 +0200
Subject: [PATCH 150/207] scalar clone: support GVFS-enabled remote
 repositories

With this change, we come a big step closer to feature parity with
Scalar: this allows cloning from Azure Repos (which do not support
partial clones at the time of writing).

We use the just-implemented JSON parser to parse the response we get
from the `gvfs/config` endpoint. Note that this response may or may not
contain information about a cache server; the presence or absence of a
cache server, however, says nothing about the ability to speak the GVFS
protocol (whereas the presence of the `gvfs/config` endpoint itself
does).
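
Purely for illustration (the values are invented and the real payload
contains many more fields), the slice of a `gvfs/config` response that
this code cares about might look roughly like the snippet below. With
the parser's key convention, the interesting values show up under
`.CacheServers[0].Url` and `.CacheServers[0].GlobalDefault`.

```c
/* Invented example; not an actual Azure Repos response. */
static const char *example_gvfs_config =
	"{ \"CacheServers\": ["
	"    { \"Url\": \"https://cache.example.com/\","
	"      \"GlobalDefault\": true } ] }";
```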

An alternative considered during the development of this patch was to
perform simple string matching instead of parsing the JSON-formatted
data; however, this would have been fragile, as the response contains
free-form text (e.g. the repository's description) that might contain
parts that would confuse a simple string matcher (but not a proper JSON
parser).

Note: we need to limit the re-try logic in `scalar clone` to the
non-GVFS case: the call to `set_config()` to un-set the partial clone
settings would otherwise fail, because those settings do not exist in
the GVFS protocol case. This at least gives us a clearer reason why
such a fetch fails.

Co-authored-by: Derrick Stolee <dstolee@microsoft.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 diagnose.c |   8 ++++
 scalar.c   | 130 +++++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 135 insertions(+), 3 deletions(-)

diff --git a/diagnose.c b/diagnose.c
index b11931df86c4ba..24dfa2a1d9a6b5 100644
--- a/diagnose.c
+++ b/diagnose.c
@@ -13,6 +13,7 @@
 #include "packfile.h"
 #include "parse-options.h"
 #include "write-or-die.h"
+#include "config.h"
 
 struct archive_dir {
 	const char *path;
@@ -184,6 +185,7 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
 	struct strvec archiver_args = STRVEC_INIT;
 	char **argv_copy = NULL;
 	int stdout_fd = -1, archiver_fd = -1;
+	char *cache_server_url = NULL;
 	struct strbuf buf = STRBUF_INIT;
 	int res;
 	struct archive_dir archive_dirs[] = {
@@ -219,6 +221,11 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
 	get_version_info(&buf, 1);
 
 	strbuf_addf(&buf, "Repository root: %s\n", the_repository->worktree);
+
+	git_config_get_string("gvfs.cache-server", &cache_server_url);
+	strbuf_addf(&buf, "Cache Server: %s\n\n",
+		    cache_server_url ? cache_server_url : "None");
+
 	get_disk_info(&buf);
 	write_or_die(stdout_fd, buf.buf, buf.len);
 	strvec_pushf(&archiver_args,
@@ -276,6 +283,7 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
 	free(argv_copy);
 	strvec_clear(&archiver_args);
 	strbuf_release(&buf);
+	free(cache_server_url);
 
 	return res;
 }
diff --git a/scalar.c b/scalar.c
index 6ecfcee41b549d..0e64a8cb62c7ac 100644
--- a/scalar.c
+++ b/scalar.c
@@ -19,6 +19,7 @@
 #include "help.h"
 #include "setup.h"
 #include "trace2.h"
+#include "json-parser.h"
 
 static void setup_enlistment_directory(int argc, const char **argv,
 				       const char * const *usagestr,
@@ -339,6 +340,84 @@ static int set_config(const char *fmt, ...)
 	return res;
 }
 
+/* Find N for which .CacheServers[N].GlobalDefault == true */
+static int get_cache_server_index(struct json_iterator *it)
+{
+	const char *p;
+	char *q;
+	long l;
+
+	if (it->type == JSON_TRUE &&
+	    skip_iprefix(it->key.buf, ".CacheServers[", &p) &&
+	    (l = strtol(p, &q, 10)) >= 0 && p != q &&
+	    !strcasecmp(q, "].GlobalDefault")) {
+		*(long *)it->fn_data = l;
+		return 1;
+	}
+
+	return 0;
+}
+
+struct cache_server_url_data {
+	char *key, *url;
+};
+
+/* Get .CacheServers[N].Url */
+static int get_cache_server_url(struct json_iterator *it)
+{
+	struct cache_server_url_data *data = it->fn_data;
+
+	if (it->type == JSON_STRING &&
+	    !strcasecmp(data->key, it->key.buf)) {
+		data->url = strbuf_detach(&it->string_value, NULL);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * If `cache_server_url` is `NULL`, print the list to `stdout`.
+ *
+ * Since `gvfs-helper` requires a Git directory, this _must_ be run in
+ * a worktree.
+ */
+static int supports_gvfs_protocol(const char *url, char **cache_server_url)
+{
+	struct child_process cp = CHILD_PROCESS_INIT;
+	struct strbuf out = STRBUF_INIT;
+
+	cp.git_cmd = 1;
+	strvec_pushl(&cp.args, "gvfs-helper", "--remote", url, "config", NULL);
+	if (!pipe_command(&cp, NULL, 0, &out, 512, NULL, 0)) {
+		long l = 0;
+		struct json_iterator it =
+			JSON_ITERATOR_INIT(out.buf, get_cache_server_index, &l);
+		struct cache_server_url_data data = { .url = NULL };
+
+		if (iterate_json(&it) < 0) {
+			reset_iterator(&it);
+			strbuf_release(&out);
+			return error("JSON parse error");
+		}
+		data.key = xstrfmt(".CacheServers[%ld].Url", l);
+		it.fn = get_cache_server_url;
+		it.fn_data = &data;
+		if (iterate_json(&it) < 0) {
+			reset_iterator(&it);
+			strbuf_release(&out);
+			return error("JSON parse error");
+		}
+		*cache_server_url = data.url;
+		free(data.key);
+		reset_iterator(&it);
+		strbuf_release(&out);
+		return 1;
+	}
+	strbuf_release(&out);
+	return 0; /* error out quietly */
+}
+
 static char *remote_default_branch(const char *url)
 {
 	struct child_process cp = CHILD_PROCESS_INIT;
@@ -438,6 +517,8 @@ static int cmd_clone(int argc, const char **argv)
 	const char *branch = NULL;
 	int full_clone = 0, single_branch = 0, show_progress = isatty(2);
 	int src = 1, tags = 1;
+	const char *cache_server_url = NULL;
+	char *default_cache_server_url = NULL;
 	struct option clone_options[] = {
 		OPT_STRING('b', "branch", &branch, N_("<branch>"),
 			   N_("branch to checkout after clone")),
@@ -450,6 +531,9 @@ static int cmd_clone(int argc, const char **argv)
 			 N_("create repository within 'src' directory")),
 		OPT_BOOL(0, "tags", &tags,
 			 N_("specify if tags should be fetched during clone")),
+		OPT_STRING(0, "cache-server-url", &cache_server_url,
+			   N_("<url>"),
+			   N_("the url or friendly name of the cache server")),
 		OPT_END(),
 	};
 	const char * const clone_usage[] = {
@@ -461,6 +545,7 @@ static int cmd_clone(int argc, const char **argv)
 	char *enlistment = NULL, *dir = NULL;
 	struct strbuf buf = STRBUF_INIT;
 	int res;
+	int gvfs_protocol;
 
 	argc = parse_options(argc, argv, NULL, clone_options, clone_usage, 0);
 
@@ -526,9 +611,7 @@ static int cmd_clone(int argc, const char **argv)
 	    set_config("remote.origin.fetch="
 		       "+refs/heads/%s:refs/remotes/origin/%s",
 		       single_branch ? branch : "*",
-		       single_branch ? branch : "*") ||
-	    set_config("remote.origin.promisor=true") ||
-	    set_config("remote.origin.partialCloneFilter=blob:none")) {
+		       single_branch ? branch : "*")) {
 		res = error(_("could not configure remote in '%s'"), dir);
 		goto cleanup;
 	}
@@ -538,6 +621,41 @@ static int cmd_clone(int argc, const char **argv)
 		goto cleanup;
 	}
 
+	if (set_config("credential.https://dev.azure.com.useHttpPath=true")) {
+		res = error(_("could not configure credential.useHttpPath"));
+		goto cleanup;
+	}
+
+	gvfs_protocol = cache_server_url ||
+			supports_gvfs_protocol(url, &default_cache_server_url);
+
+	if (gvfs_protocol) {
+		if (!cache_server_url)
+			cache_server_url = default_cache_server_url;
+		if (set_config("core.useGVFSHelper=true") ||
+		    set_config("core.gvfs=150") ||
+		    set_config("http.version=HTTP/1.1")) {
+			res = error(_("could not turn on GVFS helper"));
+			goto cleanup;
+		}
+		if (cache_server_url &&
+		    set_config("gvfs.cache-server=%s", cache_server_url)) {
+			res = error(_("could not configure cache server"));
+			goto cleanup;
+		}
+		if (cache_server_url)
+			fprintf(stderr, "Cache server URL: %s\n",
+				cache_server_url);
+	} else {
+		if (set_config("core.useGVFSHelper=false") ||
+		    set_config("remote.origin.promisor=true") ||
+		    set_config("remote.origin.partialCloneFilter=blob:none")) {
+			res = error(_("could not configure partial clone in "
+				      "'%s'"), dir);
+			goto cleanup;
+		}
+	}
+
 	if (!full_clone &&
 	    (res = run_git("sparse-checkout", "init", "--cone", NULL)))
 		goto cleanup;
@@ -550,6 +668,11 @@ static int cmd_clone(int argc, const char **argv)
 				"origin",
 				(tags ? NULL : "--no-tags"),
 				NULL))) {
+		if (gvfs_protocol) {
+			res = error(_("failed to prefetch commits and trees"));
+			goto cleanup;
+		}
+
 		warning(_("partial clone failed; attempting full clone"));
 
 		if (set_config("remote.origin.promisor") ||
@@ -582,6 +705,7 @@ static int cmd_clone(int argc, const char **argv)
 	free(enlistment);
 	free(dir);
 	strbuf_release(&buf);
+	free(default_cache_server_url);
 	return res;
 }
 

From fb522a1b7778838aa70416dd0b24bd18cfa77161 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Fri, 16 Apr 2021 19:47:05 +0200
Subject: [PATCH 151/207] test-gvfs-protocol: also serve smart protocol

This comes in handy, as we want to verify that `scalar clone` also works
against a GVFS-enabled remote repository.

Note that we have to set `MSYS2_ENV_CONV_EXCL` to prevent MSYS2 from
mangling `PATH_TRANSLATED`: the value _does_ look like a Unix-style
path, but MSYS2 must not be allowed to convert it into a Windows path,
because `http-backend` needs it in its unmodified form. (The MSYS2
runtime comes in when `git` is run via `bin-wrappers/git`, which is a
shell script.)

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 t/helper/test-gvfs-protocol.c | 51 +++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)

diff --git a/t/helper/test-gvfs-protocol.c b/t/helper/test-gvfs-protocol.c
index 8a83b5e27c6a1b..d67e2a32729778 100644
--- a/t/helper/test-gvfs-protocol.c
+++ b/t/helper/test-gvfs-protocol.c
@@ -1,6 +1,7 @@
 #define USE_THE_REPOSITORY_VARIABLE
 #include "git-compat-util.h"
 #include "environment.h"
+#include "gettext.h"
 #include "hex.h"
 #include "alloc.h"
 #include "setup.h"
@@ -1500,6 +1501,8 @@ static enum worker_result req__read(struct req *req, int fd)
 
 static enum worker_result dispatch(struct req *req)
 {
+	static regex_t *smart_http_regex;
+	static int initialized;
 	const char *method;
 	enum worker_result wr;
 
@@ -1548,6 +1551,54 @@ static enum worker_result dispatch(struct req *req)
 			return do__gvfs_prefetch__get(req);
 	}
 
+	if (!initialized) {
+		smart_http_regex = xmalloc(sizeof(*smart_http_regex));
+		if (regcomp(smart_http_regex, "^/(HEAD|info/refs|"
+			    "objects/info/[^/]+|git-(upload|receive)-pack)$",
+			    REG_EXTENDED)) {
+			warning("could not compile smart HTTP regex");
+			smart_http_regex = NULL;
+		}
+		initialized = 1;
+	}
+
+	if (smart_http_regex &&
+	    !regexec(smart_http_regex, req->uri_base.buf, 0, NULL, 0)) {
+		const char *ok = "HTTP/1.1 200 OK\r\n";
+		struct child_process cp = CHILD_PROCESS_INIT;
+		size_t i;
+		int res;
+
+		if (write(1, ok, strlen(ok)) < 0)
+			return error(_("could not send '%s'"), ok);
+
+		strvec_pushf(&cp.env, "REQUEST_METHOD=%s", method);
+		strvec_pushf(&cp.env, "PATH_TRANSLATED=%s",
+			     req->uri_base.buf);
+		/* Prevent MSYS2 from "converting to a Windows path" */
+		strvec_pushf(&cp.env,
+			     "MSYS2_ENV_CONV_EXCL=PATH_TRANSLATED");
+		strvec_push(&cp.env, "SERVER_PROTOCOL=HTTP/1.1");
+		if (req->quest_args.len)
+			strvec_pushf(&cp.env, "QUERY_STRING=%s",
+				     req->quest_args.buf);
+		for (i = 0; i < req->header_list.nr; i++) {
+			const char *header = req->header_list.items[i].string;
+			if (!strncasecmp("Content-Type: ", header, 14))
+				strvec_pushf(&cp.env, "CONTENT_TYPE=%s",
+					     header + 14);
+			else if (!strncasecmp("Content-Length: ", header, 16))
+				strvec_pushf(&cp.env, "CONTENT_LENGTH=%s",
+					     header + 16);
+		}
+		cp.git_cmd = 1;
+		strvec_push(&cp.args, "http-backend");
+		res = run_command(&cp);
+		close(1);
+		close(0);
+		return !!res;
+	}
+
 	return send_http_error(1, 501, "Not Implemented", -1,
 			       WR_OK | WR_HANGUP);
 }

From 8668424fc8d092bac4b595580b43db5d1ba7beeb Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Mon, 26 Apr 2021 17:31:40 +0200
Subject: [PATCH 152/207] gvfs-helper: add the `endpoint` command

We already have the `config` command that accesses the `gvfs/config`
endpoint.

To implement `scalar`, we also need to be able to access the `vsts/info`
endpoint. Let's add a command to do precisely that.
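
As a rough sketch of how a caller inside Git might drive the new
subcommand (this is not part of the patch; `scalar` will use the same
invocation later, and the helper name `fetch_vsts_info()` is made up):

```c
#include "git-compat-util.h"
#include "run-command.h"
#include "strbuf.h"

/*
 * Run `git gvfs-helper --remote <url> endpoint vsts/info` and capture
 * whatever JSON it prints on stdout. Returns 0 on success.
 */
static int fetch_vsts_info(const char *url, struct strbuf *out)
{
	struct child_process cp = CHILD_PROCESS_INIT;

	cp.git_cmd = 1;
	strvec_pushl(&cp.args, "gvfs-helper", "--remote", url,
		     "endpoint", "vsts/info", NULL);
	return pipe_command(&cp, NULL, 0, out, 512, NULL, 0);
}
```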

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 gvfs-helper.c | 61 +++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 54 insertions(+), 7 deletions(-)

diff --git a/gvfs-helper.c b/gvfs-helper.c
index f03be891f061d0..af04577d784e4f 100644
--- a/gvfs-helper.c
+++ b/gvfs-helper.c
@@ -202,6 +202,12 @@
 //            [2] Documentation/technical/long-running-process-protocol.txt
 //            [3] See GIT_TRACE_PACKET
 //
+//     endpoint
+//
+//            Fetch the given endpoint from the main Git server (specifying
+//            `gvfs/config` as the endpoint is equivalent to the `config`
+//            command mentioned above).
+//
 //////////////////////////////////////////////////////////////////
 
 #define USE_THE_REPOSITORY_VARIABLE
@@ -3128,18 +3134,20 @@ static void do_req__with_fallback(const char *url_component,
  *
  * Return server's response buffer.  This is probably a raw JSON string.
  */
-static void do__http_get__gvfs_config(struct gh__response_status *status,
-				      struct strbuf *config_data)
+static void do__http_get__simple_endpoint(struct gh__response_status *status,
+					  struct strbuf *response,
+					  const char *endpoint,
+					  const char *tr2_label)
 {
 	struct gh__request_params params = GH__REQUEST_PARAMS_INIT;
 
-	strbuf_addstr(&params.tr2_label, "GET/config");
+	strbuf_addstr(&params.tr2_label, tr2_label);
 
 	params.b_is_post = 0;
 	params.b_write_to_file = 0;
 	/* cache-servers do not handle gvfs/config REST calls */
 	params.b_permit_cache_server_if_defined = 0;
-	params.buffer = config_data;
+	params.buffer = response;
 	params.objects_mode = GH__OBJECTS_MODE__NONE;
 
 	params.object_count = 1; /* a bit of a lie */
@@ -3161,15 +3169,22 @@ static void do__http_get__gvfs_config(struct gh__response_status *status,
 		 * see any need to report progress on the upload side of
 		 * the GET.  So just report progress on the download side.
 		 */
-		strbuf_addstr(&params.progress_base_phase3_msg,
-			      "Receiving gvfs/config");
+		strbuf_addf(&params.progress_base_phase3_msg,
+			    "Receiving %s", endpoint);
 	}
 
-	do_req__with_fallback("gvfs/config", &params, status);
+	do_req__with_fallback(endpoint, &params, status);
 
 	gh__request_params__release(&params);
 }
 
+static void do__http_get__gvfs_config(struct gh__response_status *status,
+				      struct strbuf *config_data)
+{
+	do__http_get__simple_endpoint(status, config_data, "gvfs/config",
+				      "GET/config");
+}
+
 static void setup_gvfs_objects_progress(struct gh__request_params *params,
 					unsigned long num, unsigned long den)
 {
@@ -3614,6 +3629,35 @@ static enum gh__error_code do_sub_cmd__config(int argc UNUSED, const char **argv
 	return ec;
 }
 
+static enum gh__error_code do_sub_cmd__endpoint(int argc, const char **argv)
+{
+	struct gh__response_status status = GH__RESPONSE_STATUS_INIT;
+	struct strbuf data = STRBUF_INIT;
+	enum gh__error_code ec = GH__ERROR_CODE__OK;
+	const char *endpoint;
+
+	if (argc != 2)
+		return GH__ERROR_CODE__ERROR;
+	endpoint = argv[1];
+
+	trace2_cmd_mode(endpoint);
+
+	finish_init(0);
+
+	do__http_get__simple_endpoint(&status, &data, endpoint, endpoint);
+	ec = status.ec;
+
+	if (ec == GH__ERROR_CODE__OK)
+		printf("%s\n", data.buf);
+	else
+		error("config: %s", status.error_message.buf);
+
+	gh__response_status__release(&status);
+	strbuf_release(&data);
+
+	return ec;
+}
+
 /*
  * Read a list of objects from stdin and fetch them as a series of
  * single object HTTP GET requests.
@@ -4106,6 +4150,9 @@ static enum gh__error_code do_sub_cmd(int argc, const char **argv)
 	if (!strcmp(argv[0], "config"))
 		return do_sub_cmd__config(argc, argv);
 
+	if (!strcmp(argv[0], "endpoint"))
+		return do_sub_cmd__endpoint(argc, argv);
+
 	if (!strcmp(argv[0], "prefetch"))
 		return do_sub_cmd__prefetch(argc, argv);
 

From 3028c8ca3d218d40bf1bbada0a496b384a2fb75b Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Sat, 15 May 2021 00:04:20 +0200
Subject: [PATCH 153/207] dir_inside_of(): handle directory separators
 correctly

On Windows, both the forward slash and the backslash are directory
separators. Which means that `a\b\c` really is inside `a/b`. Therefore,
we need to special-case the directory separators in the helper function
`cmp_icase()` that is used in the loop in `dir_inside_of()`.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 dir.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/dir.c b/dir.c
index 04b37090f9a19e..9a44953ca2c564 100644
--- a/dir.c
+++ b/dir.c
@@ -3226,6 +3226,8 @@ static int cmp_icase(char a, char b)
 {
 	if (a == b)
 		return 0;
+	if (is_dir_sep(a))
+		return is_dir_sep(b) ? 0 : -1;
 	if (ignore_case)
 		return toupper(a) - toupper(b);
 	return a - b;

From 500c10a600d30de45e3c92b6d511b633ac32cfa0 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Thu, 6 May 2021 14:35:12 +0200
Subject: [PATCH 154/207] scalar: disable authentication in unattended mode

Modified to remove a call to is_unattended() that has not been
implemented yet.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 t/t9210-scalar.sh | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/t/t9210-scalar.sh b/t/t9210-scalar.sh
index a81662713eb876..956f40de504f3c 100755
--- a/t/t9210-scalar.sh
+++ b/t/t9210-scalar.sh
@@ -7,6 +7,13 @@ test_description='test the `scalar` command'
 GIT_TEST_MAINT_SCHEDULER="crontab:test-tool crontab cron.txt,launchctl:true,schtasks:true"
 export GIT_TEST_MAINT_SCHEDULER
 
+# Do not write any files outside the trash directory
+Scalar_UNATTENDED=1
+export Scalar_UNATTENDED
+
+GIT_ASKPASS=true
+export GIT_ASKPASS
+
 test_expect_success 'scalar shows a usage' '
 	test_expect_code 129 scalar -h
 '

From 42d4138926cbb6b10d4c31bda8e2d2a09a0e74f5 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Mon, 3 May 2021 23:41:28 +0200
Subject: [PATCH 155/207] scalar: do initialize `gvfs.sharedCache`

This finalizes the port of the `QueryVstsInfo()` function: we already
taught `gvfs-helper` to access the `vsts/info` endpoint on demand, we
implemented proper JSON parsing, and now it is time to hook it all up.
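
For illustration only (the id below is invented), the piece of a
`vsts/info` response that the shared-cache setup reads is the
repository id; when it is present, the cache key becomes
`id_<repository-id>`, and otherwise it falls back to
`url_<SHA-1 of the lower-cased URL>`.

```c
/* Invented example; the iterator reports the value under ".repository.id". */
static const char *example_vsts_info =
	"{ \"repository\": {"
	"    \"id\": \"0f0f0f0f-aaaa-bbbb-cccc-123456789abc\" } }";
/* resulting shared-cache key: "id_0f0f0f0f-aaaa-bbbb-cccc-123456789abc" */
```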

To that end, we also provide a default local cache root directory. It
works the same way as the .NET version of Scalar: it uses

    C:\scalarCache on Windows,

    ~/.scalarCache/ on macOS and

    ~/.cache/scalar on Linux

Modified to include the call to is_unattended() that was removed in a
previous commit.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 Documentation/scalar.txt |  14 ++-
 diagnose.c               |   9 +-
 scalar.c                 | 186 ++++++++++++++++++++++++++++++++++++++-
 3 files changed, 203 insertions(+), 6 deletions(-)

diff --git a/Documentation/scalar.txt b/Documentation/scalar.txt
index 7e4259c6743f9b..155df8a5c7f437 100644
--- a/Documentation/scalar.txt
+++ b/Documentation/scalar.txt
@@ -9,7 +9,8 @@ SYNOPSIS
 --------
 [verse]
 scalar clone [--single-branch] [--branch <main-branch>] [--full-clone]
-	[--[no-]src] <url> [<enlistment>]
+	[--[no-]src] [--local-cache-path <path>] [--cache-server-url <url>]
+	<url> [<enlistment>]
 scalar list
 scalar register [<enlistment>]
 scalar unregister [<enlistment>]
@@ -97,6 +98,17 @@ cloning. If the HEAD at the remote did not point at any branch when
 	A sparse-checkout is initialized by default. This behavior can be
 	turned off via `--full-clone`.
 
+--local-cache-path <path>::
+    Override the path to the local cache root directory; pre-fetched objects
+    are stored in a repository-dependent subdirectory of that path.
++
+The default is `<drive>:\.scalarCache` on Windows (on the same drive as the
+clone), and `~/.scalarCache` on macOS.
+
+--cache-server-url <url>::
+    Retrieve missing objects from the specified remote, which is expected to
+    understand the GVFS protocol.
+
 List
 ~~~~
 
diff --git a/diagnose.c b/diagnose.c
index 24dfa2a1d9a6b5..20794eb88e32b3 100644
--- a/diagnose.c
+++ b/diagnose.c
@@ -185,7 +185,7 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
 	struct strvec archiver_args = STRVEC_INIT;
 	char **argv_copy = NULL;
 	int stdout_fd = -1, archiver_fd = -1;
-	char *cache_server_url = NULL;
+	char *cache_server_url = NULL, *shared_cache = NULL;
 	struct strbuf buf = STRBUF_INIT;
 	int res;
 	struct archive_dir archive_dirs[] = {
@@ -223,8 +223,10 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
 	strbuf_addf(&buf, "Repository root: %s\n", the_repository->worktree);
 
 	git_config_get_string("gvfs.cache-server", &cache_server_url);
-	strbuf_addf(&buf, "Cache Server: %s\n\n",
-		    cache_server_url ? cache_server_url : "None");
+	git_config_get_string("gvfs.sharedCache", &shared_cache);
+	strbuf_addf(&buf, "Cache Server: %s\nLocal Cache: %s\n\n",
+		    cache_server_url ? cache_server_url : "None",
+		    shared_cache ? shared_cache : "None");
 
 	get_disk_info(&buf);
 	write_or_die(stdout_fd, buf.buf, buf.len);
@@ -284,6 +286,7 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
 	strvec_clear(&archiver_args);
 	strbuf_release(&buf);
 	free(cache_server_url);
+	free(shared_cache);
 
 	return res;
 }
diff --git a/scalar.c b/scalar.c
index 0e64a8cb62c7ac..1e5cefbc0013d0 100644
--- a/scalar.c
+++ b/scalar.c
@@ -7,6 +7,7 @@
 #include "git-compat-util.h"
 #include "abspath.h"
 #include "gettext.h"
+#include "hex.h"
 #include "parse-options.h"
 #include "config.h"
 #include "run-command.h"
@@ -15,11 +16,18 @@
 #include "fsmonitor-settings.h"
 #include "refs.h"
 #include "dir.h"
+#include "object-file.h"
 #include "packfile.h"
 #include "help.h"
 #include "setup.h"
+#include "wrapper.h"
 #include "trace2.h"
 #include "json-parser.h"
+#include "path.h"
+
+static int is_unattended(void) {
+	return git_env_bool("Scalar_UNATTENDED", 0);
+}
 
 static void setup_enlistment_directory(int argc, const char **argv,
 				       const char * const *usagestr,
@@ -106,6 +114,19 @@ static int run_git(const char *arg, ...)
 	return res;
 }
 
+static const char *ensure_absolute_path(const char *path, char **absolute)
+{
+	struct strbuf buf = STRBUF_INIT;
+
+	if (is_absolute_path(path))
+		return path;
+
+	strbuf_realpath_forgiving(&buf, path, 1);
+	free(*absolute);
+	*absolute = strbuf_detach(&buf, NULL);
+	return *absolute;
+}
+
 struct scalar_config {
 	const char *key;
 	const char *value;
@@ -418,6 +439,87 @@ static int supports_gvfs_protocol(const char *url, char **cache_server_url)
 	return 0; /* error out quietly */
 }
 
+static char *default_cache_root(const char *root)
+{
+	const char *env;
+
+	if (is_unattended())
+		return xstrfmt("%s/.scalarCache", root);
+
+#ifdef WIN32
+	(void)env;
+	return xstrfmt("%.*s.scalarCache", offset_1st_component(root), root);
+#elif defined(__APPLE__)
+	if ((env = getenv("HOME")) && *env)
+		return xstrfmt("%s/.scalarCache", env);
+	return NULL;
+#else
+	if ((env = getenv("XDG_CACHE_HOME")) && *env)
+		return xstrfmt("%s/scalar", env);
+	if ((env = getenv("HOME")) && *env)
+		return xstrfmt("%s/.cache/scalar", env);
+	return NULL;
+#endif
+}
+
+static int get_repository_id(struct json_iterator *it)
+{
+	if (it->type == JSON_STRING &&
+	    !strcasecmp(".repository.id", it->key.buf)) {
+		*(char **)it->fn_data = strbuf_detach(&it->string_value, NULL);
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Needs to run this in a worktree; gvfs-helper requires a Git repository */
+static char *get_cache_key(const char *url)
+{
+	struct child_process cp = CHILD_PROCESS_INIT;
+	struct strbuf out = STRBUF_INIT;
+	char *cache_key = NULL;
+
+	cp.git_cmd = 1;
+	strvec_pushl(&cp.args, "gvfs-helper", "--remote", url,
+		     "endpoint", "vsts/info", NULL);
+	if (!pipe_command(&cp, NULL, 0, &out, 512, NULL, 0)) {
+		char *id = NULL;
+		struct json_iterator it =
+			JSON_ITERATOR_INIT(out.buf, get_repository_id, &id);
+
+		if (iterate_json(&it) < 0)
+			warning("JSON parse error (%s)", out.buf);
+		else if (id)
+			cache_key = xstrfmt("id_%s", id);
+		free(id);
+	}
+
+	if (!cache_key) {
+		struct strbuf downcased = STRBUF_INIT;
+		int hash_algo_index = hash_algo_by_name("sha1");
+		const struct git_hash_algo *hash_algo = hash_algo_index < 0 ?
+			the_hash_algo : &hash_algos[hash_algo_index];
+		git_hash_ctx ctx;
+		unsigned char hash[GIT_MAX_RAWSZ];
+
+		strbuf_addstr(&downcased, url);
+		strbuf_tolower(&downcased);
+
+		hash_algo->init_fn(&ctx);
+		hash_algo->update_fn(&ctx, downcased.buf, downcased.len);
+		hash_algo->final_fn(hash, &ctx);
+
+		strbuf_release(&downcased);
+
+		cache_key = xstrfmt("url_%s",
+				    hash_to_hex_algop(hash, hash_algo));
+	}
+
+	strbuf_release(&out);
+	return cache_key;
+}
+
 static char *remote_default_branch(const char *url)
 {
 	struct child_process cp = CHILD_PROCESS_INIT;
@@ -512,13 +614,49 @@ void load_builtin_commands(const char *prefix UNUSED,
 	die("not implemented");
 }
 
+static int init_shared_object_cache(const char *url,
+				    const char *local_cache_root)
+{
+	struct strbuf buf = STRBUF_INIT;
+	int res = 0;
+	char *cache_key = NULL, *shared_cache_path = NULL;
+
+	if (!(cache_key = get_cache_key(url))) {
+		res = error(_("could not determine cache key for '%s'"), url);
+		goto cleanup;
+	}
+
+	shared_cache_path = xstrfmt("%s/%s", local_cache_root, cache_key);
+	if (set_config("gvfs.sharedCache=%s", shared_cache_path)) {
+		res = error(_("could not configure shared cache"));
+		goto cleanup;
+	}
+
+	strbuf_addf(&buf, "%s/pack", shared_cache_path);
+	switch (safe_create_leading_directories(buf.buf)) {
+	case SCLD_OK: case SCLD_EXISTS:
+		break; /* okay */
+	default:
+		res = error_errno(_("could not initialize '%s'"), buf.buf);
+		goto cleanup;
+	}
+
+	write_file(git_path("objects/info/alternates"),"%s\n", shared_cache_path);
+
+	cleanup:
+	strbuf_release(&buf);
+	free(shared_cache_path);
+	free(cache_key);
+	return res;
+}
+
 static int cmd_clone(int argc, const char **argv)
 {
 	const char *branch = NULL;
 	int full_clone = 0, single_branch = 0, show_progress = isatty(2);
 	int src = 1, tags = 1;
-	const char *cache_server_url = NULL;
-	char *default_cache_server_url = NULL;
+	const char *cache_server_url = NULL, *local_cache_root = NULL;
+	char *default_cache_server_url = NULL, *local_cache_root_abs = NULL;
 	struct option clone_options[] = {
 		OPT_STRING('b', "branch", &branch, N_("<branch>"),
 			   N_("branch to checkout after clone")),
@@ -534,6 +672,9 @@ static int cmd_clone(int argc, const char **argv)
 		OPT_STRING(0, "cache-server-url", &cache_server_url,
 			   N_("<url>"),
 			   N_("the url or friendly name of the cache server")),
+		OPT_STRING(0, "local-cache-path", &local_cache_root,
+			   N_("<path>"),
+			   N_("override the path for the local Scalar cache")),
 		OPT_END(),
 	};
 	const char * const clone_usage[] = {
@@ -575,11 +716,23 @@ static int cmd_clone(int argc, const char **argv)
 	if (is_directory(enlistment))
 		die(_("directory '%s' exists already"), enlistment);
 
+	ensure_absolute_path(enlistment, &enlistment);
+
 	if (src)
 		dir = xstrfmt("%s/src", enlistment);
 	else
 		dir = xstrdup(enlistment);
 
+	if (!local_cache_root)
+		local_cache_root = local_cache_root_abs =
+			default_cache_root(enlistment);
+	else
+		local_cache_root = ensure_absolute_path(local_cache_root,
+							&local_cache_root_abs);
+
+	if (!local_cache_root)
+		die(_("could not determine local cache root"));
+
 	strbuf_reset(&buf);
 	if (branch)
 		strbuf_addf(&buf, "init.defaultBranch=%s", branch);
@@ -599,8 +752,28 @@ static int cmd_clone(int argc, const char **argv)
 
 	setup_git_directory();
 
+	git_config(git_default_config, NULL);
+
+	/*
+	 * This `dir_inside_of()` call relies on git_config() having parsed the
+	 * newly-initialized repository config's `core.ignoreCase` value.
+	 */
+	if (dir_inside_of(local_cache_root, dir) >= 0) {
+		struct strbuf path = STRBUF_INIT;
+
+		strbuf_addstr(&path, enlistment);
+		if (chdir("../..") < 0 ||
+		    remove_dir_recursively(&path, 0) < 0)
+			die(_("'--local-cache-path' cannot be inside the src "
+			      "folder;\nCould not remove '%s'"), enlistment);
+
+		die(_("'--local-cache-path' cannot be inside the src folder"));
+	}
+
 	/* common-main already logs `argv` */
 	trace2_def_repo(the_repository);
+	trace2_data_intmax("scalar", the_repository, "unattended",
+			   is_unattended());
 
 	if (!branch && !(branch = remote_default_branch(url))) {
 		res = error(_("failed to get default branch for '%s'"), url);
@@ -630,6 +803,8 @@ static int cmd_clone(int argc, const char **argv)
 			supports_gvfs_protocol(url, &default_cache_server_url);
 
 	if (gvfs_protocol) {
+		if ((res = init_shared_object_cache(url, local_cache_root)))
+			goto cleanup;
 		if (!cache_server_url)
 			cache_server_url = default_cache_server_url;
 		if (set_config("core.useGVFSHelper=true") ||
@@ -706,6 +881,7 @@ static int cmd_clone(int argc, const char **argv)
 	free(dir);
 	strbuf_release(&buf);
 	free(default_cache_server_url);
+	free(local_cache_root_abs);
 	return res;
 }
 
@@ -1117,6 +1293,12 @@ int cmd_main(int argc, const char **argv)
 	struct strbuf scalar_usage = STRBUF_INIT;
 	int i;
 
+	if (is_unattended()) {
+		setenv("GIT_ASKPASS", "", 0);
+		setenv("GIT_TERMINAL_PROMPT", "false", 0);
+		git_config_push_parameter("credential.interactive=false");
+	}
+
 	while (argc > 1 && *argv[1] == '-') {
 		if (!strcmp(argv[1], "-C")) {
 			if (argc < 3)

From 9be5a2cd67e8ea331912be5da778bb1cb631ae2b Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Tue, 1 Jun 2021 23:18:14 +0200
Subject: [PATCH 156/207] scalar diagnose: include shared cache info

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 diagnose.c | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 80 insertions(+), 1 deletion(-)

diff --git a/diagnose.c b/diagnose.c
index 20794eb88e32b3..83e0b3fcb0ab2a 100644
--- a/diagnose.c
+++ b/diagnose.c
@@ -73,6 +73,39 @@ static int dir_file_stats(struct object_directory *object_dir, void *data)
 	return 0;
 }
 
+static void dir_stats(struct strbuf *buf, const char *path)
+{
+	DIR *dir = opendir(path);
+	struct dirent *e;
+	struct stat e_stat;
+	struct strbuf file_path = STRBUF_INIT;
+	size_t base_path_len;
+
+	if (!dir)
+		return;
+
+	strbuf_addstr(buf, "Contents of ");
+	strbuf_add_absolute_path(buf, path);
+	strbuf_addstr(buf, ":\n");
+
+	strbuf_add_absolute_path(&file_path, path);
+	strbuf_addch(&file_path, '/');
+	base_path_len = file_path.len;
+
+	while ((e = readdir(dir)) != NULL)
+		if (!is_dot_or_dotdot(e->d_name) && e->d_type == DT_REG) {
+			strbuf_setlen(&file_path, base_path_len);
+			strbuf_addstr(&file_path, e->d_name);
+			if (!stat(file_path.buf, &e_stat))
+				strbuf_addf(buf, "%-70s %16"PRIuMAX"\n",
+					    e->d_name,
+					    (uintmax_t)e_stat.st_size);
+		}
+
+	strbuf_release(&file_path);
+	closedir(dir);
+}
+
 static int count_files(struct strbuf *path)
 {
 	DIR *dir = opendir(path->buf);
@@ -186,7 +219,7 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
 	char **argv_copy = NULL;
 	int stdout_fd = -1, archiver_fd = -1;
 	char *cache_server_url = NULL, *shared_cache = NULL;
-	struct strbuf buf = STRBUF_INIT;
+	struct strbuf buf = STRBUF_INIT, path = STRBUF_INIT;
 	int res;
 	struct archive_dir archive_dirs[] = {
 		{ ".git", 0 },
@@ -258,6 +291,52 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
 		}
 	}
 
+	if (shared_cache) {
+		size_t path_len;
+
+		strbuf_reset(&buf);
+		strbuf_addf(&path, "%s/pack", shared_cache);
+		strbuf_reset(&buf);
+		strbuf_addstr(&buf, "--add-virtual-file=packs-cached.txt:");
+		dir_stats(&buf, path.buf);
+		strvec_push(&archiver_args, buf.buf);
+
+		strbuf_reset(&buf);
+		strbuf_addstr(&buf, "--add-virtual-file=objects-cached.txt:");
+		loose_objs_stats(&buf, shared_cache);
+		strvec_push(&archiver_args, buf.buf);
+
+		strbuf_reset(&path);
+		strbuf_addf(&path, "%s/info", shared_cache);
+		path_len = path.len;
+
+		if (is_directory(path.buf)) {
+			DIR *dir = opendir(path.buf);
+			struct dirent *e;
+
+			while ((e = readdir(dir))) {
+				if (!strcmp(".", e->d_name) || !strcmp("..", e->d_name))
+					continue;
+				if (e->d_type == DT_DIR)
+					continue;
+
+				strbuf_reset(&buf);
+				strbuf_addf(&buf, "--add-virtual-file=info/%s:", e->d_name);
+
+				strbuf_setlen(&path, path_len);
+				strbuf_addch(&path, '/');
+				strbuf_addstr(&path, e->d_name);
+
+				if (strbuf_read_file(&buf, path.buf, 0) < 0) {
+					res = error_errno(_("could not read '%s'"), path.buf);
+					goto diagnose_cleanup;
+				}
+				strvec_push(&archiver_args, buf.buf);
+			}
+			closedir(dir);
+		}
+	}
+
 	strvec_pushl(&archiver_args, "--prefix=",
 		     oid_to_hex(the_hash_algo->empty_tree), "--", NULL);
 

From 7b43037e9d622735633c0d5478c35ce54a3075d2 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Wed, 28 Apr 2021 13:56:16 +0200
Subject: [PATCH 157/207] scalar: only try GVFS protocol on https:// URLs

Well, technically also the http:// protocol is allowed _when testing_...

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 scalar.c | 47 ++++++++++++++++++++++++++++++++++-------------
 1 file changed, 34 insertions(+), 13 deletions(-)

diff --git a/scalar.c b/scalar.c
index 1e5cefbc0013d0..928c08e6dbe0bc 100644
--- a/scalar.c
+++ b/scalar.c
@@ -397,6 +397,13 @@ static int get_cache_server_url(struct json_iterator *it)
 	return 0;
 }
 
+static int can_url_support_gvfs(const char *url)
+{
+	return starts_with(url, "https://") ||
+		(git_env_bool("GIT_TEST_ALLOW_GVFS_VIA_HTTP", 0) &&
+		 starts_with(url, "http://"));
+}
+
 /*
  * If `cache_server_url` is `NULL`, print the list to `stdout`.
  *
@@ -408,6 +415,13 @@ static int supports_gvfs_protocol(const char *url, char **cache_server_url)
 	struct child_process cp = CHILD_PROCESS_INIT;
 	struct strbuf out = STRBUF_INIT;
 
+	/*
+	 * The GVFS protocol is only supported via https://; For testing, we
+	 * also allow http://.
+	 */
+	if (!can_url_support_gvfs(url))
+		return 0;
+
 	cp.git_cmd = 1;
 	strvec_pushl(&cp.args, "gvfs-helper", "--remote", url, "config", NULL);
 	if (!pipe_command(&cp, NULL, 0, &out, 512, NULL, 0)) {
@@ -480,19 +494,26 @@ static char *get_cache_key(const char *url)
 	struct strbuf out = STRBUF_INIT;
 	char *cache_key = NULL;
 
-	cp.git_cmd = 1;
-	strvec_pushl(&cp.args, "gvfs-helper", "--remote", url,
-		     "endpoint", "vsts/info", NULL);
-	if (!pipe_command(&cp, NULL, 0, &out, 512, NULL, 0)) {
-		char *id = NULL;
-		struct json_iterator it =
-			JSON_ITERATOR_INIT(out.buf, get_repository_id, &id);
-
-		if (iterate_json(&it) < 0)
-			warning("JSON parse error (%s)", out.buf);
-		else if (id)
-			cache_key = xstrfmt("id_%s", id);
-		free(id);
+	/*
+	 * The GVFS protocol is only supported via https://; For testing, we
+	 * also allow http://.
+	 */
+	if (can_url_support_gvfs(url)) {
+		cp.git_cmd = 1;
+		strvec_pushl(&cp.args, "gvfs-helper", "--remote", url,
+			     "endpoint", "vsts/info", NULL);
+		if (!pipe_command(&cp, NULL, 0, &out, 512, NULL, 0)) {
+			char *id = NULL;
+			struct json_iterator it =
+				JSON_ITERATOR_INIT(out.buf, get_repository_id,
+						   &id);
+
+			if (iterate_json(&it) < 0)
+				warning("JSON parse error (%s)", out.buf);
+			else if (id)
+				cache_key = xstrfmt("id_%s", id);
+			free(id);
+		}
 	}
 
 	if (!cache_key) {

From a3d1380f8cf7df8ba860b260bb3f684400e6d1bd Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Fri, 16 Apr 2021 21:43:57 +0200
Subject: [PATCH 158/207] scalar: verify that we can use a GVFS-enabled
 repository

Azure Repos does not support partial clones at the moment, but it does
support the GVFS protocol. To that end, the Microsoft fork of Git has a
`gvfs-helper` command that can optionally be used to provide essentially
the same functionality as a partial clone.

Let's verify that `scalar clone` detects that situation and enables the
GVFS helper.
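
As a quick sanity check after such a clone (the URL is illustrative;
`gvfs.sharedCache` is the setting exercised by the test below):

  scalar clone https://dev.azure.com/org/project/_git/repo
  git -C repo/src config gvfs.sharedCache   # path of the shared object cache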

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 t/t9210-scalar.sh | 157 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 157 insertions(+)

diff --git a/t/t9210-scalar.sh b/t/t9210-scalar.sh
index 956f40de504f3c..ca61027035bfd8 100755
--- a/t/t9210-scalar.sh
+++ b/t/t9210-scalar.sh
@@ -305,4 +305,161 @@ test_expect_success UNZIP 'scalar diagnose' '
 	grep "^Total: [1-9]" out
 '
 
+GIT_TEST_ALLOW_GVFS_VIA_HTTP=1
+export GIT_TEST_ALLOW_GVFS_VIA_HTTP
+
+test_set_port GIT_TEST_GVFS_PROTOCOL_PORT
+HOST_PORT=127.0.0.1:$GIT_TEST_GVFS_PROTOCOL_PORT
+PID_FILE="$(pwd)"/pid-file.pid
+SERVER_LOG="$(pwd)"/OUT.server.log
+
+test_atexit '
+	test -f "$PID_FILE" || return 0
+
+	# The server will shutdown automatically when we delete the pid-file.
+	rm -f "$PID_FILE"
+
+	test -z "$verbose$verbose_log" || {
+		echo "server log:"
+		cat "$SERVER_LOG"
+	}
+
+	# Give it a few seconds to shutdown (mainly to completely release the
+	# port before the next test start another instance and it attempts to
+	# bind to it).
+	for k in $(test_seq 5)
+	do
+		grep -q "Starting graceful shutdown" "$SERVER_LOG" &&
+		return 0 ||
+		sleep 1
+	done
+
+	echo "stop_gvfs_protocol_server: timeout waiting for server shutdown"
+	return 1
+'
+
+start_gvfs_enabled_http_server () {
+	GIT_HTTP_EXPORT_ALL=1 \
+	test-gvfs-protocol --verbose \
+		--listen=127.0.0.1 \
+		--port=$GIT_TEST_GVFS_PROTOCOL_PORT \
+		--reuseaddr \
+		--pid-file="$PID_FILE" \
+		2>"$SERVER_LOG" &
+
+	for k in 0 1 2 3 4
+	do
+		if test -f "$PID_FILE"
+		then
+			return 0
+		fi
+		sleep 1
+	done
+	return 1
+}
+
+test_expect_success 'start GVFS-enabled server' '
+	git config uploadPack.allowFilter false &&
+	git config uploadPack.allowAnySHA1InWant false &&
+	start_gvfs_enabled_http_server
+'
+
+test_expect_success '`scalar clone` with GVFS-enabled server' '
+	: the fake cache server requires fake authentication &&
+	git config --global core.askPass true &&
+
+	# We must set credential.interactive=true to bypass a setting
+	# in "scalar clone" that disables interactive credentials during
+	# an unattended command.
+	scalar -c credential.interactive=true clone --single-branch -- http://$HOST_PORT/ using-gvfs &&
+
+	: verify that the shared cache has been configured &&
+	cache_key="url_$(printf "%s" http://$HOST_PORT/ |
+		tr A-Z a-z |
+		test-tool sha1)" &&
+	echo "$(pwd)/using-gvfs/.scalarCache/$cache_key" >expect &&
+	git -C using-gvfs/src config gvfs.sharedCache >actual &&
+	test_cmp expect actual &&
+
+	second=$(git rev-parse --verify second:second.t) &&
+	(
+		cd using-gvfs/src &&
+		test_path_is_missing 1/2 &&
+		GIT_TRACE=$PWD/trace.txt git cat-file blob $second >actual &&
+		: verify that the gvfs-helper was invoked to fetch it &&
+		test_grep gvfs-helper trace.txt &&
+		echo "second" >expect &&
+		test_cmp expect actual
+	)
+'
+
+test_expect_success '`scalar register` parallel to worktree is unsupported' '
+	git init test-repo/src &&
+	mkdir -p test-repo/out &&
+
+	: parallel to worktree is unsupported &&
+	test_must_fail env GIT_CEILING_DIRECTORIES="$(pwd)" \
+		scalar register test-repo/out &&
+	test_must_fail git config --get --global --fixed-value \
+		maintenance.repo "$(pwd)/test-repo/src" &&
+	scalar list >scalar.repos &&
+	! grep -F "$(pwd)/test-repo/src" scalar.repos &&
+
+	: at enlistment root, i.e. parent of repository, is supported &&
+	GIT_CEILING_DIRECTORIES="$(pwd)" scalar register test-repo &&
+	git config --get --global --fixed-value \
+		maintenance.repo "$(pwd)/test-repo/src" &&
+	scalar list >scalar.repos &&
+	grep -F "$(pwd)/test-repo/src" scalar.repos &&
+
+	: scalar delete properly unregisters enlistment &&
+	scalar delete test-repo &&
+	test_must_fail git config --get --global --fixed-value \
+		maintenance.repo "$(pwd)/test-repo/src" &&
+	scalar list >scalar.repos &&
+	! grep -F "$(pwd)/test-repo/src" scalar.repos
+'
+
+test_expect_success '`scalar register` & `unregister` with existing repo' '
+	git init existing &&
+	scalar register existing &&
+	git config --get --global --fixed-value \
+		maintenance.repo "$(pwd)/existing" &&
+	scalar list >scalar.repos &&
+	grep -F "$(pwd)/existing" scalar.repos &&
+	scalar unregister existing &&
+	test_must_fail git config --get --global --fixed-value \
+		maintenance.repo "$(pwd)/existing" &&
+	scalar list >scalar.repos &&
+	! grep -F "$(pwd)/existing" scalar.repos
+'
+
+test_expect_success '`scalar unregister` with existing repo, deleted .git' '
+	scalar register existing &&
+	rm -rf existing/.git &&
+	scalar unregister existing &&
+	test_must_fail git config --get --global --fixed-value \
+		maintenance.repo "$(pwd)/existing" &&
+	scalar list >scalar.repos &&
+	! grep -F "$(pwd)/existing" scalar.repos
+'
+
+test_expect_success '`scalar register` existing repo with `src` folder' '
+	git init existing &&
+	mkdir -p existing/src &&
+	scalar register existing/src &&
+	scalar list >scalar.repos &&
+	grep -F "$(pwd)/existing" scalar.repos &&
+	scalar unregister existing &&
+	scalar list >scalar.repos &&
+	! grep -F "$(pwd)/existing" scalar.repos
+'
+
+test_expect_success '`scalar delete` with existing repo' '
+	git init existing &&
+	scalar register existing &&
+	scalar delete existing &&
+	test_path_is_missing existing
+'
+
 test_done

From 5ab2cbf04e61aef4d5e62ab5d4b233912c0ad525 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Fri, 23 Apr 2021 16:12:33 +0200
Subject: [PATCH 159/207] scalar: add the `cache-server` command

This allows setting the GVFS-enabled cache server, or listing the one(s)
associated with the remote repository.
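
In short (the URLs are illustrative):

  scalar cache-server --get                 # show the configured URL
  scalar cache-server --set https://cache.example.com/
  scalar cache-server --list origin         # query the remote's list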

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 Documentation/scalar.txt | 22 ++++++++++
 scalar.c                 | 95 +++++++++++++++++++++++++++++++++++++++-
 t/t9210-scalar.sh        | 34 ++++++++++++++
 3 files changed, 150 insertions(+), 1 deletion(-)

diff --git a/Documentation/scalar.txt b/Documentation/scalar.txt
index 155df8a5c7f437..cea68a23d5ee6d 100644
--- a/Documentation/scalar.txt
+++ b/Documentation/scalar.txt
@@ -18,6 +18,7 @@ scalar run ( all | config | commit-graph | fetch | loose-objects | pack-files )
 scalar reconfigure [ --all | <enlistment> ]
 scalar diagnose [<enlistment>]
 scalar delete <enlistment>
+scalar cache-server ( --get | --set <url> | --list [<remote>] ) [<enlistment>]
 
 DESCRIPTION
 -----------
@@ -182,6 +183,27 @@ delete <enlistment>::
 	This subcommand lets you delete an existing Scalar enlistment from your
 	local file system, unregistering the repository.
 
+Cache-server
+~~~~~~~~~~~~
+
+cache-server ( --get | --set <url> | --list [<remote>] ) [<enlistment>]::
+    This command lets you query or set the GVFS-enabled cache server used
+    to fetch missing objects.
+
+--get::
+    This is the default command mode: query the currently-configured cache
+    server URL, if any.
+
+--list::
+    Access the `gvfs/info` endpoint of the specified remote (default:
+    `origin`) to figure out which cache servers are available, if any.
++
+In contrast to the `--get` command mode (which only accesses the local
+repository), this command mode triggers a request via the network that
+potentially requires authentication. If authentication is required, the
+configured credential helper is employed (see linkgit:git-credential[1]
+for details).
+
 SEE ALSO
 --------
 linkgit:git-clone[1], linkgit:git-maintenance[1].
diff --git a/scalar.c b/scalar.c
index 928c08e6dbe0bc..8e62bc3d5a889c 100644
--- a/scalar.c
+++ b/scalar.c
@@ -23,6 +23,7 @@
 #include "wrapper.h"
 #include "trace2.h"
 #include "json-parser.h"
+#include "remote.h"
 #include "path.h"
 
 static int is_unattended(void) {
@@ -361,6 +362,21 @@ static int set_config(const char *fmt, ...)
 	return res;
 }
 
+static int list_cache_server_urls(struct json_iterator *it)
+{
+	const char *p;
+	char *q;
+	long l;
+
+	if (it->type == JSON_STRING &&
+	    skip_iprefix(it->key.buf, ".CacheServers[", &p) &&
+	    (l = strtol(p, &q, 10)) >= 0 && p != q &&
+	    !strcasecmp(q, "].Url"))
+		printf("#%ld: %s\n", l, it->string_value.buf);
+
+	return 0;
+}
+
 /* Find N for which .CacheServers[N].GlobalDefault == true */
 static int get_cache_server_index(struct json_iterator *it)
 {
@@ -430,6 +446,18 @@ static int supports_gvfs_protocol(const char *url, char **cache_server_url)
 			JSON_ITERATOR_INIT(out.buf, get_cache_server_index, &l);
 		struct cache_server_url_data data = { .url = NULL };
 
+		if (!cache_server_url) {
+			it.fn = list_cache_server_urls;
+			if (iterate_json(&it) < 0) {
+				reset_iterator(&it);
+				strbuf_release(&out);
+				return error("JSON parse error");
+			}
+			reset_iterator(&it);
+			strbuf_release(&out);
+			return 0;
+		}
+
 		if (iterate_json(&it) < 0) {
 			reset_iterator(&it);
 			strbuf_release(&out);
@@ -450,7 +478,9 @@ static int supports_gvfs_protocol(const char *url, char **cache_server_url)
 		return 1;
 	}
 	strbuf_release(&out);
-	return 0; /* error out quietly */
+	/* error out quietly, unless we wanted to list URLs */
+	return cache_server_url ?
+		0 : error(_("Could not access gvfs/config endpoint"));
 }
 
 static char *default_cache_root(const char *root)
@@ -1292,6 +1322,68 @@ static int cmd_version(int argc, const char **argv)
 	return 0;
 }
 
+static int cmd_cache_server(int argc, const char **argv)
+{
+	int get = 0;
+	const char *set = NULL, *list = NULL;
+	struct option options[] = {
+		OPT_CMDMODE(0, "get", &get,
+			    N_("get the configured cache-server URL"), 1),
+		OPT_STRING(0, "set", &set, N_("URL"),
+			   N_("configure the cache-server to use")),
+		OPT_STRING(0, "list", &list, N_("remote"),
+			   N_("list the possible cache-server URLs")),
+		OPT_END(),
+	};
+	const char * const usage[] = {
+		N_("scalar cache-server "
+		   "[--get | --set <url> | --list <remote>] [<enlistment>]"),
+		NULL
+	};
+	int res = 0;
+
+	argc = parse_options(argc, argv, NULL, options,
+			     usage, 0);
+
+	if (get + !!set + !!list > 1)
+		usage_msg_opt(_("--get/--set/--list are mutually exclusive"),
+			      usage, options);
+
+	setup_enlistment_directory(argc, argv, usage, options, NULL);
+
+	if (list) {
+		const char *name = list, *url = list;
+
+		if (!strchr(list, '/')) {
+			struct remote *remote;
+
+			/* Look up remote */
+			remote = remote_get(list);
+			if (!remote) {
+				error("no such remote: '%s'", name);
+				return 1;
+			}
+			if (!remote->url.nr) {
+				return error(_("remote '%s' has no URLs"),
+					     name);
+			}
+			url = remote->url.v[0];
+		}
+		res = supports_gvfs_protocol(url, NULL);
+	} else if (set) {
+		res = set_config("gvfs.cache-server=%s", set);
+	} else {
+		char *url = NULL;
+
+		printf("Using cache server: %s\n",
+		       git_config_get_string("gvfs.cache-server", &url) ?
+		       "(undefined)" : url);
+		free(url);
+	}
+
+	return !!res;
+}
+
 static struct {
 	const char *name;
 	int (*fn)(int, const char **);
@@ -1306,6 +1398,7 @@ static struct {
 	{ "help", cmd_help },
 	{ "version", cmd_version },
 	{ "diagnose", cmd_diagnose },
+	{ "cache-server", cmd_cache_server },
 	{ NULL, NULL},
 };
 
diff --git a/t/t9210-scalar.sh b/t/t9210-scalar.sh
index ca61027035bfd8..fc03773cdfcd39 100755
--- a/t/t9210-scalar.sh
+++ b/t/t9210-scalar.sh
@@ -462,4 +462,38 @@ test_expect_success '`scalar delete` with existing repo' '
 	test_path_is_missing existing
 '
 
+test_expect_success 'scalar cache-server basics' '
+	repo=with-cache-server &&
+	git init $repo &&
+	scalar cache-server --get $repo >out &&
+	cat >expect <<-EOF &&
+	Using cache server: (undefined)
+	EOF
+	test_cmp expect out &&
+
+	scalar cache-server --set http://fake-server/url $repo &&
+	test_cmp_config -C $repo http://fake-server/url gvfs.cache-server &&
+	scalar delete $repo &&
+	test_path_is_missing $repo
+'
+
+test_expect_success 'scalar cache-server list URL' '
+	repo=with-real-gvfs &&
+	git init $repo &&
+	git -C $repo remote add origin http://$HOST_PORT/ &&
+	scalar cache-server --list origin $repo >out &&
+
+	cat >expect <<-EOF &&
+	#0: http://$HOST_PORT/servertype/cache
+	EOF
+
+	test_cmp expect out &&
+
+	test_must_fail scalar -C $repo cache-server --list 2>err &&
+	grep "requires a value" err &&
+
+	scalar delete $repo &&
+	test_path_is_missing $repo
+'
+
 test_done

From 0f6df8bbc4ef52f78e1e57ab9a656287f2d9f54f Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Wed, 12 May 2021 17:59:58 +0200
Subject: [PATCH 160/207] scalar: add a test toggle to skip accessing the
 vsts/info endpoint

In Scalar's functional tests, we do not do anything with authentication.
Therefore, we do want to avoid accessing the `vsts/info` endpoint
because it requires authentication even on otherwise public
repositories.

Let's introduce the environment variable `SCALAR_TEST_SKIP_VSTS_INFO`
which can be set to `true` to simply skip that step (and force the
`url_*` style repository IDs instead of `id_*` whenever possible).
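
A sketch of how a test would opt out of the `vsts/info` lookup (the URL
is illustrative); the cache key then falls back to the `url_*` form:

  SCALAR_TEST_SKIP_VSTS_INFO=true \
  scalar clone https://dev.azure.com/org/project/_git/repo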

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 scalar.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/scalar.c b/scalar.c
index 8e62bc3d5a889c..c16b1c3f2ca710 100644
--- a/scalar.c
+++ b/scalar.c
@@ -528,7 +528,8 @@ static char *get_cache_key(const char *url)
 	 * The GVFS protocol is only supported via https://; For testing, we
 	 * also allow http://.
 	 */
-	if (can_url_support_gvfs(url)) {
+	if (!git_env_bool("SCALAR_TEST_SKIP_VSTS_INFO", 0) &&
+	    can_url_support_gvfs(url)) {
 		cp.git_cmd = 1;
 		strvec_pushl(&cp.args, "gvfs-helper", "--remote", url,
 			     "endpoint", "vsts/info", NULL);

From 31d933b64e088400f3c65f3b0f62a4452d8406e5 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Tue, 25 Jan 2022 23:49:21 +0100
Subject: [PATCH 161/207] scalar: adjust documentation to the microsoft/git
 fork

Scalar in Microsoft's Git fork can do a little more than Scalar in
upstream Git: it supports the GVFS protocol, so that Scalar can clone
from Azure DevOps.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 contrib/scalar/docs/getting-started.md | 24 +++++++++++++++----
 contrib/scalar/docs/index.md           | 14 +++++++----
 contrib/scalar/docs/philosophy.md      | 33 +++++++++++++++-----------
 contrib/scalar/docs/troubleshooting.md | 20 ++++++++++++++++
 4 files changed, 68 insertions(+), 23 deletions(-)

diff --git a/contrib/scalar/docs/getting-started.md b/contrib/scalar/docs/getting-started.md
index ef7ea07b0f948c..d5125330320d2c 100644
--- a/contrib/scalar/docs/getting-started.md
+++ b/contrib/scalar/docs/getting-started.md
@@ -18,8 +18,9 @@ Creating a new Scalar clone
 ---------------------------------------------------
 
 The `clone` verb creates a local enlistment of a remote repository using the
-partial clone feature available e.g. on GitHub.
-
+partial clone feature available e.g. on GitHub, or using the
+[GVFS protocol](https://github.com/microsoft/VFSForGit/blob/HEAD/Protocol.md),
+such as Azure Repos.
 
 ```
 scalar clone [options] <url> [<dir>]
@@ -68,11 +69,26 @@ in `<path>`.
 These options allow a user to customize their initial enlistment.
 
 * `--full-clone`: If specified, do not initialize the sparse-checkout feature.
-  All files will be present in your `src` directory. This uses a Git partial
-  clone: blobs are downloaded on demand.
+  All files will be present in your `src` directory. This behaves very similar
+  to a Git partial clone in that blobs are downloaded on demand. However, it
+  will use the GVFS protocol to download all Git objects.
+
+* `--cache-server-url=<url>`: If specified, set the intended cache server to
+  the specified `<url>`. All object queries will use the GVFS protocol to this
+  `<url>` instead of the origin remote. If the remote supplies a list of
+  cache servers via the `<url>/gvfs/config` endpoint, then the `clone` command
+  will select a nearby cache server from that list.
 
 * `--branch=<ref>`: Specify the branch to checkout after clone.
 
+* `--local-cache-path=<path>`: Use this option to override the path for the
+  local Scalar cache. If not specified, then Scalar will select a default
+  path to share objects with your other enlistments. On Windows, this path
+  is a subdirectory of `<Volume>:\.scalarCache\`. On Mac, this path is a
+  subdirectory of `~/.scalarCache/`. The default cache path is recommended so
+  multiple enlistments of the same remote repository share objects on the
+  same device.
+
 ### Advanced Options
 
 The options below are not intended for use by a typical user. These are
diff --git a/contrib/scalar/docs/index.md b/contrib/scalar/docs/index.md
index f9f5ab06e09253..4f56e2b0ebbac6 100644
--- a/contrib/scalar/docs/index.md
+++ b/contrib/scalar/docs/index.md
@@ -28,10 +28,14 @@ these features for that repo (except partial clone) and start running suggested
 maintenance in the background using
 [the `git maintenance` feature](https://git-scm.com/docs/git-maintenance).
 
-Repos cloned with the `scalar clone` command use partial clone to significantly
-reduce the amount of data required to get started using a repository. By
-delaying all blob downloads until they are required, Scalar allows you to work
-with very large repositories quickly.
+Repos cloned with the `scalar clone` command use partial clone or the
+[GVFS protocol](https://github.com/microsoft/VFSForGit/blob/HEAD/Protocol.md)
+to significantly reduce the amount of data required to get started
+using a repository. By delaying all blob downloads until they are required,
+Scalar allows you to work with very large repositories quickly. The GVFS
+protocol allows a network of _cache servers_ to serve objects with lower
+latency and higher throughput. The cache servers also reduce load on the
+central server.
 
 Documentation
 -------------
@@ -42,7 +46,7 @@ Documentation
 
 * [Troubleshooting](troubleshooting.md):
   Collect diagnostic information or update custom settings. Includes
-  `scalar diagnose`.
+  `scalar diagnose` and `scalar cache-server`.
 
 * [The Philosophy of Scalar](philosophy.md): Why does Scalar work the way
   it does, and how do we make decisions about its future?
diff --git a/contrib/scalar/docs/philosophy.md b/contrib/scalar/docs/philosophy.md
index 51486a75e41f0d..e3dfa025a2504c 100644
--- a/contrib/scalar/docs/philosophy.md
+++ b/contrib/scalar/docs/philosophy.md
@@ -13,22 +13,27 @@ Scalar only to configure those new settings. In particular, we ported
 features like background maintenance to Git to make Scalar simpler and
 make Git more powerful.
 
-Services such as GitHub support partial clone , a standard adopted by the Git
-project to download only part of the Git objects when cloning, and fetching
-further objects on demand. If your hosting service supports partial clone, then
-we absolutely recommend it as a way to greatly speed up your clone and fetch
-times and to reduce how much disk space your Git repository requires. Scalar
-will help with this!
+Scalar ships inside [a custom version of Git][microsoft-git], but we are
+working to make it available in other forks of Git. The only feature
+that is not intended to ever reach the standard Git client is Scalar's use
+of [the GVFS Protocol][gvfs-protocol], which is essentially an older
+version of [Git's partial clone feature](https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/)
+that was available first in Azure Repos. Services such as GitHub support
+only partial clone instead of the GVFS protocol because that is the
+standard adopted by the Git project. If your hosting service supports
+partial clone, then we absolutely recommend it as a way to greatly speed
+up your clone and fetch times and to reduce how much disk space your Git
+repository requires. Scalar will help with this!
 
-Most of the value of Scalar can be found in the core Git client. However, most
-of the advanced features that really optimize Git's performance are off by
-default for compatibility reasons. To really take advantage of Git's latest and
-greatest features, you either need to study the [`git config`
-documentation](https://git-scm.com/docs/git-config) and regularly read [the Git
-release notes](https://github.com/git/git/tree/master/Documentation/RelNotes).
+If you don't use the GVFS Protocol, then most of the value of Scalar can
+be found in the core Git client. However, most of the advanced features
+that really optimize Git's performance are off by default for compatibility
+reasons. To really take advantage of Git's latest and greatest features,
+you either need to study the [`git config` documentation](https://git-scm.com/docs/git-config)
+and regularly read [the Git release notes](https://github.com/git/git/tree/master/Documentation/RelNotes).
 Even if you do all that work and customize your Git settings on your machines,
-you likely will want to share those settings with other team members. Or, you
-can just use Scalar!
+you likely will want to share those settings with other team members.
+Or, you can just use Scalar!
 
 Using `scalar register` on an existing Git repository will give you these
 benefits:
diff --git a/contrib/scalar/docs/troubleshooting.md b/contrib/scalar/docs/troubleshooting.md
index 8ec56ad437ff09..c54d2438f22523 100644
--- a/contrib/scalar/docs/troubleshooting.md
+++ b/contrib/scalar/docs/troubleshooting.md
@@ -18,3 +18,23 @@ files for that repository. This includes:
 
 As the `diagnose` command completes, it provides the path of the resulting
 zip file. This zip can be attached to bug reports to make the analysis easier.
+
+Modifying Configuration Values
+------------------------------
+
+The Scalar-specific configuration is only available for repos using the
+GVFS protocol.
+
+### Cache Server URL
+
+When using an enlistment cloned with `scalar clone` and the GVFS protocol,
+you will have a value called the cache server URL. Cache servers are a feature
+of the GVFS protocol to provide low-latency access to the on-demand object
+requests. This modifies the `gvfs.cache-server` setting in your local Git config
+file.
+
+Run `scalar cache-server --get` to see the current cache server.
+
+Run `scalar cache-server --list` to see the available cache server URLs.
+
+Run `scalar cache-server --set=<url>` to set your cache server to `<url>`.

From 30c8cc615ac5d547ffd585172f496cda2a4c7cf2 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Mon, 21 Jun 2021 08:21:48 -0400
Subject: [PATCH 162/207] scalar: enable untracked cache unconditionally

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 scalar.c | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/scalar.c b/scalar.c
index c16b1c3f2ca710..35c363b853979a 100644
--- a/scalar.c
+++ b/scalar.c
@@ -166,23 +166,7 @@ static int set_recommended_config(int reconfigure)
 		{ "core.FSCache", "true", 1 },
 		{ "core.multiPackIndex", "true", 1 },
 		{ "core.preloadIndex", "true", 1 },
-#ifndef WIN32
 		{ "core.untrackedCache", "true", 1 },
-#else
-		/*
-		 * Unfortunately, Scalar's Functional Tests demonstrated
-		 * that the untracked cache feature is unreliable on Windows
-		 * (which is a bummer because that platform would benefit the
-		 * most from it). For some reason, freshly created files seem
-		 * not to update the directory's `lastModified` time
-		 * immediately, but the untracked cache would need to rely on
-		 * that.
-		 *
-		 * Therefore, with a sad heart, we disable this very useful
-		 * feature on Windows.
-		 */
-		{ "core.untrackedCache", "false", 1 },
-#endif
 		{ "core.logAllRefUpdates", "true", 1 },
 		{ "credential.https://dev.azure.com.useHttpPath", "true", 1 },
 		{ "credential.validate", "false", 1 }, /* GCM4W-only */

From 9ae4ebebebb178d3a358c8ab9f49f5599c08a9a9 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Tue, 24 Aug 2021 18:01:47 +0200
Subject: [PATCH 163/207] scalar: parse `clone --no-fetch-commits-and-trees`
 for backwards compatibility

This option does not do anything anymore, though.
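
In other words, the flag is accepted and ignored, so existing wrappers
keep working (the URL is illustrative):

  scalar clone --no-fetch-commits-and-trees https://example.com/repo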

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 scalar.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/scalar.c b/scalar.c
index 35c363b853979a..c193566317c849 100644
--- a/scalar.c
+++ b/scalar.c
@@ -688,6 +688,7 @@ static int init_shared_object_cache(const char *url,
 
 static int cmd_clone(int argc, const char **argv)
 {
+	int dummy = 0;
 	const char *branch = NULL;
 	int full_clone = 0, single_branch = 0, show_progress = isatty(2);
 	int src = 1, tags = 1;
@@ -711,6 +712,8 @@ static int cmd_clone(int argc, const char **argv)
 		OPT_STRING(0, "local-cache-path", &local_cache_root,
 			   N_("<path>"),
 			   N_("override the path for the local Scalar cache")),
+		OPT_HIDDEN_BOOL(0, "no-fetch-commits-and-trees",
+				&dummy, N_("no longer used")),
 		OPT_END(),
 	};
 	const char * const clone_usage[] = {

From 12d9968afeecdec0f2c2cceeaec40e6354b1069f Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Tue, 10 May 2022 00:29:55 +0200
Subject: [PATCH 164/207] scalar diagnose: accommodate Scalar's Functional
 Tests

Those tests specifically verify that the `.zip` file path is shown on
`stdout`. Let's do that again, under the assumption that there are
scripts out there that rely on this behavior.
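
For example, a wrapper script along these lines (a sketch, not taken
from Scalar's test suite) depends on the path being written to stdout:

  zip_path="$(scalar diagnose | sed -n "s/.*captured in '\(.*\)'/\1/p")"
  test -f "$zip_path"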

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 diagnose.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/diagnose.c b/diagnose.c
index 83e0b3fcb0ab2a..e367255be178b0 100644
--- a/diagnose.c
+++ b/diagnose.c
@@ -350,10 +350,13 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
 		goto diagnose_cleanup;
 	}
 
-	fprintf(stderr, "\n"
-		"Diagnostics complete.\n"
-		"All of the gathered info is captured in '%s'\n",
-		zip_path->buf);
+	strbuf_reset(&buf);
+	strbuf_addf(&buf, "\n"
+		    "Diagnostics complete.\n"
+		    "All of the gathered info is captured in '%s'\n",
+		    zip_path->buf);
+	write_or_die(stdout_fd, buf.buf, buf.len);
+	write_or_die(2, buf.buf, buf.len);
 
 diagnose_cleanup:
 	if (archiver_fd >= 0) {

From c7989a230ed90b0c19ce80cb432c9c6e2f120574 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Wed, 1 May 2024 10:08:11 -0400
Subject: [PATCH 165/207] scalar: make GVFS Protocol a forced choice

In the Office monorepo, we've recently seen an uptick in issues with
`scalar clone`. These issues didn't make sense at first: it looked as if
the users were not running `microsoft/git` at all but rather the
upstream version's `scalar clone`, because instead of using GVFS cache
servers, their clones were attempting to use the Git protocol's partial
clone (which times out).

It turns out that what's actually happening is that some network issue
is causing the connection with Azure DevOps to error out during the
`/gvfs/config` request. In the Git traces, we see the following error
during this request:

  (curl:56) Failure when receiving data from the peer [transient]

This doesn't happen 100% of the time, but it has become frequent enough
to cause problems for a variety of users.

The solution proposed here is to remove the fallback mechanism and
instead make the use of the GVFS protocol an explicit choice. To avoid
significant disruption to Azure DevOps customers (who, to my
understanding, make up the vast majority of `microsoft/git` users of
`scalar clone`), the default value is inferred from the clone URL.

This fallback mechanism was first implemented in the C# version of
Scalar in microsoft/scalar#339. This was an attempt to make the Scalar
client interesting to non-Azure DevOps customers, especially as GitHub
was about to make partial clones available. Now that the
`scalar` client is available upstream, users don't need the GVFS-enabled
version to get these benefits.

In addition, this will resolve #384 since those requests won't happen
against non-ADO URLs unless requested.
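
With this change the choice is explicit (URLs are illustrative):

  # force the GVFS protocol; fail if gvfs/config cannot be reached
  scalar clone --gvfs-protocol https://example.com/fabrikam

  # skip the GVFS protocol and use a Git partial clone instead
  scalar clone --no-gvfs-protocol https://dev.azure.com/org/repo

  # no flag: inferred from the URL (dev.azure.com/visualstudio.com => GVFS)
  scalar clone https://dev.azure.com/org/repo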

Signed-off-by: Derrick Stolee <stolee@gmail.com>
---
 Documentation/scalar.txt | 20 ++++++++++++++++++++
 scalar.c                 | 19 ++++++++++++++++---
 t/t9210-scalar.sh        | 25 ++++++++++++++++++++++++-
 3 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/Documentation/scalar.txt b/Documentation/scalar.txt
index cea68a23d5ee6d..17cc2d500ca40b 100644
--- a/Documentation/scalar.txt
+++ b/Documentation/scalar.txt
@@ -110,6 +110,26 @@ clone), and `~/.scalarCache` on macOS.
     Retrieve missing objects from the specified remote, which is expected to
     understand the GVFS protocol.
 
+--[no-]gvfs-protocol::
+	When cloning from a `<url>` with either `dev.azure.com` or
+	`visualstudio.com` in the name, `scalar clone` will attempt to use the GVFS
+	Protocol to access Git objects, specifically from a cache server when
+	available, and will fail to clone if there is an error over that protocol.
+
+	To enable the GVFS Protocol regardless of the origin `<url>`, use
+	`--gvfs-protocol`. This will cause `scalar clone` to fail when the origin
+	server fails to provide a valid response to the `gvfs/config` endpoint.
+
+	To disable the GVFS Protocol, use `--no-gvfs-protocol` and `scalar clone`
+	will only use the Git protocol, starting with a partial clone. This can be
+	helpful if your `<url>` points to Azure Repos but the repository does not
+	have GVFS cache servers enabled. It is likely more efficient to use its
+	partial clone functionality through the Git protocol.
+
+	Previous versions of `scalar clone` could fall back to a partial clone over
+	the Git protocol if there is any issue gathering GVFS configuration
+	information from the origin server.
+
 List
 ~~~~
 
diff --git a/scalar.c b/scalar.c
index c193566317c849..072d725e98034a 100644
--- a/scalar.c
+++ b/scalar.c
@@ -694,6 +694,8 @@ static int cmd_clone(int argc, const char **argv)
 	int src = 1, tags = 1;
 	const char *cache_server_url = NULL, *local_cache_root = NULL;
 	char *default_cache_server_url = NULL, *local_cache_root_abs = NULL;
+	int gvfs_protocol = -1;
+
 	struct option clone_options[] = {
 		OPT_STRING('b', "branch", &branch, N_("<branch>"),
 			   N_("branch to checkout after clone")),
@@ -706,6 +708,8 @@ static int cmd_clone(int argc, const char **argv)
 			 N_("create repository within 'src' directory")),
 		OPT_BOOL(0, "tags", &tags,
 			 N_("specify if tags should be fetched during clone")),
+		OPT_BOOL(0, "gvfs-protocol", &gvfs_protocol,
+			 N_("force enable (or disable) the GVFS Protocol")),
 		OPT_STRING(0, "cache-server-url", &cache_server_url,
 			   N_("<url>"),
 			   N_("the url or friendly name of the cache server")),
@@ -725,7 +729,6 @@ static int cmd_clone(int argc, const char **argv)
 	char *enlistment = NULL, *dir = NULL;
 	struct strbuf buf = STRBUF_INIT;
 	int res;
-	int gvfs_protocol;
 
 	argc = parse_options(argc, argv, NULL, clone_options, clone_usage, 0);
 
@@ -838,8 +841,18 @@ static int cmd_clone(int argc, const char **argv)
 		goto cleanup;
 	}
 
-	gvfs_protocol = cache_server_url ||
-			supports_gvfs_protocol(url, &default_cache_server_url);
+	/* Is --[no-]gvfs-protocol unspecified? Infer from url. */
+	if (gvfs_protocol < 0) {
+		if (cache_server_url ||
+		    strstr(url, "dev.azure.com") ||
+		    strstr(url, "visualstudio.com"))
+			gvfs_protocol = 1;
+		else
+			gvfs_protocol = 0;
+	}
+
+	if (gvfs_protocol && !supports_gvfs_protocol(url, &default_cache_server_url))
+		die(_("failed to contact server via GVFS Protocol"));
 
 	if (gvfs_protocol) {
 		if ((res = init_shared_object_cache(url, local_cache_root)))
diff --git a/t/t9210-scalar.sh b/t/t9210-scalar.sh
index fc03773cdfcd39..c070dd81f4cb10 100755
--- a/t/t9210-scalar.sh
+++ b/t/t9210-scalar.sh
@@ -371,7 +371,12 @@ test_expect_success '`scalar clone` with GVFS-enabled server' '
 	# We must set credential.interactive=true to bypass a setting
 	# in "scalar clone" that disables interactive credentials during
 	# an unattended command.
-	scalar -c credential.interactive=true clone --single-branch -- http://$HOST_PORT/ using-gvfs &&
+	GIT_TRACE2_EVENT="$(pwd)/clone-trace-with-gvfs" scalar \
+		-c credential.interactive=true \
+		clone --gvfs-protocol \
+		--single-branch -- http://$HOST_PORT/ using-gvfs &&
+
+	grep "GET/config(main)" <clone-trace-with-gvfs &&
 
 	: verify that the shared cache has been configured &&
 	cache_key="url_$(printf "%s" http://$HOST_PORT/ |
@@ -393,6 +398,24 @@ test_expect_success '`scalar clone` with GVFS-enabled server' '
 	)
 '
 
+test_expect_success '`scalar clone --no-gvfs-protocol` skips gvfs/config' '
+	# the fake cache server requires fake authentication &&
+	git config --global core.askPass true &&
+
+	# We must set credential.interactive=true to bypass a setting
+	# in "scalar clone" that disables interactive credentials during
+	# an unattended command.
+	GIT_TRACE2_EVENT="$(pwd)/clone-trace-no-gvfs" scalar \
+		-c credential.interactive=true \
+		clone --no-gvfs-protocol \
+		--single-branch -- http://$HOST_PORT/ skipping-gvfs &&
+
+	! grep "GET/config(main)" <clone-trace-no-gvfs &&
+	! git -C skipping-gvfs/src config core.gvfs &&
+
+	test_config -C skipping-gvfs/src remote.origin.partialclonefilter blob:none
+'
+
 test_expect_success '`scalar register` parallel to worktree is unsupported' '
 	git init test-repo/src &&
 	mkdir -p test-repo/out &&

From 0ca5b50c64fa0f81734a84dcab49d5d21e3d2e05 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Tue, 8 Jun 2021 09:43:36 +0200
Subject: [PATCH 166/207] ci: run Scalar's Functional Tests

Scalar's Functional Test suite is pretty comprehensive, and caught more
than just one bug in the built-in FSMonitor that was missed by Git's own
test suite.

To benefit from this test suite, automatically run it on the `vfs-*`
and `features/*` branches.

Note: for simplicity, we're building Git from scratch in all matrix
jobs.

Also note: for speed, we are using `git-sdk-64-minimal`, even though it
lacks the `/bin/install` that we need to install Git's files; we provide
a minimal shell script shim instead. Also, we do not need to bother with
the Tcl/Tk parts, therefore we skip them, too.

Finally, we use GIT_FORCE_UNTRACKED_CACHE in the functional tests to
give the untracked cache a thorough workout.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 .github/workflows/scalar-functional-tests.yml | 220 ++++++++++++++++++
 1 file changed, 220 insertions(+)
 create mode 100644 .github/workflows/scalar-functional-tests.yml

diff --git a/.github/workflows/scalar-functional-tests.yml b/.github/workflows/scalar-functional-tests.yml
new file mode 100644
index 00000000000000..a5946bc33939d6
--- /dev/null
+++ b/.github/workflows/scalar-functional-tests.yml
@@ -0,0 +1,220 @@
+name: Scalar Functional Tests
+
+env:
+  SCALAR_REPOSITORY: microsoft/scalar
+  SCALAR_REF: main
+  DEBUG_WITH_TMATE: false
+  SCALAR_TEST_SKIP_VSTS_INFO: true
+
+on:
+  push:
+    branches: [ vfs-*, tentative/vfs-* ]
+  pull_request:
+    branches: [ vfs-*, features/* ]
+
+jobs:
+  scalar:
+    name: "Scalar Functional Tests"
+
+    strategy:
+      fail-fast: false
+      matrix:
+        # Order by runtime (in descending order)
+        os: [windows-2019, macos-13, ubuntu-20.04, ubuntu-22.04]
+        # Scalar.NET used to be tested using `features: [false, experimental]`
+        # But currently, Scalar/C ignores `feature.scalar` altogether, so let's
+        # save some electrons and run only one of them...
+        features: [ignored]
+        exclude:
+          # The built-in FSMonitor is not (yet) supported on Linux
+          - os: ubuntu-20.04
+            features: experimental
+          - os: ubuntu-22.04
+            features: experimental
+    runs-on: ${{ matrix.os }}
+
+    env:
+      BUILD_FRAGMENT: bin/Release/netcoreapp3.1
+      GIT_FORCE_UNTRACKED_CACHE: 1
+
+    steps:
+      - name: Check out Git's source code
+        uses: actions/checkout@v4
+
+      - name: Setup build tools on Windows
+        if: runner.os == 'Windows'
+        uses: git-for-windows/setup-git-for-windows-sdk@v1
+
+      - name: Provide a minimal `install` on Windows
+        if: runner.os == 'Windows'
+        shell: bash
+        run: |
+          test -x /usr/bin/install ||
+          tr % '\t' >/usr/bin/install <<-\EOF
+          #!/bin/sh
+
+          cmd=cp
+          while test $# != 0
+          do
+          %case "$1" in
+          %-d) cmd="mkdir -p";;
+          %-m) shift;; # ignore mode
+          %*) break;;
+          %esac
+          %shift
+          done
+
+          exec $cmd "$@"
+          EOF
+
+      - name: Install build dependencies for Git (Linux)
+        if: runner.os == 'Linux'
+        run: |
+          sudo apt-get update
+          sudo apt-get -q -y install libssl-dev libcurl4-openssl-dev gettext
+
+      - name: Build and install Git
+        shell: bash
+        env:
+          NO_TCLTK: Yup
+        run: |
+          # We do require a VFS version
+          def_ver="$(sed -n 's/DEF_VER=\(.*vfs.*\)/\1/p' GIT-VERSION-GEN)"
+          test -n "$def_ver"
+
+          # Ensure that `git version` reflects DEF_VER
+          case "$(git describe --match "v[0-9]*vfs*" HEAD)" in
+          ${def_ver%%.vfs.*}.vfs.*) ;; # okay, we can use this
+          *) git -c user.name=ci -c user.email=ci@github tag -m for-testing ${def_ver}.NNN.g$(git rev-parse --short HEAD);;
+          esac
+
+          SUDO=
+          extra=
+          case "${{ runner.os }}" in
+          Windows)
+            extra=DESTDIR=/c/Progra~1/Git
+            cygpath -aw "/c/Program Files/Git/cmd" >>$GITHUB_PATH
+            ;;
+          Linux)
+            SUDO=sudo
+            extra=prefix=/usr
+            ;;
+          macOS)
+            SUDO=sudo
+            extra=prefix=/usr/local
+            ;;
+          esac
+
+          $SUDO make -j5 $extra install
+
+      - name: Ensure that we use the built Git and Scalar
+        shell: bash
+        run: |
+          type -p git
+          git version
+          case "$(git version)" in *.vfs.*) echo Good;; *) exit 1;; esac
+          type -p scalar
+          scalar version
+          case "$(scalar version 2>&1)" in *.vfs.*) echo Good;; *) exit 1;; esac
+
+      - name: Check out Scalar's source code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0 # Indicate full history so Nerdbank.GitVersioning works.
+          path: scalar
+          repository: ${{ env.SCALAR_REPOSITORY }}
+          ref: ${{ env.SCALAR_REF }}
+
+      - name: Setup .NET Core
+        uses: actions/setup-dotnet@v4
+        with:
+          dotnet-version: '3.1.x'
+
+      - name: Install dependencies
+        run: dotnet restore
+        working-directory: scalar
+        env:
+          DOTNET_NOLOGO: 1
+
+      - name: Build
+        working-directory: scalar
+        run: dotnet build --configuration Release --no-restore -p:UseAppHost=true # Force generation of executable on macOS.
+
+      - name: Setup platform (Linux)
+        if: runner.os == 'Linux'
+        run: |
+          echo "BUILD_PLATFORM=${{ runner.os }}" >>$GITHUB_ENV
+          echo "TRACE2_BASENAME=Trace2.${{ github.run_id }}__${{ github.run_number }}__${{ matrix.os }}__${{ matrix.features }}" >>$GITHUB_ENV
+
+      - name: Setup platform (Mac)
+        if: runner.os == 'macOS'
+        run: |
+          echo 'BUILD_PLATFORM=Mac' >>$GITHUB_ENV
+          echo "TRACE2_BASENAME=Trace2.${{ github.run_id }}__${{ github.run_number }}__${{ matrix.os }}__${{ matrix.features }}" >>$GITHUB_ENV
+
+      - name: Setup platform (Windows)
+        if: runner.os == 'Windows'
+        run: |
+          echo "BUILD_PLATFORM=${{ runner.os }}" >>$env:GITHUB_ENV
+          echo 'BUILD_FILE_EXT=.exe' >>$env:GITHUB_ENV
+          echo "TRACE2_BASENAME=Trace2.${{ github.run_id }}__${{ github.run_number }}__${{ matrix.os }}__${{ matrix.features }}" >>$env:GITHUB_ENV
+
+      - name: Configure feature.scalar
+        run: git config --global feature.scalar ${{ matrix.features }}
+
+      - id: functional_test
+        name: Functional test
+        timeout-minutes: 60
+        working-directory: scalar
+        shell: bash
+        run: |
+          export GIT_TRACE2_EVENT="$PWD/$TRACE2_BASENAME/Event"
+          export GIT_TRACE2_PERF="$PWD/$TRACE2_BASENAME/Perf"
+          export GIT_TRACE2_EVENT_BRIEF=true
+          export GIT_TRACE2_PERF_BRIEF=true
+          mkdir -p "$TRACE2_BASENAME"
+          mkdir -p "$TRACE2_BASENAME/Event"
+          mkdir -p "$TRACE2_BASENAME/Perf"
+          git version --build-options
+          cd ../out
+          Scalar.FunctionalTests/$BUILD_FRAGMENT/Scalar.FunctionalTests$BUILD_FILE_EXT --test-scalar-on-path --test-git-on-path --timeout=300000 --full-suite
+
+      - name: Force-stop FSMonitor daemons and Git processes (Windows)
+        if: runner.os == 'Windows' && (success() || failure())
+        shell: bash
+        run: |
+          set -x
+          wmic process get CommandLine,ExecutablePath,HandleCount,Name,ParentProcessID,ProcessID
+          wmic process where "CommandLine Like '%fsmonitor--daemon %run'" delete
+          wmic process where "ExecutablePath Like '%git.exe'" delete
+
+      - id: trace2_zip_unix
+        if: runner.os != 'Windows' && ( success() || failure() ) && ( steps.functional_test.conclusion == 'success' || steps.functional_test.conclusion == 'failure' )
+        name: Zip Trace2 Logs (Unix)
+        shell: bash
+        working-directory: scalar
+        run: zip -q -r $TRACE2_BASENAME.zip $TRACE2_BASENAME/
+
+      - id: trace2_zip_windows
+        if: runner.os == 'Windows' && ( success() || failure() ) && ( steps.functional_test.conclusion == 'success' || steps.functional_test.conclusion == 'failure' )
+        name: Zip Trace2 Logs (Windows)
+        working-directory: scalar
+        run: Compress-Archive -DestinationPath ${{ env.TRACE2_BASENAME }}.zip -Path ${{ env.TRACE2_BASENAME }}
+
+      - name: Archive Trace2 Logs
+        if: ( success() || failure() ) && ( steps.trace2_zip_unix.conclusion == 'success' || steps.trace2_zip_windows.conclusion == 'success' )
+        uses: actions/upload-artifact@v3
+        with:
+          name: ${{ env.TRACE2_BASENAME }}.zip
+          path: scalar/${{ env.TRACE2_BASENAME }}.zip
+          retention-days: 3
+
+      # The GitHub Action `action-tmate` allows developers to connect to the running agent
+      # using SSH (it will be a `tmux` session; on Windows agents it will be inside the MSYS2
+      # environment in `C:\msys64`, therefore it can be slightly tricky to interact with
+      # Git for Windows, which runs a slightly incompatible MSYS2 runtime).
+      - name: action-tmate
+        if: env.DEBUG_WITH_TMATE == 'true' && failure()
+        uses: mxschmitt/action-tmate@v3
+        with:
+          limit-access-to-actor: true

From e74dc5754a01a8ebddb94c0e79e7fd7d9a46c81c Mon Sep 17 00:00:00 2001
From: Victoria Dye <vdye@github.com>
Date: Tue, 5 Apr 2022 13:42:23 -0700
Subject: [PATCH 167/207] scalar: upgrade to newest FSMonitor config setting

When FSMonitor was upstreamed, the 'core.useBuiltinFSMonitor' config was
deprecated and replaced with an overload of the 'core.fsmonitor' config
(i.e., if a boolean value was specified in 'core.fsmonitor', it is treated
the way 'core.useBuiltinFSMonitor' originally was). Because 'scalar
register' actively sets that config, use the opportunity to upgrade the
deprecated setting to the new one.
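
A sketch of the intended migration for a repository that still carries
the deprecated setting:

  git config core.useBuiltinFSMonitor true
  scalar register
  git config core.fsmonitor              # now reports "true"
  git config core.useBuiltinFSMonitor    # now unset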

Co-authored-by: Johannes Schindelin <Johannes.Schindelin@gmx.de>
Signed-off-by: Victoria Dye <vdye@github.com>
---
 scalar.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/scalar.c b/scalar.c
index 072d725e98034a..a24f1822fe4fd5 100644
--- a/scalar.c
+++ b/scalar.c
@@ -200,6 +200,23 @@ static int set_recommended_config(int reconfigure)
 	int i;
 	char *value;
 
+	/*
+	 * If a user has "core.usebuiltinfsmonitor" enabled, try to switch to
+	 * the new (non-deprecated) setting (core.fsmonitor).
+	 */
+	if (!git_config_get_string("core.usebuiltinfsmonitor", &value)) {
+		char *dummy = NULL;
+		if (git_config_get_string("core.fsmonitor", &dummy) &&
+		    git_config_set_gently("core.fsmonitor", value) < 0)
+			return error(_("could not configure %s=%s"),
+				     "core.fsmonitor", value);
+		if (git_config_set_gently("core.usebuiltinfsmonitor", NULL) < 0)
+			return error(_("could not configure %s=%s"),
+				     "core.useBuiltinFSMonitor", "NULL");
+		free(value);
+		free(dummy);
+	}
+
 	for (i = 0; config[i].key; i++) {
 		if (set_scalar_config(config + i, reconfigure))
 			return error(_("could not configure %s=%s"),

From d7579e0da1c10c86e586cd7071075ac4e70acfea Mon Sep 17 00:00:00 2001
From: Derrick Stolee <derrickstolee@github.com>
Date: Tue, 4 Oct 2022 08:01:17 -0400
Subject: [PATCH 168/207] abspath: make strip_last_path_component() global

The strip_last_component() method is helpful for finding the parent
directory of a path stored in a strbuf. Extract it to a global method
advertised in abspath.h. Now that it is visible more widely, rename it to
be more specific to paths.

Signed-off-by: Derrick Stolee <derrickstolee@github.com>
---
 abspath.c | 6 +++---
 abspath.h | 5 +++++
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/abspath.c b/abspath.c
index 0c17e98654e4b0..e899f46d02097a 100644
--- a/abspath.c
+++ b/abspath.c
@@ -14,7 +14,7 @@ int is_directory(const char *path)
 }
 
 /* removes the last path component from 'path' except if 'path' is root */
-static void strip_last_component(struct strbuf *path)
+void strip_last_path_component(struct strbuf *path)
 {
 	size_t offset = offset_1st_component(path->buf);
 	size_t len = path->len;
@@ -119,7 +119,7 @@ static char *strbuf_realpath_1(struct strbuf *resolved, const char *path,
 			continue; /* '.' component */
 		} else if (next.len == 2 && !strcmp(next.buf, "..")) {
 			/* '..' component; strip the last path component */
-			strip_last_component(resolved);
+			strip_last_path_component(resolved);
 			continue;
 		}
 
@@ -171,7 +171,7 @@ static char *strbuf_realpath_1(struct strbuf *resolved, const char *path,
 				 * strip off the last component since it will
 				 * be replaced with the contents of the symlink
 				 */
-				strip_last_component(resolved);
+				strip_last_path_component(resolved);
 			}
 
 			/*
diff --git a/abspath.h b/abspath.h
index 4653080d5e4b7a..06241ba13cf646 100644
--- a/abspath.h
+++ b/abspath.h
@@ -10,6 +10,11 @@ char *real_pathdup(const char *path, int die_on_error);
 const char *absolute_path(const char *path);
 char *absolute_pathdup(const char *path);
 
+/**
+ * Remove the last path component from 'path' except if 'path' is root.
+ */
+void strip_last_path_component(struct strbuf *path);
+
 /*
  * Concatenate "prefix" (if len is non-zero) and "path", with no
  * connecting characters (so "prefix" should end with a "/").

From 1eb82d131786c913d9637833ce299e8f926e4a49 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <derrickstolee@github.com>
Date: Mon, 21 Aug 2023 11:14:48 -0400
Subject: [PATCH 169/207] scalar: configure maintenance during 'reconfigure'

The 'scalar reconfigure' command is intended to update registered repos
with the latest settings available. However, up to now we were not
reregistering the repos with background maintenance.

In particular, this meant that the background maintenance schedule would
not pick up any improvements made between versions.

Be sure to register repos for maintenance during the reconfigure step.
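
Concretely, `scalar reconfigure --all` now also performs, for each
registered enlistment, the equivalent of (a sketch, assuming
toggle_maintenance() maps to `git maintenance start`):

  git -C <enlistment>/src maintenance start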

Signed-off-by: Derrick Stolee <derrickstolee@github.com>
---
 scalar.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/scalar.c b/scalar.c
index a24f1822fe4fd5..fa60abeb994f53 100644
--- a/scalar.c
+++ b/scalar.c
@@ -1126,7 +1126,8 @@ static int cmd_reconfigure(int argc, const char **argv)
 		old_repo = the_repository;
 		the_repository = &r;
 
-		if (set_recommended_config(1) >= 0)
+		if (set_recommended_config(1) >= 0 &&
+		    toggle_maintenance(1) >= 0)
 			succeeded = 1;
 
 		the_repository = old_repo;

From 5073191e4be9640df3c889e53e32d10896d64a58 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <derrickstolee@github.com>
Date: Fri, 30 Sep 2022 12:41:27 -0400
Subject: [PATCH 170/207] scalar: .scalarCache should live above enlistment

We should not be putting the .scalarCache inside the enlistment as a
sibling to the 'src' directory. This only happens in "unattended" mode,
but it also negates any benefit of a shared object cache because each
enlistment then shares no objects with any other.

Move the shared object cache in this case to a level above the
enlistment, so at least there is some hope that it can be reused. This
is also critical to the upcoming --no-src option, since the shared
object cache cannot be located within the Git repository.
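
A sketch with illustrative paths, for an unattended clone of
/repos/example:

  # old: cache nested inside the enlistment, never shared
  /repos/example/.scalarCache/<cache-key>
  /repos/example/src/.git

  # new: cache one level above the enlistment, so it can be shared
  /repos/.scalarCache/<cache-key>
  /repos/example/src/.git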

Signed-off-by: Derrick Stolee <derrickstolee@github.com>
---
 scalar.c          | 9 +++++++--
 t/t9210-scalar.sh | 2 +-
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/scalar.c b/scalar.c
index a24f1822fe4fd5..127e5fd2c9f326 100644
--- a/scalar.c
+++ b/scalar.c
@@ -488,8 +488,13 @@ static char *default_cache_root(const char *root)
 {
 	const char *env;
 
-	if (is_unattended())
-		return xstrfmt("%s/.scalarCache", root);
+	if (is_unattended()) {
+		struct strbuf path = STRBUF_INIT;
+		strbuf_addstr(&path, root);
+		strip_last_path_component(&path);
+		strbuf_addstr(&path, "/.scalarCache");
+		return strbuf_detach(&path, NULL);
+	}
 
 #ifdef WIN32
 	(void)env;
diff --git a/t/t9210-scalar.sh b/t/t9210-scalar.sh
index c070dd81f4cb10..b932082e663914 100755
--- a/t/t9210-scalar.sh
+++ b/t/t9210-scalar.sh
@@ -382,7 +382,7 @@ test_expect_success '`scalar clone` with GVFS-enabled server' '
 	cache_key="url_$(printf "%s" http://$HOST_PORT/ |
 		tr A-Z a-z |
 		test-tool sha1)" &&
-	echo "$(pwd)/using-gvfs/.scalarCache/$cache_key" >expect &&
+	echo "$(pwd)/.scalarCache/$cache_key" >expect &&
 	git -C using-gvfs/src config gvfs.sharedCache >actual &&
 	test_cmp expect actual &&
 

From 32ef1f4c5c952a69cc6709009163400c0435d7b7 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Sun, 22 Aug 2021 14:55:59 -0400
Subject: [PATCH 171/207] sparse-checkout: add config to disable deleting dirs

The clean_tracked_sparse_directories() method deletes the tracked
directories that go out of scope when the sparse-checkout cone changes,
at least in cone mode. This is new behavior, but is recommended based on
our understanding of how users are interacting with the feature in most
cases.

It is possible that some users will object to the new behavior, so
create a new configuration option 'index.deleteSparseDirectories' that
can be set to 'false' to make clean_tracked_sparse_directories() do
nothing. This will keep all untracked files in the working tree and
cause performance problems with the sparse index, but those trade-offs
are for the user to decide.
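
For example, a user who prefers the old behavior can opt out like this
(a sketch using the new knob):

  git config index.deleteSparseDirectories false
  git sparse-checkout reapply   # out-of-cone directories are kept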

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 Documentation/config/index.txt     | 6 ++++++
 builtin/sparse-checkout.c          | 9 ++++++++-
 t/t1091-sparse-checkout-builtin.sh | 4 ++++
 3 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/Documentation/config/index.txt b/Documentation/config/index.txt
index 3eff42036033ea..0d6d05b70ce03d 100644
--- a/Documentation/config/index.txt
+++ b/Documentation/config/index.txt
@@ -1,3 +1,9 @@
+index.deleteSparseDirectories::
+	When enabled, the cone mode sparse-checkout feature will delete
+	directories that are outside of the sparse-checkout cone, unless
+	such a directory contains an untracked, non-ignored file. Defaults
+	to true.
+
 index.recordEndOfIndexEntries::
 	Specifies whether the index file should include an "End Of Index
 	Entry" section. This reduces index load time on multiprocessor
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index 14dcace5f8ff7c..dcfe1832af33ff 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -111,7 +111,7 @@ static int sparse_checkout_list(int argc, const char **argv, const char *prefix,
 
 static void clean_tracked_sparse_directories(struct repository *r)
 {
-	int i, was_full = 0;
+	int i, value, was_full = 0;
 	struct strbuf path = STRBUF_INIT;
 	size_t pathlen;
 	struct string_list_item *item;
@@ -127,6 +127,13 @@ static void clean_tracked_sparse_directories(struct repository *r)
 	    !r->index->sparse_checkout_patterns->use_cone_patterns)
 		return;
 
+	/*
+	 * Users can disable this behavior.
+	 */
+	if (!repo_config_get_bool(r, "index.deletesparsedirectories", &value) &&
+	    !value)
+		return;
+
 	/*
 	 * Use the sparse index as a data structure to assist finding
 	 * directories that are safe to delete. This conversion to a
diff --git a/t/t1091-sparse-checkout-builtin.sh b/t/t1091-sparse-checkout-builtin.sh
index 29838259bb4fae..7e63354b8e33e5 100755
--- a/t/t1091-sparse-checkout-builtin.sh
+++ b/t/t1091-sparse-checkout-builtin.sh
@@ -783,6 +783,10 @@ test_expect_success 'cone mode clears ignored subdirectories' '
 	git -C repo status --porcelain=v2 >out &&
 	test_must_be_empty out &&
 
+	git -C repo -c index.deleteSparseDirectories=false sparse-checkout reapply &&
+	test_path_is_dir repo/folder1 &&
+	test_path_is_dir repo/deep/deeper2 &&
+
 	git -C repo sparse-checkout reapply &&
 	test_path_is_missing repo/folder1 &&
 	test_path_is_missing repo/deep/deeper2 &&

From 47db922a0bd1432677c4e3eeb1f6dc2e3a88391d Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Tue, 29 Jun 2021 11:12:56 -0400
Subject: [PATCH 172/207] add/rm: allow adding sparse entries when virtual

Upstream, a20f704 (add: warn when asked to update SKIP_WORKTREE entries,
2021-04-08) modified how 'git add <pathspec>' works with cache entries
marked with the SKIP_WORKTREE bit. The intention is to prevent a user
from accidentally adding a path that is outside their sparse-checkout
definition but somehow matches an existing index entry.

A similar change for 'git rm' happened in d5f4b82 (rm: honor sparse
checkout patterns, 2021-04-08).

This breaks when using the virtual filesystem in VFS for Git. It is
rare, but we could be in a scenario where the user has staged a change
and then the file is projected away. If the user re-adds the file, then
this warning causes the command to fail with the advise message.

Disable this logic when core_virtualfilesystem is enabled.
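
A sketch of the scenario with a hypothetical, currently-virtual path
(assuming core.virtualFilesystem is configured):

  git add projected/away/file.c         # used to fail with the sparse advice
  git rm --cached projected/away/file.c # likewise no longer refused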

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 builtin/add.c | 22 +++++++++++++++++-----
 builtin/rm.c  |  9 +++++++--
 read-cache.c  |  2 +-
 3 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/builtin/add.c b/builtin/add.c
index fc2866fad8afbe..d85b2bd81974d7 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -1,3 +1,5 @@
+#define USE_THE_REPOSITORY_VARIABLE
+
 /*
  * "git add" builtin command
  *
@@ -5,6 +7,7 @@
  */
 
 #include "builtin.h"
+#include "environment.h"
 #include "advice.h"
 #include "config.h"
 #include "lockfile.h"
@@ -47,6 +50,7 @@ static int chmod_pathspec(struct repository *repo,
 		int err;
 
 		if (!include_sparse &&
+		    !core_virtualfilesystem &&
 		    (ce_skip_worktree(ce) ||
 		     !path_in_sparse_checkout(ce->name, repo->index)))
 			continue;
@@ -132,8 +136,9 @@ static int refresh(struct repository *repo, int verbose, const struct pathspec *
 		if (!seen[i]) {
 			const char *path = pathspec->items[i].original;
 
-			if (matches_skip_worktree(pathspec, i, &skip_worktree_seen) ||
-			    !path_in_sparse_checkout(path, repo->index)) {
+			if (!core_virtualfilesystem &&
+			    (matches_skip_worktree(pathspec, i, &skip_worktree_seen) ||
+			     !path_in_sparse_checkout(path, repo->index))) {
 				string_list_append(&only_match_skip_worktree,
 						   pathspec->items[i].original);
 			} else {
@@ -143,7 +148,11 @@ static int refresh(struct repository *repo, int verbose, const struct pathspec *
 		}
 	}
 
-	if (only_match_skip_worktree.nr) {
+	/*
+	 * When using a virtual filesystem, we might re-add a path
+	 * that is currently virtual and we want that to succeed.
+	 */
+	if (!core_virtualfilesystem && only_match_skip_worktree.nr) {
 		advise_on_updating_sparse_paths(&only_match_skip_worktree);
 		ret = 1;
 	}
@@ -529,7 +538,11 @@ int cmd_add(int argc,
 			if (seen[i])
 				continue;
 
-			if (!include_sparse &&
+			/*
+			 * When using a virtual filesystem, we might re-add a path
+			 * that is currently virtual and we want that to succeed.
+			 */
+			if (!include_sparse && !core_virtualfilesystem &&
 			    matches_skip_worktree(&pathspec, i, &skip_worktree_seen)) {
 				string_list_append(&only_match_skip_worktree,
 						   pathspec.items[i].original);
@@ -553,7 +566,6 @@ int cmd_add(int argc,
 			}
 		}
 
-
 		if (only_match_skip_worktree.nr) {
 			advise_on_updating_sparse_paths(&only_match_skip_worktree);
 			exit_status = 1;
diff --git a/builtin/rm.c b/builtin/rm.c
index 12ae086a556ce3..67a1cea2226747 100644
--- a/builtin/rm.c
+++ b/builtin/rm.c
@@ -8,6 +8,7 @@
 #define DISABLE_SIGN_COMPARE_WARNINGS
 
 #include "builtin.h"
+#include "environment.h"
 #include "advice.h"
 #include "config.h"
 #include "lockfile.h"
@@ -317,7 +318,7 @@ int cmd_rm(int argc,
 	for (i = 0; i < the_repository->index->cache_nr; i++) {
 		const struct cache_entry *ce = the_repository->index->cache[i];
 
-		if (!include_sparse &&
+		if (!include_sparse && !core_virtualfilesystem &&
 		    (ce_skip_worktree(ce) ||
 		     !path_in_sparse_checkout(ce->name, the_repository->index)))
 			continue;
@@ -354,7 +355,11 @@ int cmd_rm(int argc,
 				    *original ? original : ".");
 		}
 
-		if (only_match_skip_worktree.nr) {
+		/*
+		 * When using a virtual filesystem, we might re-add a path
+		 * that is currently virtual and we want that to succeed.
+		 */
+		if (!core_virtualfilesystem && only_match_skip_worktree.nr) {
 			advise_on_updating_sparse_paths(&only_match_skip_worktree);
 			ret = 1;
 		}
diff --git a/read-cache.c b/read-cache.c
index ce3abdb063f208..e9994d00f3fca1 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -3971,7 +3971,7 @@ static void update_callback(struct diff_queue_struct *q,
 		struct diff_filepair *p = q->queue[i];
 		const char *path = p->one->path;
 
-		if (!data->include_sparse &&
+		if (!data->include_sparse && !core_virtualfilesystem &&
 		    !path_in_sparse_checkout(path, data->index))
 			continue;
 

From 6ea1db9dd4eecc864fa0ba2fc4603c5458141c09 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Mon, 26 Jul 2021 15:43:05 -0400
Subject: [PATCH 173/207] diff: ignore sparse paths in diffstat

The diff_populate_filespec() method is used to describe the diff after a
merge operation is complete, especially when a conflict appears. In
order to avoid expanding a sparse index, the reuse_worktree_file() method needs
to be adapted to ignore files that are outside of the sparse-checkout
cone. The file names and OIDs used for this check come from the merged
tree in the case of the ORT strategy, not the index, hence the ability
to look into these paths without having already expanded the index.
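
A minimal sketch of the added check (it assumes Git's internal
path_in_sparse_checkout() helper; returning 0 tells the caller not to
reuse the worktree copy, so the blob is read from the object store
instead):

    /* Paths outside the sparse-checkout cone are not on disk. */
    if (!path_in_sparse_checkout(name, istate))
        return 0;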

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 diff.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/diff.c b/diff.c
index d28b4114c8dffb..b555b2c35a618d 100644
--- a/diff.c
+++ b/diff.c
@@ -4046,6 +4046,13 @@ static int reuse_worktree_file(struct index_state *istate,
 	    has_object_pack(istate->repo, oid))
 		return 0;
 
+	/*
+	 * If this path does not match our sparse-checkout definition,
+	 * then the file will not be in the working directory.
+	 */
+	if (!path_in_sparse_checkout(name, istate))
+		return 0;
+
 	/*
 	 * Similarly, if we'd have to convert the file contents anyway, that
 	 * makes the optimization not worthwhile.

From 5ac80b90ef3d34d173b308977ff35b9b5ff99422 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Tue, 15 Jun 2021 11:07:11 -0400
Subject: [PATCH 174/207] repo-settings: enable sparse index by default

Change the default of 'index.sparse' to enabled so that repositories use
the sparse index without needing to opt in.

There is some strangeness when expanding a sparse-index that exists
within a submodule. We will need to resolve that later, but for now,
let's do a better job of explicitly disabling the sparse-index when
requested, and do so in t7817.

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 repo-settings.c                          | 2 +-
 t/perf/p2000-sparse-operations.sh        | 4 ++--
 t/t1092-sparse-checkout-compatibility.sh | 1 +
 t/t7817-grep-sparse-checkout.sh          | 2 +-
 4 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/repo-settings.c b/repo-settings.c
index 20f68200fab63b..77483f06b7f20a 100644
--- a/repo-settings.c
+++ b/repo-settings.c
@@ -71,7 +71,7 @@ void prepare_repo_settings(struct repository *r)
 	repo_cfg_bool(r, "pack.usesparse", &r->settings.pack_use_sparse, 1);
 	repo_cfg_bool(r, "pack.usepathwalk", &r->settings.pack_use_path_walk, 0);
 	repo_cfg_bool(r, "core.multipackindex", &r->settings.core_multi_pack_index, 1);
-	repo_cfg_bool(r, "index.sparse", &r->settings.sparse_index, 0);
+	repo_cfg_bool(r, "index.sparse", &r->settings.sparse_index, 1);
 	repo_cfg_bool(r, "index.skiphash", &r->settings.index_skip_hash, r->settings.index_skip_hash);
 	repo_cfg_bool(r, "pack.readreverseindex", &r->settings.pack_read_reverse_index, 1);
 	repo_cfg_bool(r, "pack.usebitmapboundarytraversal",
diff --git a/t/perf/p2000-sparse-operations.sh b/t/perf/p2000-sparse-operations.sh
index 39e92b0841437b..c366a822031291 100755
--- a/t/perf/p2000-sparse-operations.sh
+++ b/t/perf/p2000-sparse-operations.sh
@@ -56,7 +56,7 @@ test_expect_success 'setup repo and indexes' '
 	git -c core.sparseCheckoutCone=true clone --branch=wide --sparse . full-v3 &&
 	(
 		cd full-v3 &&
-		git sparse-checkout init --cone &&
+		git sparse-checkout init --cone --no-sparse-index &&
 		git sparse-checkout set $SPARSE_CONE &&
 		git config index.version 3 &&
 		git update-index --index-version=3 &&
@@ -65,7 +65,7 @@ test_expect_success 'setup repo and indexes' '
 	git -c core.sparseCheckoutCone=true clone --branch=wide --sparse . full-v4 &&
 	(
 		cd full-v4 &&
-		git sparse-checkout init --cone &&
+		git sparse-checkout init --cone --no-sparse-index &&
 		git sparse-checkout set $SPARSE_CONE &&
 		git config index.version 4 &&
 		git update-index --index-version=4 &&
diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh
index c9d1a2c642adc7..63a340733b69ca 100755
--- a/t/t1092-sparse-checkout-compatibility.sh
+++ b/t/t1092-sparse-checkout-compatibility.sh
@@ -155,6 +155,7 @@ init_repos () {
 	git -C sparse-index reset --hard &&
 
 	# initialize sparse-checkout definitions
+	git -C sparse-checkout config index.sparse false &&
 	git -C sparse-checkout sparse-checkout init --cone &&
 	git -C sparse-checkout sparse-checkout set deep &&
 	git -C sparse-index sparse-checkout init --cone --sparse-index &&
diff --git a/t/t7817-grep-sparse-checkout.sh b/t/t7817-grep-sparse-checkout.sh
index eb595645657fad..db3004c4fe71c0 100755
--- a/t/t7817-grep-sparse-checkout.sh
+++ b/t/t7817-grep-sparse-checkout.sh
@@ -49,7 +49,7 @@ test_expect_success 'setup' '
 		echo "text" >B/b &&
 		git add A B &&
 		git commit -m sub &&
-		git sparse-checkout init --cone &&
+		git sparse-checkout init --cone --no-sparse-index &&
 		git sparse-checkout set B
 	) &&
 

From 8df952e1c1c669cece17f268007229ea622437f8 Mon Sep 17 00:00:00 2001
From: Lessley Dennington <lessleydennington@gmail.com>
Date: Fri, 10 Sep 2021 13:57:25 -0700
Subject: [PATCH 175/207] diff(sparse-index): verify with partially-sparse

This verifies that `diff` and `diff --staged` behave the same in sparse
index repositories in the following partially-staged scenarios (i.e. the
index, HEAD, and working directory differ at a given path):
    1. Path is within sparse-checkout cone.
    2. Path is outside sparse-checkout cone.
    3. A merge conflict exists for paths outside sparse-checkout cone.

Signed-off-by: Lessley Dennington <lessleydennington@gmail.com>
---
 t/t1092-sparse-checkout-compatibility.sh | 44 ++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh
index 63a340733b69ca..15bc290cfc6529 100755
--- a/t/t1092-sparse-checkout-compatibility.sh
+++ b/t/t1092-sparse-checkout-compatibility.sh
@@ -541,6 +541,45 @@ test_expect_success 'diff --cached' '
 	test_all_match git diff --cached
 '
 
+test_expect_success 'diff partially-staged' '
+	init_repos &&
+
+	git -C full-checkout config advice.sparseIndexExpanded false &&
+
+	write_script edit-contents <<-\EOF &&
+	echo text >>$1
+	EOF
+
+	# Add file within cone
+	test_all_match git sparse-checkout set deep &&
+	run_on_all ../edit-contents deep/testfile &&
+	test_all_match git add deep/testfile &&
+	run_on_all ../edit-contents deep/testfile &&
+
+	test_all_match git diff &&
+	test_all_match git diff --staged &&
+
+	# Add file outside cone
+	test_all_match git reset --hard &&
+	run_on_all mkdir newdirectory &&
+	run_on_all ../edit-contents newdirectory/testfile &&
+	test_all_match git sparse-checkout set newdirectory &&
+	test_all_match git add newdirectory/testfile &&
+	run_on_all ../edit-contents newdirectory/testfile &&
+	test_all_match git sparse-checkout set &&
+
+	test_all_match git diff &&
+	test_all_match git diff --staged &&
+
+	# Merge conflict outside cone
+	test_all_match git reset --hard &&
+	test_all_match git checkout merge-left &&
+	test_all_match test_must_fail git merge merge-right &&
+
+	test_all_match git diff &&
+	test_all_match git diff --staged
+'
+
 # NEEDSWORK: sparse-checkout behaves differently from full-checkout when
 # running this test with 'df-conflict-2' after 'df-conflict-1'.
 test_expect_success 'diff with renames and conflicts' '
@@ -1482,6 +1521,11 @@ test_expect_success 'sparse-index is not expanded' '
 	ensure_not_expanded reset --merge update-deep &&
 	ensure_not_expanded reset --hard &&
 
+	echo a test change >>sparse-index/README.md &&
+	ensure_not_expanded diff &&
+	git -C sparse-index add README.md &&
+	ensure_not_expanded diff --staged &&
+
 	ensure_not_expanded reset base -- deep/a &&
 	ensure_not_expanded reset base -- nonexistent-file &&
 	ensure_not_expanded reset deepest -- deep &&

From c66ff5c8b26ad81f4333be6a0c98fb13bba64c3c Mon Sep 17 00:00:00 2001
From: Victoria Dye <vdye@github.com>
Date: Wed, 22 Sep 2021 14:02:21 -0400
Subject: [PATCH 176/207] stash: expand testing for `git stash -u`

Add test cases specific to handling untracked files in `git stash`: they
a) ensure that files outside the sparse-checkout definition are handled as
expected and b) document the index expansion inside `git stash -u`. Note
that, in b), it is not the full repository index that is expanded - it is
the temporary, standalone index containing only the stashed untracked
files.

Signed-off-by: Victoria Dye <vdye@github.com>
---
 t/t1092-sparse-checkout-compatibility.sh | 40 ++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh
index 15bc290cfc6529..358ce85d37ad9d 100755
--- a/t/t1092-sparse-checkout-compatibility.sh
+++ b/t/t1092-sparse-checkout-compatibility.sh
@@ -1849,6 +1849,46 @@ test_expect_success 'sparse index is not expanded: sparse-checkout' '
 	ensure_not_expanded sparse-checkout set
 '
 
+# NEEDSWORK: although the full repository's index is _not_ expanded as part of
+# stash, a temporary index, which is _not_ sparse, is created when stashing and
+# applying a stash of untracked files. As a result, the test reports that it
+# finds an instance of `ensure_full_index`, but it does not carry with it the
+# performance implications of expanding the full repository index.
+test_expect_success 'sparse index is not expanded: stash -u' '
+	init_repos &&
+
+	mkdir -p sparse-index/folder1 &&
+	echo >>sparse-index/README.md &&
+	echo >>sparse-index/a &&
+	echo >>sparse-index/folder1/new &&
+
+	GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \
+		git -C sparse-index stash -u &&
+	test_region index ensure_full_index trace2.txt &&
+
+	GIT_TRACE2_EVENT="$(pwd)/trace2.txt" GIT_TRACE2_EVENT_NESTING=10 \
+		git -C sparse-index stash pop &&
+	test_region index ensure_full_index trace2.txt
+'
+
+# NEEDSWORK: similar to `git add`, untracked files outside of the sparse
+# checkout definition are successfully stashed and unstashed.
+test_expect_success 'stash -u outside sparse checkout definition' '
+	init_repos &&
+
+	write_script edit-contents <<-\EOF &&
+	echo text >>$1
+	EOF
+
+	run_on_sparse mkdir -p folder1 &&
+	run_on_all ../edit-contents folder1/new &&
+	test_all_match git stash -u &&
+	test_all_match git status --porcelain=v2 &&
+
+	test_all_match git stash pop -q &&
+	test_all_match git status --porcelain=v2
+'
+
 # NEEDSWORK: a sparse-checkout behaves differently from a full checkout
 # in this scenario, but it shouldn't.
 test_expect_success 'reset mixed and checkout orphan' '

From 375acdd5111cf2367eae84f4a283bd8a4b8aea19 Mon Sep 17 00:00:00 2001
From: Kevin Willford <kewillf@microsoft.com>
Date: Wed, 15 Mar 2017 16:36:53 -0600
Subject: [PATCH 177/207] reset: fix mixed reset when using virtual filesystem

During the 2.35.0 rebase, we ejected 570f64b (Fix reset when using the
sparse-checkout feature., 2017-03-15) because of a similar change
upstream that actually works with the expected behavior of
sparse-checkout.

That commit only ever existed in microsoft/git, but when it was
considered for upstream we realized that it behaved strangely for a
sparse-checkout scenario.

The root problem is that during a mixed reset, 'git reset <commit>'
updates the index to agree with <commit> but leaves the worktree the
same as it was before. The issue with sparse-checkout is that some files
might not be in the worktree and thus the information from those files
would be "lost".

The upstream decision was to leave these files as ignored, because
that's what the SKIP_WORKTREE bit means: don't put these files in the
worktree and ignore their contents. If there already were files in the
worktree, then Git does not change them. The case for "losing" data is
if a committed change outside of the sparse-checkout was in the previous
HEAD position. However, this information could be recovered from the
reflog.

The case where this is different is in a virtualized filesystem. The
virtualization is projecting the index contents onto the filesystem, so
we need to do something different here. In a virtual environment, every
file is considered "important" and we abuse the SKIP_WORKTREE bit to
indicate that Git does not need to process a projected file. When a file
is populated, the virtual filesystem hook provides the information for
removing the SKIP_WORKTREE bit.

In the case of these mixed resets, we have the issue where we change the
projection of the worktree for these cache entries that change. If a
file is populated in the worktree, then the populated file will persist
and appear in a follow-up 'git status'. However, if the file is not
populated and only projected, we change the projection from the current
value to the new value, leaving a clean 'git status'.

The previous version of this commit includes a call to checkout_entry(),
which populates the file. This causes the file to be actually in the
working tree and no longer projected.

To make this work with the upstream changes, stop setting the
skip-worktree bit for the new cache entry. This seemed to work fine even
without this change, but that is likely due to some indirection in the
virtual filesystem. It is better to do the right thing here so we do not
hide a corner-case bug by accident.
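
A condensed sketch of the idea, simplified from the actual change below
(the extra guard conditions around the checkout are omitted here):

    if (core_virtualfilesystem && !file_exists(two->path)) {
        struct checkout state = CHECKOUT_INIT;
        struct cache_entry *old;

        state.force = 1;
        state.refresh_cache = 1;
        state.istate = the_repository->index;
        old = make_cache_entry(the_repository->index, two->mode,
                               &two->oid, two->path, 0, 0);
        if (!old)
            die(_("make_cache_entry failed for path '%s'"), two->path);
        /* Populate the projected content so the next status is correct. */
        checkout_entry(old, &state, NULL, NULL);
    }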

Helped-by: Victoria Dye <vdye@github.com>
Signed-off-by: Kevin Willford <kewillf@microsoft.com>
Signed-off-by: Derrick Stolee <derrickstolee@github.com>
---
 builtin/reset.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 49 insertions(+), 2 deletions(-)

diff --git a/builtin/reset.c b/builtin/reset.c
index 6cfab674e40541..2d5e388147fdb6 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -40,6 +40,8 @@
 #include "add-interactive.h"
 #include "strbuf.h"
 #include "quote.h"
+#include "dir.h"
+#include "entry.h"
 
 #define REFRESH_INDEX_DELAY_WARNING_IN_MS (2 * 1000)
 
@@ -160,9 +162,48 @@ static void update_index_from_diff(struct diff_queue_struct *q,
 
 	for (i = 0; i < q->nr; i++) {
 		int pos;
+		int respect_skip_worktree = 1;
 		struct diff_filespec *one = q->queue[i]->one;
+		struct diff_filespec *two = q->queue[i]->two;
 		int is_in_reset_tree = one->mode && !is_null_oid(&one->oid);
+		int is_missing = !(one->mode && !is_null_oid(&one->oid));
+		int was_missing = !two->mode && is_null_oid(&two->oid);
 		struct cache_entry *ce;
+		struct cache_entry *ceBefore;
+		struct checkout state = CHECKOUT_INIT;
+
+		/*
+		 * When using the virtual filesystem feature, the cache entries that are
+		 * added here will not have the skip-worktree bit set.
+		 *
+		 * Without this code there is data that is lost because the files that
+		 * would normally be in the working directory are not there and show as
+		 * deleted for the next status or in the case of added files just disappear.
+		 * We need to create the previous version of the files in the working
+		 * directory so that they will have the right content and the next
+		 * status call will show modified or untracked files correctly.
+		 */
+		if (core_virtualfilesystem && !file_exists(two->path))
+		{
+			respect_skip_worktree = 0;
+			pos = index_name_pos(the_repository->index, two->path, strlen(two->path));
+
+			if ((pos >= 0 && ce_skip_worktree(the_repository->index->cache[pos])) &&
+			    (is_missing || !was_missing))
+			{
+				state.force = 1;
+				state.refresh_cache = 1;
+				state.istate = the_repository->index;
+				ceBefore = make_cache_entry(the_repository->index, two->mode,
+							    &two->oid, two->path,
+							    0, 0);
+				if (!ceBefore)
+					die(_("make_cache_entry failed for path '%s'"),
+						two->path);
+
+				checkout_entry(ceBefore, &state, NULL, NULL);
+			}
+		}
 
 		if (!is_in_reset_tree && !intent_to_add) {
 			remove_file_from_index(the_repository->index, one->path);
@@ -181,8 +222,14 @@ static void update_index_from_diff(struct diff_queue_struct *q,
 		 * to properly construct the reset sparse directory.
 		 */
 		pos = index_name_pos(the_repository->index, one->path, strlen(one->path));
-		if ((pos >= 0 && ce_skip_worktree(the_repository->index->cache[pos])) ||
-		    (pos < 0 && !path_in_sparse_checkout(one->path, the_repository->index)))
+
+		/*
+		 * Do not add the SKIP_WORKTREE bit back if we populated the
+		 * file on purpose in a virtual filesystem scenario.
+		 */
+		if (respect_skip_worktree &&
+		    ((pos >= 0 && ce_skip_worktree(the_repository->index->cache[pos])) ||
+		     (pos < 0 && !path_in_sparse_checkout(one->path, the_repository->index))))
 			ce->ce_flags |= CE_SKIP_WORKTREE;
 
 		if (!ce)

From 52c6b5d100536320ca7e3cd5bdbf9fdb94af6dcb Mon Sep 17 00:00:00 2001
From: Derrick Stolee <dstolee@microsoft.com>
Date: Sat, 30 Oct 2021 20:41:32 -0400
Subject: [PATCH 178/207] sparse: add vfs-specific precautions

* t1092: remove the 'git update-index' test that currently fails
  because the command ignores the bad path, but doesn't return a
  failure.

* dir.c: prevent matching against sparse-checkout patterns when the
  virtual filesystem is enabled. Should prevent some corner case
  issues.

* t1092: add quiet mode for some rebase tests because the stderr
  output can change in some of the modes.

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
---
 dir.c                                    | 7 +++++++
 t/t1092-sparse-checkout-compatibility.sh | 4 +++-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/dir.c b/dir.c
index 04b37090f9a19e..579171b0eebbae 100644
--- a/dir.c
+++ b/dir.c
@@ -1583,6 +1583,13 @@ static int path_in_sparse_checkout_1(const char *path,
 	enum pattern_match_result match = UNDECIDED;
 	const char *end, *slash;
 
+	/*
+	 * When using a virtual filesystem, there aren't really patterns
+	 * to follow, but be extra careful to skip this check.
+	 */
+	if (core_virtualfilesystem)
+		return 1;
+
 	/*
 	 * We default to accepting a path if the path is empty, there are no
 	 * patterns, or the patterns are of the wrong type.
diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh
index 358ce85d37ad9d..43bb7f7f1dbdd6 100755
--- a/t/t1092-sparse-checkout-compatibility.sh
+++ b/t/t1092-sparse-checkout-compatibility.sh
@@ -1060,7 +1060,9 @@ test_expect_success 'read-tree --merge with directory-file conflicts' '
 test_expect_success 'merge, cherry-pick, and rebase' '
 	init_repos &&
 
-	for OPERATION in "merge -m merge" cherry-pick "rebase --apply" "rebase --merge"
+	# microsoft/git specific: we need to use "quiet" mode
+	# to avoid different stderr for some rebases.
+	for OPERATION in "merge -m merge" cherry-pick "rebase -q --apply" "rebase -q --merge"
 	do
 		test_all_match git checkout -B temp update-deep &&
 		test_all_match git $OPERATION update-folder1 &&

From 19c0322e2d19988f5ce746a39ac63d4c49d8fcf5 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Mon, 30 Sep 2024 16:33:58 -0400
Subject: [PATCH 179/207] sparse-index: add ensure_full_index_with_reason()

It is sometimes difficult to support users who are hitting issues with
sparse index expansion because it is unclear from logs alone why the index
needs to expand. It is too invasive to set up a debugging scenario on the
user's machine, so let's improve the logging.

Create a new ensure_full_index_with_reason() method that takes a formatting
string and parameters. If the index is not fully expanded, then apply the
formatting logic to create the logged string and log it before calling
ensure_full_index(). This should assist with discovering why an index is
expanded from trace2 logs alone.
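
A hypothetical call site might look like this (the caller and the reason
string are purely illustrative):

    /* Logs a trace2 "expansion-reason" data event, then expands. */
    ensure_full_index_with_reason(istate,
                                  "pathspec '%s' needs full index", path);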

Signed-off-by: Derrick Stolee <stolee@gmail.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 sparse-index.c | 18 ++++++++++++++++++
 sparse-index.h |  8 ++++++++
 2 files changed, 26 insertions(+)

diff --git a/sparse-index.c b/sparse-index.c
index 82fcf36169a9de..42a2d031616b08 100644
--- a/sparse-index.c
+++ b/sparse-index.c
@@ -463,6 +463,24 @@ void ensure_full_index(struct index_state *istate)
 	expand_index(istate, NULL);
 }
 
+void ensure_full_index_with_reason(struct index_state *istate,
+				   const char *fmt, ...)
+{
+	va_list ap;
+	struct strbuf why = STRBUF_INIT;
+	if (!istate)
+		BUG("ensure_full_index_with_reason() must get an index!");
+	if (istate->sparse_index == INDEX_EXPANDED)
+		return;
+
+	va_start(ap, fmt);
+	strbuf_vaddf(&why, fmt, ap);
+	trace2_data_string("sparse-index", istate->repo, "expansion-reason", why.buf);
+	va_end(ap);
+	strbuf_release(&why);
+	ensure_full_index(istate);
+}
+
 void ensure_correct_sparsity(struct index_state *istate)
 {
 	/*
diff --git a/sparse-index.h b/sparse-index.h
index 727034be7ca917..09a8701476c7ff 100644
--- a/sparse-index.h
+++ b/sparse-index.h
@@ -46,4 +46,12 @@ void expand_index(struct index_state *istate, struct pattern_list *pl);
 
 void ensure_full_index(struct index_state *istate);
 
+/**
+ * If there is a clear reason why the sparse index is being expanded, then
+ * trace the information for why the expansion is occurring.
+ */
+void ensure_full_index_with_reason(struct index_state *istate,
+				   const char *fmt,
+				   ...);
+
 #endif

From 9cb2790a463060c8b139a83de84e5bb75d880057 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Mon, 30 Sep 2024 16:57:43 -0400
Subject: [PATCH 180/207] treewide: add reasons for expanding index

These locations that previously called ensure_full_index() are now
updated to call the ..._with_reason() variant, using fixed strings that
should be enough to identify the reason for the expansion.

This will help users use tracing to determine why the index is expanding
in their scenarios.
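
The conversion is mechanical; for example, the ls-files change amounts
to:

    /* before */
    ensure_full_index(repo->index);

    /* after: same expansion, plus a trace2 "expansion-reason" event */
    ensure_full_index_with_reason(repo->index, "ls-files");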

Signed-off-by: Derrick Stolee <stolee@gmail.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 builtin/checkout-index.c                 | 3 ++-
 builtin/ls-files.c                       | 2 +-
 builtin/read-tree.c                      | 3 ++-
 builtin/reset.c                          | 3 ++-
 builtin/rm.c                             | 3 ++-
 builtin/sparse-checkout.c                | 6 ++++--
 read-cache.c                             | 6 +++---
 repository.c                             | 2 +-
 sequencer.c                              | 4 ++--
 sparse-index.c                           | 6 ++++--
 t/t1092-sparse-checkout-compatibility.sh | 8 ++++++++
 unpack-trees.c                           | 4 ++--
 12 files changed, 33 insertions(+), 17 deletions(-)

diff --git a/builtin/checkout-index.c b/builtin/checkout-index.c
index a81501098d9fdb..b7c176f1790f0a 100644
--- a/builtin/checkout-index.c
+++ b/builtin/checkout-index.c
@@ -156,7 +156,8 @@ static int checkout_all(const char *prefix, int prefix_length)
 			 * first entry inside the expanded sparse directory).
 			 */
 			if (ignore_skip_worktree) {
-				ensure_full_index(the_repository->index);
+				ensure_full_index_with_reason(the_repository->index,
+							      "checkout-index");
 				ce = the_repository->index->cache[i];
 			}
 		}
diff --git a/builtin/ls-files.c b/builtin/ls-files.c
index 15499cd12b6bd5..2411fe523eb89e 100644
--- a/builtin/ls-files.c
+++ b/builtin/ls-files.c
@@ -413,7 +413,7 @@ static void show_files(struct repository *repo, struct dir_struct *dir)
 		return;
 
 	if (!show_sparse_dirs)
-		ensure_full_index(repo->index);
+		ensure_full_index_with_reason(repo->index, "ls-files");
 
 	for (i = 0; i < repo->index->cache_nr; i++) {
 		const struct cache_entry *ce = repo->index->cache[i];
diff --git a/builtin/read-tree.c b/builtin/read-tree.c
index d2a807a828b6ab..e84cd4ee4d2d9a 100644
--- a/builtin/read-tree.c
+++ b/builtin/read-tree.c
@@ -226,7 +226,8 @@ int cmd_read_tree(int argc,
 		setup_work_tree();
 
 	if (opts.skip_sparse_checkout)
-		ensure_full_index(the_repository->index);
+		ensure_full_index_with_reason(the_repository->index,
+					      "read-tree");
 
 	if (opts.merge) {
 		switch (stage - 1) {
diff --git a/builtin/reset.c b/builtin/reset.c
index 2d5e388147fdb6..4e9a058766f382 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -262,7 +262,8 @@ static int read_from_tree(const struct pathspec *pathspec,
 	opt.add_remove = diff_addremove;
 
 	if (pathspec->nr && pathspec_needs_expanded_index(the_repository->index, pathspec))
-		ensure_full_index(the_repository->index);
+		ensure_full_index_with_reason(the_repository->index,
+					      "reset pathspec");
 
 	if (do_diff_cache(tree_oid, &opt))
 		return 1;
diff --git a/builtin/rm.c b/builtin/rm.c
index 67a1cea2226747..043f95240388c1 100644
--- a/builtin/rm.c
+++ b/builtin/rm.c
@@ -313,7 +313,8 @@ int cmd_rm(int argc,
 	seen = xcalloc(pathspec.nr, 1);
 
 	if (pathspec_needs_expanded_index(the_repository->index, &pathspec))
-		ensure_full_index(the_repository->index);
+		ensure_full_index_with_reason(the_repository->index,
+					      "rm pathspec");
 
 	for (i = 0; i < the_repository->index->cache_nr; i++) {
 		const struct cache_entry *ce = the_repository->index->cache[i];
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index dcfe1832af33ff..ec1a7de9995fce 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -207,7 +207,8 @@ static void clean_tracked_sparse_directories(struct repository *r)
 	strbuf_release(&path);
 
 	if (was_full)
-		ensure_full_index(r->index);
+		ensure_full_index_with_reason(r->index,
+				"sparse-checkout:was full");
 }
 
 static int update_working_directory(struct pattern_list *pl)
@@ -437,7 +438,8 @@ static int update_modes(int *cone_mode, int *sparse_index)
 		the_repository->index->updated_workdir = 1;
 
 		if (!*sparse_index)
-			ensure_full_index(the_repository->index);
+			ensure_full_index_with_reason(the_repository->index,
+				"sparse-checkout:disabling sparse index");
 	}
 
 	return 0;
diff --git a/read-cache.c b/read-cache.c
index e9994d00f3fca1..0b9cd77e1c09f0 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -2376,7 +2376,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
 	 */
 	prepare_repo_settings(istate->repo);
 	if (istate->repo->settings.command_requires_full_index)
-		ensure_full_index(istate);
+		ensure_full_index_with_reason(istate, "incompatible builtin");
 	else
 		ensure_correct_sparsity(istate);
 
@@ -3208,7 +3208,7 @@ static int do_write_locked_index(struct index_state *istate,
 				   "%s", get_lock_file_path(lock));
 
 	if (was_full)
-		ensure_full_index(istate);
+		ensure_full_index_with_reason(istate, "re-expanding after write");
 
 	if (ret)
 		return ret;
@@ -3319,7 +3319,7 @@ static int write_shared_index(struct index_state *istate,
 				   the_repository, "%s", get_tempfile_path(*temp));
 
 	if (was_full)
-		ensure_full_index(istate);
+		ensure_full_index_with_reason(istate, "re-expanding after write");
 
 	if (ret)
 		return ret;
diff --git a/repository.c b/repository.c
index 468fe580a5d6b6..6ae80da16691eb 100644
--- a/repository.c
+++ b/repository.c
@@ -434,7 +434,7 @@ int repo_read_index(struct repository *repo)
 
 	prepare_repo_settings(repo);
 	if (repo->settings.command_requires_full_index)
-		ensure_full_index(repo->index);
+		ensure_full_index_with_reason(repo->index, "incompatible builtin");
 
 	/*
 	 * If sparse checkouts are in use, check whether paths with the
diff --git a/sequencer.c b/sequencer.c
index 6d069a890ed18c..ea917c93a55a2b 100644
--- a/sequencer.c
+++ b/sequencer.c
@@ -797,7 +797,7 @@ static int do_recursive_merge(struct repository *r,
 		merge_switch_to_result(&o, head_tree, &result, 1, show_output);
 		clean = result.clean;
 	} else {
-		ensure_full_index(r->index);
+		ensure_full_index_with_reason(r->index, "non-ort merge strategy");
 		clean = merge_trees(&o, head_tree, next_tree, base_tree);
 		if (is_rebase_i(opts) && clean <= 0)
 			fputs(o.obuf.buf, stdout);
@@ -2574,7 +2574,7 @@ static int read_and_refresh_cache(struct repository *r,
 	 * expand the sparse index.
 	 */
 	if (opts->strategy && strcmp(opts->strategy, "ort"))
-		ensure_full_index(r->index);
+		ensure_full_index_with_reason(r->index, "non-ort merge strategy");
 	return 0;
 }
 
diff --git a/sparse-index.c b/sparse-index.c
index 42a2d031616b08..cfe3d21c8b44fa 100644
--- a/sparse-index.c
+++ b/sparse-index.c
@@ -490,7 +490,8 @@ void ensure_correct_sparsity(struct index_state *istate)
 	if (is_sparse_index_allowed(istate, 0))
 		convert_to_sparse(istate, 0);
 	else
-		ensure_full_index(istate);
+		ensure_full_index_with_reason(istate,
+					      "sparse index not allowed");
 }
 
 struct path_found_data {
@@ -693,7 +694,8 @@ void clear_skip_worktree_from_present_files(struct index_state *istate)
 		return;
 
 	if (clear_skip_worktree_from_present_files_sparse(istate)) {
-		ensure_full_index(istate);
+		ensure_full_index_with_reason(istate,
+			"failed to clear skip-worktree while sparse");
 		clear_skip_worktree_from_present_files_full(istate);
 	}
 }
diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh
index 43bb7f7f1dbdd6..683158d56c5519 100755
--- a/t/t1092-sparse-checkout-compatibility.sh
+++ b/t/t1092-sparse-checkout-compatibility.sh
@@ -2500,4 +2500,12 @@ test_expect_success 'cat-file --batch' '
 	ensure_expanded cat-file --batch <in
 '
 
+test_expect_success 'ensure_full_index_with_reason' '
+	init_repos &&
+
+	GIT_TRACE2_EVENT="$(pwd)/ls-files-trace" \
+		git -C sparse-index ls-files --no-sparse HEAD &&
+	test_trace2_data "sparse-index" "expansion-reason" "ls-files" <ls-files-trace
+'
+
 test_done
diff --git a/unpack-trees.c b/unpack-trees.c
index 4fa8ae800b6ed5..3e4269b6a70e37 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -1936,9 +1936,9 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
 
 	prepare_repo_settings(repo);
 	if (repo->settings.command_requires_full_index) {
-		ensure_full_index(o->src_index);
+		ensure_full_index_with_reason(o->src_index, "incompatible builtin");
 		if (o->dst_index)
-			ensure_full_index(o->dst_index);
+			ensure_full_index_with_reason(o->dst_index, "incompatible builtin");
 	}
 
 	if (o->reset == UNPACK_RESET_OVERWRITE_UNTRACKED &&

From 2332092508fed1ab1a36fd1a54dc38be2a87f4d8 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Mon, 30 Sep 2024 16:59:07 -0400
Subject: [PATCH 181/207] treewide: custom reasons for expanding index

These remaining calls to ensure_full_index() are likely due to a data
shape issue on a user's machine, so take the extra time to format a
message that can be placed in their trace2 output and that hopefully
identifies the problem leading to this slow behavior.
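
A sketch of the pattern used at these call sites (the format string and
arguments vary per caller; this mirrors the merge-ort case below):

    const char *fmt = "merge-ort: path outside sparse checkout (%s)";
    ensure_full_index_with_reason(index, fmt, e->key);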

Signed-off-by: Derrick Stolee <stolee@gmail.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 builtin/update-index.c                   |  4 +++-
 merge-ort.c                              |  3 ++-
 read-cache.c                             |  4 +++-
 sparse-index.c                           |  4 +++-
 t/t1092-sparse-checkout-compatibility.sh | 10 +++++++++-
 unpack-trees.c                           |  6 ++++--
 6 files changed, 24 insertions(+), 7 deletions(-)

diff --git a/builtin/update-index.c b/builtin/update-index.c
index 04b2dbe6ec6046..073c55e280296d 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -714,7 +714,9 @@ static int do_reupdate(const char **paths,
 		 * to process each path individually
 		 */
 		if (S_ISSPARSEDIR(ce->ce_mode)) {
-			ensure_full_index(the_repository->index);
+			const char *fmt = "update-index:modified sparse dir '%s'";
+			ensure_full_index_with_reason(the_repository->index,
+						      fmt, ce->name);
 			goto redo;
 		}
 
diff --git a/merge-ort.c b/merge-ort.c
index 46e78c3ffa68e6..9c2de1cb97a2be 100644
--- a/merge-ort.c
+++ b/merge-ort.c
@@ -4534,7 +4534,8 @@ static int record_conflicted_index_entries(struct merge_options *opt)
 	 */
 	strmap_for_each_entry(&opt->priv->conflicted, &iter, e) {
 		if (!path_in_sparse_checkout(e->key, index)) {
-			ensure_full_index(index);
+			const char *fmt = "merge-ort: path outside sparse checkout (%s)";
+			ensure_full_index_with_reason(index, fmt, e->key);
 			break;
 		}
 	}
diff --git a/read-cache.c b/read-cache.c
index 0b9cd77e1c09f0..647d5423b1ec24 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -555,7 +555,9 @@ static int index_name_stage_pos(struct index_state *istate,
 		if (S_ISSPARSEDIR(ce->ce_mode) &&
 		    ce_namelen(ce) < namelen &&
 		    !strncmp(name, ce->name, ce_namelen(ce))) {
-			ensure_full_index(istate);
+			const char *fmt = "searching for '%s' and found parent dir '%s'";
+			ensure_full_index_with_reason(istate, fmt,
+						      name, ce->name);
 			return index_name_stage_pos(istate, name, namelen, stage, search_mode);
 		}
 	}
diff --git a/sparse-index.c b/sparse-index.c
index cfe3d21c8b44fa..32865a740ab345 100644
--- a/sparse-index.c
+++ b/sparse-index.c
@@ -758,7 +758,9 @@ void expand_to_path(struct index_state *istate,
 			 * in the index, perhaps it exists within this
 			 * sparse-directory.  Expand accordingly.
 			 */
-			ensure_full_index(istate);
+			const char *fmt = "found index entry for '%s'";
+			ensure_full_index_with_reason(istate, fmt,
+						      path_mutable.buf);
 			break;
 		}
 
diff --git a/t/t1092-sparse-checkout-compatibility.sh b/t/t1092-sparse-checkout-compatibility.sh
index 683158d56c5519..894ece6fe4e8ca 100755
--- a/t/t1092-sparse-checkout-compatibility.sh
+++ b/t/t1092-sparse-checkout-compatibility.sh
@@ -2505,7 +2505,15 @@ test_expect_success 'ensure_full_index_with_reason' '
 
 	GIT_TRACE2_EVENT="$(pwd)/ls-files-trace" \
 		git -C sparse-index ls-files --no-sparse HEAD &&
-	test_trace2_data "sparse-index" "expansion-reason" "ls-files" <ls-files-trace
+	test_trace2_data "sparse-index" "expansion-reason" "ls-files" <ls-files-trace &&
+
+	mkdir -p sparse-index/folder2 &&
+	echo >sparse-index/folder2/a &&
+	GIT_TRACE2_EVENT="$(pwd)/status-trace" \
+		git -C sparse-index status &&
+	test_trace2_data "sparse-index" "skip-worktree sparsedir" "folder2/" <status-trace &&
+	test_trace2_data "sparse-index" "expansion-reason" \
+		"failed to clear skip-worktree while sparse" <status-trace
 '
 
 test_done
diff --git a/unpack-trees.c b/unpack-trees.c
index 3e4269b6a70e37..14f35d60839b9d 100644
--- a/unpack-trees.c
+++ b/unpack-trees.c
@@ -1891,8 +1891,10 @@ static void update_sparsity_for_prefix(const char *prefix,
 	 *   the 'ensure_full_index(...)' below.
 	 */
 	if (!path_in_cone_mode_sparse_checkout(ce_prefix.buf, istate) &&
-	    index_name_pos(istate, ce_prefix.buf, ce_prefix.len) >= 0)
-		ensure_full_index(istate);
+	    index_name_pos(istate, ce_prefix.buf, ce_prefix.len) >= 0) {
+		const char *fmt = "could not find '%s' in index";
+		ensure_full_index_with_reason(istate, fmt, ce_prefix.buf);
+	}
 
 	strbuf_release(&ce_prefix);
 }

From e6220cf483887ef8dd66c0577de0be82d1e98de0 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Mon, 30 Sep 2024 17:05:06 -0400
Subject: [PATCH 182/207] sparse-index: add macro for unaudited expansions

For safety, areas of code that iterate over the cache entries in the
index were guarded with ensure_full_index() and labeled with a comment.
Replace these with a macro that calls ensure_full_index_with_reason()
using the file name and line number of the caller to help identify the situation that
is causing the index expansion.
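
A hypothetical call site then logs a reason derived from its location:

    /*
     * Expands to ensure_full_index_with_reason(istate,
     * "unaudited call (%s.%d)", __FILE__, __LINE__), so the trace2
     * reason reads something like "unaudited call (merge-recursive.c.543)".
     */
    ensure_full_index_unaudited(istate);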

Signed-off-by: Derrick Stolee <stolee@gmail.com>
---
 builtin/commit.c            | 4 ++--
 builtin/difftool.c          | 2 +-
 builtin/fsck.c              | 2 +-
 builtin/merge-index.c       | 4 ++--
 builtin/stash.c             | 2 +-
 builtin/submodule--helper.c | 2 +-
 entry.c                     | 2 +-
 merge-recursive.c           | 2 +-
 read-cache.c                | 4 ++--
 resolve-undo.c              | 2 +-
 revision.c                  | 2 +-
 sparse-index.h              | 6 ++++++
 12 files changed, 20 insertions(+), 14 deletions(-)

diff --git a/builtin/commit.c b/builtin/commit.c
index da296192b46f95..5ca91dcecb7ac4 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -385,7 +385,7 @@ static int list_paths(struct string_list *list, const char *with_tree,
 	}
 
 	/* TODO: audit for interaction with sparse-index. */
-	ensure_full_index(the_repository->index);
+	ensure_full_index_unaudited(the_repository->index);
 	for (i = 0; i < the_repository->index->cache_nr; i++) {
 		const struct cache_entry *ce = the_repository->index->cache[i];
 		struct string_list_item *item;
@@ -1133,7 +1133,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
 			int i, ita_nr = 0;
 
 			/* TODO: audit for interaction with sparse-index. */
-			ensure_full_index(the_repository->index);
+			ensure_full_index_unaudited(the_repository->index);
 			for (i = 0; i < the_repository->index->cache_nr; i++)
 				if (ce_intent_to_add(the_repository->index->cache[i]))
 					ita_nr++;
diff --git a/builtin/difftool.c b/builtin/difftool.c
index fbd7537b1be769..fc5811c43eb57e 100644
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -592,7 +592,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
 	ret = run_command(&cmd);
 
 	/* TODO: audit for interaction with sparse-index. */
-	ensure_full_index(&wtindex);
+	ensure_full_index_unaudited(&wtindex);
 
 	/*
 	 * If the diff includes working copy files and those
diff --git a/builtin/fsck.c b/builtin/fsck.c
index 0196c54eb68ee5..e86176f6a75c1b 100644
--- a/builtin/fsck.c
+++ b/builtin/fsck.c
@@ -821,7 +821,7 @@ static void fsck_index(struct index_state *istate, const char *index_path,
 	unsigned int i;
 
 	/* TODO: audit for interaction with sparse-index. */
-	ensure_full_index(istate);
+	ensure_full_index_unaudited(istate);
 	for (i = 0; i < istate->cache_nr; i++) {
 		unsigned int mode;
 		struct blob *blob;
diff --git a/builtin/merge-index.c b/builtin/merge-index.c
index 342699edb77c97..6a1d7966626692 100644
--- a/builtin/merge-index.c
+++ b/builtin/merge-index.c
@@ -66,7 +66,7 @@ static void merge_all(void)
 {
 	int i;
 	/* TODO: audit for interaction with sparse-index. */
-	ensure_full_index(the_repository->index);
+	ensure_full_index_unaudited(the_repository->index);
 	for (i = 0; i < the_repository->index->cache_nr; i++) {
 		const struct cache_entry *ce = the_repository->index->cache[i];
 		if (!ce_stage(ce))
@@ -93,7 +93,7 @@ int cmd_merge_index(int argc,
 	repo_read_index(the_repository);
 
 	/* TODO: audit for interaction with sparse-index. */
-	ensure_full_index(the_repository->index);
+	ensure_full_index_unaudited(the_repository->index);
 
 	i = 1;
 	if (!strcmp(argv[i], "-o")) {
diff --git a/builtin/stash.c b/builtin/stash.c
index dbaa999cf171a7..2e2e40823b77f9 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -1560,7 +1560,7 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
 		char *ps_matched = xcalloc(ps->nr, 1);
 
 		/* TODO: audit for interaction with sparse-index. */
-		ensure_full_index(the_repository->index);
+		ensure_full_index_unaudited(the_repository->index);
 		for (size_t i = 0; i < the_repository->index->cache_nr; i++)
 			ce_path_match(the_repository->index, the_repository->index->cache[i], ps,
 				      ps_matched);
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index f9b970f8a64a54..5a708639452993 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -3400,7 +3400,7 @@ static void die_on_index_match(const char *path, int force)
 		char *ps_matched = xcalloc(ps.nr, 1);
 
 		/* TODO: audit for interaction with sparse-index. */
-		ensure_full_index(the_repository->index);
+		ensure_full_index_unaudited(the_repository->index);
 
 		/*
 		 * Since there is only one pathspec, we just need to
diff --git a/entry.c b/entry.c
index 358379a94cf6ec..ac5eff43e8493f 100644
--- a/entry.c
+++ b/entry.c
@@ -453,7 +453,7 @@ static void mark_colliding_entries(const struct checkout *state,
 	ce->ce_flags |= CE_MATCHED;
 
 	/* TODO: audit for interaction with sparse-index. */
-	ensure_full_index(state->istate);
+	ensure_full_index_unaudited(state->istate);
 	for (size_t i = 0; i < state->istate->cache_nr; i++) {
 		struct cache_entry *dup = state->istate->cache[i];
 
diff --git a/merge-recursive.c b/merge-recursive.c
index ed87ce52b95cd1..cc10b1c6f86af8 100644
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -540,7 +540,7 @@ static struct string_list *get_unmerged(struct index_state *istate)
 	string_list_init_dup(unmerged);
 
 	/* TODO: audit for interaction with sparse-index. */
-	ensure_full_index(istate);
+	ensure_full_index_unaudited(istate);
 	for (i = 0; i < istate->cache_nr; i++) {
 		struct string_list_item *item;
 		struct stage_data *e;
diff --git a/read-cache.c b/read-cache.c
index 647d5423b1ec24..70cbfc55cd5bf2 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -2590,7 +2590,7 @@ int repo_index_has_changes(struct repository *repo,
 		return opt.flags.has_changes != 0;
 	} else {
 		/* TODO: audit for interaction with sparse-index. */
-		ensure_full_index(istate);
+		ensure_full_index_unaudited(istate);
 		for (i = 0; sb && i < istate->cache_nr; i++) {
 			if (i)
 				strbuf_addch(sb, ' ');
@@ -3872,7 +3872,7 @@ void overlay_tree_on_index(struct index_state *istate,
 
 	/* Hoist the unmerged entries up to stage #3 to make room */
 	/* TODO: audit for interaction with sparse-index. */
-	ensure_full_index(istate);
+	ensure_full_index_unaudited(istate);
 	for (i = 0; i < istate->cache_nr; i++) {
 		struct cache_entry *ce = istate->cache[i];
 		if (!ce_stage(ce))
diff --git a/resolve-undo.c b/resolve-undo.c
index b5a9dfb4acc511..6f148a89e396e3 100644
--- a/resolve-undo.c
+++ b/resolve-undo.c
@@ -161,7 +161,7 @@ void unmerge_index(struct index_state *istate, const struct pathspec *pathspec,
 		return;
 
 	/* TODO: audit for interaction with sparse-index. */
-	ensure_full_index(istate);
+	ensure_full_index_unaudited(istate);
 
 	for_each_string_list_item(item, istate->resolve_undo) {
 		const char *path = item->string;
diff --git a/revision.c b/revision.c
index 32d949b36feb80..7a2e32eb27aa90 100644
--- a/revision.c
+++ b/revision.c
@@ -1850,7 +1850,7 @@ static void do_add_index_objects_to_pending(struct rev_info *revs,
 	int i;
 
 	/* TODO: audit for interaction with sparse-index. */
-	ensure_full_index(istate);
+	ensure_full_index_unaudited(istate);
 	for (i = 0; i < istate->cache_nr; i++) {
 		struct cache_entry *ce = istate->cache[i];
 		struct blob *blob;
diff --git a/sparse-index.h b/sparse-index.h
index 09a8701476c7ff..15180b02ea6599 100644
--- a/sparse-index.h
+++ b/sparse-index.h
@@ -1,6 +1,8 @@
 #ifndef SPARSE_INDEX_H__
 #define SPARSE_INDEX_H__
 
+#include "strbuf.h"
+
 /*
  * If performing an operation where the index is supposed to expand to a
  * full index, then disable the advice message by setting this global to
@@ -54,4 +56,8 @@ void ensure_full_index_with_reason(struct index_state *istate,
 				   const char *fmt,
 				   ...);
 
+#define ensure_full_index_unaudited(i) \
+	ensure_full_index_with_reason((i), \
+		"unaudited call (%s.%d)", __FILE__, __LINE__);
+
 #endif

From 1c7e7871194923d82616a39e1c83801cefc3bec6 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Mon, 30 Sep 2024 17:07:14 -0400
Subject: [PATCH 183/207] Docs: update sparse index plan with logging

The recent changes update the callers of ensure_full_index() to call
variants that log extra information. This should help developers assist
users who are hitting the sparse index expansion message.

Signed-off-by: Derrick Stolee <stolee@gmail.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 Documentation/technical/sparse-index.txt | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/Documentation/technical/sparse-index.txt b/Documentation/technical/sparse-index.txt
index 3b24c1a219f811..c466dbddc930a9 100644
--- a/Documentation/technical/sparse-index.txt
+++ b/Documentation/technical/sparse-index.txt
@@ -206,3 +206,10 @@ Here are some commands that might be useful to update:
 * `git am`
 * `git clean`
 * `git stash`
+
+In order to help identify the cases where remaining index expansion is
+occurring in user machines, calls to `ensure_full_index()` have been
+replaced with `ensure_full_index_with_reason()` or with
+`ensure_full_index_unaudited()`. These versions add tracing that should
+help identify the reason for the index expansion without needing full
+access to someone's repository.

From b210e77a127e5b00794f522b2be7b62e6ee45345 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Mon, 30 Sep 2024 13:20:41 -0400
Subject: [PATCH 184/207] stash: use -f in checkout-index child process

Signed-off-by: Derrick Stolee <stolee@gmail.com>
---
 builtin/stash.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/builtin/stash.c b/builtin/stash.c
index 2e2e40823b77f9..79fec5612edb6c 100644
--- a/builtin/stash.c
+++ b/builtin/stash.c
@@ -403,7 +403,7 @@ static int restore_untracked(struct object_id *u_tree)
 
 	child_process_init(&cp);
 	cp.git_cmd = 1;
-	strvec_pushl(&cp.args, "checkout-index", "--all", NULL);
+	strvec_pushl(&cp.args, "checkout-index", "--all", "-f", NULL);
 	strvec_pushf(&cp.env, "GIT_INDEX_FILE=%s",
 		     stash_index_path.buf);
 

From c69d14fa9bab98692d9d39e2e77641480f2cf539 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Mon, 30 Sep 2024 17:14:58 -0400
Subject: [PATCH 185/207] sparse-index: log failure to clear skip-worktree

The clear_skip_worktree_from_present_files_sparse() method attempts to
clear the skip worktree bit from cache entries in the index depending on
when they exist in the workdir. When this comes across a sparse
directory that actually exists in the workdir, then this method fails
and signals that the index needs expansion.

The index expansion already logs a reason, but that reason does not
include the path that caused the failure.

Add logging that records the offending sparse directory so the situation
is fully clear from the trace.

Signed-off-by: Derrick Stolee <stolee@gmail.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 sparse-index.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/sparse-index.c b/sparse-index.c
index 32865a740ab345..c264766dd632d9 100644
--- a/sparse-index.c
+++ b/sparse-index.c
@@ -639,6 +639,8 @@ static int clear_skip_worktree_from_present_files_sparse(struct index_state *ist
 			if (path_found(ce->name, &data)) {
 				if (S_ISSPARSEDIR(ce->ce_mode)) {
 					to_restart = 1;
+					trace2_data_string("sparse-index", istate->repo,
+							   "skip-worktree sparsedir", ce->name);
 					break;
 				}
 				ce->ce_flags &= ~CE_SKIP_WORKTREE;

From 9d5c423b583fec3d9e9cd0d0379f38bd8ff39166 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Fri, 27 Sep 2024 13:46:35 -0400
Subject: [PATCH 186/207] sparse-index: do not copy hashtables during expansion

Signed-off-by: Derrick Stolee <stolee@gmail.com>
---
 sparse-index.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/sparse-index.c b/sparse-index.c
index c264766dd632d9..485b3972836d7d 100644
--- a/sparse-index.c
+++ b/sparse-index.c
@@ -375,6 +375,10 @@ void expand_index(struct index_state *istate, struct pattern_list *pl)
 	full = xcalloc(1, sizeof(struct index_state));
 	memcpy(full, istate, sizeof(struct index_state));
 
+	full->name_hash_initialized = 0;
+	memset(&full->name_hash, 0, sizeof(full->name_hash));
+	memset(&full->dir_hash, 0, sizeof(full->dir_hash));
+
 	/*
 	 * This slightly-misnamed 'full' index might still be sparse if we
 	 * are only modifying the list of sparse directories. This hinges
@@ -433,9 +437,15 @@ void expand_index(struct index_state *istate, struct pattern_list *pl)
 	}
 
 	/* Copy back into original index. */
+	if (istate->name_hash_initialized) {
+		hashmap_clear(&istate->name_hash);
+		hashmap_clear(&istate->dir_hash);
+	}
+
 	istate->name_hash_initialized = full->name_hash_initialized;
 	memcpy(&istate->name_hash, &full->name_hash, sizeof(full->name_hash));
 	memcpy(&istate->dir_hash, &full->dir_hash, sizeof(full->dir_hash));
+
 	istate->sparse_index = pl ? INDEX_PARTIALLY_SPARSE : INDEX_EXPANDED;
 	free(istate->cache);
 	istate->cache = full->cache;

From 6548dbdf8dc36070be81ec1c6d7780278085f39a Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Tue, 22 Oct 2024 00:11:50 -0400
Subject: [PATCH 187/207] t5616: mark tests as bogus with --path-walk

These two tests in t5616-partial-clone.sh are already broken, and existing
comments acknowledge that. Those comments focus on the
GIT_TEST_FULL_NAME_HASH variable, but they apply equally to
GIT_TEST_PACK_PATH_WALK, so disable that variable here as well to avoid
the same issues.

Signed-off-by: Derrick Stolee <stolee@gmail.com>
---
 t/t5616-partial-clone.sh | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/t/t5616-partial-clone.sh b/t/t5616-partial-clone.sh
index 20d946cc16ac22..2935ab0af9489c 100755
--- a/t/t5616-partial-clone.sh
+++ b/t/t5616-partial-clone.sh
@@ -526,6 +526,7 @@ test_expect_success 'fetch lazy-fetches only to resolve deltas' '
 	# used as delta bases!
 	GIT_TRACE_PACKET="$(pwd)/trace" \
 	GIT_TEST_FULL_NAME_HASH=0 \
+	GIT_TEST_PACK_PATH_WALK=0 \
 		git -C client \
 		fetch "file://$(pwd)/server" main &&
 
@@ -556,6 +557,7 @@ test_expect_success 'fetch lazy-fetches only to resolve deltas, protocol v2' '
 	# used as delta bases!
 	GIT_TRACE_PACKET="$(pwd)/trace" \
 	GIT_TEST_FULL_NAME_HASH=0 \
+	GIT_TEST_PACK_PATH_WALK=0 \
 		git -C client \
 		fetch "file://$(pwd)/server" main &&
 

From 93178ea76fcb13566c3598753e83d8f99658e36f Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Mon, 21 Oct 2024 21:40:50 -0400
Subject: [PATCH 188/207] path-walk: add new 'edge_aggressive' option

In preparation for allowing both the --shallow and --path-walk options
in the 'git pack-objects' builtin, create a new 'edge_aggressive' option
in the path-walk API. This option will help walk the boundary more
thoroughly and help avoid sending extra objects during fetches and
pushes.

The only use of the 'edge_hint_aggressive' option in the revision API is
within mark_edges_uninteresting(), which is usually called between
prepare_revision_walk() and visiting commits with get_revision(). In
prepare_revision_walk(), the UNINTERESTING commits
are walked until a boundary is found.

We didn't use this in the past because we would mark objects
UNINTERESTING after doing the initial commit walk to the boundary. While
we should be marking these objects as UNINTERESTING, we shouldn't _emit_
them all via the path-walk algorithm or else our delta calculations will
get really slow.

Based on these observations, the way we were handling the UNINTERESTING
flag in walk_objects_by_path() was overly complicated and buggy. A lot
of it can be removed and simplified to work with this new approach.

It also means that we will see the UNINTERESTING boundaries of paths
when doing a default path-walk call, changing some existing test cases.
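
A caller opts in through the new field; a minimal sketch (assuming the
PATH_WALK_INFO_INIT initializer from path-walk.h and an already prepared
'struct rev_info *revs'):

    struct path_walk_info info = PATH_WALK_INFO_INIT;

    info.revs = revs;
    info.edge_aggressive = 1;  /* also mark trees/blobs of UNINTERESTING tips */
    if (walk_objects_by_path(&info))
        die("path walk failed");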

Signed-off-by: Derrick Stolee <stolee@gmail.com>
---
 Documentation/technical/api-path-walk.txt |  8 +++
 path-walk.c                               | 60 +++++++++++++----------
 path-walk.h                               |  7 +++
 t/t6601-path-walk.sh                      | 16 +++---
 4 files changed, 58 insertions(+), 33 deletions(-)

diff --git a/Documentation/technical/api-path-walk.txt b/Documentation/technical/api-path-walk.txt
index 83bfe3d665e9fb..85d8e262401486 100644
--- a/Documentation/technical/api-path-walk.txt
+++ b/Documentation/technical/api-path-walk.txt
@@ -65,6 +65,14 @@ better off using the revision walk API instead.
 	the revision walk so that the walk emits commits marked with the
 	`UNINTERESTING` flag.
 
+`edge_aggressive`::
+	For performance reasons, usually only the boundary commits are
+	explored to find UNINTERESTING objects. However, in the case of
+	shallow clones it can be helpful to mark all trees and blobs
+	reachable from UNINTERESTING tip commits as UNINTERESTING. This
+	matches the behavior of `--objects-edge-aggressive` in the
+	revision API.
+
 `pl`::
 	This pattern list pointer allows focusing the path-walk search to
 	a set of patterns, only emitting paths that match the given
diff --git a/path-walk.c b/path-walk.c
index c0be95922c1615..6451e16562971f 100644
--- a/path-walk.c
+++ b/path-walk.c
@@ -18,6 +18,7 @@
 #include "trace2.h"
 #include "tree.h"
 #include "tree-walk.h"
+#include "list-objects.h"
 
 struct type_and_oid_list
 {
@@ -233,6 +234,26 @@ static void clear_strmap(struct strmap *map)
 	strmap_init(map);
 }
 
+static struct repository *edge_repo;
+static struct type_and_oid_list *edge_tree_list;
+
+static void show_edge(struct commit *commit)
+{
+	struct tree *t = repo_get_commit_tree(edge_repo, commit);
+
+	if (!t)
+		return;
+
+	if (commit->object.flags & UNINTERESTING)
+		t->object.flags |= UNINTERESTING;
+
+	if (t->object.flags & SEEN)
+		return;
+	t->object.flags |= SEEN;
+
+	oid_array_append(&edge_tree_list->oids, &t->object.oid);
+}
+
 /**
  * Given the configuration of 'info', walk the commits based on 'info->revs' and
  * call 'info->path_fn' on each discovered path.
@@ -242,7 +263,7 @@ static void clear_strmap(struct strmap *map)
 int walk_objects_by_path(struct path_walk_info *info)
 {
 	const char *root_path = "";
-	int ret = 0, has_uninteresting = 0;
+	int ret = 0;
 	size_t commits_nr = 0, paths_nr = 0;
 	struct commit *c;
 	struct type_and_oid_list *root_tree_list;
@@ -254,7 +275,6 @@ int walk_objects_by_path(struct path_walk_info *info)
 		.path_stack = STRING_LIST_INIT_DUP,
 		.paths_to_lists = STRMAP_INIT
 	};
-	struct oidset root_tree_set = OIDSET_INIT;
 
 	trace2_region_enter("path-walk", "commit-walk", info->revs->repo);
 
@@ -280,6 +300,18 @@ int walk_objects_by_path(struct path_walk_info *info)
 	if (prepare_revision_walk(info->revs))
 		die(_("failed to setup revision walk"));
 
+	/*
+	 * Do an initial walk of tip commits in info->revs->commits and
+	 * info->revs->cmdline.rev to match the standard edge-walk behavior.
+	 *
+	 * This is particularly important when 'edge_aggressive' is set.
+	 */
+	info->revs->edge_hint_aggressive = info->edge_aggressive;
+
+	edge_repo = info->revs->repo;
+	edge_tree_list = root_tree_list;
+	mark_edges_uninteresting(info->revs, show_edge, info->prune_all_uninteresting);
+
 	info->revs->blob_objects = info->revs->tree_objects = 0;
 
 	if (info->tags) {
@@ -366,17 +398,10 @@ int walk_objects_by_path(struct path_walk_info *info)
 			if (t->object.flags & SEEN)
 				continue;
 			t->object.flags |= SEEN;
-
-			if (!oidset_insert(&root_tree_set, oid))
-				oid_array_append(&root_tree_list->oids, oid);
+			oid_array_append(&root_tree_list->oids, oid);
 		} else {
 			warning("could not find tree %s", oid_to_hex(oid));
 		}
-
-		if (t && (c->object.flags & UNINTERESTING)) {
-			t->object.flags |= UNINTERESTING;
-			has_uninteresting = 1;
-		}
 	}
 
 	trace2_data_intmax("path-walk", ctx.repo, "commits", commits_nr);
@@ -389,21 +414,6 @@ int walk_objects_by_path(struct path_walk_info *info)
 	oid_array_clear(&commit_list->oids);
 	free(commit_list);
 
-	/*
-	 * Before performing a DFS of our paths and emitting them as interesting,
-	 * do a full walk of the trees to distribute the UNINTERESTING bit. Use
-	 * the sparse algorithm if prune_all_uninteresting was set.
-	 */
-	if (has_uninteresting) {
-		trace2_region_enter("path-walk", "uninteresting-walk", info->revs->repo);
-		if (info->prune_all_uninteresting)
-			mark_trees_uninteresting_sparse(ctx.repo, &root_tree_set);
-		else
-			mark_trees_uninteresting_dense(ctx.repo, &root_tree_set);
-		trace2_region_leave("path-walk", "uninteresting-walk", info->revs->repo);
-	}
-	oidset_clear(&root_tree_set);
-
 	string_list_append(&ctx.path_stack, root_path);
 
 	trace2_region_enter("path-walk", "path-walk", info->revs->repo);
diff --git a/path-walk.h b/path-walk.h
index 090cda3b5cf8f4..d19048d0d312e5 100644
--- a/path-walk.h
+++ b/path-walk.h
@@ -48,6 +48,13 @@ struct path_walk_info {
 	 */
 	int prune_all_uninteresting;
 
+	/**
+	 * When 'edge_aggressive' is set, then the revision walk will use
+	 * the '--objects-edge-aggressive' option to mark even more objects
+	 * as uninteresting.
+	 */
+	int edge_aggressive;
+
 	/**
 	 * Specify a sparse-checkout definition to match our paths to. Do not
 	 * walk outside of this sparse definition. If the patterns are in
diff --git a/t/t6601-path-walk.sh b/t/t6601-path-walk.sh
index 312bf3c19c176a..d67a077d37b99d 100755
--- a/t/t6601-path-walk.sh
+++ b/t/t6601-path-walk.sh
@@ -181,13 +181,13 @@ test_expect_success 'topic, not base' '
 	COMMIT::$(git rev-parse topic)
 	commits:1
 	TREE::$(git rev-parse topic^{tree})
-	TREE:left/:$(git rev-parse topic:left)
+	TREE:left/:$(git rev-parse base~1:left):UNINTERESTING
 	TREE:right/:$(git rev-parse topic:right)
 	trees:3
-	BLOB:a:$(git rev-parse topic:a)
-	BLOB:left/b:$(git rev-parse topic:left/b)
+	BLOB:a:$(git rev-parse base~1:a):UNINTERESTING
+	BLOB:left/b:$(git rev-parse base~1:left/b):UNINTERESTING
 	BLOB:right/c:$(git rev-parse topic:right/c)
-	BLOB:right/d:$(git rev-parse topic:right/d)
+	BLOB:right/d:$(git rev-parse base~1:right/d):UNINTERESTING
 	blobs:4
 	tags:0
 	EOF
@@ -205,10 +205,10 @@ test_expect_success 'topic, not base, only blobs' '
 	cat >expect <<-EOF &&
 	commits:0
 	trees:0
-	BLOB:a:$(git rev-parse topic:a)
-	BLOB:left/b:$(git rev-parse topic:left/b)
+	BLOB:a:$(git rev-parse base~1:a):UNINTERESTING
+	BLOB:left/b:$(git rev-parse base~1:left/b):UNINTERESTING
 	BLOB:right/c:$(git rev-parse topic:right/c)
-	BLOB:right/d:$(git rev-parse topic:right/d)
+	BLOB:right/d:$(git rev-parse base~1:right/d):UNINTERESTING
 	blobs:4
 	tags:0
 	EOF
@@ -246,7 +246,7 @@ test_expect_success 'topic, not base, only trees' '
 	cat >expect <<-EOF &&
 	commits:0
 	TREE::$(git rev-parse topic^{tree})
-	TREE:left/:$(git rev-parse topic:left)
+	TREE:left/:$(git rev-parse base~1:left):UNINTERESTING
 	TREE:right/:$(git rev-parse topic:right)
 	trees:3
 	blobs:0

From 05c4c1d165f1d67cc494403d794097526ab538c9 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Wed, 18 Dec 2024 13:13:40 +0100
Subject: [PATCH 189/207] sub-process: avoid leaking `cmd`

In some instances (particularly the `read_object` hook), the `cmd`
attribute is set to an `strdup()`ed value. This value needs to be
released in the end!

Since other users assign a non-`strdup()`ed value, be careful to add
_another_ attribute (called `to_free`) that can hold a reference to such
a string that needs to be released once the sub process is done.
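
To illustrate the intended ownership rule in isolation, here is a
stand-alone sketch; every name in it is made up for this example, and
only the `cmd`/`to_free` pairing mirrors the real struct:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * `cmd` may point at borrowed memory (e.g. an argv element), while
     * `to_free` owns a copy that must be released when the entry stops.
     */
    struct entry {
            const char *cmd;
            char *to_free;
    };

    static void entry_start_borrowed(struct entry *e, const char *argv0)
    {
            e->cmd = argv0;     /* borrowed from the caller */
            e->to_free = NULL;  /* nothing to release later */
    }

    static void entry_start_owned(struct entry *e, const char *quoted)
    {
            e->cmd = e->to_free = strdup(quoted); /* we own this copy */
    }

    static void entry_stop(struct entry *e)
    {
            free(e->to_free);   /* no-op for the borrowed case */
            e->to_free = NULL;
            e->cmd = NULL;
    }

    int main(void)
    {
            struct entry a, b;

            entry_start_borrowed(&a, "cat-file --batch");
            entry_start_owned(&b, "read-object --stdin");
            printf("%s / %s\n", a.cmd, b.cmd);
            entry_stop(&a);
            entry_stop(&b);
            return 0;
    }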

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 sub-process.c | 9 ++++++++-
 sub-process.h | 6 ++++++
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/sub-process.c b/sub-process.c
index 29a65f8aebbd9d..86a0d3084b75d9 100644
--- a/sub-process.c
+++ b/sub-process.c
@@ -63,6 +63,8 @@ void subprocess_stop(struct hashmap *hashmap, struct subprocess_entry *entry)
 	finish_command(&entry->process);
 
 	hashmap_remove(hashmap, &entry->ent, NULL);
+	FREE_AND_NULL(entry->to_free);
+	entry->cmd = NULL;
 }
 
 static void subprocess_exit_handler(struct child_process *process)
@@ -100,6 +102,7 @@ int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, co
 	process->trace2_child_class = "subprocess";
 
 	entry->cmd = process->args.v[0];
+	entry->to_free = NULL;
 
 	err = start_command(process);
 	if (err) {
@@ -145,11 +148,13 @@ int subprocess_start_strvec(struct hashmap *hashmap,
 	process->trace2_child_class = "subprocess";
 
 	sq_quote_argv_pretty(&quoted, argv->v);
-	entry->cmd = strbuf_detach(&quoted, NULL);
+	entry->cmd = entry->to_free = strbuf_detach(&quoted, NULL);
 
 	err = start_command(process);
 	if (err) {
 		error("cannot fork to run subprocess '%s'", entry->cmd);
+		FREE_AND_NULL(entry->to_free);
+		entry->cmd = NULL;
 		return err;
 	}
 
@@ -158,6 +163,8 @@ int subprocess_start_strvec(struct hashmap *hashmap,
 	err = startfn(entry);
 	if (err) {
 		error("initialization for subprocess '%s' failed", entry->cmd);
+		FREE_AND_NULL(entry->to_free);
+		entry->cmd = NULL;
 		subprocess_stop(hashmap, entry);
 		return err;
 	}
diff --git a/sub-process.h b/sub-process.h
index 73cc536646df79..926d43ae2d2054 100644
--- a/sub-process.h
+++ b/sub-process.h
@@ -25,6 +25,12 @@
 struct subprocess_entry {
 	struct hashmap_entry ent;
 	const char *cmd;
+	/**
+	 * In case `cmd` is a `strdup()`ed value that needs to be released,
+	 * you can assign the pointer to `to_free` so that `subprocess_stop()`
+	 * will release it.
+	 */
+	char *to_free;
 	struct child_process process;
 };
 

From 9f0c7c7a34aa78f8090b61ea6c3c72ebf05307b8 Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Mon, 21 Oct 2024 22:59:45 -0400
Subject: [PATCH 190/207] pack-objects: allow --shallow and --path-walk

There does not appear to be anything particularly incompatible about the
--shallow and --path-walk options of 'git pack-objects'. If shallow
commits are to be handled differently, then that is the job of the
revision walk that defines the commit set and decides which commits are
interesting or uninteresting.

However, before the previous change, a trivial removal of the warning
would cause a failure in t5500-fetch-pack.sh when
GIT_TEST_PACK_PATH_WALK is enabled. The shallow fetch would provide more
objects than we desired, due to some incorrect behavior of the path-walk
API, especially around walking uninteresting objects.

To also cover the symmetrical case of pushing from a shallow clone, add
a new test to t5538-push-shallow.sh that confirms the correct behavior
of pushing only the new object. This works to validate both the
--path-walk and --no-path-walk case when toggling the
GIT_TEST_PACK_PATH_WALK environment variable. This test would have
failed in the --path-walk case if we created it before the previous
change.

Signed-off-by: Derrick Stolee <stolee@gmail.com>
---
 builtin/pack-objects.c  |  7 ++-----
 t/t5538-push-shallow.sh | 13 +++++++++++++
 2 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 4eeaf788b117f8..8c5e39d73f886d 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -203,6 +203,7 @@ static int keep_unreachable, unpack_unreachable, include_tag;
 static timestamp_t unpack_unreachable_expiration;
 static int pack_loose_unreachable;
 static int cruft;
+static int shallow = 0;
 static timestamp_t cruft_expiration;
 static int local;
 static int have_non_local_packs;
@@ -4429,6 +4430,7 @@ static void get_object_list_path_walk(struct rev_info *revs)
 	 * base objects.
 	 */
 	info.prune_all_uninteresting = sparse;
+	info.edge_aggressive = shallow;
 
 	if (walk_objects_by_path(&info))
 		die(_("failed to pack objects via path-walk"));
@@ -4630,7 +4632,6 @@ int cmd_pack_objects(int argc,
 		     struct repository *repo UNUSED)
 {
 	int use_internal_rev_list = 0;
-	int shallow = 0;
 	int all_progress_implied = 0;
 	struct strvec rp = STRVEC_INIT;
 	int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
@@ -4818,10 +4819,6 @@ int cmd_pack_objects(int argc,
 		warning(_("cannot use delta islands with --path-walk"));
 		path_walk = 0;
 	}
-	if (path_walk && shallow) {
-		warning(_("cannot use --shallow with --path-walk"));
-		path_walk = 0;
-	}
 	if (path_walk) {
 		strvec_push(&rp, "--boundary");
 		 /*
diff --git a/t/t5538-push-shallow.sh b/t/t5538-push-shallow.sh
index e91fcc173e8116..7e34b2b74caed5 100755
--- a/t/t5538-push-shallow.sh
+++ b/t/t5538-push-shallow.sh
@@ -123,4 +123,17 @@ EOF
 	git cat-file blob $(echo 1|git hash-object --stdin) >/dev/null
 	)
 '
+
+test_expect_success 'push new commit from shallow clone has correct object count' '
+	git init origin &&
+	test_commit -C origin a &&
+	test_commit -C origin b &&
+
+	git clone --depth=1 "file://$(pwd)/origin" client &&
+	git -C client checkout -b topic &&
+	git -C client commit --allow-empty -m "empty" &&
+	GIT_PROGRESS_DELAY=0 git -C client push --progress origin topic 2>err &&
+	test_grep "Enumerating objects: 1, done." err
+'
+
 test_done

From ce632e94a99acbc566e5c1fb1262b7e8f6b1a7d3 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Wed, 18 Dec 2024 23:20:07 +0100
Subject: [PATCH 191/207] remote-curl: release filter options before re-setting
 them

This fixes a leak that is not detected by Git's test suite (but by
microsoft/git's).

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 remote-curl.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/remote-curl.c b/remote-curl.c
index b2898809bd9f6a..c2068eaf1ea180 100644
--- a/remote-curl.c
+++ b/remote-curl.c
@@ -212,6 +212,7 @@ static int set_option(const char *name, size_t namelen, const char *value)
 		options.refetch = 1;
 		return 0;
 	} else if (!strncmp(name, "filter", namelen)) {
+		free(options.filter);
 		options.filter = xstrdup(value);
 		return 0;
 	} else if (!strncmp(name, "object-format", namelen)) {

From 2cd3a3ff3c712b379ed4c8d01bdc04f58d6f50ae Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Tue, 22 Oct 2024 12:05:27 -0400
Subject: [PATCH 192/207] t5538: add test to confirm deltas in shallow pushes

It can be notoriously difficult to detect if delta bases are being
computed properly during 'git push'. Construct an example where it will
make a kilobyte worth of difference when a delta base is not found. We
can then use the progress indicators to distinguish between bytes and
KiB depending on whether the delta base is found and used.

Signed-off-by: Derrick Stolee <stolee@gmail.com>
---
 t/t5538-push-shallow.sh | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/t/t5538-push-shallow.sh b/t/t5538-push-shallow.sh
index 7e34b2b74caed5..11b85cca9e88c0 100755
--- a/t/t5538-push-shallow.sh
+++ b/t/t5538-push-shallow.sh
@@ -136,4 +136,25 @@ test_expect_success 'push new commit from shallow clone has correct object count
 	test_grep "Enumerating objects: 1, done." err
 '
 
+test_expect_success 'push new commit from shallow clone has good deltas' '
+	git init base &&
+	test_seq 1 999 >base/a &&
+	test_commit -C base initial &&
+	git -C base add a &&
+	git -C base commit -m "big a" &&
+
+	git clone --depth=1 "file://$(pwd)/base" deltas &&
+	git -C deltas checkout -b deltas &&
+	test_seq 1 1000 >deltas/a &&
+	git -C deltas commit -a -m "bigger a" &&
+	GIT_TRACE2_PERF="$(pwd)/trace.txt" \
+	GIT_PROGRESS_DELAY=0 git -C deltas push --progress origin deltas 2>err &&
+
+	test_grep "Enumerating objects: 5, done" err &&
+
+	# If the delta base is found, then this message uses "bytes".
+	# If the delta base is not found, then this message uses "KiB".
+	test_grep "Writing objects: .* bytes" err
+'
+
 test_done

From 2955dc8ecfa4c5eace39553e46679b996992e4a8 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Wed, 18 Dec 2024 23:22:02 +0100
Subject: [PATCH 193/207] transport: release object filter options

This fixes a leak that is not detected by Git's own test suite (but by
microsoft/git's, in the t9210-scalar.sh test).

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 transport-helper.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/transport-helper.c b/transport-helper.c
index 7513fd7eea05e4..5459010f44b54e 100644
--- a/transport-helper.c
+++ b/transport-helper.c
@@ -404,6 +404,7 @@ static int release_helper(struct transport *transport)
 	free(data->import_marks);
 	free(data->export_marks);
 	res = disconnect_helper(transport);
+	list_objects_filter_release(&data->transport_options.filter_options);
 	free(transport->data);
 	return res;
 }

From 7e6ed7f7bbc6fe84af5a46d653276d6a23534154 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Fri, 6 Dec 2024 19:50:12 +0100
Subject: [PATCH 194/207] mingw: special-case administrators even more

The check for dubious ownership has one particular quirk on Windows: if
running as an administrator, files owned by the Administrators _group_
are considered owned by the user.

The rationale for that is: When running in elevated mode, Git creates
files that aren't owned by the individual user but by the Administrators
group.

There is yet another quirk, though: The check I introduced to determine
whether the current user is an administrator uses the
`CheckTokenMembership()` function with the current process token. And
that check only succeeds when running in elevated mode!

Let's be a bit more lenient here and look harder whether the current
user is an administrator. We do this by looking for a so-called "linked
token". That token exists when administrators run in non-elevated mode,
and can be used to create a new process in elevated mode. And feeding
_that_ token to the `CheckTokenMembership()` function succeeds!
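
To illustrate the Windows mechanism outside of Git, here is a rough
stand-alone sketch (this is not the code added below; error handling is
trimmed and the check is limited to the Administrators group):

    #include <windows.h>
    #include <stdio.h>

    int main(void)
    {
            HANDLE token, linked = NULL;
            TOKEN_ELEVATION_TYPE type;
            TOKEN_LINKED_TOKEN tlt;
            DWORD size;
            BOOL is_admin = FALSE;
            PSID admins = NULL;
            SID_IDENTIFIER_AUTHORITY nt = SECURITY_NT_AUTHORITY;

            if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &token))
                    return 1;

            /* Non-elevated administrators carry a "linked" (elevated) token. */
            if (GetTokenInformation(token, TokenElevationType, &type,
                                    sizeof(type), &size) &&
                type == TokenElevationTypeLimited &&
                GetTokenInformation(token, TokenLinkedToken, &tlt,
                                    sizeof(tlt), &size))
                    linked = tlt.LinkedToken;

            AllocateAndInitializeSid(&nt, 2, SECURITY_BUILTIN_DOMAIN_RID,
                                     DOMAIN_ALIAS_RID_ADMINS,
                                     0, 0, 0, 0, 0, 0, &admins);

            /* Try the process token first, then fall back to the linked one. */
            if ((!CheckTokenMembership(NULL, admins, &is_admin) || !is_admin) &&
                linked)
                    CheckTokenMembership(linked, admins, &is_admin);

            printf("administrator: %s\n", is_admin ? "yes" : "no");

            FreeSid(admins);
            if (linked)
                    CloseHandle(linked);
            CloseHandle(token);
            return 0;
    }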

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 compat/mingw.c | 39 ++++++++++++++++++++++++++++-----------
 1 file changed, 28 insertions(+), 11 deletions(-)

diff --git a/compat/mingw.c b/compat/mingw.c
index 8470936af099b7..1e46fa2ae8f57b 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -3718,31 +3718,44 @@ static void setup_windows_environment(void)
 		has_symlinks = 0;
 }
 
-static PSID get_current_user_sid(void)
+static void get_current_user_sid(PSID *sid, HANDLE *linked_token)
 {
 	HANDLE token;
 	DWORD len = 0;
-	PSID result = NULL;
+	TOKEN_ELEVATION_TYPE elevationType;
+	DWORD size;
+
+	*sid = NULL;
+	*linked_token = NULL;
 
 	if (!OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &token))
-		return NULL;
+		return;
 
 	if (!GetTokenInformation(token, TokenUser, NULL, 0, &len)) {
 		TOKEN_USER *info = xmalloc((size_t)len);
 		if (GetTokenInformation(token, TokenUser, info, len, &len)) {
 			len = GetLengthSid(info->User.Sid);
-			result = xmalloc(len);
-			if (!CopySid(len, result, info->User.Sid)) {
+			*sid = xmalloc(len);
+			if (!CopySid(len, *sid, info->User.Sid)) {
 				error(_("failed to copy SID (%ld)"),
 				      GetLastError());
-				FREE_AND_NULL(result);
+				FREE_AND_NULL(*sid);
 			}
 		}
 		FREE_AND_NULL(info);
 	}
-	CloseHandle(token);
 
-	return result;
+	if (GetTokenInformation(token, TokenElevationType, &elevationType, sizeof(elevationType), &size) &&
+	    elevationType == TokenElevationTypeLimited) {
+		/*
+		 * The current process is run by a member of the Administrators
+		 * group, but is not running elevated.
+		 */
+		if (!GetTokenInformation(token, TokenLinkedToken, linked_token, sizeof(*linked_token), &size))
+			*linked_token = NULL; /* there is no linked token */
+	}
+
+	CloseHandle(token);
 }
 
 static BOOL user_sid_to_user_name(PSID sid, LPSTR *str)
@@ -3821,18 +3834,22 @@ int is_path_owned_by_current_sid(const char *path, struct strbuf *report)
 	if (err == ERROR_SUCCESS && sid && IsValidSid(sid)) {
 		/* Now, verify that the SID matches the current user's */
 		static PSID current_user_sid;
+		static HANDLE linked_token;
 		BOOL is_member;
 
 		if (!current_user_sid)
-			current_user_sid = get_current_user_sid();
+			get_current_user_sid(&current_user_sid, &linked_token);
 
 		if (current_user_sid &&
 		    IsValidSid(current_user_sid) &&
 		    EqualSid(sid, current_user_sid))
 			result = 1;
 		else if (IsWellKnownSid(sid, WinBuiltinAdministratorsSid) &&
-			 CheckTokenMembership(NULL, sid, &is_member) &&
-			 is_member)
+			 ((CheckTokenMembership(NULL, sid, &is_member) &&
+			   is_member) ||
+			  (linked_token &&
+			   CheckTokenMembership(linked_token, sid, &is_member) &&
+			   is_member)))
 			/*
 			 * If owned by the Administrators group, and the
 			 * current user is an administrator, we consider that

From eb1cff5283e4daf26ad587df038fd08c89fe4197 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Fri, 6 Dec 2024 20:12:33 +0100
Subject: [PATCH 195/207] test-tool path-utils: support debugging "dubious
 ownership" issues

This adds a new sub-sub-command for `test-tool`, simply passing through
the command-line arguments to the `is_path_owned_by_current_user()`
function.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 t/helper/test-path-utils.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/t/helper/test-path-utils.c b/t/helper/test-path-utils.c
index 72ac8d1b1b011d..f3c59e5028561c 100644
--- a/t/helper/test-path-utils.c
+++ b/t/helper/test-path-utils.c
@@ -504,6 +504,25 @@ int cmd__path_utils(int argc, const char **argv)
 		return !!res;
 	}
 
+	if (argc > 1 && !strcmp(argv[1], "is_path_owned_by_current_user")) {
+		int res = 0;
+
+		for (int i = 2; i < argc; i++) {
+			struct strbuf buf = STRBUF_INIT;
+
+			if (is_path_owned_by_current_user(argv[i], &buf))
+				printf("'%s' is owned by current SID\n", argv[i]);
+			else {
+				printf("'%s' is not owned by current SID: %s\n", argv[i], buf.buf);
+				res = 1;
+			}
+
+			strbuf_release(&buf);
+		}
+
+		return res;
+	}
+
 	fprintf(stderr, "%s: unknown function name: %s\n", argv[0],
 		argv[1] ? argv[1] : "(there was none)");
 	return 1;

From 87a4fa34f8922b9f6f957589331ccb4ac1546afc Mon Sep 17 00:00:00 2001
From: Derrick Stolee <stolee@gmail.com>
Date: Fri, 15 Nov 2024 21:09:22 -0500
Subject: [PATCH 196/207] push: don't reuse deltas with path walk

The --path-walk option in `git pack-objects` is implied by the
pack.usePathWalk=true config value. This is intended to help the
packfile generation within `git push` specifically.

While this config does enable the path-walk feature, it does not lead to
the expected levels of compression in the cases it was designed to
handle. This is due to the default implication of the --reuse-delta
option as well as auto-GC.

In the performance tests used to evaluate the --path-walk option, such
as those in p5313, the --no-reuse-delta option is used to ensure that
deltas are recomputed according to the new object walk. However, it was
assumed (I assumed this) that when the objects were loose from
client-side operations that better deltas would be computed during this
operation. This wasn't confirmed because the test process used data that
was fetched from real repositories and thus existed in packed form only.

I was able to confirm that this does not reproduce when the objects to
push are loose. Careful use of making the pushed commit unreachable and
loosening the objects via `git repack -Ad` helps to confirm my
suspicions here. Independent of this change, I'm pushing for these
pipeline agents to set `gc.auto=0` before creating their Git objects. In
the current setup, the repo is adding objects and then incrementally
repacking them and ending up with bad cross-path deltas. This approach
can help scenarios where that makes sense, but will not cover all of our
users without them choosing to opt in to background maintenance (and
even then, an incremental repack could cost them efficiency).

In order to make sure we are getting the intended compression in `git
push`, this change enforces the spawned `git pack-objects` process to
use `--no-reuse-delta`.

As far as I can tell, the main motivation for implying the --reuse-delta
option by default is two-fold:

 1. The code in send-pack.c that executes 'git pack-objects' is ignorant
    of whether the current process is a client pushing to a remote or a
    remote sending a fetch or clone to a client.

 2. For servers, it is critical that they trust the previously computed
    deltas whenever possible, or they could overload their CPU
    resources.

There's also the side that most servers use repacking logic that will
replace any bad deltas that are sent by clients (or at least, that's the
hope; we've seen that repacks can also pick bad deltas).

This commit also adds a test case that demonstrates that `git -c
pack.usePathWalk=true push` now avoids reusing deltas.

To do this, the test case constructs a pack with a horrendously
inefficient delta object, then verifies that the pack on the receiving
side of the `push` fails to have such an inefficient delta.

The test case would probably be a lot more readable if hex numbers were
used instead of octal numbers, but alas, `printf "\x<hex>"` is not
portable, only `printf "\<octal>"` is. For example, dash's built-in
`printf` function simply prints `\x` verbatim while bash's built-in
happily converts this construct to the corresponding byte.
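
For reference, the hand-crafted delta in the new test boils down to the
payload that this stand-alone sketch produces (the program is only
illustrative, but the byte layout follows the pack delta format): one
copy command per byte costs three bytes, where a single copy command
covering all 128 bytes would need only a few.

    #include <stdio.h>

    /* Git's delta header sizes are 7-bit groups, least significant first. */
    static size_t put_varint(unsigned char *buf, unsigned long n)
    {
            size_t len = 0;

            do {
                    unsigned char c = n & 0x7f;
                    n >>= 7;
                    buf[len++] = n ? (c | 0x80) : c;
            } while (n);
            return len;
    }

    int main(void)
    {
            unsigned char delta[2 + 2 + 3 * 128];
            size_t len = 0;

            len += put_varint(delta + len, 256);    /* base object size */
            len += put_varint(delta + len, 128);    /* result object size */

            for (int i = 0; i < 128; i++) {
                    delta[len++] = 0x91;    /* copy: offset1 and size1 follow */
                    delta[len++] = i;       /* copy from base offset i ... */
                    delta[len++] = 1;       /* ... exactly one byte */
            }

            printf("delta payload: %zu bytes\n", len); /* 388 = 2 + 2 + 3 * 128 */
            return 0;
    }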

Signed-off-by: Derrick Stolee <stolee@gmail.com>
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 builtin/push.c            |   4 ++
 send-pack.c               |   2 +
 send-pack.h               |   1 +
 t/meson.build             |   1 +
 t/t5590-push-path-walk.sh | 109 ++++++++++++++++++++++++++++++++++++++
 transport.c               |   1 +
 transport.h               |   1 +
 7 files changed, 119 insertions(+)
 create mode 100755 t/t5590-push-path-walk.sh

diff --git a/builtin/push.c b/builtin/push.c
index 90de3746b5229f..f49f436dd389b1 100644
--- a/builtin/push.c
+++ b/builtin/push.c
@@ -619,6 +619,10 @@ int cmd_push(int argc,
 	else if (recurse_submodules == RECURSE_SUBMODULES_ONLY)
 		flags |= TRANSPORT_RECURSE_SUBMODULES_ONLY;
 
+	prepare_repo_settings(the_repository);
+	if (the_repository->settings.pack_use_path_walk)
+		flags |= TRANSPORT_PUSH_NO_REUSE_DELTA;
+
 	if (tags)
 		refspec_append(&rs, "refs/tags/*");
 
diff --git a/send-pack.c b/send-pack.c
index cdb6dc11d1ea18..e66b1b5c6b4085 100644
--- a/send-pack.c
+++ b/send-pack.c
@@ -92,6 +92,8 @@ static int pack_objects(int fd, struct ref *refs, struct oid_array *advertised,
 		strvec_push(&po.args, "--shallow");
 	if (args->disable_bitmaps)
 		strvec_push(&po.args, "--no-use-bitmap-index");
+	if (args->no_reuse_delta)
+		strvec_push(&po.args, "--no-reuse-delta");
 	po.in = -1;
 	po.out = args->stateless_rpc ? -1 : fd;
 	po.git_cmd = 1;
diff --git a/send-pack.h b/send-pack.h
index 7edb80596c7b0e..759a55c1bdab1c 100644
--- a/send-pack.h
+++ b/send-pack.h
@@ -22,6 +22,7 @@ struct send_pack_args {
 		force_update:1,
 		use_thin_pack:1,
 		use_ofs_delta:1,
+		no_reuse_delta:1,
 		dry_run:1,
 		/* One of the SEND_PACK_PUSH_CERT_* constants. */
 		push_cert:2,
diff --git a/t/meson.build b/t/meson.build
index 3e8fd774d6c0eb..43cbe6d24a62be 100644
--- a/t/meson.build
+++ b/t/meson.build
@@ -710,6 +710,7 @@ integration_tests = [
   't5582-fetch-negative-refspec.sh',
   't5583-push-branches.sh',
   't5584-vfs.sh',
+  't5590-push-path-walk.sh',
   't5600-clone-fail-cleanup.sh',
   't5601-clone.sh',
   't5602-clone-remote-exec.sh',
diff --git a/t/t5590-push-path-walk.sh b/t/t5590-push-path-walk.sh
new file mode 100755
index 00000000000000..7849ec337b0aa7
--- /dev/null
+++ b/t/t5590-push-path-walk.sh
@@ -0,0 +1,109 @@
+#!/bin/sh
+
+test_description='verify that push respects `pack.usePathWalk`'
+
+TEST_PASSES_SANITIZE_LEAK=true
+. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-pack.sh
+
+test_expect_success 'setup bare repository and clone' '
+	git init --bare -b main bare.git &&
+	git --git-dir=bare.git config receive.unpackLimit 0 &&
+	git --git-dir bare.git commit-tree -m initial $EMPTY_TREE >head_oid &&
+	git --git-dir bare.git update-ref refs/heads/main $(cat head_oid) &&
+	git clone --bare bare.git clone.git
+'
+test_expect_success 'avoid reusing deltified objects' '
+	# construct two commits, one containing a file with the hex digits
+	# repeated 16 times, the next reducing that to 8 times. The crucial
+	# part is that the blob of the second commit is deltified _really_
+	# badly and it is therefore easy to detect if a `git push` reused that
+	# delta.
+	x="0123456789abcdef" &&
+	printf "$x$x$x$x$x$x$x$x" >x128 &&
+	printf "$x$x$x$x$x$x$x$x$x$x$x$x$x$x$x$x" >x256 &&
+
+	pack=clone.git/objects/pack/pack-tmp.pack &&
+	pack_header 2 >$pack &&
+
+	# add x256 as a non-deltified object, using an uncompressed zlib stream
+	# for simplicity
+	# 060 = OBJ_BLOB << 4, 0200 = size larger than 15,
+	# 0 = lower 4 bits of size, 020 = bits 5-9 of size (size = 256)
+	printf "\260\020" >>$pack &&
+	# Uncompressed zlib stream always starts with 0170 1 1, followed
+	# by two bytes encoding the size, little endian, then two bytes with
+	# the bitwise-complement of that size, then the payload, and then the
+	# Adler32 checksum. For some reason, the checksum is in big-endian
+	# format.
+	printf "\170\001\001\0\001\377\376" >>$pack &&
+	cat x256 >>$pack &&
+	# Manually-computed Adler32 checksum: 0xd7ae4621
+	printf "\327\256\106\041" >>$pack &&
+
+	# add x128 as a very badly deltified object
+	# 0140 = OBJ_OFS_DELTA << 4, 0200 = total size larger than 15,
+	# 4 = lower 4 bits of size, 030 = bits 5-9 of size
+	# (size = 128 * 3 + 2 + 2)
+	printf "\344\030" >>$pack &&
+	# 0415 = size (i.e. the relative negative offset) of the previous
+	# object (x256, used as base object)
+	# encoded as 0200 | ((0415 >> 7) - 1), 0415 & 0177
+	printf "\201\015" >>$pack &&
+	# Uncompressed zlib stream, as before, size = 2 + 2 + 128 * 3 (i.e.
+	# 0604)
+	printf "\170\001\001\204\001\173\376" >>$pack &&
+	# base object size = 0400 (encoded as 0200 | (0400 & 0177),
+	# 0400 >> 7)
+	printf "\200\002" >>$pack &&
+	# object size = 0200 (encoded as 0200 | (0200 & 0177), 0200 >> 7)
+	printf "\200\001" >>$pack &&
+	# massively badly-deltified object: copy every single byte individually
+	# 0200 = copy, 1 = use 1 byte to encode the offset (counter),
+	# 020 = use 1 byte to encode the size (1)
+	printf "$(printf "\\\\221\\\\%03o\\\\001" $(test_seq 0 127))" >>$pack &&
+	# Manually-computed Adler32 checksum: 0x99c369c4
+	printf "\231\303\151\304" >>$pack &&
+
+	pack_trailer $pack &&
+	git index-pack -v $pack &&
+
+	oid256=$(git hash-object x256) &&
+	printf "100755 blob $oid256\thex\n" >tree &&
+	tree_oid="$(git --git-dir=clone.git mktree <tree)" &&
+	commit_oid=$(git --git-dir=clone.git commit-tree \
+		-p $(git --git-dir=clone.git rev-parse main) \
+		-m 256 $tree_oid) &&
+
+	oid128=$(git hash-object x128) &&
+	printf "100755 blob $oid128\thex\n" >tree &&
+	tree_oid="$(git --git-dir=clone.git mktree <tree)" &&
+	commit_oid=$(git --git-dir=clone.git commit-tree \
+		-p $commit_oid \
+		-m 128 $tree_oid) &&
+
+	# Verify that the on-disk size of the delta object is suboptimal in the
+	# clone (see below why 18 bytes or smaller is the optimal size):
+	git index-pack --verify-stat clone.git/objects/pack/pack-*.pack >verify &&
+	size="$(sed -n "s/^$oid128 blob *\([^ ]*\).*/\1/p" <verify)" &&
+	test $size -gt 18 &&
+
+	git --git-dir=clone.git update-ref refs/heads/main $commit_oid &&
+	git --git-dir=clone.git -c pack.usePathWalk=true push origin main &&
+	git index-pack --verify-stat bare.git/objects/pack/pack-*.pack >verify &&
+	size="$(sed -n "s/^$oid128 blob *\([^ ]*\).*/\1/p" <verify)" &&
+	# The on-disk size of the delta object should be smaller than, or equal
+	# to, 18 bytes, as that would be the size if storing the payload
+	# uncompressed:
+	#   3 bytes: 0170 01 01
+	# + 2 bytes: zlib stream size
+	# + 2 bytes: bit-wise complement of the zlib stream size
+	# + 7 bytes: payload
+	#   (= 2 bytes for the size of the base object
+	#    + 2 bytes for the size of the delta command
+	#    + 3 bytes for the copy command)
+	# + 2 + 2 bytes: Adler32 checksum
+	test $size -le 18
+'
+
+test_done
diff --git a/transport.c b/transport.c
index 10d820c33353f6..606a92ed937dec 100644
--- a/transport.c
+++ b/transport.c
@@ -916,6 +916,7 @@ static int git_transport_push(struct transport *transport, struct ref *remote_re
 	args.dry_run = !!(flags & TRANSPORT_PUSH_DRY_RUN);
 	args.porcelain = !!(flags & TRANSPORT_PUSH_PORCELAIN);
 	args.atomic = !!(flags & TRANSPORT_PUSH_ATOMIC);
+	args.no_reuse_delta = !!(flags & TRANSPORT_PUSH_NO_REUSE_DELTA);
 	args.push_options = transport->push_options;
 	args.url = transport->url;
 
diff --git a/transport.h b/transport.h
index 44100fa9b7fdd6..54767b5a051e10 100644
--- a/transport.h
+++ b/transport.h
@@ -158,6 +158,7 @@ struct transport {
 #define TRANSPORT_RECURSE_SUBMODULES_ONLY	(1<<15)
 #define TRANSPORT_PUSH_FORCE_IF_INCLUDES	(1<<16)
 #define TRANSPORT_PUSH_AUTO_UPSTREAM		(1<<17)
+#define TRANSPORT_PUSH_NO_REUSE_DELTA		(1<<18)
 
 int transport_summary_width(const struct ref *refs);
 

From 0908f83326ab7141dc45c441733df5857b407442 Mon Sep 17 00:00:00 2001
From: Matthew John Cheetham <mjcheetham@outlook.com>
Date: Fri, 17 Jan 2025 13:25:01 +0000
Subject: [PATCH 197/207] release-winget.yml: update command to include ARM
 installers

Update the WinGet release workflow to match the updating manifest in
`microsoft/winget-pkgs`, where there are now four installation options:

- x86_64 / x64 with machine scope
- x86_64 / x64 with user scope
- aarch64 / arm64 with machine scope
- aarch64 / arm64 with user scope

Signed-off-by: Matthew John Cheetham <mjcheetham@outlook.com>
---
 .github/workflows/release-winget.yml | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/release-winget.yml b/.github/workflows/release-winget.yml
index d6edab844d05b5..9ee86c84263280 100644
--- a/.github/workflows/release-winget.yml
+++ b/.github/workflows/release-winget.yml
@@ -29,7 +29,8 @@ jobs:
         run: |
           # Get correct release asset
           $github = Get-Content '${{ github.event_path }}' | ConvertFrom-Json
-          $asset = $github.release.assets | Where-Object -Property name -match '64-bit.exe$'
+          $asset_x64 = $github.release.assets | Where-Object -Property name -match '64-bit.exe$'
+          $asset_arm64 = $github.release.assets | Where-Object -Property name -match 'arm64.exe$'
 
           # Remove 'v' and 'vfs' from the version
           $github.release.tag_name -match '\d.*'
@@ -37,7 +38,13 @@ jobs:
 
           # Download wingetcreate and create manifests
           Invoke-WebRequest https://aka.ms/wingetcreate/latest -OutFile wingetcreate.exe
-          .\wingetcreate.exe update Microsoft.Git -u $asset.browser_download_url -v $version -o manifests
+          .\wingetcreate.exe update Microsoft.Git `
+              -v $version `
+              -o manifests `
+              -u "$($asset_x64.browser_download_url)|x64|machine" `
+                 "$($asset_x64.browser_download_url)|x64|user" `
+                 "$($asset_arm64.browser_download_url)|arm64|machine" `
+                 "$($asset_arm64.browser_download_url)|arm64|user"
 
           # Manually substitute the name of the default branch in the License
           # and Copyright URLs since the tooling cannot do that for us.

From 2651f3fafdba05cddf6ad9ea51c4e4d9e5c74ee1 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Mon, 20 Jan 2025 14:33:46 +0100
Subject: [PATCH 198/207] fixup! release: create initial Windows installer
 build workflow

There is no need to initialize the `release` environment for the
`prereqs` job. Originally, the expectation was that doing so would avoid
having to validate manually, again and again, that the jobs are allowed
to access that environment. However, GitHub Actions does ask for every
single job, and it is unlikely that this is ever going to be fixed.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 .github/workflows/build-git-installers.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
index de13e7df7239eb..e9203c4a357522 100644
--- a/.github/workflows/build-git-installers.yml
+++ b/.github/workflows/build-git-installers.yml
@@ -12,7 +12,6 @@ jobs:
   # Check prerequisites for the workflow
   prereqs:
     runs-on: ubuntu-latest
-    environment: release
     outputs:
       tag_name: ${{ steps.tag.outputs.name }}           # The full name of the tag, e.g. v2.32.0.vfs.0.0
       tag_version: ${{ steps.tag.outputs.version }}     # The version number (without preceding "v"), e.g. 2.32.0.vfs.0.0

From f1efa3d26eac6b8722b74aa43e716ed281e0382b Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Fri, 17 Jan 2025 16:26:33 +0100
Subject: [PATCH 199/207] fixup! release: build unsigned Ubuntu .deb package

Currently, we target whatever GitHub Actions uses as `ubuntu-latest`.
This, however, led to the unintentional requirement in v2.47.2.vfs.0.0
to run Ubuntu 24.04 (up from 22.04 in v2.47.1.vfs.0.1).

It is important to target a wider audience, though, especially in light
of CVE-2024-52005, which is only addressed in Git for Windows and
`microsoft/git`, but not in upstream Git.

We could now go back to 22.04, but that would only be a temporary
band-aid:
https://github.blog/changelog/2025-01-15-github-actions-ubuntu-20-runner-image-brownout-dates-and-other-breaking-changes/
already announced that 20.04 will be phased out very soon, and 22.04
will be next.

Let's just use a Docker container instead that targets the oldest Ubuntu
LTS that is still maintained in _some_ shape or form.

This requires a few modifications (`sudo` is not available, GitHub
Actions' node.js needs to be overridden, and we need to install a couple
of packages explicitly). In particular, we now need two jobs because it
turned out to be too convoluted to get `debsign` to work in a headless
workflow with Ubuntu 16.04; we still resort to `ubuntu-latest` for that
instead.

By still verifying the resulting binary in `validate-installers`, we
ensure that it installs and works on the latest Ubuntu version by virtue
of using `runs-on: ubuntu-latest` in _that_ matrix job.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 .github/workflows/build-git-installers.yml | 41 +++++++++++++++++++---
 1 file changed, 36 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
index e9203c4a357522..1d8e0e388db4ab 100644
--- a/.github/workflows/build-git-installers.yml
+++ b/.github/workflows/build-git-installers.yml
@@ -490,16 +490,30 @@ jobs:
   # End build and sign Mac OSX installers
 
   # Build and sign Debian package
-  create-linux-artifacts:
+  create-linux-unsigned-artifacts:
     runs-on: ubuntu-latest
+    container:
+      image: ubuntu:16.04 # expanded security maintenance until 04/02/2026, according to https://endoflife.date/ubuntu
+      volumes:
+        # override /__e/node20 because GitHub Actions uses a version that requires too-recent glibc, see "Install dependencies" below
+        - /tmp:/__e/node20
     needs: prereqs
-    environment: release
     steps:
-      - name: Install git dependencies
+      - name: Install dependencies
         run: |
           set -ex
-          sudo apt-get update -q
-          sudo apt-get install -y -q --no-install-recommends gettext libcurl4-gnutls-dev libpcre3-dev asciidoc xmlto
+          apt-get update -q
+          apt-get install -y -q --no-install-recommends \
+            build-essential \
+            tcl tk gettext asciidoc xmlto \
+            libcurl4-gnutls-dev libpcre2-dev zlib1g-dev libexpat-dev \
+            curl ca-certificates
+
+          # Install a Node.js version that works in older Ubuntu containers (read: does not require very recent glibc)
+          NODE_VERSION=v20.18.1 &&
+          NODE_URL=https://unofficial-builds.nodejs.org/download/release/$NODE_VERSION/node-$NODE_VERSION-linux-x64-glibc-217.tar.gz &&
+          curl -Lo /tmp/node.tar.gz $NODE_URL &&
+          tar -C /__e/node20 -x --strip-components=1 -f /tmp/node.tar.gz
 
       - name: Clone git
         uses: actions/checkout@v4
@@ -562,6 +576,18 @@ jobs:
           # Move Debian package for later artifact upload
           mv "$PKGNAME.deb" "$GITHUB_WORKSPACE"
 
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: linux-unsigned-artifacts
+          path: |
+            *.deb
+
+  create-linux-artifacts:
+    runs-on: ubuntu-latest
+    needs: [prereqs, create-linux-unsigned-artifacts]
+    environment: release
+    steps:
       - name: Log into Azure
         uses: azure/login@v2
         with:
@@ -597,6 +623,11 @@ jobs:
           gpg-connect-agent RELOADAGENT /bye
           /usr/lib/gnupg2/gpg-preset-passphrase --preset "$keygrip" <<<"$passphrase"
 
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          name: linux-unsigned-artifacts
+
       - name: Sign Debian package
         run: |
           # Sign Debian package

From 0d2447e5e95b7285723e122add6a69f3e7b4522b Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Mon, 20 Jan 2025 10:56:45 +0100
Subject: [PATCH 200/207] fixup! release: build unsigned Ubuntu .deb package

Do use `apt-get` and ensure that it does not ask for confirmation.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 .github/workflows/build-git-installers.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
index 1d8e0e388db4ab..4cfe352ad5ef2f 100644
--- a/.github/workflows/build-git-installers.yml
+++ b/.github/workflows/build-git-installers.yml
@@ -603,7 +603,7 @@ jobs:
           GPG_KEYGRIP_SECRET_NAME: ${{ secrets.GPG_KEYGRIP_SECRET_NAME }}
         run: |
           # Install debsigs
-          sudo apt install debsigs
+          sudo apt-get install -y debsigs
 
           # Download GPG key, passphrase, and keygrip from Azure Key Vault
           key=$(az keyvault secret show --name $GPG_KEY_SECRET_NAME --vault-name $AZURE_VAULT --query "value")

From 25afe33e4e98cf99447165501874d208e99dbbb2 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Mon, 20 Jan 2025 10:09:01 +0100
Subject: [PATCH 201/207] fixup! release: build unsigned Ubuntu .deb package

By using cURL for `imap-send`, we can drop the libssl dependency, which
is not installed in the Ubuntu 16.04 container by default.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 .github/workflows/build-git-installers.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
index 4cfe352ad5ef2f..ca552a728e7d1f 100644
--- a/.github/workflows/build-git-installers.yml
+++ b/.github/workflows/build-git-installers.yml
@@ -547,6 +547,7 @@ jobs:
 
           DESTDIR="$PKGDIR" make -C git -j5 V=1 DEVELOPER=1 \
             USE_LIBPCRE=1 \
+            USE_CURL_FOR_IMAP_SEND=1 NO_OPENSSL=1 \
             NO_CROSS_DIRECTORY_HARDLINKS=1 \
             ASCIIDOC8=1 ASCIIDOC_NO_ROFF=1 \
             ASCIIDOC='TZ=UTC asciidoc' \

From a678566ccd0b07fc04c487eae4bd60981c36192e Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Mon, 20 Jan 2025 11:18:10 +0100
Subject: [PATCH 202/207] fixup! release: build unsigned Ubuntu .deb package

This avoids some Bash-only `sed` invocations to strip quotes where no
stripping is required if `az` is asked to output the value in a more
appropriate format.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 .github/workflows/build-git-installers.yml | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
index ca552a728e7d1f..ae442d9e3d51cf 100644
--- a/.github/workflows/build-git-installers.yml
+++ b/.github/workflows/build-git-installers.yml
@@ -607,14 +607,9 @@ jobs:
           sudo apt-get install -y debsigs
 
           # Download GPG key, passphrase, and keygrip from Azure Key Vault
-          key=$(az keyvault secret show --name $GPG_KEY_SECRET_NAME --vault-name $AZURE_VAULT --query "value")
-          passphrase=$(az keyvault secret show --name $GPG_PASSPHRASE_SECRET_NAME --vault-name $AZURE_VAULT --query "value")
-          keygrip=$(az keyvault secret show --name $GPG_KEYGRIP_SECRET_NAME --vault-name $AZURE_VAULT --query "value")
-
-          # Remove quotes from downloaded values
-          key=$(sed -e 's/^"//' -e 's/"$//' <<<"$key")
-          passphrase=$(sed -e 's/^"//' -e 's/"$//' <<<"$passphrase")
-          keygrip=$(sed -e 's/^"//' -e 's/"$//' <<<"$keygrip")
+          key="$(az keyvault secret show --name "$GPG_KEY_SECRET_NAME" --vault-name "$AZURE_VAULT" --query "value" --output tsv)"
+          passphrase="$(az keyvault secret show --name "$GPG_PASSPHRASE_SECRET_NAME" --vault-name "$AZURE_VAULT" --query "value" --output tsv)"
+          keygrip="$(az keyvault secret show --name "$GPG_KEYGRIP_SECRET_NAME" --vault-name "$AZURE_VAULT" --query "value" --output tsv)"
 
           # Import GPG key
           echo "$key" | base64 -d | gpg --import --no-tty --batch --yes

From ee8f79c10b1f7fea92df496a57c7ec06034db2bd Mon Sep 17 00:00:00 2001
From: Matthew John Cheetham <mjcheetham@outlook.com>
Date: Wed, 22 Jan 2025 13:30:42 +0000
Subject: [PATCH 203/207] fixup! maintenance: care about gvfs.sharedCache
 config

---
 builtin/gc.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/builtin/gc.c b/builtin/gc.c
index 5d8f1561c12f88..0d1e6663d458c0 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -1154,7 +1154,7 @@ static int write_loose_object_to_stdin(const struct object_id *oid,
 	return ++(d->count) > d->batch_size;
 }
 
-static const char *object_dir = NULL;
+static const char *shared_object_dir = NULL;
 
 static int pack_loose(struct maintenance_run_opts *opts)
 {
@@ -1162,9 +1162,11 @@ static int pack_loose(struct maintenance_run_opts *opts)
 	int result = 0;
 	struct write_loose_object_data data;
 	struct child_process pack_proc = CHILD_PROCESS_INIT;
+	const char *object_dir = r->objects->odb->path;
 
-	if (!object_dir)
-		object_dir = r->objects->odb->path;
+	/* If set, use the shared object directory. */
+	if (shared_object_dir)
+		object_dir = shared_object_dir;
 
 	/*
 	 * Do not start pack-objects process
@@ -1634,8 +1636,8 @@ static int maintenance_run(int argc, const char **argv, const char *prefix,
 	 */
 	if (!git_config_get_value("gvfs.sharedcache", &tmp_obj_dir) &&
 	    tmp_obj_dir) {
-		object_dir = xstrdup(tmp_obj_dir);
-		setenv(DB_ENVIRONMENT, object_dir, 1);
+		shared_object_dir = xstrdup(tmp_obj_dir);
+		setenv(DB_ENVIRONMENT, shared_object_dir, 1);
 	}
 
 	ret = maintenance_run_tasks(&opts, &cfg);

From 4afc5ec0e6b55d63dee4f52c21ffc2d76005ba51 Mon Sep 17 00:00:00 2001
From: Matthew John Cheetham <mjcheetham@outlook.com>
Date: Wed, 22 Jan 2025 13:57:21 +0000
Subject: [PATCH 204/207] t7900-maintenance.sh: reset config between tests

Tests in t7900 assume a particular state of the `maintenance.strategy`
config setting, which is set or unset by previous tests. Correct this by
explicitly unsetting and re-setting the config at the start of the
tests.

Signed-off-by: Matthew John Cheetham <mjcheetham@outlook.com>
---
 t/t7900-maintenance.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/t/t7900-maintenance.sh b/t/t7900-maintenance.sh
index 1909aed95e08ad..7109859ad1aa50 100755
--- a/t/t7900-maintenance.sh
+++ b/t/t7900-maintenance.sh
@@ -458,6 +458,7 @@ test_expect_success 'invalid --schedule value' '
 '
 
 test_expect_success '--schedule inheritance weekly -> daily -> hourly' '
+	git config --unset maintenance.strategy &&
 	git config maintenance.loose-objects.enabled true &&
 	git config maintenance.loose-objects.schedule hourly &&
 	git config maintenance.commit-graph.enabled true &&

From adc9200ae4a04f2bbfba4249eea6c63ee1df2e96 Mon Sep 17 00:00:00 2001
From: Matthew John Cheetham <mjcheetham@outlook.com>
Date: Tue, 14 Jan 2025 17:28:31 +0000
Subject: [PATCH 205/207] maintenance: add cache-local-objects maintenance task

Introduce a new maintenance task, `cache-local-objects`, that operates
on Scalar or VFS for Git repositories with a per-volume, shared object
cache (specified by `gvfs.sharedCache`) to migrate packfiles and loose
objects from the repository object directory to the shared cache.

Older versions of `microsoft/git` incorrectly placed packfiles in the
repository object directory instead of the shared cache; this task will
help clean up existing clones impacted by that issue.

Migration of packfiles involves the following steps for each pack:

1. Hardlink (or copy):
   a. the .pack file
   b. the .keep file
   c. the .rev file
2. Move (or copy + delete) the .idx file
3. Delete/unlink:
   a. the .pack file
   b. the .keep file
   c. the .rev file

Moving the index file after the others ensures the pack is not read
from the new cache directory until all associated files (rev, keep)
exist in the cache directory also.

Moving loose objects operates as a move, or copy + delete.
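
As a rough illustration of that ordering (this is not the code added
below, which uses Git's own helpers and also falls back to copying and
deleting across filesystems), a plain POSIX sketch might look like this:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <unistd.h>

    /* The .idx file is handled last so that a reader of dstdir never
     * sees an index whose companion files are still missing. */
    static const char *exts[] = { ".pack", ".keep", ".rev", ".idx" };

    static int migrate_one(const char *src_base, const char *dst_base)
    {
            char src[PATH_MAX], dst[PATH_MAX];
            size_t i;

            /* A pack without its .idx is not yet ready to be migrated. */
            snprintf(src, sizeof(src), "%s.idx", src_base);
            if (access(src, F_OK))
                    return 0;

            for (i = 0; i < 4; i++) {
                    snprintf(src, sizeof(src), "%s%s", src_base, exts[i]);
                    snprintf(dst, sizeof(dst), "%s%s", dst_base, exts[i]);

                    if (access(src, F_OK))
                            continue;       /* optional file is absent */

                    if (i < 3) {
                            /* keep the source for now; just add a link */
                            if (link(src, dst) && errno != EEXIST) {
                                    perror(dst);
                                    return -1;
                            }
                    } else if (rename(src, dst)) {
                            /* the .idx is moved, and moved last */
                            perror(dst);
                            return -1;
                    }
            }

            /* Only after the .idx has moved do we drop the linked sources. */
            for (i = 0; i < 3; i++) {
                    snprintf(src, sizeof(src), "%s%s", src_base, exts[i]);
                    if (!access(src, F_OK) && unlink(src))
                            perror(src);
            }
            return 0;
    }

    int main(void)
    {
            return migrate_one("objects/pack/pack-1234",
                               "cache/pack/pack-1234") ? 1 : 0;
    }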

Signed-off-by: Matthew John Cheetham <mjcheetham@outlook.com>
---
 Documentation/git-maintenance.txt |   8 ++
 builtin/gc.c                      | 190 ++++++++++++++++++++++++++++++
 t/t7900-maintenance.sh            | 129 ++++++++++++++++++++
 3 files changed, 327 insertions(+)

diff --git a/Documentation/git-maintenance.txt b/Documentation/git-maintenance.txt
index 6e6651309d3253..b020f4fe7f3c6a 100644
--- a/Documentation/git-maintenance.txt
+++ b/Documentation/git-maintenance.txt
@@ -69,6 +69,7 @@ task:
 * `prefetch`: hourly.
 * `loose-objects`: daily.
 * `incremental-repack`: daily.
+* `cache-local-objects`: weekly.
 --
 +
 `git maintenance register` will also disable foreground maintenance by
@@ -158,6 +159,13 @@ pack-refs::
 	need to iterate across many references. See linkgit:git-pack-refs[1]
 	for more information.
 
+cache-local-objects::
+	The `cache-local-objects` task only operates on Scalar or VFS for Git
+	repositories (cloned with either `scalar clone` or `gvfs clone`) that
+	have the `gvfs.sharedCache` configuration setting present. This task
+	migrates pack files and loose objects from the repository's object
+	directory into the shared volume cache.
+
 OPTIONS
 -------
 --auto::
diff --git a/builtin/gc.c b/builtin/gc.c
index 0d1e6663d458c0..c4ca7d6ec6813e 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -13,6 +13,7 @@
 #define USE_THE_REPOSITORY_VARIABLE
 #define DISABLE_SIGN_COMPARE_WARNINGS
 
+#include "git-compat-util.h"
 #include "builtin.h"
 #include "abspath.h"
 #include "date.h"
@@ -44,6 +45,8 @@
 #include "hook.h"
 #include "setup.h"
 #include "trace2.h"
+#include "copy.h"
+#include "dir.h"
 
 #define FAILED_RUN "failed to run %s"
 
@@ -1360,6 +1363,186 @@ static int maintenance_task_incremental_repack(struct maintenance_run_opts *opts
 	return 0;
 }
 
+static void link_or_copy_or_die(const char *src, const char *dst)
+{
+	if (!link(src, dst))
+		return;
+
+	/* Use copy operation if src and dst are on different file systems. */
+	if (errno != EXDEV)
+		warning_errno(_("failed to link '%s' to '%s'"), src, dst);
+
+	if (copy_file(dst, src, 0444))
+		die_errno(_("failed to copy '%s' to '%s'"), src, dst);
+}
+
+static void rename_or_copy_or_die(const char *src, const char *dst)
+{
+	if (!rename(src, dst))
+		return;
+
+	/* Use copy and delete if src and dst are on different file systems. */
+	if (errno != EXDEV)
+		warning_errno(_("failed to move '%s' to '%s'"), src, dst);
+
+	if (copy_file(dst, src, 0444))
+		die_errno(_("failed to copy '%s' to '%s'"), src, dst);
+
+	if (unlink(src))
+		die_errno(_("failed to delete '%s'"), src);
+}
+
+static void migrate_pack(const char *srcdir, const char *dstdir,
+			 const char *pack_filename)
+{
+	size_t basenamelen, srclen, dstlen;
+	struct strbuf src = STRBUF_INIT, dst = STRBUF_INIT;
+	struct {
+		const char *ext;
+		unsigned move:1;
+	} files[] = {
+		{".pack", 0},
+		{".keep", 0},
+		{".rev", 0},
+		{".idx", 1}, /* The index file must be atomically moved last. */
+	};
+
+	trace2_region_enter("maintenance", "migrate_pack", the_repository);
+
+	basenamelen = strlen(pack_filename) - 5; /* .pack */
+	strbuf_addstr(&src, srcdir);
+	strbuf_addch(&src, '/');
+	strbuf_add(&src, pack_filename, basenamelen);
+	strbuf_addstr(&src, ".idx");
+
+	/* A pack without an index file is not yet ready to be migrated. */
+	if (!file_exists(src.buf))
+		goto cleanup;
+
+	strbuf_setlen(&src, src.len - 4 /* .idx */);
+	strbuf_addstr(&dst, dstdir);
+	strbuf_addch(&dst, '/');
+	strbuf_add(&dst, pack_filename, basenamelen);
+
+	srclen = src.len;
+	dstlen = dst.len;
+
+	/* Move or copy files from the source directory to the destination. */
+	for (size_t i = 0; i < ARRAY_SIZE(files); i++) {
+		strbuf_setlen(&src, srclen);
+		strbuf_addstr(&src, files[i].ext);
+
+		if (!file_exists(src.buf))
+			continue;
+
+		strbuf_setlen(&dst, dstlen);
+		strbuf_addstr(&dst, files[i].ext);
+
+		if (files[i].move)
+			rename_or_copy_or_die(src.buf, dst.buf);
+		else
+			link_or_copy_or_die(src.buf, dst.buf);
+	}
+
+	/*
+	 * Now that the pack and all associated files exist at the destination,
+	 * we can clean up the files in the source directory.
+	 */
+	for (size_t i = 0; i < ARRAY_SIZE(files); i++) {
+		/* Files that were moved rather than copied have no clean up. */
+		if (files[i].move)
+			continue;
+
+		strbuf_setlen(&src, srclen);
+		strbuf_addstr(&src, files[i].ext);
+
+		/* Files that never existed originally have no clean up. */
+		if (!file_exists(src.buf))
+			continue;
+
+		if (unlink(src.buf))
+			warning_errno(_("failed to delete '%s'"), src.buf);
+	}
+
+cleanup:
+	strbuf_release(&src);
+	strbuf_release(&dst);
+
+	trace2_region_leave("maintenance", "migrate_pack", the_repository);
+}
+
+static void move_pack_to_shared_cache(const char *full_path, size_t full_path_len,
+				      const char *file_name, void *data)
+{
+	char *srcdir;
+	const char *dstdir = (const char *)data;
+
+	/* We only care about the actual pack files here.
+	 * The associated .idx, .keep, .rev files will be copied in tandem
+	 * with the pack file, with the index file being moved last.
+	 * The original locations of the non-index files will only be deleted
+	 * once all other files have been copied/moved.
+	 */
+	if (!ends_with(file_name, ".pack"))
+		return;
+
+	srcdir = xstrndup(full_path, full_path_len - strlen(file_name) - 1);
+
+	migrate_pack(srcdir, dstdir, file_name);
+
+	free(srcdir);
+}
+
+static int move_loose_object_to_shared_cache(const struct object_id *oid,
+					     const char *path,
+					     UNUSED void *data)
+{
+	struct stat st;
+	struct strbuf dst = STRBUF_INIT;
+	char *hex = oid_to_hex(oid);
+
+	strbuf_addf(&dst, "%s/%.2s/", shared_object_dir, hex);
+
+	if (stat(dst.buf, &st)) {
+		if (mkdir(dst.buf, 0777))
+			die_errno(_("failed to create directory '%s'"), dst.buf);
+	} else if (!S_ISDIR(st.st_mode))
+		die(_("expected '%s' to be a directory"), dst.buf);
+
+	strbuf_addstr(&dst, hex+2);
+	rename_or_copy_or_die(path, dst.buf);
+
+	strbuf_release(&dst);
+	return 0;
+}
+
+static int maintenance_task_cache_local_objs(UNUSED struct maintenance_run_opts *opts,
+					     UNUSED struct gc_config *cfg)
+{
+	struct strbuf dstdir = STRBUF_INIT;
+	struct repository *r = the_repository;
+
+	/* This task is only applicable with a VFS/Scalar shared cache. */
+	if (!shared_object_dir)
+		return 0;
+
+	/* If the dest is the same as the local odb path then we do nothing. */
+	if (!fspathcmp(r->objects->odb->path, shared_object_dir))
+		goto cleanup;
+
+	strbuf_addf(&dstdir, "%s/pack", shared_object_dir);
+
+	for_each_file_in_pack_dir(r->objects->odb->path, move_pack_to_shared_cache,
+				  dstdir.buf);
+
+	for_each_loose_object(move_loose_object_to_shared_cache, NULL,
+			      FOR_EACH_OBJECT_LOCAL_ONLY);
+
+cleanup:
+	strbuf_release(&dstdir);
+	return 0;
+}
+
 typedef int maintenance_task_fn(struct maintenance_run_opts *opts,
 				struct gc_config *cfg);
 
@@ -1389,6 +1572,7 @@ enum maintenance_task_label {
 	TASK_GC,
 	TASK_COMMIT_GRAPH,
 	TASK_PACK_REFS,
+	TASK_CACHE_LOCAL_OBJS,
 
 	/* Leave as final value */
 	TASK__COUNT
@@ -1425,6 +1609,10 @@ static struct maintenance_task tasks[] = {
 		maintenance_task_pack_refs,
 		pack_refs_condition,
 	},
+	[TASK_CACHE_LOCAL_OBJS] = {
+		"cache-local-objects",
+		maintenance_task_cache_local_objs,
+	},
 };
 
 static int compare_tasks_by_selection(const void *a_, const void *b_)
@@ -1519,6 +1707,8 @@ static void initialize_maintenance_strategy(void)
 		tasks[TASK_LOOSE_OBJECTS].schedule = SCHEDULE_DAILY;
 		tasks[TASK_PACK_REFS].enabled = 1;
 		tasks[TASK_PACK_REFS].schedule = SCHEDULE_WEEKLY;
+		tasks[TASK_CACHE_LOCAL_OBJS].enabled = 1;
+		tasks[TASK_CACHE_LOCAL_OBJS].schedule = SCHEDULE_WEEKLY;
 	}
 }
 
diff --git a/t/t7900-maintenance.sh b/t/t7900-maintenance.sh
index 7109859ad1aa50..e9eee13f8b6ba0 100755
--- a/t/t7900-maintenance.sh
+++ b/t/t7900-maintenance.sh
@@ -31,6 +31,25 @@ test_systemd_analyze_verify () {
 	fi
 }
 
+test_import_packfile () {
+	printf "blob\ndata <<END\n%s\nEND\n\n" 1 2 3 4 5 | \
+	git -c fastimport.unpackLimit=0 fast-import
+}
+
+test_get_packdir_files() {
+	if [ "$#" -eq 0 ]; then
+		find .git/objects/pack -type f
+	else
+		for arg in "$@"; do
+			find .git/objects/pack -type f -name "$arg"
+		done
+	fi
+}
+
+test_get_loose_object_files () {
+	find .git/objects -type f -path '.git/objects/??/*'
+}
+
 test_expect_success 'help text' '
 	test_expect_code 129 git maintenance -h >actual &&
 	test_grep "usage: git maintenance <subcommand>" actual &&
@@ -1025,4 +1044,114 @@ test_expect_success 'maintenance aborts with existing lock file' '
 	test_grep "Another scheduled git-maintenance(1) process seems to be running" err
 '
 
+test_expect_success 'cache-local-objects task with no shared cache no op' '
+	test_when_finished "rm -rf repo" &&
+	git init repo &&
+	(
+		cd repo &&
+
+		test_commit something &&
+		git config set maintenance.gc.enabled false &&
+		git config set maintenance.cache-local-objects.enabled true &&
+		git config set maintenance.cache-local-objects.auto 1 &&
+
+		test_import_packfile &&
+		test_get_packdir_files "*.pack" "*.idx" "*.keep" "*.rev" \
+			>files.txt &&
+		test_get_loose_object_files >>files.txt &&
+
+		git maintenance run &&
+		while IFS= read -r f; do
+			test_path_exists $f || exit 1
+		done <files.txt
+	)
+'
+
+test_expect_success 'cache-local-objects task cache path same as local odb no op' '
+	test_when_finished "rm -rf repo" &&
+	git init repo &&
+	(
+		cd repo &&
+
+		test_commit something &&
+		git config set gvfs.sharedcache .git/objects &&
+		git config set maintenance.gc.enabled false &&
+		git config set maintenance.cache-local-objects.enabled true &&
+		git config set maintenance.cache-local-objects.auto 1 &&
+
+		test_import_packfile &&
+		test_get_packdir_files "*.pack" "*.idx" "*.keep" "*.rev" \
+			>files.txt &&
+		test_get_loose_object_files >>files.txt &&
+
+		git maintenance run &&
+		while IFS= read -r f; do
+			test_path_exists $f || exit 1
+		done <files.txt
+	)
+'
+
+test_expect_success 'cache-local-objects task succeeds with missing .rev or .keep files' '
+	test_when_finished "rm -rf repo cache" &&
+	mkdir -p cache/pack &&
+	git init repo &&
+	(
+		cd repo &&
+
+		test_commit something &&
+		git config set gvfs.sharedcache ../cache &&
+		git config set maintenance.gc.enabled false &&
+		git config set maintenance.cache-local-objects.enabled true &&
+		git config set maintenance.cache-local-objects.auto 1 &&
+
+		test_import_packfile &&
+		test_get_packdir_files "*.pack" "*.idx" >src.txt &&
+		test_get_loose_object_files >>src.txt &&
+
+		rm -f .git/objects/pack/*.rev .git/objects/pack/*.keep &&
+
+		sed "s|\.git/objects/|../cache/|" src.txt >dst.txt &&
+
+		git maintenance run &&
+		while IFS= read -r f; do
+			test_path_is_missing $f || exit 1
+		done <src.txt &&
+
+		while IFS= read -r f; do
+			test_path_exists $f || exit 1
+		done <dst.txt
+	)
+'
+
+test_expect_success 'cache-local-objects task success' '
+	test_when_finished "rm -rf repo cache" &&
+	mkdir -p cache/pack &&
+	git init repo &&
+	(
+		cd repo &&
+
+		test_commit something &&
+		git config set gvfs.sharedcache ../cache &&
+		git config set maintenance.gc.enabled false &&
+		git config set maintenance.cache-local-objects.enabled true &&
+		git config set maintenance.cache-local-objects.auto 1 &&
+
+		test_import_packfile &&
+		test_get_packdir_files "*.pack" "*.idx" "*.keep" "*.rev" \
+			>src.txt &&
+		test_get_loose_object_files >>src.txt &&
+
+		sed "s|\.git/objects/|../cache/|" src.txt >dst.txt &&
+
+		git maintenance run &&
+		while IFS= read -r f; do
+			test_path_is_missing $f || exit 1
+		done <src.txt &&
+
+		while IFS= read -r f; do
+			test_path_exists $f || exit 1
+		done <dst.txt
+	)
+'
+
 test_done

From 510b7d49c39d9f4770989267005ac5449b97ae10 Mon Sep 17 00:00:00 2001
From: Matthew John Cheetham <mjcheetham@outlook.com>
Date: Thu, 23 Jan 2025 09:16:19 +0000
Subject: [PATCH 206/207] scalar.c: add cache-local-objects task

Add the `cache-local-objects` maintenance task to the list of tasks that
can be invoked via the `scalar run` command. It is often easier for users
to run the shorter `scalar run` command than the equivalent `git
maintenance` invocation.
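
For example, from inside an enlistment with `gvfs.sharedcache` configured,
users can now invoke the task directly (an illustrative invocation that
maps onto the equivalent `git maintenance run --task=cache-local-objects`):

    scalar run cache-local-objects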

Signed-off-by: Matthew John Cheetham <mjcheetham@outlook.com>
---
 scalar.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/scalar.c b/scalar.c
index f4e75c250d0100..566991ea6755de 100644
--- a/scalar.c
+++ b/scalar.c
@@ -1170,6 +1170,7 @@ static int cmd_run(int argc, const char **argv)
 		{ "fetch", "prefetch" },
 		{ "loose-objects", "loose-objects" },
 		{ "pack-files", "incremental-repack" },
+		{ "cache-local-objects", "cache-local-objects" },
 		{ NULL, NULL }
 	};
 	struct strbuf buf = STRBUF_INIT;

From e762f1e539d23461ce693a44260ce08e08024549 Mon Sep 17 00:00:00 2001
From: Johannes Schindelin <johannes.schindelin@gmx.de>
Date: Wed, 5 Feb 2025 18:49:59 +0100
Subject: [PATCH 207/207] fixup! ci: run Scalar's Functional Tests

The Scalar Functional Tests run at
https://github.com/microsoft/git/actions/runs/13074318147 was
automatically failed because it uses the deprecated v3 of
`actions/upload-artifact`.

The error message suggested learning more at:
https://github.blog/changelog/2024-04-16-deprecation-notice-v3-of-the-artifact-actions/

We may very well soon decide to stop running the Scalar Functional Tests
altogether, since basically all of the tests (except for integration
testing with the remote `gvfs/ci` repository) have equivalent coverage in
`microsoft/git`'s own test suite. Until then, upgrade the
`actions/upload-artifact` action to v4 so the workflow keeps running.

Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
---
 .github/workflows/scalar-functional-tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/scalar-functional-tests.yml b/.github/workflows/scalar-functional-tests.yml
index a5946bc33939d6..4886c20450f726 100644
--- a/.github/workflows/scalar-functional-tests.yml
+++ b/.github/workflows/scalar-functional-tests.yml
@@ -203,7 +203,7 @@ jobs:
 
       - name: Archive Trace2 Logs
         if: ( success() || failure() ) && ( steps.trace2_zip_unix.conclusion == 'success' || steps.trace2_zip_windows.conclusion == 'success' )
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: ${{ env.TRACE2_BASENAME }}.zip
           path: scalar/${{ env.TRACE2_BASENAME }}.zip