diff --git a/go.mod b/go.mod
index 1546b0537f..de7efeac5b 100644
--- a/go.mod
+++ b/go.mod
@@ -23,7 +23,7 @@ require (
 	golang.org/x/term v0.27.0
 	golang.org/x/text v0.21.0
 	google.golang.org/grpc v1.69.0
-	google.golang.org/protobuf v1.35.2
+	google.golang.org/protobuf v1.36.0
 	gopkg.in/yaml.v3 v3.0.1
 	k8s.io/api v0.33.0-alpha.0
 	k8s.io/apimachinery v0.33.0-alpha.0
diff --git a/go.sum b/go.sum
index abfedad2f6..6b7bb4fff7 100644
--- a/go.sum
+++ b/go.sum
@@ -222,8 +222,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:
 google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
 google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
 google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
-google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
-google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
+google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index 2c0693d7ab..5a57ef6f3c 100644
Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index f325298564..378b826faa 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -117,6 +117,9 @@ type (
 		// GenerateLegacyUnmarshalJSON determines if the plugin generates the
 		// UnmarshalJSON([]byte) error method for enums.
 		GenerateLegacyUnmarshalJSON bool
+		// APILevel controls which API (Open, Hybrid or Opaque) should be used
+		// for generated code (.pb.go files).
+		APILevel int
 	}
 )
 
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 7611796e86..10132c9b38 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -32,6 +32,10 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
 			v, m := protowire.ConsumeVarint(b)
 			b = b[m:]
 			parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v)
+		case genid.GoFeatures_ApiLevel_field_number:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			parent.APILevel = int(v)
 		case genid.GoFeatures_StripEnumPrefix_field_number:
 			v, m := protowire.ConsumeVarint(b)
 			b = b[m:]
diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
index 09792d96f6..f5ee7f5c2b 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
@@ -21,18 +21,35 @@ const (
 // Field names for pb.GoFeatures.
 const (
 	GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
+	GoFeatures_ApiLevel_field_name                protoreflect.Name = "api_level"
 	GoFeatures_StripEnumPrefix_field_name         protoreflect.Name = "strip_enum_prefix"
 
 	GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum"
+	GoFeatures_ApiLevel_field_fullname                protoreflect.FullName = "pb.GoFeatures.api_level"
 	GoFeatures_StripEnumPrefix_field_fullname         protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix"
 )
 
 // Field numbers for pb.GoFeatures.
 const (
 	GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
+	GoFeatures_ApiLevel_field_number                protoreflect.FieldNumber = 2
 	GoFeatures_StripEnumPrefix_field_number         protoreflect.FieldNumber = 3
 )
 
+// Full and short names for pb.GoFeatures.APILevel.
+const (
+	GoFeatures_APILevel_enum_fullname = "pb.GoFeatures.APILevel"
+	GoFeatures_APILevel_enum_name     = "APILevel"
+)
+
+// Enum values for pb.GoFeatures.APILevel.
+const (
+	GoFeatures_API_LEVEL_UNSPECIFIED_enum_value = 0
+	GoFeatures_API_OPEN_enum_value              = 1
+	GoFeatures_API_HYBRID_enum_value            = 2
+	GoFeatures_API_OPAQUE_enum_value            = 3
+)
+
 // Full and short names for pb.GoFeatures.StripEnumPrefix.
 const (
 	GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix"
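Side note (not part of the vendored change): the constants above define the new api_level feature as field 2 of pb.GoFeatures, with API_OPAQUE == 3. The standalone sketch below uses only the public protowire package to encode such a feature value and then consume it the same way unmarshalGoFeature in editions.go does; all names local to the sketch are illustrative.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode a pb.GoFeatures payload that sets only api_level (field 2)
	// to 3, the value go_features_gen.go names API_OPAQUE.
	b := protowire.AppendTag(nil, 2, protowire.VarintType)
	b = protowire.AppendVarint(b, 3)

	// Walk the payload the way unmarshalGoFeature does: read a tag, then
	// the varint value for the field numbers it knows about.
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		b = b[n:]
		if num == 2 && typ == protowire.VarintType {
			v, m := protowire.ConsumeVarint(b)
			b = b[m:]
			fmt.Println("api_level:", v) // prints: api_level: 3
			continue
		}
		b = b[protowire.ConsumeFieldValue(num, typ, b):]
	}
}
```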
diff --git a/vendor/google.golang.org/protobuf/internal/genid/name.go b/vendor/google.golang.org/protobuf/internal/genid/name.go
new file mode 100644
index 0000000000..224f339302
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/name.go
@@ -0,0 +1,12 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package genid
+
+const (
+	NoUnkeyedLiteral_goname  = "noUnkeyedLiteral"
+	NoUnkeyedLiteralA_goname = "XXX_NoUnkeyedLiteral"
+
+	BuilderSuffix_goname = "_builder"
+)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
new file mode 100644
index 0000000000..6075d6f696
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
@@ -0,0 +1,128 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"strconv"
+	"sync/atomic"
+	"unsafe"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func (Export) UnmarshalField(msg any, fieldNum int32) {
+	UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum))
+}
+
+// Present checks the presence set for a certain field number (zero
+// based, ordered by appearance in original proto file). part is
+// a pointer to the correct element in the bitmask array, num is the
+// field number unaltered. For example, field number 70 maps to
+// part = &m.XXX_presence[2] (70/32 == 2) with num = 70.
+func (Export) Present(part *uint32, num uint32) bool {
+	// This hook will read an unprotected shadow presence set if
+	// we're running under the race detector
+	raceDetectHookPresent(part, num)
+	return atomic.LoadUint32(part)&(1<<(num%32)) > 0
+}
+
+// SetPresent adds a field to the presence set. part is a pointer to
+// the relevant element in the array and num is the field number
+// unaltered.  size is the number of fields in the protocol
+// buffer.
+func (Export) SetPresent(part *uint32, num uint32, size uint32) {
+	// This hook will mutate an unprotected shadow presence set if
+	// we're running under the race detector
+	raceDetectHookSetPresent(part, num, presenceSize(size))
+	for {
+		old := atomic.LoadUint32(part)
+		if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) {
+			return
+		}
+	}
+}
+
+// SetPresentNonAtomic is like SetPresent, but operates non-atomically.
+// It is meant for use by builder methods, where the message is known not
+// to be accessible yet by other goroutines.
+func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) {
+	// This hook will mutate an unprotected shadow presence set if
+	// we're running under the race detector
+	raceDetectHookSetPresent(part, num, presenceSize(size))
+	*part |= 1 << (num % 32)
+}
+
+// ClearPresent removes a field from the presence set. part is a
+// pointer to the relevant element in the presence array and num is
+// the field number unaltered.
+func (Export) ClearPresent(part *uint32, num uint32) {
+	// This hook will mutate an unprotected shadow presence set if
+	// we're running under the race detector
+	raceDetectHookClearPresent(part, num)
+	for {
+		old := atomic.LoadUint32(part)
+		if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) {
+			return
+		}
+	}
+}
+
+// interfaceToPointer takes a pointer to an empty interface whose value is a
+// pointer type, and converts it into a "pointer" that points to the same
+// target
+func interfaceToPointer(i *any) pointer {
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+func (p pointer) atomicGetPointer() pointer {
+	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+func (p pointer) atomicSetPointer(q pointer) {
+	atomic.StorePointer((*unsafe.Pointer)(p.p), q.p)
+}
+
+// AtomicCheckPointerIsNil takes an interface (which is a pointer to a
+// pointer) and returns true if the pointed-to pointer is nil (using an
+// atomic load).  This function is inlineable and, on x86, just becomes a
+// simple load and compare.
+func (Export) AtomicCheckPointerIsNil(ptr any) bool {
+	return interfaceToPointer(&ptr).atomicGetPointer().IsNil()
+}
+
+// AtomicSetPointer takes two interfaces (first is a pointer to a pointer,
+// second is a pointer) and atomically sets the second pointer into location
+// referenced by first pointer.  Unfortunately, atomicSetPointer() does not inline
+// (even on x86), so this does not become a simple store on x86.
+func (Export) AtomicSetPointer(dstPtr, valPtr any) {
+	interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr))
+}
+
+// AtomicLoadPointer loads the pointer at the location pointed at by ptr,
+// and stores that pointer value into the location pointed at by dst.
+func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) {
+	*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
+}
+
+// AtomicInitializePointer makes ptr and dst point to the same value.
+//
+// If *ptr is a nil pointer, it sets *ptr = *dst.
+//
+// If *ptr is a non-nil pointer, it sets *dst = *ptr.
+func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) {
+	if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) {
+		*(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
+	}
+}
+
+// MessageFieldStringOf returns the field formatted as a string,
+// either as the field name if resolvable otherwise as a decimal string.
+func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string {
+	fd := md.Fields().ByNumber(n)
+	if fd != nil {
+		return string(fd.Name())
+	}
+	return strconv.Itoa(int(n))
+}
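All of the presence helpers above use the same mapping: a field's presence bit is bit num%32 of a uint32 word, and the caller passes a pointer to the word that holds that bit (index num/32 of the XXX_presence array). A minimal self-contained sketch of that arithmetic, using local names rather than the internal Export API:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// setPresent sets bit n%32 of word n/32, mirroring Export.SetPresent's
// compare-and-swap loop.
func setPresent(words []uint32, n uint32) {
	w := &words[n/32]
	for {
		old := atomic.LoadUint32(w)
		if atomic.CompareAndSwapUint32(w, old, old|1<<(n%32)) {
			return
		}
	}
}

// present reports whether bit n%32 of word n/32 is set, mirroring Export.Present.
func present(words []uint32, n uint32) bool {
	return atomic.LoadUint32(&words[n/32])&(1<<(n%32)) != 0
}

func main() {
	presence := make([]uint32, 3) // room for presence indices 0..95
	setPresent(presence, 70)      // word 70/32 == 2, bit 70%32 == 6
	fmt.Println(present(presence, 70), present(presence, 6)) // true false
}
```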
diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
new file mode 100644
index 0000000000..ea276547cd
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
@@ -0,0 +1,34 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !race
+
+package impl
+
+// There is no additional data as we're not running under race detector.
+type RaceDetectHookData struct{}
+
+// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away.
+func (presence) raceDetectHookPresent(num uint32)                       {}
+func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {}
+func (presence) raceDetectHookClearPresent(num uint32)                  {}
+func (presence) raceDetectHookAllocAndCopy(src presence)                {}
+
+// raceDetectHookPresent is called by the generated file interface
+// (*proto.internalFuncs) Present to optionally read an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookPresent(field *uint32, num uint32) {}
+
+// raceDetectHookSetPresent is called by the generated file interface
+// (*proto.internalFuncs) SetPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {}
+
+// raceDetectHookClearPresent is called by the generated file interface
+// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookClearPresent(field *uint32, num uint32) {}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
new file mode 100644
index 0000000000..e9a27583ae
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
@@ -0,0 +1,126 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+package impl
+
+// When running under the race detector, we keep a shadow presence map of bytes that the
+// hook functions access without synchronization, so that the detector reports concurrent
+// read/write or write/write access. The race detector does not otherwise detect invalid
+// concurrent access to lazy fields, as all updates of bitmaps and pointers use atomic operations.
+type RaceDetectHookData struct {
+	shadowPresence *[]byte
+}
+
+// Hooks for presence bitmap operations that allocate, read and write the shadowPresence
+// using non-atomic operations.
+func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) {
+	sp := make([]byte, size)
+	atomicStoreShadowPresence(&data.shadowPresence, &sp)
+}
+
+func (p presence) raceDetectHookPresent(num uint32) {
+	data := p.toRaceDetectData()
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp != nil {
+		_ = (*sp)[num]
+	}
+}
+
+func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) {
+	data := p.toRaceDetectData()
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp == nil {
+		data.raceDetectHookAlloc(size)
+		sp = atomicLoadShadowPresence(&data.shadowPresence)
+	}
+	(*sp)[num] = 1
+}
+
+func (p presence) raceDetectHookClearPresent(num uint32) {
+	data := p.toRaceDetectData()
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp != nil {
+		(*sp)[num] = 0
+
+	}
+}
+
+// raceDetectHookAllocAndCopy allocates a new shadowPresence slice for the destination p
+// and copies the shadowPresence bytes from the source q into it.
+func (p presence) raceDetectHookAllocAndCopy(q presence) {
+	sData := q.toRaceDetectData()
+	dData := p.toRaceDetectData()
+	if sData == nil {
+		return
+	}
+	srcSp := atomicLoadShadowPresence(&sData.shadowPresence)
+	if srcSp == nil {
+		atomicStoreShadowPresence(&dData.shadowPresence, nil)
+		return
+	}
+	n := len(*srcSp)
+	dSlice := make([]byte, n)
+	atomicStoreShadowPresence(&dData.shadowPresence, &dSlice)
+	for i := 0; i < n; i++ {
+		dSlice[i] = (*srcSp)[i]
+	}
+}
+
+// raceDetectHookPresent is called by the generated file interface
+// (*proto.internalFuncs) Present to optionally read an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookPresent(field *uint32, num uint32) {
+	data := findPointerToRaceDetectData(field, num)
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp != nil {
+		_ = (*sp)[num]
+	}
+}
+
+// raceDetectHookSetPresent is called by the generated file interface
+// (*proto.internalFuncs) SetPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {
+	data := findPointerToRaceDetectData(field, num)
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp == nil {
+		data.raceDetectHookAlloc(size)
+		sp = atomicLoadShadowPresence(&data.shadowPresence)
+	}
+	(*sp)[num] = 1
+}
+
+// raceDetectHookClearPresent is called by the generated file interface
+// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookClearPresent(field *uint32, num uint32) {
+	data := findPointerToRaceDetectData(field, num)
+	if data == nil {
+		return
+	}
+	sp := atomicLoadShadowPresence(&data.shadowPresence)
+	if sp != nil {
+		(*sp)[num] = 0
+	}
+}
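The design intent here: the real presence word is only ever touched through atomic operations, which the race detector treats as synchronized, so misuse would go unnoticed; the shadow byte slice is written with plain loads and stores purely so that -race has something to flag. A small standalone sketch of that mechanism (names and scenario are illustrative, not the library's actual usage rules); run it with go run -race:

```go
package main

import (
	"sync"
	"sync/atomic"
)

func main() {
	var bits uint32            // "real" presence word, always updated atomically
	shadow := make([]byte, 32) // shadow bytes, deliberately unsynchronized

	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// The atomic CAS alone is never reported by the race detector.
			for {
				old := atomic.LoadUint32(&bits)
				if atomic.CompareAndSwapUint32(&bits, old, old|1<<5) {
					break
				}
			}
			// The plain write is what -race reports: two goroutines writing
			// shadow[5] concurrently, exposing the conflict the atomics hide.
			shadow[5] = 1
		}()
	}
	wg.Wait()
}
```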
diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
index f29e6a8fa8..fe2c719ce4 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
@@ -35,6 +35,12 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
 		}
 		return nil
 	}
+
+	var presence presence
+	if mi.presenceOffset.IsValid() {
+		presence = p.Apply(mi.presenceOffset).PresenceInfo()
+	}
+
 	if mi.extensionOffset.IsValid() {
 		e := p.Apply(mi.extensionOffset).Extensions()
 		if err := mi.isInitExtensions(e); err != nil {
@@ -45,6 +51,33 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error {
 		if !f.isRequired && f.funcs.isInit == nil {
 			continue
 		}
+
+		if f.presenceIndex != noPresence {
+			if !presence.Present(f.presenceIndex) {
+				if f.isRequired {
+					return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
+				}
+				continue
+			}
+			if f.funcs.isInit != nil {
+				f.mi.init()
+				if f.mi.needsInitCheck {
+					if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() {
+						lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
+						if !lazy.AllowedPartial() {
+							// Nothing to see here, it was checked on unmarshal
+							continue
+						}
+						mi.lazyUnmarshal(p, f.num)
+					}
+					if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil {
+						return err
+					}
+				}
+			}
+			continue
+		}
+
 		fptr := p.Apply(f.offset)
 		if f.isPointer && fptr.Elem().IsNil() {
 			if f.isRequired {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
new file mode 100644
index 0000000000..76818ea252
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
@@ -0,0 +1,264 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"fmt"
+	"reflect"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+	mi := getMessageInfo(ft)
+	if mi == nil {
+		panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft))
+	}
+	switch fd.Kind() {
+	case protoreflect.MessageKind:
+		return mi, pointerCoderFuncs{
+			size:      sizeOpaqueMessage,
+			marshal:   appendOpaqueMessage,
+			unmarshal: consumeOpaqueMessage,
+			isInit:    isInitOpaqueMessage,
+			merge:     mergeOpaqueMessage,
+		}
+	case protoreflect.GroupKind:
+		return mi, pointerCoderFuncs{
+			size:      sizeOpaqueGroup,
+			marshal:   appendOpaqueGroup,
+			unmarshal: consumeOpaqueGroup,
+			isInit:    isInitOpaqueMessage,
+			merge:     mergeOpaqueMessage,
+		}
+	}
+	panic("unexpected field kind")
+}
+
+func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize
+}
+
+func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	mp := p.AtomicGetPointer()
+	calculatedSize := f.mi.sizePointer(mp, opts)
+	b = protowire.AppendVarint(b, f.wiretag)
+	b = protowire.AppendVarint(b, uint64(calculatedSize))
+	before := len(b)
+	b, err := f.mi.marshalAppendPointer(b, mp, opts)
+	if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
+		return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
+	}
+	return b, err
+}
+
+func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.BytesType {
+		return out, errUnknown
+	}
+	v, n := protowire.ConsumeBytes(b)
+	if n < 0 {
+		return out, errDecode
+	}
+	mp := p.AtomicGetPointer()
+	if mp.IsNil() {
+		mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+	}
+	o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
+	if err != nil {
+		return out, err
+	}
+	out.n = n
+	out.initialized = o.initialized
+	return out, nil
+}
+
+func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error {
+	mp := p.AtomicGetPointer()
+	if mp.IsNil() {
+		return nil
+	}
+	return f.mi.checkInitializedPointer(mp)
+}
+
+func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+	dstmp := dst.AtomicGetPointer()
+	if dstmp.IsNil() {
+		dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+	}
+	f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts)
+}
+
+func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts)
+}
+
+func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	b = protowire.AppendVarint(b, f.wiretag) // start group
+	b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts)
+	b = protowire.AppendVarint(b, f.wiretag+1) // end group
+	return b, err
+}
+
+func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.StartGroupType {
+		return out, errUnknown
+	}
+	mp := p.AtomicGetPointer()
+	if mp.IsNil() {
+		mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+	}
+	o, e := f.mi.unmarshalPointer(b, mp, f.num, opts)
+	return o, e
+}
+
+func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft))
+	}
+	mt := ft.Elem().Elem() // *[]*T -> *T
+	mi := getMessageInfo(mt)
+	if mi == nil {
+		panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt))
+	}
+	switch fd.Kind() {
+	case protoreflect.MessageKind:
+		return mi, pointerCoderFuncs{
+			size:      sizeOpaqueMessageSlice,
+			marshal:   appendOpaqueMessageSlice,
+			unmarshal: consumeOpaqueMessageSlice,
+			isInit:    isInitOpaqueMessageSlice,
+			merge:     mergeOpaqueMessageSlice,
+		}
+	case protoreflect.GroupKind:
+		return mi, pointerCoderFuncs{
+			size:      sizeOpaqueGroupSlice,
+			marshal:   appendOpaqueGroupSlice,
+			unmarshal: consumeOpaqueGroupSlice,
+			isInit:    isInitOpaqueMessageSlice,
+			merge:     mergeOpaqueMessageSlice,
+		}
+	}
+	panic("unexpected field kind")
+}
+
+func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	s := p.AtomicGetPointer().PointerSlice()
+	n := 0
+	for _, v := range s {
+		n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize
+	}
+	return n
+}
+
+func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	s := p.AtomicGetPointer().PointerSlice()
+	var err error
+	for _, v := range s {
+		b = protowire.AppendVarint(b, f.wiretag)
+		siz := f.mi.sizePointer(v, opts)
+		b = protowire.AppendVarint(b, uint64(siz))
+		before := len(b)
+		b, err = f.mi.marshalAppendPointer(b, v, opts)
+		if err != nil {
+			return b, err
+		}
+		if measuredSize := len(b) - before; siz != measuredSize {
+			return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+		}
+	}
+	return b, nil
+}
+
+func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.BytesType {
+		return out, errUnknown
+	}
+	v, n := protowire.ConsumeBytes(b)
+	if n < 0 {
+		return out, errDecode
+	}
+	mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+	o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
+	if err != nil {
+		return out, err
+	}
+	sp := p.AtomicGetPointer()
+	if sp.IsNil() {
+		sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+	}
+	sp.AppendPointerSlice(mp)
+	out.n = n
+	out.initialized = o.initialized
+	return out, nil
+}
+
+func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error {
+	sp := p.AtomicGetPointer()
+	if sp.IsNil() {
+		return nil
+	}
+	s := sp.PointerSlice()
+	for _, v := range s {
+		if err := f.mi.checkInitializedPointer(v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+	ds := dst.AtomicGetPointer()
+	if ds.IsNil() {
+		ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+	}
+	for _, sp := range src.AtomicGetPointer().PointerSlice() {
+		dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+		f.mi.mergePointer(dm, sp, opts)
+		ds.AppendPointerSlice(dm)
+	}
+}
+
+func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+	s := p.AtomicGetPointer().PointerSlice()
+	n := 0
+	for _, v := range s {
+		n += 2*f.tagsize + f.mi.sizePointer(v, opts)
+	}
+	return n
+}
+
+func appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+	s := p.AtomicGetPointer().PointerSlice()
+	var err error
+	for _, v := range s {
+		b = protowire.AppendVarint(b, f.wiretag) // start group
+		b, err = f.mi.marshalAppendPointer(b, v, opts)
+		if err != nil {
+			return b, err
+		}
+		b = protowire.AppendVarint(b, f.wiretag+1) // end group
+	}
+	return b, nil
+}
+
+func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	if wtyp != protowire.StartGroupType {
+		return out, errUnknown
+	}
+	mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+	out, err = f.mi.unmarshalPointer(b, mp, f.num, opts)
+	if err != nil {
+		return out, err
+	}
+	sp := p.AtomicGetPointer()
+	if sp.IsNil() {
+		sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+	}
+	sp.AppendPointerSlice(mp)
+	return out, err
+}
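For context, the two coder families above emit the two standard submessage framings: MessageKind fields are length-delimited (tag, varint length, payload), while GroupKind fields are bracketed by start-group and end-group tags, which is why sizeOpaqueGroup charges 2*f.tagsize and appendOpaqueGroup writes f.wiretag and f.wiretag+1. A hedged standalone illustration using only the public protowire package (field numbers are arbitrary):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// A tiny "submessage": field 1 as a varint with value 150.
	payload := protowire.AppendTag(nil, 1, protowire.VarintType)
	payload = protowire.AppendVarint(payload, 150)

	// MessageKind framing (what appendOpaqueMessage produces): tag, length, payload.
	msg := protowire.AppendTag(nil, 4, protowire.BytesType)
	msg = protowire.AppendBytes(msg, payload)

	// GroupKind framing (what appendOpaqueGroup produces): start tag, payload, end tag.
	grp := protowire.AppendTag(nil, 4, protowire.StartGroupType)
	grp = append(grp, payload...)
	grp = protowire.AppendTag(grp, 4, protowire.EndGroupType)

	fmt.Printf("message framing: % x\n", msg)
	fmt.Printf("group framing:   % x\n", grp)
}
```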
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 78be9df342..2f7b363ec4 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -32,6 +32,10 @@ type coderMessageInfo struct {
 	needsInitCheck     bool
 	isMessageSet       bool
 	numRequiredFields  uint8
+
+	lazyOffset     offset
+	presenceOffset offset
+	presenceSize   presenceSize
 }
 
 type coderFieldInfo struct {
@@ -45,12 +49,19 @@ type coderFieldInfo struct {
 	tagsize    int                      // size of the varint-encoded tag
 	isPointer  bool                     // true if IsNil may be called on the struct field
 	isRequired bool                     // true if field is required
+
+	isLazy        bool
+	presenceIndex uint32
 }
 
+const noPresence = 0xffffffff
+
 func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 	mi.sizecacheOffset = invalidOffset
 	mi.unknownOffset = invalidOffset
 	mi.extensionOffset = invalidOffset
+	mi.lazyOffset = invalidOffset
+	mi.presenceOffset = si.presenceOffset
 
 	if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType {
 		mi.sizecacheOffset = si.sizecacheOffset
@@ -127,6 +138,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 			validation: newFieldValidationInfo(mi, si, fd, ft),
 			isPointer:  fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(),
 			isRequired: fd.Cardinality() == protoreflect.Required,
+
+			presenceIndex: noPresence,
 		}
 		mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
 		mi.coderFields[cf.num] = cf
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
new file mode 100644
index 0000000000..88c16ae5b7
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -0,0 +1,156 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/encoding/messageset"
+	"google.golang.org/protobuf/internal/order"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) {
+	mi.sizecacheOffset = si.sizecacheOffset
+	mi.unknownOffset = si.unknownOffset
+	mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr
+	mi.extensionOffset = si.extensionOffset
+	mi.lazyOffset = si.lazyOffset
+	mi.presenceOffset = si.presenceOffset
+
+	mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
+	fields := mi.Desc.Fields()
+	for i := 0; i < fields.Len(); i++ {
+		fd := fields.Get(i)
+
+		fs := si.fieldsByNumber[fd.Number()]
+		if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() {
+			fs = si.oneofsByName[fd.ContainingOneof().Name()]
+		}
+		ft := fs.Type
+		var wiretag uint64
+		if !fd.IsPacked() {
+			wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()])
+		} else {
+			wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType)
+		}
+		var fieldOffset offset
+		var funcs pointerCoderFuncs
+		var childMessage *MessageInfo
+		switch {
+		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+			fieldOffset = offsetOf(fs, mi.Exporter)
+		case fd.IsWeak():
+			fieldOffset = si.weakOffset
+			funcs = makeWeakMessageFieldCoder(fd)
+		case fd.Message() != nil && !fd.IsMap():
+			fieldOffset = offsetOf(fs, mi.Exporter)
+			if fd.IsList() {
+				childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft)
+			} else {
+				childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft)
+			}
+		default:
+			fieldOffset = offsetOf(fs, mi.Exporter)
+			childMessage, funcs = fieldCoder(fd, ft)
+		}
+		cf := &coderFieldInfo{
+			num:        fd.Number(),
+			offset:     fieldOffset,
+			wiretag:    wiretag,
+			ft:         ft,
+			tagsize:    protowire.SizeVarint(wiretag),
+			funcs:      funcs,
+			mi:         childMessage,
+			validation: newFieldValidationInfo(mi, si.structInfo, fd, ft),
+			isPointer: (fd.Cardinality() == protoreflect.Repeated ||
+				fd.Kind() == protoreflect.MessageKind ||
+				fd.Kind() == protoreflect.GroupKind),
+			isRequired:    fd.Cardinality() == protoreflect.Required,
+			presenceIndex: noPresence,
+		}
+
+		// TODO: Use presence for all fields.
+		//
+		// In some cases, such as maps, presence means only "might be set" rather
+		// than "is definitely set", but every field should have a presence bit to
+		// permit us to skip over definitely-unset fields at marshal time.
+
+		var hasPresence bool
+		hasPresence, cf.isLazy = usePresenceForField(si, fd)
+
+		if hasPresence {
+			cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
+		}
+
+		mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
+		mi.coderFields[cf.num] = cf
+	}
+	for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ {
+		if od := oneofs.Get(i); !od.IsSynthetic() {
+			mi.initOneofFieldCoders(od, si.structInfo)
+		}
+	}
+	if messageset.IsMessageSet(mi.Desc) {
+		if !mi.extensionOffset.IsValid() {
+			panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName()))
+		}
+		if !mi.unknownOffset.IsValid() {
+			panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName()))
+		}
+		mi.isMessageSet = true
+	}
+	sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+		return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num
+	})
+
+	var maxDense protoreflect.FieldNumber
+	for _, cf := range mi.orderedCoderFields {
+		if cf.num >= 16 && cf.num >= 2*maxDense {
+			break
+		}
+		maxDense = cf.num
+	}
+	mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1)
+	for _, cf := range mi.orderedCoderFields {
+		if int(cf.num) > len(mi.denseCoderFields) {
+			break
+		}
+		mi.denseCoderFields[cf.num] = cf
+	}
+
+	// To preserve compatibility with historic wire output, marshal oneofs last.
+	if mi.Desc.Oneofs().Len() > 0 {
+		sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+			fi := fields.ByNumber(mi.orderedCoderFields[i].num)
+			fj := fields.ByNumber(mi.orderedCoderFields[j].num)
+			return order.LegacyFieldOrder(fi, fj)
+		})
+	}
+
+	mi.needsInitCheck = needsInitCheck(mi.Desc)
+	if mi.methods.Marshal == nil && mi.methods.Size == nil {
+		mi.methods.Flags |= piface.SupportMarshalDeterministic
+		mi.methods.Marshal = mi.marshal
+		mi.methods.Size = mi.size
+	}
+	if mi.methods.Unmarshal == nil {
+		mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown
+		mi.methods.Unmarshal = mi.unmarshal
+	}
+	if mi.methods.CheckInitialized == nil {
+		mi.methods.CheckInitialized = mi.checkInitialized
+	}
+	if mi.methods.Merge == nil {
+		mi.methods.Merge = mi.merge
+	}
+	if mi.methods.Equal == nil {
+		mi.methods.Equal = equal
+	}
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
index cda0520c27..e0dd21fa5f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -34,6 +34,8 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions {
 		AllowPartial:   true,
 		DiscardUnknown: o.DiscardUnknown(),
 		Resolver:       o.resolver,
+
+		NoLazyDecoding: o.NoLazyDecoding(),
 	}
 }
 
@@ -41,13 +43,26 @@ func (o unmarshalOptions) DiscardUnknown() bool {
 	return o.flags&protoiface.UnmarshalDiscardUnknown != 0
 }
 
-func (o unmarshalOptions) IsDefault() bool {
-	return o.flags == 0 && o.resolver == protoregistry.GlobalTypes
+func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 }
+func (o unmarshalOptions) Validated() bool   { return o.flags&protoiface.UnmarshalValidated != 0 }
+func (o unmarshalOptions) NoLazyDecoding() bool {
+	return o.flags&protoiface.UnmarshalNoLazyDecoding != 0
+}
+
+func (o unmarshalOptions) CanBeLazy() bool {
+	if o.resolver != protoregistry.GlobalTypes {
+		return false
+	}
+	// We ignore the UnmarshalInvalidateSizeCache even though it's not in the default set
+	return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0
 }
 
 var lazyUnmarshalOptions = unmarshalOptions{
 	resolver: protoregistry.GlobalTypes,
-	depth:    protowire.DefaultRecursionLimit,
+
+	flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated,
+
+	depth: protowire.DefaultRecursionLimit,
 }
 
 type unmarshalOutput struct {
@@ -94,9 +109,30 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
 	if flags.ProtoLegacy && mi.isMessageSet {
 		return unmarshalMessageSet(mi, b, p, opts)
 	}
+
+	lazyDecoding := LazyEnabled() // default
+	if opts.NoLazyDecoding() {
+		lazyDecoding = false // explicitly disabled
+	}
+	if mi.lazyOffset.IsValid() && lazyDecoding {
+		return mi.unmarshalPointerLazy(b, p, groupTag, opts)
+	}
+	return mi.unmarshalPointerEager(b, p, groupTag, opts)
+}
+
+// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy.
+// The corresponding function for lazy messages, unmarshalPointerLazy, is in lazy.go.
+func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+
 	initialized := true
 	var requiredMask uint64
 	var exts *map[int32]ExtensionField
+
+	var presence presence
+	if mi.presenceOffset.IsValid() {
+		presence = p.Apply(mi.presenceOffset).PresenceInfo()
+	}
+
 	start := len(b)
 	for len(b) > 0 {
 		// Parse the tag (field number and wire type).
@@ -154,6 +190,11 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.
 			if f.funcs.isInit != nil && !o.initialized {
 				initialized = false
 			}
+
+			if f.presenceIndex != noPresence {
+				presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+			}
+
 		default:
 			// Possible extension.
 			if exts == nil && mi.extensionOffset.IsValid() {
@@ -222,7 +263,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p
 		return out, errUnknown
 	}
 	if flags.LazyUnmarshalExtensions {
-		if opts.IsDefault() && x.canLazy(xt) {
+		if opts.CanBeLazy() && x.canLazy(xt) {
 			out, valid := skipExtension(b, xi, num, wtyp, opts)
 			switch valid {
 			case ValidationValid:
@@ -270,6 +311,13 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp
 		if n < 0 {
 			return out, ValidationUnknown
 		}
+
+		if opts.Validated() {
+			out.initialized = true
+			out.n = n
+			return out, ValidationValid
+		}
+
 		out, st := xi.validation.mi.validate(v, 0, opts)
 		out.n = n
 		return out, st
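The UnmarshalNoLazyDecoding flag checked above corresponds to the NoLazyDecoding field of the public proto.UnmarshalOptions (it is copied into Options() at the top of this hunk), so callers can opt out of lazy decoding per call. A brief usage sketch follows; it uses a well-known type so it compiles standalone, and whether lazy decoding would otherwise apply depends on the target being an opaque-API message with lazily decodable fields.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	src, err := structpb.NewStruct(map[string]any{"greeting": "hello"})
	if err != nil {
		panic(err)
	}
	wire, err := proto.Marshal(src)
	if err != nil {
		panic(err)
	}

	// Force eager decoding for this one call, regardless of whether the
	// message type would otherwise be decoded lazily.
	var dst structpb.Struct
	if err := (proto.UnmarshalOptions{NoLazyDecoding: true}).Unmarshal(wire, &dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.GetFields()["greeting"].GetStringValue())
}
```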
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
index 6254f5de41..b2e212291d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -10,6 +10,7 @@ import (
 	"sync/atomic"
 
 	"google.golang.org/protobuf/internal/flags"
+	"google.golang.org/protobuf/internal/protolazy"
 	"google.golang.org/protobuf/proto"
 	piface "google.golang.org/protobuf/runtime/protoiface"
 )
@@ -71,11 +72,39 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
 		e := p.Apply(mi.extensionOffset).Extensions()
 		size += mi.sizeExtensions(e, opts)
 	}
+
+	var lazy **protolazy.XXX_lazyUnmarshalInfo
+	var presence presence
+	if mi.presenceOffset.IsValid() {
+		presence = p.Apply(mi.presenceOffset).PresenceInfo()
+		if mi.lazyOffset.IsValid() {
+			lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+		}
+	}
+
 	for _, f := range mi.orderedCoderFields {
 		if f.funcs.size == nil {
 			continue
 		}
 		fptr := p.Apply(f.offset)
+
+		if f.presenceIndex != noPresence {
+			if !presence.Present(f.presenceIndex) {
+				continue
+			}
+
+			if f.isLazy && fptr.AtomicGetPointer().IsNil() {
+				if lazyFields(opts) {
+					size += (*lazy).SizeField(uint32(f.num))
+					continue
+				} else {
+					mi.lazyUnmarshal(p, f.num)
+				}
+			}
+			size += f.funcs.size(fptr, f, opts)
+			continue
+		}
+
 		if f.isPointer && fptr.Elem().IsNil() {
 			continue
 		}
@@ -134,11 +163,52 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
 			return b, err
 		}
 	}
+
+	var lazy **protolazy.XXX_lazyUnmarshalInfo
+	var presence presence
+	if mi.presenceOffset.IsValid() {
+		presence = p.Apply(mi.presenceOffset).PresenceInfo()
+		if mi.lazyOffset.IsValid() {
+			lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+		}
+	}
+
 	for _, f := range mi.orderedCoderFields {
 		if f.funcs.marshal == nil {
 			continue
 		}
 		fptr := p.Apply(f.offset)
+
+		if f.presenceIndex != noPresence {
+			if !presence.Present(f.presenceIndex) {
+				continue
+			}
+			if f.isLazy {
+				// Be careful, this field needs to be read atomically, like for a get
+				if f.isPointer && fptr.AtomicGetPointer().IsNil() {
+					if lazyFields(opts) {
+						b, _ = (*lazy).AppendField(b, uint32(f.num))
+						continue
+					} else {
+						mi.lazyUnmarshal(p, f.num)
+					}
+				}
+
+				b, err = f.funcs.marshal(b, fptr, f, opts)
+				if err != nil {
+					return b, err
+				}
+				continue
+			} else if f.isPointer && fptr.Elem().IsNil() {
+				continue
+			}
+			b, err = f.funcs.marshal(b, fptr, f, opts)
+			if err != nil {
+				return b, err
+			}
+			continue
+		}
+
 		if f.isPointer && fptr.Elem().IsNil() {
 			continue
 		}
@@ -163,6 +233,14 @@ func fullyLazyExtensions(opts marshalOptions) bool {
 	return opts.flags&piface.MarshalDeterministic == 0
 }
 
+// lazyFields reports whether lazily-decoded fields should be kept lazy (their raw bytes passed through) during size and marshal.
+func lazyFields(opts marshalOptions) bool {
+	// When deterministic marshaling is requested, force an unmarshal for lazy
+	// fields to produce a deterministic result, instead of passing through
+	// bytes lazily that may or may not match what Go Protobuf would produce.
+	return opts.flags&piface.MarshalDeterministic == 0
+}
+
 func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
 	if ext == nil {
 		return 0
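As the new lazyFields helper notes, deterministic marshaling gives up the lazy pass-through: retained wire bytes might not byte-for-byte match what Go Protobuf would emit, so size and marshal force an unmarshal first when determinism is requested. Requesting deterministic output is ordinary public API; a short sketch using a well-known type:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	msg, err := structpb.NewStruct(map[string]any{"b": 2.0, "a": 1.0, "c": 3.0})
	if err != nil {
		panic(err)
	}

	// Deterministic output orders map entries and, per lazyFields above, also
	// forces lazily retained fields to be re-encoded instead of copied through.
	out, err := proto.MarshalOptions{Deterministic: true}.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", out)
}
```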
diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
new file mode 100644
index 0000000000..e8fb6c35b4
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
@@ -0,0 +1,433 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"fmt"
+	"math/bits"
+	"os"
+	"reflect"
+	"sort"
+	"sync/atomic"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/internal/protolazy"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	preg "google.golang.org/protobuf/reflect/protoregistry"
+	piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+var enableLazy int32 = func() int32 {
+	if os.Getenv("GOPROTODEBUG") == "nolazy" {
+		return 0
+	}
+	return 1
+}()
+
+// EnableLazyUnmarshal enables lazy unmarshaling.
+func EnableLazyUnmarshal(enable bool) {
+	if enable {
+		atomic.StoreInt32(&enableLazy, 1)
+		return
+	}
+	atomic.StoreInt32(&enableLazy, 0)
+}
+
+// LazyEnabled reports whether lazy unmarshalling is currently enabled.
+func LazyEnabled() bool {
+	return atomic.LoadInt32(&enableLazy) != 0
+}
+
+// UnmarshalField unmarshals a field in a message.
+func UnmarshalField(m interface{}, num protowire.Number) {
+	switch m := m.(type) {
+	case *messageState:
+		m.messageInfo().lazyUnmarshal(m.pointer(), num)
+	case *messageReflectWrapper:
+		m.messageInfo().lazyUnmarshal(m.pointer(), num)
+	default:
+		panic(fmt.Sprintf("unsupported wrapper type %T", m))
+	}
+}
+
+func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) {
+	var f *coderFieldInfo
+	if int(num) < len(mi.denseCoderFields) {
+		f = mi.denseCoderFields[num]
+	} else {
+		f = mi.coderFields[num]
+	}
+	if f == nil {
+		panic(fmt.Sprintf("lazyUnmarshal: field info for %v.%v", mi.Desc.FullName(), num))
+	}
+	lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
+	start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num))
+	if !found && multipleEntries == nil {
+		panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num))
+	}
+	// The actual pointer in the message cannot be set until the whole struct is filled in; otherwise we would have races.
+	// Create another pointer and set it atomically if we won the race, i.e. if the pointer in the original message is still nil.
+	fp := pointerOfValue(reflect.New(f.ft))
+	if multipleEntries != nil {
+		for _, entry := range multipleEntries {
+			mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags())
+		}
+	} else {
+		mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags())
+	}
+	p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem())
+}
+
+func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error {
+	opts := lazyUnmarshalOptions
+	opts.flags |= flags
+	for len(b) > 0 {
+		// Parse the tag (field number and wire type).
+		var tag uint64
+		if b[0] < 0x80 {
+			tag = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			tag, n = protowire.ConsumeVarint(b)
+			if n < 0 {
+				return errors.New("invalid wire data")
+			}
+			b = b[n:]
+		}
+		var num protowire.Number
+		if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+			return errors.New("invalid wire data")
+		} else {
+			num = protowire.Number(n)
+		}
+		wtyp := protowire.Type(tag & 7)
+		if num == f.num {
+			o, err := f.funcs.unmarshal(b, p, wtyp, f, opts)
+			if err == nil {
+				b = b[o.n:]
+				continue
+			}
+			if err != errUnknown {
+				return err
+			}
+		}
+		n := protowire.ConsumeFieldValue(num, wtyp, b)
+		if n < 0 {
+			return errors.New("invalid wire data")
+		}
+		b = b[n:]
+	}
+	return nil
+}
+
+func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) {
+	fmi := f.validation.mi
+	if fmi == nil {
+		fd := mi.Desc.Fields().ByNumber(f.num)
+		if fd == nil || !fd.IsWeak() {
+			return out, ValidationUnknown
+		}
+		messageName := fd.Message().FullName()
+		messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
+		if err != nil {
+			return out, ValidationUnknown
+		}
+		var ok bool
+		fmi, ok = messageType.(*MessageInfo)
+		if !ok {
+			return out, ValidationUnknown
+		}
+	}
+	fmi.init()
+	switch f.validation.typ {
+	case validationTypeMessage:
+		if wtyp != protowire.BytesType {
+			return out, ValidationWrongWireType
+		}
+		v, n := protowire.ConsumeBytes(b)
+		if n < 0 {
+			return out, ValidationInvalid
+		}
+		out, st := fmi.validate(v, 0, opts)
+		out.n = n
+		return out, st
+	case validationTypeGroup:
+		if wtyp != protowire.StartGroupType {
+			return out, ValidationWrongWireType
+		}
+		out, st := fmi.validate(b, f.num, opts)
+		return out, st
+	default:
+		return out, ValidationUnknown
+	}
+}
+
+// unmarshalPointerLazy is similar to unmarshalPointerEager, but it
+// specifically handles lazy unmarshalling. It expects lazyOffset and
+// presenceOffset to both be valid.
+func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+	initialized := true
+	var requiredMask uint64
+	var lazy **protolazy.XXX_lazyUnmarshalInfo
+	var presence presence
+	var lazyIndex []protolazy.IndexEntry
+	var lastNum protowire.Number
+	outOfOrder := false
+	lazyDecode := false
+	presence = p.Apply(mi.presenceOffset).PresenceInfo()
+	lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+	if !presence.AnyPresent(mi.presenceSize) {
+		if opts.CanBeLazy() {
+			// If the message contains existing data, we need to merge into it.
+			// Lazy unmarshaling doesn't merge, so only enable it when the
+			// message is empty (no presence bits are set yet).
+			lazyDecode = true
+			if *lazy == nil {
+				*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+			}
+			(*lazy).SetUnmarshalFlags(opts.flags)
+			if !opts.AliasBuffer() {
+				// Make a copy of the buffer for lazy unmarshaling.
+				// Set the AliasBuffer flag so recursive unmarshal
+				// operations reuse the copy.
+				b = append([]byte{}, b...)
+				opts.flags |= piface.UnmarshalAliasBuffer
+			}
+			(*lazy).SetBuffer(b)
+		}
+	}
+	// Track special handling of lazy fields.
+	//
+	// In the common case, all fields are lazyValidateOnly (and lazyFields remains nil).
+	// If validation cannot determine that a field is valid, this map tracks how that field is handled.
+	type lazyAction uint8
+	const (
+		lazyValidateOnly   lazyAction = iota // validate the field only
+		lazyUnmarshalNow                     // eagerly unmarshal the field
+		lazyUnmarshalLater                   // unmarshal the field after the message is fully processed
+	)
+	var lazyFields map[*coderFieldInfo]lazyAction
+	var exts *map[int32]ExtensionField
+	start := len(b)
+	pos := 0
+	for len(b) > 0 {
+		// Parse the tag (field number and wire type).
+		var tag uint64
+		if b[0] < 0x80 {
+			tag = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			tag, n = protowire.ConsumeVarint(b)
+			if n < 0 {
+				return out, errDecode
+			}
+			b = b[n:]
+		}
+		var num protowire.Number
+		if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+			return out, errors.New("invalid field number")
+		} else {
+			num = protowire.Number(n)
+		}
+		wtyp := protowire.Type(tag & 7)
+
+		if wtyp == protowire.EndGroupType {
+			if num != groupTag {
+				return out, errors.New("mismatching end group marker")
+			}
+			groupTag = 0
+			break
+		}
+
+		var f *coderFieldInfo
+		if int(num) < len(mi.denseCoderFields) {
+			f = mi.denseCoderFields[num]
+		} else {
+			f = mi.coderFields[num]
+		}
+		var n int
+		err := errUnknown
+		discardUnknown := false
+	Field:
+		switch {
+		case f != nil:
+			if f.funcs.unmarshal == nil {
+				break
+			}
+			if f.isLazy && lazyDecode {
+				switch {
+				case lazyFields == nil || lazyFields[f] == lazyValidateOnly:
+					// Attempt to validate this field and leave it for later lazy unmarshaling.
+					o, valid := mi.skipField(b, f, wtyp, opts)
+					switch valid {
+					case ValidationValid:
+						// Skip over the valid field and continue.
+						err = nil
+						presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+						requiredMask |= f.validation.requiredBit
+						if !o.initialized {
+							initialized = false
+						}
+						n = o.n
+						break Field
+					case ValidationInvalid:
+						return out, errors.New("invalid proto wire format")
+					case ValidationWrongWireType:
+						break Field
+					case ValidationUnknown:
+						if lazyFields == nil {
+							lazyFields = make(map[*coderFieldInfo]lazyAction)
+						}
+						if presence.Present(f.presenceIndex) {
+							// We were unable to determine if the field is valid or not,
+							// and we've already skipped over at least one instance of this
+							// field. Clear the presence bit (so if we stop decoding early,
+							// we don't leave a partially-initialized field around) and flag
+							// the field for unmarshaling before we return.
+							presence.ClearPresent(f.presenceIndex)
+							lazyFields[f] = lazyUnmarshalLater
+							discardUnknown = true
+							break Field
+						} else {
+							// We were unable to determine if the field is valid or not,
+							// but this is the first time we've seen it. Flag it as needing
+							// eager unmarshaling and fall through to the eager unmarshal case below.
+							lazyFields[f] = lazyUnmarshalNow
+						}
+					}
+				case lazyFields[f] == lazyUnmarshalLater:
+					// This field will be unmarshaled in a separate pass below.
+					// Skip over it here.
+					discardUnknown = true
+					break Field
+				default:
+					// Eagerly unmarshal the field.
+				}
+			}
+			if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) {
+				if p.Apply(f.offset).AtomicGetPointer().IsNil() {
+					mi.lazyUnmarshal(p, f.num)
+				}
+			}
+			var o unmarshalOutput
+			o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts)
+			n = o.n
+			if err != nil {
+				break
+			}
+			requiredMask |= f.validation.requiredBit
+			if f.funcs.isInit != nil && !o.initialized {
+				initialized = false
+			}
+			if f.presenceIndex != noPresence {
+				presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+			}
+		default:
+			// Possible extension.
+			if exts == nil && mi.extensionOffset.IsValid() {
+				exts = p.Apply(mi.extensionOffset).Extensions()
+				if *exts == nil {
+					*exts = make(map[int32]ExtensionField)
+				}
+			}
+			if exts == nil {
+				break
+			}
+			var o unmarshalOutput
+			o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts)
+			if err != nil {
+				break
+			}
+			n = o.n
+			if !o.initialized {
+				initialized = false
+			}
+		}
+		if err != nil {
+			if err != errUnknown {
+				return out, err
+			}
+			n = protowire.ConsumeFieldValue(num, wtyp, b)
+			if n < 0 {
+				return out, errDecode
+			}
+			if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
+				u := mi.mutableUnknownBytes(p)
+				*u = protowire.AppendTag(*u, num, wtyp)
+				*u = append(*u, b[:n]...)
+			}
+		}
+		b = b[n:]
+		end := start - len(b)
+		if lazyDecode && f != nil && f.isLazy {
+			if num != lastNum {
+				lazyIndex = append(lazyIndex, protolazy.IndexEntry{
+					FieldNum: uint32(num),
+					Start:    uint32(pos),
+					End:      uint32(end),
+				})
+			} else {
+				i := len(lazyIndex) - 1
+				lazyIndex[i].End = uint32(end)
+				lazyIndex[i].MultipleContiguous = true
+			}
+		}
+		if num < lastNum {
+			outOfOrder = true
+		}
+		pos = end
+		lastNum = num
+	}
+	if groupTag != 0 {
+		return out, errors.New("missing end group marker")
+	}
+	if lazyFields != nil {
+		// Some fields failed validation, and now need to be unmarshaled.
+		for f, action := range lazyFields {
+			if action != lazyUnmarshalLater {
+				continue
+			}
+			initialized = false
+			if *lazy == nil {
+				*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+			}
+			if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil {
+				return out, err
+			}
+			presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+		}
+	}
+	if lazyDecode {
+		if outOfOrder {
+			sort.Slice(lazyIndex, func(i, j int) bool {
+				return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum ||
+					(lazyIndex[i].FieldNum == lazyIndex[j].FieldNum &&
+						lazyIndex[i].Start < lazyIndex[j].Start)
+			})
+		}
+		if *lazy == nil {
+			*lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+		}
+
+		(*lazy).SetIndex(lazyIndex)
+	}
+	if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
+		initialized = false
+	}
+	if initialized {
+		out.initialized = true
+	}
+	out.n = start - len(b)
+	return out, nil
+}
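Two smaller points about the loops above. Globally, lazy decoding can be switched off with EnableLazyUnmarshal(false) or by starting the process with GOPROTODEBUG=nolazy, as the enableLazy initializer shows. And both unmarshal loops open-code a one- and two-byte fast path for the tag varint before falling back to protowire.ConsumeVarint; the standalone sketch below (helper name is illustrative) checks that the fast path agrees with the general decoder.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// fastVarint mirrors the fast path in the unmarshal loops: one byte when
// b[0] < 0x80, two bytes when b[1] < 0x80, otherwise the general decoder.
func fastVarint(b []byte) (uint64, int) {
	switch {
	case len(b) > 0 && b[0] < 0x80:
		return uint64(b[0]), 1
	case len(b) >= 2 && b[1] < 0x80:
		return uint64(b[0]&0x7f) + uint64(b[1])<<7, 2
	default:
		return protowire.ConsumeVarint(b)
	}
}

func main() {
	for _, num := range []protowire.Number{1, 15, 16, 2047, 2048} {
		b := protowire.AppendTag(nil, num, protowire.VarintType)
		v, n := fastVarint(b)
		w, m := protowire.ConsumeVarint(b)
		fmt.Println(num, v == w, n == m) // all comparisons print true
	}
}
```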
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go
index 7e65f64f28..8ffdce67d3 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/merge.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go
@@ -41,11 +41,38 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) {
 	if src.IsNil() {
 		return
 	}
+
+	var presenceSrc presence
+	var presenceDst presence
+	if mi.presenceOffset.IsValid() {
+		presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo()
+		presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo()
+	}
+
 	for _, f := range mi.orderedCoderFields {
 		if f.funcs.merge == nil {
 			continue
 		}
 		sfptr := src.Apply(f.offset)
+
+		if f.presenceIndex != noPresence {
+			if !presenceSrc.Present(f.presenceIndex) {
+				continue
+			}
+			dfptr := dst.Apply(f.offset)
+			if f.isLazy {
+				if sfptr.AtomicGetPointer().IsNil() {
+					mi.lazyUnmarshal(src, f.num)
+				}
+				if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() {
+					mi.lazyUnmarshal(dst, f.num)
+				}
+			}
+			f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts)
+			presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+			continue
+		}
+
 		if f.isPointer && sfptr.Elem().IsNil() {
 			continue
 		}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index 741b5ed29c..fa10a0f5cc 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -79,6 +79,9 @@ func (mi *MessageInfo) initOnce() {
 	if mi.initDone == 1 {
 		return
 	}
+	if opaqueInitHook(mi) {
+		return
+	}
 
 	t := mi.GoReflectType
 	if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct {
@@ -133,6 +136,9 @@ type structInfo struct {
 	extensionOffset offset
 	extensionType   reflect.Type
 
+	lazyOffset     offset
+	presenceOffset offset
+
 	fieldsByNumber        map[protoreflect.FieldNumber]reflect.StructField
 	oneofsByName          map[protoreflect.Name]reflect.StructField
 	oneofWrappersByType   map[reflect.Type]protoreflect.FieldNumber
@@ -145,6 +151,8 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
 		weakOffset:      invalidOffset,
 		unknownOffset:   invalidOffset,
 		extensionOffset: invalidOffset,
+		lazyOffset:      invalidOffset,
+		presenceOffset:  invalidOffset,
 
 		fieldsByNumber:        map[protoreflect.FieldNumber]reflect.StructField{},
 		oneofsByName:          map[protoreflect.Name]reflect.StructField{},
@@ -175,6 +183,10 @@ fieldLoop:
 				si.extensionOffset = offsetOf(f, mi.Exporter)
 				si.extensionType = f.Type
 			}
+		case "lazyFields", "XXX_lazyUnmarshalInfo":
+			si.lazyOffset = offsetOf(f, mi.Exporter)
+		case "XXX_presence":
+			si.presenceOffset = offsetOf(f, mi.Exporter)
 		default:
 			for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
 				if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
new file mode 100644
index 0000000000..d407dd791e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -0,0 +1,614 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"strings"
+	"sync/atomic"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type opaqueStructInfo struct {
+	structInfo
+}
+
+// isOpaque determines whether a protobuf message type is on the Opaque API.  It
+// checks whether the type is a Go struct that protoc-gen-go would generate.
+//
+// This function only detects newly generated messages from the v2
+// implementation of protoc-gen-go. It is unable to classify generated messages
+// that are too old or those that are generated by a different generator
+// such as protoc-gen-gogo.
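+//
+// As an illustration (the exact tag value is generator-specific), an opaque
+// message is expected to begin with a field such as
+//
+//	state protoimpl.MessageState `protogen:"opaque.v1"`
+//
+// whereas messages generated for the open API carry no "protogen" tag on
+// their first field.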
+func isOpaque(t reflect.Type) bool {
+	// The current detection mechanism is to simply check the first field
+	// for a struct tag with the "protogen" key.
+	if t.Kind() == reflect.Struct && t.NumField() > 0 {
+		pgt := t.Field(0).Tag.Get("protogen")
+		return strings.HasPrefix(pgt, "opaque.")
+	}
+	return false
+}
+
+func opaqueInitHook(mi *MessageInfo) bool {
+	mt := mi.GoReflectType.Elem()
+	si := opaqueStructInfo{
+		structInfo: mi.makeStructInfo(mt),
+	}
+
+	if !isOpaque(mt) {
+		return false
+	}
+
+	defer atomic.StoreUint32(&mi.initDone, 1)
+
+	mi.fields = map[protoreflect.FieldNumber]*fieldInfo{}
+	fds := mi.Desc.Fields()
+	for i := 0; i < fds.Len(); i++ {
+		fd := fds.Get(i)
+		fs := si.fieldsByNumber[fd.Number()]
+		var fi fieldInfo
+		usePresence, _ := usePresenceForField(si, fd)
+
+		switch {
+		case fd.IsWeak():
+			// Weak fields are no different for opaque.
+			fi = fieldInfoForWeakMessage(fd, si.weakOffset)
+		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+			// Oneofs are no different for opaque.
+			fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
+		case fd.IsMap():
+			fi = mi.fieldInfoForMapOpaque(si, fd, fs)
+		case fd.IsList() && fd.Message() == nil && usePresence:
+			fi = mi.fieldInfoForScalarListOpaque(si, fd, fs)
+		case fd.IsList() && fd.Message() == nil:
+			// Proto3 lists without presence can use the same access methods as the open API
+			fi = fieldInfoForList(fd, fs, mi.Exporter)
+		case fd.IsList() && usePresence:
+			fi = mi.fieldInfoForMessageListOpaque(si, fd, fs)
+		case fd.IsList():
+			// Proto3 opaque messages that do not need a presence bitmap.
+			// Different representation than the open struct, but same logic
+			fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs)
+		case fd.Message() != nil && usePresence:
+			fi = mi.fieldInfoForMessageOpaque(si, fd, fs)
+		case fd.Message() != nil:
+			// Proto3 messages without presence can use the same access methods as the open API
+			fi = fieldInfoForMessage(fd, fs, mi.Exporter)
+		default:
+			fi = mi.fieldInfoForScalarOpaque(si, fd, fs)
+		}
+		mi.fields[fd.Number()] = &fi
+	}
+	mi.oneofs = map[protoreflect.Name]*oneofInfo{}
+	for i := 0; i < mi.Desc.Oneofs().Len(); i++ {
+		od := mi.Desc.Oneofs().Get(i)
+		if !od.IsSynthetic() {
+			mi.oneofs[od.Name()] = makeOneofInfo(od, si.structInfo, mi.Exporter)
+		}
+	}
+
+	mi.denseFields = make([]*fieldInfo, fds.Len()*2)
+	for i := 0; i < fds.Len(); i++ {
+		if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) {
+			mi.denseFields[fd.Number()] = mi.fields[fd.Number()]
+		}
+	}
+
+	for i := 0; i < fds.Len(); {
+		fd := fds.Get(i)
+		if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() {
+			mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
+			i += od.Fields().Len()
+		} else {
+			mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()])
+			i++
+		}
+	}
+
+	mi.makeExtensionFieldsFunc(mt, si.structInfo)
+	mi.makeUnknownFieldsFunc(mt, si.structInfo)
+	mi.makeOpaqueCoderMethods(mt, si)
+	mi.makeFieldTypes(si.structInfo)
+
+	return true
+}
+
+func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Map {
+		panic(fmt.Sprintf("invalid type: got %v, want map kind", ft))
+	}
+	fieldOffset := offsetOf(fs, mi.Exporter)
+	conv := NewConverter(ft, fd)
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			// Don't bother checking presence bits, since we need to
+			// look at the map length even if the presence bit is set.
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return rv.Len() > 0
+		},
+		clear: func(p pointer) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(reflect.Zero(rv.Type()))
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			pv := conv.GoValueOf(v)
+			if pv.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting map field to read-only value"))
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(pv)
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if v.IsNil() {
+				v.Set(reflect.MakeMap(fs.Type))
+			}
+			return conv.PBValueOf(v)
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+	}
+	conv := NewConverter(reflect.PtrTo(ft), fd)
+	fieldOffset := offsetOf(fs, mi.Exporter)
+	index, _ := presenceIndex(mi.Desc, fd)
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return rv.Len() > 0
+		},
+		clear: func(p pointer) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(reflect.Zero(rv.Type()))
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type)
+			if rv.Elem().Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			pv := conv.GoValueOf(v)
+			if pv.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+			}
+			mi.setPresent(p, index)
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(pv.Elem())
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			mi.setPresent(p, index)
+			return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type))
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+	}
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs, mi.Exporter)
+	index, _ := presenceIndex(mi.Desc, fd)
+	fieldNumber := fd.Number()
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			if !mi.present(p, index) {
+				return false
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				// Lazily unmarshal this field.
+				mi.lazyUnmarshal(p, fieldNumber)
+				sp = p.Apply(fieldOffset).AtomicGetPointer()
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			return rv.Elem().Len() > 0
+		},
+		clear: func(p pointer) {
+			fp := p.Apply(fieldOffset)
+			sp := fp.AtomicGetPointer()
+			if sp.IsNil() {
+				sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+				mi.setPresent(p, index)
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			if !mi.present(p, index) {
+				return conv.Zero()
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				// Lazily unmarshal this field.
+				mi.lazyUnmarshal(p, fieldNumber)
+				sp = p.Apply(fieldOffset).AtomicGetPointer()
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			if rv.Elem().Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			fp := p.Apply(fieldOffset)
+			sp := fp.AtomicGetPointer()
+			if sp.IsNil() {
+				sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+				mi.setPresent(p, index)
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			val := conv.GoValueOf(v)
+			if val.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+			} else {
+				rv.Elem().Set(val.Elem())
+			}
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			fp := p.Apply(fieldOffset)
+			sp := fp.AtomicGetPointer()
+			if sp.IsNil() {
+				if mi.present(p, index) {
+					// Lazily unmarshal this field.
+					mi.lazyUnmarshal(p, fieldNumber)
+					sp = p.Apply(fieldOffset).AtomicGetPointer()
+				} else {
+					sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+					mi.setPresent(p, index)
+				}
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			return conv.PBValueOf(rv)
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+		panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+	}
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs, mi.Exporter)
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				return false
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			return rv.Elem().Len() > 0
+		},
+		clear: func(p pointer) {
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if !sp.IsNil() {
+				rv := sp.AsValueOf(fs.Type.Elem())
+				rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+			}
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			sp := p.Apply(fieldOffset).AtomicGetPointer()
+			if sp.IsNil() {
+				return conv.Zero()
+			}
+			rv := sp.AsValueOf(fs.Type.Elem())
+			if rv.Elem().Len() == 0 {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
+				rv.Set(reflect.New(fs.Type.Elem()))
+			}
+			val := conv.GoValueOf(v)
+			if val.IsNil() {
+				panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+			} else {
+				rv.Elem().Set(val.Elem())
+			}
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
+				rv.Set(reflect.New(fs.Type.Elem()))
+			}
+			return conv.PBValueOf(rv)
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	nullable := fd.HasPresence()
+	if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() {
+		nullable = true
+	}
+	deref := false
+	if nullable && ft.Kind() == reflect.Ptr {
+		ft = ft.Elem()
+		deref = true
+	}
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs, mi.Exporter)
+	index, _ := presenceIndex(mi.Desc, fd)
+	var getter func(p pointer) protoreflect.Value
+	if !nullable {
+		getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
+	} else {
+		getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset)
+	}
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			if nullable {
+				return mi.present(p, index)
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			switch rv.Kind() {
+			case reflect.Bool:
+				return rv.Bool()
+			case reflect.Int32, reflect.Int64:
+				return rv.Int() != 0
+			case reflect.Uint32, reflect.Uint64:
+				return rv.Uint() != 0
+			case reflect.Float32, reflect.Float64:
+				return rv.Float() != 0 || math.Signbit(rv.Float())
+			case reflect.String, reflect.Slice:
+				return rv.Len() > 0
+			default:
+				panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen
+			}
+		},
+		clear: func(p pointer) {
+			if nullable {
+				mi.clearPresent(p, index)
+			}
+			// This is only valuable for bytes and strings, but we do it unconditionally.
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			rv.Set(reflect.Zero(rv.Type()))
+		},
+		get: getter,
+		// TODO: Implement unsafe fast path for set?
+		set: func(p pointer, v protoreflect.Value) {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if deref {
+				if rv.IsNil() {
+					rv.Set(reflect.New(ft))
+				}
+				rv = rv.Elem()
+			}
+
+			rv.Set(conv.GoValueOf(v))
+			if nullable && rv.Kind() == reflect.Slice && rv.IsNil() {
+				rv.Set(emptyBytes)
+			}
+			if nullable {
+				mi.setPresent(p, index)
+			}
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+	ft := fs.Type
+	conv := NewConverter(ft, fd)
+	fieldOffset := offsetOf(fs, mi.Exporter)
+	index, _ := presenceIndex(mi.Desc, fd)
+	fieldNumber := fd.Number()
+	elemType := fs.Type.Elem()
+	return fieldInfo{
+		fieldDesc: fd,
+		has: func(p pointer) bool {
+			if p.IsNil() {
+				return false
+			}
+			return mi.present(p, index)
+		},
+		clear: func(p pointer) {
+			mi.clearPresent(p, index)
+			p.Apply(fieldOffset).AtomicSetNilPointer()
+		},
+		get: func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			fp := p.Apply(fieldOffset)
+			mp := fp.AtomicGetPointer()
+			if mp.IsNil() {
+				// Lazily unmarshal this field.
+				mi.lazyUnmarshal(p, fieldNumber)
+				mp = fp.AtomicGetPointer()
+			}
+			rv := mp.AsValueOf(elemType)
+			return conv.PBValueOf(rv)
+		},
+		set: func(p pointer, v protoreflect.Value) {
+			val := pointerOfValue(conv.GoValueOf(v))
+			if val.IsNil() {
+				panic("invalid nil pointer")
+			}
+			p.Apply(fieldOffset).AtomicSetPointer(val)
+			mi.setPresent(p, index)
+		},
+		mutable: func(p pointer) protoreflect.Value {
+			fp := p.Apply(fieldOffset)
+			mp := fp.AtomicGetPointer()
+			if mp.IsNil() {
+				if mi.present(p, index) {
+					// Lazily unmarshal this field.
+					mi.lazyUnmarshal(p, fieldNumber)
+					mp = fp.AtomicGetPointer()
+				} else {
+					mp = pointerOfValue(conv.GoValueOf(conv.New()))
+					fp.AtomicSetPointer(mp)
+					mi.setPresent(p, index)
+				}
+			}
+			return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem()))
+		},
+		newMessage: func() protoreflect.Message {
+			return conv.New().Message()
+		},
+		newField: func() protoreflect.Value {
+			return conv.New()
+		},
+	}
+}
+
+// A presenceList wraps a List, updating presence bits as necessary when the
+// list contents change.
+type presenceList struct {
+	pvalueList
+	setPresence func(bool)
+}
+type pvalueList interface {
+	protoreflect.List
+	//Unwrapper
+}
+
+func (list presenceList) Append(v protoreflect.Value) {
+	list.pvalueList.Append(v)
+	list.setPresence(true)
+}
+func (list presenceList) Truncate(i int) {
+	list.pvalueList.Truncate(i)
+	list.setPresence(i > 0)
+}
+
+// presenceIndex returns the index to pass to presence functions.
+//
+// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields.
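+//
+// For example (illustrative), for fields declared as
+//
+//	a = 1; oneof o { b = 2; c = 3; }; d = 4;
+//
+// the resulting indices are a=0, b=1, c=1, d=2 with a presence size of 3,
+// since all members of a oneof share a single slot.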
+func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) {
+	found := false
+	var index, numIndices uint32
+	for i := 0; i < md.Fields().Len(); i++ {
+		f := md.Fields().Get(i)
+		if f == fd {
+			found = true
+			index = numIndices
+		}
+		if f.ContainingOneof() == nil || isLastOneofField(f) {
+			numIndices++
+		}
+	}
+	if !found {
+		panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName()))
+	}
+	return index, presenceSize(numIndices)
+}
+
+func isLastOneofField(fd protoreflect.FieldDescriptor) bool {
+	fields := fd.ContainingOneof().Fields()
+	return fields.Get(fields.Len()-1) == fd
+}
+
+func (mi *MessageInfo) setPresent(p pointer, index uint32) {
+	p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize)
+}
+
+func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
+	p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index)
+}
+
+func (mi *MessageInfo) present(p pointer, index uint32) bool {
+	return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
+}
+
+// usePresenceForField implements the somewhat intricate logic of when
+// the presence bitmap is used for a field.  The main logic is that a
+// field that is optional or that can be lazy will use the presence
+// bit, but for proto2, maps also have a presence bit. It also records
+// if the field can ever be lazy, which is true if we have a
+// lazyOffset and the field is a message or a slice of messages. A
+// field that is lazy will always need a presence bit.  Oneofs are not
+// lazy and do not use presence, unless they are a synthetic oneof,
+// which is a proto3 optional field. For proto3 optionals, we use the
+// presence bit and they can also be lazy when applicable (a message).
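+//
+// For example (illustrative): a proto2 optional scalar uses the presence bit
+// but is never lazy, while a singular message field annotated as lazy uses
+// the presence bit and may also be unmarshaled lazily.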
+func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
+	hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
+
+	// Non-oneof scalar fields with explicit field presence use the presence array.
+	usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
+	switch {
+	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+		return false, false
+	case fd.IsWeak():
+		return false, false
+	case fd.IsMap():
+		return false, false
+	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
+		return hasLazyField, hasLazyField
+	default:
+		return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
+	}
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
new file mode 100644
index 0000000000..a69825699a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
@@ -0,0 +1,132 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+	"reflect"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+	ft := fs.Type
+	if ft.Kind() == reflect.Ptr {
+		ft = ft.Elem()
+	}
+	if fd.Kind() == protoreflect.EnumKind {
+		// Enums for nullable opaque types.
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return conv.PBValueOf(rv)
+		}
+	}
+	switch ft.Kind() {
+	case reflect.Bool:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bool()
+			return protoreflect.ValueOfBool(*x)
+		}
+	case reflect.Int32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int32()
+			return protoreflect.ValueOfInt32(*x)
+		}
+	case reflect.Uint32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint32()
+			return protoreflect.ValueOfUint32(*x)
+		}
+	case reflect.Int64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int64()
+			return protoreflect.ValueOfInt64(*x)
+		}
+	case reflect.Uint64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint64()
+			return protoreflect.ValueOfUint64(*x)
+		}
+	case reflect.Float32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float32()
+			return protoreflect.ValueOfFloat32(*x)
+		}
+	case reflect.Float64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float64()
+			return protoreflect.ValueOfFloat64(*x)
+		}
+	case reflect.String:
+		if fd.Kind() == protoreflect.BytesKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() || !mi.present(p, index) {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).StringPtr()
+				if *x == nil {
+					return conv.Zero()
+				}
+				if len(**x) == 0 {
+					return protoreflect.ValueOfBytes(nil)
+				}
+				return protoreflect.ValueOfBytes([]byte(**x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).StringPtr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfString(**x)
+		}
+	case reflect.Slice:
+		if fd.Kind() == protoreflect.StringKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() || !mi.present(p, index) {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).Bytes()
+				return protoreflect.ValueOfString(string(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() || !mi.present(p, index) {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bytes()
+			return protoreflect.ValueOfBytes(*x)
+		}
+	}
+	panic("unexpected protobuf kind: " + ft.Kind().String())
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index ecb4623d70..1b9b16a407 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -85,7 +85,9 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
 	mi.oneofs = map[protoreflect.Name]*oneofInfo{}
 	for i := 0; i < md.Oneofs().Len(); i++ {
 		od := md.Oneofs().Get(i)
-		mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter)
+		if !od.IsSynthetic() {
+			mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter)
+		}
 	}
 
 	mi.denseFields = make([]*fieldInfo, fds.Len()*2)
@@ -205,6 +207,11 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
 		case fd.IsList():
 			if fd.Enum() != nil || fd.Message() != nil {
 				ft = fs.Type.Elem()
+
+				if ft.Kind() == reflect.Slice {
+					ft = ft.Elem()
+				}
+
 			}
 			isMessage = fd.Message() != nil
 		case fd.Enum() != nil:
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
index 986322b195..a740646205 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -256,6 +256,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 	ft := fs.Type
 	nullable := fd.HasPresence()
 	isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
+	var getter func(p pointer) protoreflect.Value
 	if nullable {
 		if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
 			// This never occurs for generated message types.
@@ -268,19 +269,25 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 		}
 	}
 	conv := NewConverter(ft, fd)
-
-	// TODO: Implement unsafe fast path?
 	fieldOffset := offsetOf(fs, x)
+
+	// Generate specialized getter functions to avoid going through reflect.Value
+	if nullable {
+		getter = getterForNullableScalar(fd, fs, conv, fieldOffset)
+	} else {
+		getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
+	}
+
 	return fieldInfo{
 		fieldDesc: fd,
 		has: func(p pointer) bool {
 			if p.IsNil() {
 				return false
 			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			if nullable {
-				return !rv.IsNil()
+				return !p.Apply(fieldOffset).Elem().IsNil()
 			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			switch rv.Kind() {
 			case reflect.Bool:
 				return rv.Bool()
@@ -300,21 +307,8 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			rv.Set(reflect.Zero(rv.Type()))
 		},
-		get: func(p pointer) protoreflect.Value {
-			if p.IsNil() {
-				return conv.Zero()
-			}
-			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
-			if nullable {
-				if rv.IsNil() {
-					return conv.Zero()
-				}
-				if rv.Kind() == reflect.Ptr {
-					rv = rv.Elem()
-				}
-			}
-			return conv.PBValueOf(rv)
-		},
+		get: getter,
+		// TODO: Implement unsafe fast path for set?
 		set: func(p pointer, v protoreflect.Value) {
 			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
 			if nullable && rv.Kind() == reflect.Ptr {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
new file mode 100644
index 0000000000..af5e063a1e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
@@ -0,0 +1,273 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+	"reflect"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+	ft := fs.Type
+	if ft.Kind() == reflect.Ptr {
+		ft = ft.Elem()
+	}
+	if fd.Kind() == protoreflect.EnumKind {
+		elemType := fs.Type.Elem()
+		// Enums for nullable types.
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType)
+			if rv.IsNil() {
+				return conv.Zero()
+			}
+			return conv.PBValueOf(rv.Elem())
+		}
+	}
+	switch ft.Kind() {
+	case reflect.Bool:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).BoolPtr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfBool(**x)
+		}
+	case reflect.Int32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int32Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfInt32(**x)
+		}
+	case reflect.Uint32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint32Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfUint32(**x)
+		}
+	case reflect.Int64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int64Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfInt64(**x)
+		}
+	case reflect.Uint64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint64Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfUint64(**x)
+		}
+	case reflect.Float32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float32Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfFloat32(**x)
+		}
+	case reflect.Float64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float64Ptr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfFloat64(**x)
+		}
+	case reflect.String:
+		if fd.Kind() == protoreflect.BytesKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).StringPtr()
+				if *x == nil {
+					return conv.Zero()
+				}
+				if len(**x) == 0 {
+					return protoreflect.ValueOfBytes(nil)
+				}
+				return protoreflect.ValueOfBytes([]byte(**x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).StringPtr()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfString(**x)
+		}
+	case reflect.Slice:
+		if fd.Kind() == protoreflect.StringKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).Bytes()
+				if len(*x) == 0 {
+					return conv.Zero()
+				}
+				return protoreflect.ValueOfString(string(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bytes()
+			if *x == nil {
+				return conv.Zero()
+			}
+			return protoreflect.ValueOfBytes(*x)
+		}
+	}
+	panic("unexpected protobuf kind: " + ft.Kind().String())
+}
+
+func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+	ft := fs.Type
+	if fd.Kind() == protoreflect.EnumKind {
+		// Enums for non nullable types.
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			return conv.PBValueOf(rv)
+		}
+	}
+	switch ft.Kind() {
+	case reflect.Bool:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bool()
+			return protoreflect.ValueOfBool(*x)
+		}
+	case reflect.Int32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int32()
+			return protoreflect.ValueOfInt32(*x)
+		}
+	case reflect.Uint32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint32()
+			return protoreflect.ValueOfUint32(*x)
+		}
+	case reflect.Int64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Int64()
+			return protoreflect.ValueOfInt64(*x)
+		}
+	case reflect.Uint64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Uint64()
+			return protoreflect.ValueOfUint64(*x)
+		}
+	case reflect.Float32:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float32()
+			return protoreflect.ValueOfFloat32(*x)
+		}
+	case reflect.Float64:
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Float64()
+			return protoreflect.ValueOfFloat64(*x)
+		}
+	case reflect.String:
+		if fd.Kind() == protoreflect.BytesKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).String()
+				if len(*x) == 0 {
+					return protoreflect.ValueOfBytes(nil)
+				}
+				return protoreflect.ValueOfBytes([]byte(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).String()
+			return protoreflect.ValueOfString(*x)
+		}
+	case reflect.Slice:
+		if fd.Kind() == protoreflect.StringKind {
+			return func(p pointer) protoreflect.Value {
+				if p.IsNil() {
+					return conv.Zero()
+				}
+				x := p.Apply(fieldOffset).Bytes()
+				return protoreflect.ValueOfString(string(*x))
+			}
+		}
+		return func(p pointer) protoreflect.Value {
+			if p.IsNil() {
+				return conv.Zero()
+			}
+			x := p.Apply(fieldOffset).Bytes()
+			return protoreflect.ValueOfBytes(*x)
+		}
+	}
+	panic("unexpected protobuf kind: " + ft.Kind().String())
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 79e186667b..041ebde2de 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -8,6 +8,8 @@ import (
 	"reflect"
 	"sync/atomic"
 	"unsafe"
+
+	"google.golang.org/protobuf/internal/protolazy"
 )
 
 const UnsafeEnabled = true
@@ -111,6 +113,13 @@ func (p pointer) BytesPtr() **[]byte                    { return (**[]byte)(p.p)
 func (p pointer) BytesSlice() *[][]byte                 { return (*[][]byte)(p.p) }
 func (p pointer) WeakFields() *weakFields               { return (*weakFields)(p.p) }
 func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
+func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo {
+	return (**protolazy.XXX_lazyUnmarshalInfo)(p.p)
+}
+
+func (p pointer) PresenceInfo() presence {
+	return presence{P: p.p}
+}
 
 func (p pointer) Elem() pointer {
 	return pointer{p: *(*unsafe.Pointer)(p.p)}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
new file mode 100644
index 0000000000..38aa7b7dcf
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
@@ -0,0 +1,42 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+func (p pointer) AtomicGetPointer() pointer {
+	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+func (p pointer) AtomicSetPointer(v pointer) {
+	atomic.StorePointer((*unsafe.Pointer)(p.p), v.p)
+}
+
+func (p pointer) AtomicSetNilPointer() {
+	atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil))
+}
+
+func (p pointer) AtomicSetPointerIfNil(v pointer) pointer {
+	if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) {
+		return v
+	}
+	return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+type atomicV1MessageInfo struct{ p Pointer }
+
+func (mi *atomicV1MessageInfo) Get() Pointer {
+	return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p)))
+}
+
+func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer {
+	if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) {
+		return p
+	}
+	return mi.Get()
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go
new file mode 100644
index 0000000000..914cb1deda
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go
@@ -0,0 +1,142 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+// presenceSize represents the size of a presence set, which should be the largest index of the set+1
+type presenceSize uint32
+
+// presence is the internal representation of the bitmap array in a generated protobuf
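+//
+// For example (illustrative), a message with 40 presence-tracked fields is
+// backed by an array of two uint32 words; presence index 37 maps to bit 5 of
+// word 1 (37/32 = 1, 37%32 = 5).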
+type presence struct {
+	// This is a pointer to the beginning of an array of uint32
+	P unsafe.Pointer
+}
+
+func (p presence) toElem(num uint32) (ret *uint32) {
+	const (
+		bitsPerByte = 8
+		siz         = unsafe.Sizeof(*ret)
+	)
+	// p.P points to an array of uint32, num is the bit in this array that the
+	// caller wants to check/manipulate. Calculate the index in the array that
+	// contains this specific bit. E.g.: 76 / 32 = 2 (integer division).
+	offset := uintptr(num) / (siz * bitsPerByte) * siz
+	return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset))
+}
+
+// Present checks for the presence of a specific field number in a presence set.
+func (p presence) Present(num uint32) bool {
+	if p.P == nil {
+		return false
+	}
+	return Export{}.Present(p.toElem(num), num)
+}
+
+// SetPresent adds presence for a specific field number in a presence set.
+func (p presence) SetPresent(num uint32, size presenceSize) {
+	Export{}.SetPresent(p.toElem(num), num, uint32(size))
+}
+
+// SetPresentUnatomic adds presence for a specific field number in a presence set without using
+// atomic operations. Only to be called during unmarshaling.
+func (p presence) SetPresentUnatomic(num uint32, size presenceSize) {
+	Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size))
+}
+
+// ClearPresent removes presence for a specific field number in a presence set.
+func (p presence) ClearPresent(num uint32) {
+	Export{}.ClearPresent(p.toElem(num), num)
+}
+
+// LoadPresenceCache (together with PresentInCache) allows for a
+// cached version of checking for presence without re-reading the word
+// for every field. It is optimized for efficiency and assumes no
+// simultaneous mutation of the presence set (or at least does not have
+// a problem with simultaneous mutation giving inconsistent results).
+func (p presence) LoadPresenceCache() (current uint32) {
+	if p.P == nil {
+		return 0
+	}
+	return atomic.LoadUint32((*uint32)(p.P))
+}
+
+// PresentInCache reads presence from a cached word in the presence
+// bitmap. It loads a new word into the cache if the bit falls outside the
+// cached word. This is for really fast iteration through bitmaps in cases
+// where we either know that the bitmap will not be altered, or we
+// don't care about inconsistencies caused by simultaneous writes.
+func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool {
+	if num/32 != *cachedElement {
+		o := uintptr(num/32) * unsafe.Sizeof(uint32(0))
+		q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
+		*current = atomic.LoadUint32(q)
+		*cachedElement = num / 32
+	}
+	return (*current & (1 << (num % 32))) > 0
+}
+
+// AnyPresent checks if any field is marked as present in the bitmap.
+func (p presence) AnyPresent(size presenceSize) bool {
+	n := uintptr((size + 31) / 32)
+	for j := uintptr(0); j < n; j++ {
+		o := j * unsafe.Sizeof(uint32(0))
+		q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
+		b := atomic.LoadUint32(q)
+		if b > 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// toRaceDetectData finds the preceding RaceDetectHookData in a
+// message by using pointer arithmetic. As the type of the presence
+// set (bitmap) varies with the number of fields in the protobuf, we
+// cannot have a struct type containing the array and the
+// RaceDetectHookData.  Instead, the RaceDetectHookData is placed
+// immediately before the bitmap array, and we find it by walking
+// backwards in the struct.
+//
+// This method is only called from the race-detect version of the code,
+// so RaceDetectHookData is never an empty struct.
+func (p presence) toRaceDetectData() *RaceDetectHookData {
+	var template struct {
+		d RaceDetectHookData
+		a [1]uint32
+	}
+	o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d)))
+	return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o))
+}
+
+func atomicLoadShadowPresence(p **[]byte) *[]byte {
+	return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreShadowPresence(p **[]byte, v *[]byte) {
+	atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v))
+}
+
+// findPointerToRaceDetectData finds the preceding RaceDetectHookData
+// in a message by using pointer arithmetic. For the methods called
+// directly from generated code, we don't have a pointer to the
+// beginning of the presence set, but a pointer inside the array. As
+// we know the index of the bit we're manipulating (num), we can
+// calculate which element of the array ptr is pointing to. With that
+// information we find the preceding RaceDetectHookData and can
+// manipulate the shadow bitmap.
+//
+// This method is only called from the race-detect version of the
+// code, so RaceDetectHookData is never an empty struct.
+func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData {
+	var template struct {
+		d RaceDetectHookData
+		a [1]uint32
+	}
+	o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0))
+	return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index a24e6bbd7a..b534a3d6db 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -37,6 +37,10 @@ const (
 
 	// ValidationValid indicates that unmarshaling the message will succeed.
 	ValidationValid
+
+	// ValidationWrongWireType indicates that a validated field does not have
+	// the expected wire type.
+	ValidationWrongWireType
 )
 
 func (v ValidationStatus) String() string {
@@ -149,11 +153,23 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
 		switch fd.Kind() {
 		case protoreflect.MessageKind:
 			vi.typ = validationTypeMessage
+
+			if ft.Kind() == reflect.Ptr {
+				// Repeated opaque message fields are *[]*T.
+				ft = ft.Elem()
+			}
+
 			if ft.Kind() == reflect.Slice {
 				vi.mi = getMessageInfo(ft.Elem())
 			}
 		case protoreflect.GroupKind:
 			vi.typ = validationTypeGroup
+
+			if ft.Kind() == reflect.Ptr {
+				// Repeated opaque message fields are *[]*T.
+				ft = ft.Elem()
+			}
+
 			if ft.Kind() == reflect.Slice {
 				vi.mi = getMessageInfo(ft.Elem())
 			}
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
new file mode 100644
index 0000000000..82e5cab4aa
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
@@ -0,0 +1,364 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper code for parsing a protocol buffer
+
+package protolazy
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"google.golang.org/protobuf/encoding/protowire"
+)
+
+// BufferReader is a structure encapsulating a protobuf and a current position
+type BufferReader struct {
+	Buf []byte
+	Pos int
+}
+
+// NewBufferReader creates a new BufferReader from a protobuf
+func NewBufferReader(buf []byte) BufferReader {
+	return BufferReader{Buf: buf, Pos: 0}
+}
+
+var errOutOfBounds = errors.New("protobuf decoding: out of bounds")
+var errOverflow = errors.New("proto: integer overflow")
+
+func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) {
+	i := b.Pos
+	l := len(b.Buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		v := b.Buf[i]
+		i++
+		x |= (uint64(v) & 0x7F) << shift
+		if v < 0x80 {
+			b.Pos = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeVarint decodes a varint at the current position
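+// For example, the two bytes 0xAC 0x02 decode to the value 300 using the
+// standard base-128 varint encoding.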
+func (b *BufferReader) DecodeVarint() (x uint64, err error) {
+	i := b.Pos
+	buf := b.Buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		b.Pos++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return b.DecodeVarintSlow()
+	}
+
+	var v uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) & 127
+	i++
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 7
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 14
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 21
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 28
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 35
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 42
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 49
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 56
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 63
+	if v < 128 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	b.Pos = i
+	return
+}
+
+// DecodeVarint32 decodes a varint32 at the current position
+func (b *BufferReader) DecodeVarint32() (x uint32, err error) {
+	i := b.Pos
+	buf := b.Buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		b.Pos++
+		return uint32(buf[i]), nil
+	} else if len(buf)-i < 5 {
+		v, err := b.DecodeVarintSlow()
+		return uint32(v), err
+	}
+
+	var v uint32
+	// we already checked the first byte
+	x = uint32(buf[i]) & 127
+	i++
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 7
+	if v < 128 {
+		goto done
+	}
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 14
+	if v < 128 {
+		goto done
+	}
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 21
+	if v < 128 {
+		goto done
+	}
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 28
+	if v < 128 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	b.Pos = i
+	return
+}
+
+// SkipValue skips a value in the protobuf, based on the specified tag
+func (b *BufferReader) SkipValue(tag uint32) (err error) {
+	wireType := tag & 0x7
+	switch protowire.Type(wireType) {
+	case protowire.VarintType:
+		err = b.SkipVarint()
+	case protowire.Fixed64Type:
+		err = b.SkipFixed64()
+	case protowire.BytesType:
+		var n uint32
+		n, err = b.DecodeVarint32()
+		if err == nil {
+			err = b.Skip(int(n))
+		}
+	case protowire.StartGroupType:
+		err = b.SkipGroup(tag)
+	case protowire.Fixed32Type:
+		err = b.SkipFixed32()
+	default:
+		err = fmt.Errorf("Unexpected wire type (%d)", wireType)
+	}
+	return
+}
+
+// SkipGroup skips a group with the specified tag.  It executes efficiently using a tag stack
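+// For example, a group opened with tag (5<<3)|3 (field 5, start-group) is
+// skipped until the matching end-group tag (5<<3)|4 is consumed; nested
+// start-group tags are pushed onto the stack and popped by their end tags.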
+func (b *BufferReader) SkipGroup(tag uint32) (err error) {
+	tagStack := make([]uint32, 0, 16)
+	tagStack = append(tagStack, tag)
+	var n uint32
+	for len(tagStack) > 0 {
+		tag, err = b.DecodeVarint32()
+		if err != nil {
+			return err
+		}
+		switch protowire.Type(tag & 0x7) {
+		case protowire.VarintType:
+			err = b.SkipVarint()
+		case protowire.Fixed64Type:
+			err = b.Skip(8)
+		case protowire.BytesType:
+			n, err = b.DecodeVarint32()
+			if err == nil {
+				err = b.Skip(int(n))
+			}
+		case protowire.StartGroupType:
+			tagStack = append(tagStack, tag)
+		case protowire.Fixed32Type:
+			err = b.SkipFixed32()
+		case protowire.EndGroupType:
+			if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) {
+				tagStack = tagStack[:len(tagStack)-1]
+			} else {
+				err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d",
+					protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SkipVarint efficiently skips a varint
+func (b *BufferReader) SkipVarint() (err error) {
+	i := b.Pos
+
+	if len(b.Buf)-i < 10 {
+		// Use DecodeVarintSlow() to check for buffer overflow, but ignore result
+		if _, err := b.DecodeVarintSlow(); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	return errOverflow
+
+out:
+	b.Pos = i + 1
+	return nil
+}
+
+// Skip skips the specified number of bytes
+func (b *BufferReader) Skip(n int) (err error) {
+	if len(b.Buf) < b.Pos+n {
+		return io.ErrUnexpectedEOF
+	}
+	b.Pos += n
+	return
+}
+
+// SkipFixed64 skips a fixed64
+func (b *BufferReader) SkipFixed64() (err error) {
+	return b.Skip(8)
+}
+
+// SkipFixed32 skips a fixed32
+func (b *BufferReader) SkipFixed32() (err error) {
+	return b.Skip(4)
+}
+
+// SkipBytes skips a length-delimited set of bytes
+func (b *BufferReader) SkipBytes() (err error) {
+	n, err := b.DecodeVarint32()
+	if err != nil {
+		return err
+	}
+	return b.Skip(int(n))
+}
+
+// Done returns whether we are at the end of the protobuf
+func (b *BufferReader) Done() bool {
+	return b.Pos == len(b.Buf)
+}
+
+// Remaining returns how many bytes remain
+func (b *BufferReader) Remaining() int {
+	return len(b.Buf) - b.Pos
+}
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
new file mode 100644
index 0000000000..ff4d4834bb
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
@@ -0,0 +1,359 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protolazy contains internal data structures for lazy message decoding.
+package protolazy
+
+import (
+	"fmt"
+	"sort"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// IndexEntry is the structure for an index of the fields in a protobuf
+// message (it does not descend into sub-messages)
+type IndexEntry struct {
+	FieldNum uint32
+	// first byte of this tag/field
+	Start uint32
+	// first byte after a contiguous sequence of bytes for this tag/field, which could
+	// include a single encoding of the field, or multiple encodings for the field
+	End uint32
+	// True if this protobuf segment includes multiple encodings of the field
+	MultipleContiguous bool
+}
+
+// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+type XXX_lazyUnmarshalInfo struct {
+	// Index of fields and their positions in the protobuf for this
+	// message.  Make index be a pointer to a slice so it can be updated
+	// atomically.  The index pointer is only set once (lazily when/if
+	// the index is first needed), and must always be SET and LOADED
+	// ATOMICALLY.
+	index *[]IndexEntry
+	// The protobuf associated with this lazily decoded message.  It is
+	// only set during proto.Unmarshal().  It doesn't need to be set and
+	// loaded atomically, since any simultaneous set (Unmarshal) and read
+	// (during a get) would already be a race in the app code.
+	Protobuf []byte
+	// The flags present when Unmarshal was originally called for this particular message
+	unmarshalFlags piface.UnmarshalInputFlags
+}
+
+// The Buffer and SetBuffer methods let v2/internal/impl interact with
+// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle.
+
+// Buffer returns the lazy unmarshal buffer.
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte {
+	return lazy.Protobuf
+}
+
+// SetBuffer sets the lazy unmarshal buffer.
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) {
+	lazy.Protobuf = b
+}
+
+// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags.
+// The flags should reflect how Unmarshal was called.
+func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) {
+	lazy.unmarshalFlags = f
+}
+
+// UnmarshalFlags returns the original unmarshalInputFlags.
+func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags {
+	return lazy.unmarshalFlags
+}
+
+// AllowedPartial returns true if the user originally unmarshalled this message with
+// AllowPartial set to true
+func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool {
+	return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0
+}
+
+func protoFieldNumber(tag uint32) uint32 {
+	return tag >> 3
+}
+
+// buildIndex builds an index of the specified protobuf, returning the index
+// array and an error.
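+//
+// For example (illustrative), a buffer laid out as [field 5][field 5][field 2]
+// yields one entry for field 5 covering both contiguous encodings (with
+// MultipleContiguous set) and one entry for field 2; because the fields appear
+// out of order, the resulting index is sorted by field number.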
+func buildIndex(buf []byte) ([]IndexEntry, error) {
+	index := make([]IndexEntry, 0, 16)
+	var lastProtoFieldNum uint32
+	var outOfOrder bool
+
+	var r BufferReader = NewBufferReader(buf)
+
+	for !r.Done() {
+		var tag uint32
+		var err error
+		var curPos = r.Pos
+		// INLINED: tag, err = r.DecodeVarint32()
+		{
+			i := r.Pos
+			buf := r.Buf
+
+			if i >= len(buf) {
+				return nil, errOutOfBounds
+			} else if buf[i] < 0x80 {
+				r.Pos++
+				tag = uint32(buf[i])
+			} else if r.Remaining() < 5 {
+				var v uint64
+				v, err = r.DecodeVarintSlow()
+				tag = uint32(v)
+			} else {
+				var v uint32
+				// we already checked the first byte
+				tag = uint32(buf[i]) & 127
+				i++
+
+				v = uint32(buf[i])
+				i++
+				tag |= (v & 127) << 7
+				if v < 128 {
+					goto done
+				}
+
+				v = uint32(buf[i])
+				i++
+				tag |= (v & 127) << 14
+				if v < 128 {
+					goto done
+				}
+
+				v = uint32(buf[i])
+				i++
+				tag |= (v & 127) << 21
+				if v < 128 {
+					goto done
+				}
+
+				v = uint32(buf[i])
+				i++
+				tag |= (v & 127) << 28
+				if v < 128 {
+					goto done
+				}
+
+				return nil, errOutOfBounds
+
+			done:
+				r.Pos = i
+			}
+		}
+		// DONE: tag, err = r.DecodeVarint32()
+
+		fieldNum := protoFieldNumber(tag)
+		if fieldNum < lastProtoFieldNum {
+			outOfOrder = true
+		}
+
+		// Skip the current value -- will skip over an entire group as well.
+		// INLINED: err = r.SkipValue(tag)
+		wireType := tag & 0x7
+		switch protowire.Type(wireType) {
+		case protowire.VarintType:
+			// INLINED: err = r.SkipVarint()
+			i := r.Pos
+
+			if len(r.Buf)-i < 10 {
+				// Use DecodeVarintSlow() to skip while
+				// checking for buffer overflow, but ignore result
+				_, err = r.DecodeVarintSlow()
+				goto out2
+			}
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			i++
+
+			if r.Buf[i] < 0x80 {
+				goto out
+			}
+			return nil, errOverflow
+		out:
+			r.Pos = i + 1
+			// DONE: err = r.SkipVarint()
+		case protowire.Fixed64Type:
+			err = r.SkipFixed64()
+		case protowire.BytesType:
+			var n uint32
+			n, err = r.DecodeVarint32()
+			if err == nil {
+				err = r.Skip(int(n))
+			}
+		case protowire.StartGroupType:
+			err = r.SkipGroup(tag)
+		case protowire.Fixed32Type:
+			err = r.SkipFixed32()
+		default:
+			err = fmt.Errorf("Unexpected wire type (%d)", wireType)
+		}
+		// DONE: err = r.SkipValue(tag)
+
+	out2:
+		if err != nil {
+			return nil, err
+		}
+		if fieldNum != lastProtoFieldNum {
+			index = append(index, IndexEntry{FieldNum: fieldNum,
+				Start: uint32(curPos),
+				End:   uint32(r.Pos)},
+			)
+		} else {
+			index[len(index)-1].End = uint32(r.Pos)
+			index[len(index)-1].MultipleContiguous = true
+		}
+		lastProtoFieldNum = fieldNum
+	}
+	if outOfOrder {
+		sort.Slice(index, func(i, j int) bool {
+			return index[i].FieldNum < index[j].FieldNum ||
+				(index[i].FieldNum == index[j].FieldNum &&
+					index[i].Start < index[j].Start)
+		})
+	}
+	return index, nil
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) {
+	start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
+	if multipleEntries != nil {
+		for _, entry := range multipleEntries {
+			size += int(entry.End - entry.Start)
+		}
+		return size
+	}
+	if !found {
+		return 0
+	}
+	return int(end - start)
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) {
+	start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
+	if multipleEntries != nil {
+		for _, entry := range multipleEntries {
+			b = append(b, lazy.Protobuf[entry.Start:entry.End]...)
+		}
+		return b, true
+	}
+	if !found {
+		return nil, false
+	}
+	b = append(b, lazy.Protobuf[start:end]...)
+	return b, true
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) {
+	atomicStoreIndex(&lazy.index, &index)
+}
+
+// FindFieldInProto looks for field fieldNum in the lazyUnmarshalInfo information
+// (including the backing protobuf) and returns start/end offsets and found.
+func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) {
+	if lazy.Protobuf == nil {
+		// There is no backing protobuf for this message -- it was made from a builder
+		return 0, 0, false, false, nil
+	}
+	index := atomicLoadIndex(&lazy.index)
+	if index == nil {
+		r, err := buildIndex(lazy.Protobuf)
+		if err != nil {
+			panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err))
+		}
+		// lazy.index is a pointer to the slice returned by buildIndex.
+		index = &r
+		atomicStoreIndex(&lazy.index, index)
+	}
+	return lookupField(index, fieldNum)
+}
+
+// lookupField uses the index to return the offset at which the indicated field
+// starts, the offset immediately after the field ends (including all contiguous
+// instances of a repeated field), and bools indicating whether the field was
+// found and whether there are multiple encodings of the field in the byte range.
+//
+// To handle the uncommon case where there are repeated encodings for the same
+// field which are not consecutive in the protobuf (so we need to return
+// multiple start/end offsets), we also return a slice multipleEntries.  If
+// multipleEntries is non-nil, then multiple entries were found, and the
+// values in the slice should be used, rather than start/end/found.
+func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) {
+	// The pointer indexp to the index was already loaded atomically.
+	// The slice is uniquely associated with the pointer, so it doesn't
+	// need to be loaded atomically.
+	index := *indexp
+	for i, entry := range index {
+		if fieldNum == entry.FieldNum {
+			if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum {
+				// Handle the uncommon case where there are
+				// repeated entries for the same field which
+				// are not contiguous in the protobuf.
+				multiple := make([]IndexEntry, 1, 2)
+				multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous}
+				i++
+				for i < len(index) && index[i].FieldNum == fieldNum {
+					multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous})
+					i++
+				}
+				return 0, 0, false, false, multiple
+
+			}
+			return entry.Start, entry.End, true, entry.MultipleContiguous, nil
+		}
+		if fieldNum < entry.FieldNum {
+			return 0, 0, false, false, nil
+		}
+	}
+	return 0, 0, false, false, nil
+}
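
The index built above records one [start, end) byte range per top-level field so that SizeField and AppendField can copy raw bytes without decoding them. Below is a minimal sketch of the same indexing idea written against the public protowire package rather than the vendored BufferReader; the rangeEntry type and indexWire helper are illustrative only, and the sketch omits the MultipleContiguous merging and sorting that buildIndex performs.

```go
// Illustrative only: index the top-level fields of a wire-format buffer,
// recording one byte range per field occurrence.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// rangeEntry is a simplified stand-in for IndexEntry.
type rangeEntry struct {
	FieldNum   protowire.Number
	Start, End int
}

func indexWire(buf []byte) ([]rangeEntry, error) {
	var idx []rangeEntry
	for pos := 0; pos < len(buf); {
		num, typ, n := protowire.ConsumeTag(buf[pos:])
		if n < 0 {
			return nil, protowire.ParseError(n)
		}
		m := protowire.ConsumeFieldValue(num, typ, buf[pos+n:])
		if m < 0 {
			return nil, protowire.ParseError(m)
		}
		idx = append(idx, rangeEntry{FieldNum: num, Start: pos, End: pos + n + m})
		pos += n + m
	}
	return idx, nil
}

func main() {
	// Hand-build a buffer with field 1 (varint) and field 2 (bytes).
	b := protowire.AppendTag(nil, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 42)
	b = protowire.AppendTag(b, 2, protowire.BytesType)
	b = protowire.AppendBytes(b, []byte("hi"))

	idx, err := indexWire(b)
	fmt.Println(idx, err) // one entry per field, each with its byte range
}
```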
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
new file mode 100644
index 0000000000..dc2a64ca64
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
@@ -0,0 +1,17 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protolazy
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry {
+	return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
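
The two helpers above publish the lazily built index with an atomic pointer store, so concurrent readers see either nil or a fully built slice. A rough equivalent of that publish-once pattern using the typed atomic.Pointer from Go 1.19+ is sketched below; this is illustrative only, since the vendored code keeps a plain *[]IndexEntry field and goes through unsafe.Pointer instead.

```go
// Illustrative only: the same publish-once pattern with a typed atomic pointer.
package main

import (
	"fmt"
	"sync/atomic"
)

type indexEntry struct{ FieldNum, Start, End uint32 }

type lazyInfo struct {
	index atomic.Pointer[[]indexEntry] // nil until the index is first needed
}

// loadOrBuild returns the index, building and publishing it on first use.
// Two goroutines may race to build; both produce the same index, so the
// last store winning is harmless.
func (l *lazyInfo) loadOrBuild(build func() []indexEntry) []indexEntry {
	if p := l.index.Load(); p != nil {
		return *p
	}
	idx := build()
	l.index.Store(&idx)
	return idx
}

func main() {
	var l lazyInfo
	idx := l.loadOrBuild(func() []indexEntry {
		return []indexEntry{{FieldNum: 1, Start: 0, End: 2}}
	})
	fmt.Println(len(idx)) // 1
}
```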
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 62a52a40a3..e27eaefcf3 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,8 +51,8 @@ import (
 //  10. Send out the CL for review and submit it.
 const (
 	Major      = 1
-	Minor      = 35
-	Patch      = 2
+	Minor      = 36
+	Patch      = 0
 	PreRelease = ""
 )
 
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index d75a6534c1..a3b5e142d2 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -47,6 +47,12 @@ type UnmarshalOptions struct {
 	// RecursionLimit limits how deeply messages may be nested.
 	// If zero, a default limit is applied.
 	RecursionLimit int
+
+	//
+	// NoLazyDecoding turns off lazy decoding, which otherwise is enabled by
+	// default. Lazy decoding only affects submessages (annotated with [lazy =
+	// true] in the .proto file) within messages that use the Opaque API.
+	NoLazyDecoding bool
 }
 
 // Unmarshal parses the wire-format message in b and places the result in m.
@@ -104,6 +110,16 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto
 		if o.DiscardUnknown {
 			in.Flags |= protoiface.UnmarshalDiscardUnknown
 		}
+
+		if !allowPartial {
+			// This does not affect how current unmarshal functions work; it just allows
+			// them to record this for the lazy decoding case.
+			in.Flags |= protoiface.UnmarshalCheckRequired
+		}
+		if o.NoLazyDecoding {
+			in.Flags |= protoiface.UnmarshalNoLazyDecoding
+		}
+
 		out, err = methods.Unmarshal(in)
 	} else {
 		o.RecursionLimit--
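
For callers, the new option is set per Unmarshal call. A minimal usage sketch follows, using structpb.Struct only as a convenient stand-in message; lazy decoding itself only applies to Opaque API messages whose submessages are annotated with [lazy = true].

```go
// Illustrative only: opting out of lazy decoding for one Unmarshal call.
package main

import (
	"log"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func decodeEager(wire []byte) (*structpb.Struct, error) {
	m := &structpb.Struct{}
	// NoLazyDecoding forces annotated submessages to be decoded up front
	// instead of being kept as raw bytes until first access.
	err := proto.UnmarshalOptions{NoLazyDecoding: true}.Unmarshal(wire, m)
	return m, err
}

func main() {
	wire, err := proto.Marshal(&structpb.Struct{})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := decodeEager(wire); err != nil {
		log.Fatal(err)
	}
}
```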
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index 1f847bcc35..f0473c5869 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -63,7 +63,8 @@ type MarshalOptions struct {
 	// options (except for UseCachedSize itself).
 	//
 	// 2. The message and all its submessages have not changed in any
-	// way since the Size call.
+	// way since the Size call. For lazily decoded messages, accessing
+	// a message results in decoding the message, which is a change.
 	//
 	// If either of these invariants is violated,
 	// the results are undefined and may include panics or corrupted output.
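
The amended comment matters for the documented Size-then-Marshal pattern: reading a lazily decoded submessage between the two calls counts as a change and invalidates the cached size. A minimal sketch of that pattern, assuming the message is not touched in between:

```go
// Illustrative only: compute the size once, then marshal with UseCachedSize.
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func marshalWithCachedSize(m proto.Message) ([]byte, error) {
	// Size populates the per-message size cache for these options.
	buf := make([]byte, 0, proto.MarshalOptions{}.Size(m))
	return proto.MarshalOptions{UseCachedSize: true}.MarshalAppend(buf, m)
}

func main() {
	b, err := marshalWithCachedSize(wrapperspb.String("hello"))
	fmt.Println(len(b), err)
}
```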
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
index 052fb5ae31..c8675806c6 100644
--- a/vendor/google.golang.org/protobuf/proto/size.go
+++ b/vendor/google.golang.org/protobuf/proto/size.go
@@ -12,11 +12,19 @@ import (
 )
 
 // Size returns the size in bytes of the wire-format encoding of m.
+//
+// Note that Size might return more bytes than Marshal will write in the case of
+// lazily decoded messages that arrive in non-minimal wire format: see
+// https://protobuf.dev/reference/go/size/ for more details.
 func Size(m Message) int {
 	return MarshalOptions{}.Size(m)
 }
 
 // Size returns the size in bytes of the wire-format encoding of m.
+//
+// Note that Size might return more bytes than Marshal will write in the case of
+// lazily decoded messages that arrive in non-minimal wire format: see
+// https://protobuf.dev/reference/go/size/ for more details.
 func (o MarshalOptions) Size(m Message) int {
 	// Treat a nil message interface as an empty message; nothing to output.
 	if m == nil {
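
For ordinary, eagerly decoded messages Size and the marshaled length still agree; the divergence described in the new doc comment only arises when a lazily decoded submessage is retained in non-minimal wire form. A small baseline check illustrating the common case:

```go
// Illustrative only: for an eagerly decoded message, Size matches the number
// of bytes Marshal writes.
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.Bytes([]byte("payload"))
	b, err := proto.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(proto.Size(m) == len(b)) // true
}
```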
diff --git a/vendor/google.golang.org/protobuf/proto/wrapperopaque.go b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
new file mode 100644
index 0000000000..267fd0f1f6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
@@ -0,0 +1,80 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// ValueOrNil returns nil if has is false, or a pointer to a new variable
+// containing the value returned by the specified getter.
+//
+// This function is similar to the wrappers (proto.Int32(), proto.String(),
+// etc.), but is generic (works for any field type) and works with the hasser
+// and getter of a field, as opposed to a value.
+//
+// This is convenient when populating builder fields.
+//
+// Example:
+//
+//	hop := attr.GetDirectHop()
+//	injectedRoute := ripb.InjectedRoute_builder{
+//	  Prefixes: route.GetPrefixes(),
+//	  NextHop:  proto.ValueOrNil(hop.HasAddress(), hop.GetAddress),
+//	}
+func ValueOrNil[T any](has bool, getter func() T) *T {
+	if !has {
+		return nil
+	}
+	v := getter()
+	return &v
+}
+
+// ValueOrDefault returns the protobuf message val if val is not nil, otherwise
+// it returns a pointer to an empty val message.
+//
+// This function allows for translating code from the old Open Struct API to the
+// new Opaque API.
+//
+// The old Open Struct API represented oneof fields with a wrapper struct:
+//
+//	var signedImg *accountpb.SignedImage
+//	profile := &accountpb.Profile{
+//		// The Avatar oneof will be set, with an empty SignedImage.
+//		Avatar: &accountpb.Profile_SignedImage{signedImg},
+//	}
+//
+// The new Opaque API treats oneof fields like regular fields, there are no more
+// wrapper structs:
+//
+//	var signedImg *accountpb.SignedImage
+//	profile := &accountpb.Profile{}
+//	profile.SetSignedImage(signedImg)
+//
+// For convenience, the Opaque API also offers Builders, which allow for a
+// direct translation of struct initialization. However, because Builders use
+// nilness to represent field presence (but there is no non-nil wrapper struct
+// anymore), Builders cannot distinguish between an unset oneof and a set oneof
+// with nil message. The above code would need to be translated with help of the
+// ValueOrDefault function to retain the same behavior:
+//
+//	var signedImg *accountpb.SignedImage
+//	return &accountpb.Profile_builder{
+//		SignedImage: proto.ValueOrDefault(signedImg),
+//	}.Build()
+func ValueOrDefault[T interface {
+	*P
+	Message
+}, P any](val T) T {
+	if val == nil {
+		return T(new(P))
+	}
+	return val
+}
+
+// ValueOrDefaultBytes is like ValueOrDefault but for working with fields of
+// type []byte.
+func ValueOrDefaultBytes(val []byte) []byte {
+	if val == nil {
+		return []byte{}
+	}
+	return val
+}
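
A small, self-contained usage sketch of the new helpers follows. The addr type with its HasHost/GetHost pair is a hypothetical stand-in for an Opaque API message; in real code ValueOrNil is typically fed a generated hasser/getter pair when populating builder fields, as the doc comment's example shows.

```go
// Illustrative only: addr stands in for a message with a hasser/getter pair.
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
)

type addr struct {
	host string
	set  bool
}

func (a addr) HasHost() bool   { return a.set }
func (a addr) GetHost() string { return a.host }

func main() {
	unset := addr{}
	set := addr{host: "10.0.0.1", set: true}

	// nil when the field is unset, pointer to a copy of the value otherwise.
	fmt.Println(proto.ValueOrNil(unset.HasHost(), unset.GetHost)) // <nil>
	fmt.Println(*proto.ValueOrNil(set.HasHost(), set.GetHost))    // 10.0.0.1

	// ValueOrDefaultBytes maps nil to an empty, non-nil slice.
	fmt.Printf("%q\n", proto.ValueOrDefaultBytes(nil)) // ""
}
```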
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
index a7b0d06ff3..a4b78acef6 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -152,7 +152,7 @@ type Message interface {
 	// This method may return nil.
 	//
 	// The returned methods type is identical to
-	// google.golang.org/protobuf/runtime/protoiface.Methods.
+	// [google.golang.org/protobuf/runtime/protoiface.Methods].
 	// Consult the protoiface package documentation for details.
 	ProtoMethods() *methods
 }
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
index 246156561c..28e9e9f039 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -122,6 +122,22 @@ type UnmarshalInputFlags = uint8
 
 const (
 	UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota
+
+	// UnmarshalAliasBuffer permits unmarshal operations to alias the input buffer.
+	// The unmarshaller must not modify the contents of the buffer.
+	UnmarshalAliasBuffer
+
+	// UnmarshalValidated indicates that validation has already been
+	// performed on the input buffer.
+	UnmarshalValidated
+
+	// UnmarshalCheckRequired is set if this unmarshal operation will ultimately care
+	// whether required fields are initialized.
+	UnmarshalCheckRequired
+
+	// UnmarshalNoLazyDecoding is set if this unmarshal operation should not use
+	// lazy decoding, even when otherwise available.
+	UnmarshalNoLazyDecoding
 )
 
 // UnmarshalOutputFlags are output from the Unmarshal method.
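
Below is a hedged sketch of how a fast-path Unmarshal implementation might consult the new flags; useLazy is a hypothetical helper, not part of the library. It assumes the lazy path needs permission to retain (alias) the input buffer, which is why UnmarshalAliasBuffer is checked alongside UnmarshalNoLazyDecoding.

```go
// Illustrative only: a hypothetical flag check for a fast-path Unmarshal.
package main

import (
	"fmt"

	"google.golang.org/protobuf/runtime/protoiface"
)

func useLazy(flags protoiface.UnmarshalInputFlags) bool {
	return flags&protoiface.UnmarshalNoLazyDecoding == 0 &&
		flags&protoiface.UnmarshalAliasBuffer != 0
}

func main() {
	var in protoiface.UnmarshalInput
	in.Flags |= protoiface.UnmarshalAliasBuffer
	fmt.Println(useLazy(in.Flags)) // true

	in.Flags |= protoiface.UnmarshalNoLazyDecoding
	fmt.Println(useLazy(in.Flags)) // false
}
```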
diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
index 4a1ab7fb3d..93df1b569b 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
@@ -15,6 +15,7 @@ import (
 	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/internal/filetype"
 	"google.golang.org/protobuf/internal/impl"
+	"google.golang.org/protobuf/internal/protolazy"
 )
 
 // UnsafeEnabled specifies whether package unsafe can be used.
@@ -39,6 +40,9 @@ type (
 	ExtensionFieldV1 = impl.ExtensionField
 
 	Pointer = impl.Pointer
+
+	LazyUnmarshalInfo  = *protolazy.XXX_lazyUnmarshalInfo
+	RaceDetectHookData = impl.RaceDetectHookData
 )
 
 var X impl.Export
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 87da199a38..191552cce0 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -210,10 +210,7 @@ import (
 //	  "value": "1.212s"
 //	}
 type Any struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// A URL/resource name that uniquely identifies the type of the serialized
 	// protocol buffer message. This string must contain at least
 	// one "/" character. The last segment of the URL's path must represent
@@ -244,7 +241,9 @@ type Any struct {
 	// used with implementation specific semantics.
 	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
 	// Must be a valid serialized protocol buffer of the above specified type.
-	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	Value         []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // New marshals src into a new Any instance.
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index b99d4d2410..34d76e6cd9 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -141,10 +141,7 @@ import (
 // be expressed in JSON format as "3.000000001s", and 3 seconds and 1
 // microsecond should be expressed in JSON format as "3.000001s".
 type Duration struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Signed seconds of the span of time. Must be from -315,576,000,000
 	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
 	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
@@ -155,7 +152,9 @@ type Duration struct {
 	// of one second or more, a non-zero value for the `nanos` field must be
 	// of the same sign as the `seconds` field. Must be from -999,999,999
 	// to +999,999,999 inclusive.
-	Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	Nanos         int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // New constructs a new Duration from the provided time.Duration.
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
index 19de8d371f..e5d7da38c2 100644
--- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -284,12 +284,11 @@ import (
 // request should verify the included field paths, and return an
 // `INVALID_ARGUMENT` error if any path is unmappable.
 type FieldMask struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The set of field mask paths.
-	Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+	Paths         []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // New constructs a field mask from a list of paths and verifies that
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
index 8f206a6611..f2c53ea337 100644
--- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -187,12 +187,11 @@ func (NullValue) EnumDescriptor() ([]byte, []int) {
 //
 // The JSON representation for `Struct` is JSON object.
 type Struct struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Unordered map of dynamically typed values.
-	Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Fields        map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // NewStruct constructs a Struct from a general-purpose Go map.
@@ -276,13 +275,10 @@ func (x *Struct) GetFields() map[string]*Value {
 //
 // The JSON representation for `Value` is JSON value.
 type Value struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The kind of value.
 	//
-	// Types that are assignable to Kind:
+	// Types that are valid to be assigned to Kind:
 	//
 	//	*Value_NullValue
 	//	*Value_NumberValue
@@ -290,7 +286,9 @@ type Value struct {
 	//	*Value_BoolValue
 	//	*Value_StructValue
 	//	*Value_ListValue
-	Kind isValue_Kind `protobuf_oneof:"kind"`
+	Kind          isValue_Kind `protobuf_oneof:"kind"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // NewValue constructs a Value from a general-purpose Go interface.
@@ -483,51 +481,63 @@ func (*Value) Descriptor() ([]byte, []int) {
 	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1}
 }
 
-func (m *Value) GetKind() isValue_Kind {
-	if m != nil {
-		return m.Kind
+func (x *Value) GetKind() isValue_Kind {
+	if x != nil {
+		return x.Kind
 	}
 	return nil
 }
 
 func (x *Value) GetNullValue() NullValue {
-	if x, ok := x.GetKind().(*Value_NullValue); ok {
-		return x.NullValue
+	if x != nil {
+		if x, ok := x.Kind.(*Value_NullValue); ok {
+			return x.NullValue
+		}
 	}
 	return NullValue_NULL_VALUE
 }
 
 func (x *Value) GetNumberValue() float64 {
-	if x, ok := x.GetKind().(*Value_NumberValue); ok {
-		return x.NumberValue
+	if x != nil {
+		if x, ok := x.Kind.(*Value_NumberValue); ok {
+			return x.NumberValue
+		}
 	}
 	return 0
 }
 
 func (x *Value) GetStringValue() string {
-	if x, ok := x.GetKind().(*Value_StringValue); ok {
-		return x.StringValue
+	if x != nil {
+		if x, ok := x.Kind.(*Value_StringValue); ok {
+			return x.StringValue
+		}
 	}
 	return ""
 }
 
 func (x *Value) GetBoolValue() bool {
-	if x, ok := x.GetKind().(*Value_BoolValue); ok {
-		return x.BoolValue
+	if x != nil {
+		if x, ok := x.Kind.(*Value_BoolValue); ok {
+			return x.BoolValue
+		}
 	}
 	return false
 }
 
 func (x *Value) GetStructValue() *Struct {
-	if x, ok := x.GetKind().(*Value_StructValue); ok {
-		return x.StructValue
+	if x != nil {
+		if x, ok := x.Kind.(*Value_StructValue); ok {
+			return x.StructValue
+		}
 	}
 	return nil
 }
 
 func (x *Value) GetListValue() *ListValue {
-	if x, ok := x.GetKind().(*Value_ListValue); ok {
-		return x.ListValue
+	if x != nil {
+		if x, ok := x.Kind.(*Value_ListValue); ok {
+			return x.ListValue
+		}
 	}
 	return nil
 }
@@ -582,12 +592,11 @@ func (*Value_ListValue) isValue_Kind() {}
 //
 // The JSON representation for `ListValue` is JSON array.
 type ListValue struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Repeated field of dynamically typed values.
-	Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+	Values        []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // NewList constructs a ListValue from a general-purpose Go slice.
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 0d20722d70..9550109aa3 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -170,10 +170,7 @@ import (
 // http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
 // ) to obtain a formatter capable of generating timestamps in this format.
 type Timestamp struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Represents seconds of UTC time since Unix epoch
 	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
 	// 9999-12-31T23:59:59Z inclusive.
@@ -182,7 +179,9 @@ type Timestamp struct {
 	// second values with fractions must still have non-negative nanos values
 	// that count forward in time. Must be from 0 to 999,999,999
 	// inclusive.
-	Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	Nanos         int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Now constructs a new Timestamp from the current time.
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
index 006060e569..15b424ec12 100644
--- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -54,12 +54,11 @@ import (
 //
 // The JSON representation for `DoubleValue` is JSON number.
 type DoubleValue struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The double value.
-	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Double stores v in a new DoubleValue and returns a pointer to it.
@@ -108,12 +107,11 @@ func (x *DoubleValue) GetValue() float64 {
 //
 // The JSON representation for `FloatValue` is JSON number.
 type FloatValue struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The float value.
-	Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Float stores v in a new FloatValue and returns a pointer to it.
@@ -162,12 +160,11 @@ func (x *FloatValue) GetValue() float32 {
 //
 // The JSON representation for `Int64Value` is JSON string.
 type Int64Value struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The int64 value.
-	Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Int64 stores v in a new Int64Value and returns a pointer to it.
@@ -216,12 +213,11 @@ func (x *Int64Value) GetValue() int64 {
 //
 // The JSON representation for `UInt64Value` is JSON string.
 type UInt64Value struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The uint64 value.
-	Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // UInt64 stores v in a new UInt64Value and returns a pointer to it.
@@ -270,12 +266,11 @@ func (x *UInt64Value) GetValue() uint64 {
 //
 // The JSON representation for `Int32Value` is JSON number.
 type Int32Value struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The int32 value.
-	Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Int32 stores v in a new Int32Value and returns a pointer to it.
@@ -324,12 +319,11 @@ func (x *Int32Value) GetValue() int32 {
 //
 // The JSON representation for `UInt32Value` is JSON number.
 type UInt32Value struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The uint32 value.
-	Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // UInt32 stores v in a new UInt32Value and returns a pointer to it.
@@ -378,12 +372,11 @@ func (x *UInt32Value) GetValue() uint32 {
 //
 // The JSON representation for `BoolValue` is JSON `true` and `false`.
 type BoolValue struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The bool value.
-	Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Bool stores v in a new BoolValue and returns a pointer to it.
@@ -432,12 +425,11 @@ func (x *BoolValue) GetValue() bool {
 //
 // The JSON representation for `StringValue` is JSON string.
 type StringValue struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The string value.
-	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // String stores v in a new StringValue and returns a pointer to it.
@@ -486,12 +478,11 @@ func (x *StringValue) GetValue() string {
 //
 // The JSON representation for `BytesValue` is JSON string.
 type BytesValue struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The bytes value.
-	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	Value         []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 // Bytes stores v in a new BytesValue and returns a pointer to it.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index bdd4cdd75e..049a3956dc 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -409,7 +409,7 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.35.2
+# google.golang.org/protobuf v1.36.0
 ## explicit; go 1.21
 google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/protojson
@@ -432,6 +432,7 @@ google.golang.org/protobuf/internal/genid
 google.golang.org/protobuf/internal/impl
 google.golang.org/protobuf/internal/order
 google.golang.org/protobuf/internal/pragma
+google.golang.org/protobuf/internal/protolazy
 google.golang.org/protobuf/internal/set
 google.golang.org/protobuf/internal/strs
 google.golang.org/protobuf/internal/version