updated protobuf to 3.18.1

Sven Czarnian
2021-10-10 20:51:05 +02:00
parent ea460b1aaf
commit 7d69203486
69 changed files with 6374 additions and 3669 deletions


@@ -66,11 +66,11 @@ class PROTOBUF_EXPORT ArenaMetricsCollector {
   // Invoked when the arena is about to be destroyed. This method will
   // typically finalize any metric collection and delete the collector.
   // space_allocated is the space used by the arena.
-  virtual void OnDestroy(uint64 space_allocated) = 0;
+  virtual void OnDestroy(uint64_t space_allocated) = 0;
 
   // OnReset() is called when the associated arena is reset.
   // space_allocated is the space used by the arena just before the reset.
-  virtual void OnReset(uint64 space_allocated) = 0;
+  virtual void OnReset(uint64_t space_allocated) = 0;
 
   // OnAlloc is called when an allocation happens.
   // type_info is promised to be static - its lifetime extends to
@@ -79,7 +79,7 @@ class PROTOBUF_EXPORT ArenaMetricsCollector {
   // intentionally want to avoid monitoring an allocation. (i.e. internal
   // allocations for managing the arena)
   virtual void OnAlloc(const std::type_info* allocated_type,
-                       uint64 alloc_size) = 0;
+                       uint64_t alloc_size) = 0;
 
   // Does OnAlloc() need to be called? If false, metric collection overhead
   // will be reduced since we will not do extra work per allocation.
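The change repeated throughout this commit is mechanical: protobuf 3.18 retires the library-local `uint64` alias (defined, roughly, as `typedef uint64_t uint64;` in stubs/port.h) in favor of the standard `uint64_t` from `<cstdint>`. A minimal sketch of what the swap means for a header like this one; `CollectorSketch` is a hypothetical stand-in, not the real class:

    #include <cstdint>

    // Hypothetical stand-in for ArenaMetricsCollector, reduced to one method.
    // After the update, the header names the <cstdint> type directly instead
    // of relying on a protobuf-internal alias being in scope.
    struct CollectorSketch {
      virtual ~CollectorSketch() = default;
      virtual void OnDestroy(uint64_t space_allocated) = 0;  // was: uint64
    };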
@@ -141,10 +141,10 @@ class PROTOBUF_EXPORT SerialArena {
   Memory Free(Deallocator deallocator);
 
   void CleanupList();
-  uint64 SpaceAllocated() const {
+  uint64_t SpaceAllocated() const {
     return space_allocated_.load(std::memory_order_relaxed);
   }
-  uint64 SpaceUsed() const;
+  uint64_t SpaceUsed() const;
 
   bool HasSpace(size_t n) { return n <= static_cast<size_t>(limit_ - ptr_); }
@@ -154,6 +154,11 @@ class PROTOBUF_EXPORT SerialArena {
     if (PROTOBUF_PREDICT_FALSE(!HasSpace(n))) {
       return AllocateAlignedFallback(n, policy);
     }
+    return AllocateFromExisting(n);
+  }
+
+ private:
+  void* AllocateFromExisting(size_t n) {
     void* ret = ptr_;
     ptr_ += n;
 #ifdef ADDRESS_SANITIZER
@@ -162,17 +167,13 @@ class PROTOBUF_EXPORT SerialArena {
     return ret;
   }
 
+ public:
   // Allocate space if the current region provides enough space.
   bool MaybeAllocateAligned(size_t n, void** out) {
     GOOGLE_DCHECK_EQ(internal::AlignUpTo8(n), n);  // Must be already aligned.
     GOOGLE_DCHECK_GE(limit_, ptr_);
     if (PROTOBUF_PREDICT_FALSE(!HasSpace(n))) return false;
-    void* ret = ptr_;
-    ptr_ += n;
-#ifdef ADDRESS_SANITIZER
-    ASAN_UNPOISON_MEMORY_REGION(ret, n);
-#endif  // ADDRESS_SANITIZER
-    *out = ret;
+    *out = AllocateFromExisting(n);
     return true;
   }
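The two hunks above are the heart of the refactoring half of this update: the duplicated bump-pointer body (the `ptr_` advance plus the ASan unpoison) is pulled out of AllocateAligned and MaybeAllocateAligned into a single private AllocateFromExisting helper. A self-contained sketch of the pattern, with the fallback path and the sanitizer/DCHECK plumbing stripped; `BumpRegionSketch` and its members are illustrative, not the real class:

    #include <cstddef>

    // Minimal sketch of the bump-pointer pattern the refactor centralizes.
    // ptr_/limit_ mirror the SerialArena members; everything else is reduced.
    class BumpRegionSketch {
     public:
      BumpRegionSketch(char* begin, char* end) : ptr_(begin), limit_(end) {}

      bool HasSpace(size_t n) const {
        return n <= static_cast<size_t>(limit_ - ptr_);
      }

      // Both entry points now share one single-purpose helper, so the
      // pointer-bump logic lives in exactly one place.
      void* AllocateAligned(size_t n) {
        if (!HasSpace(n)) return nullptr;  // real code falls back to a new block
        return AllocateFromExisting(n);
      }

      bool MaybeAllocateAligned(size_t n, void** out) {
        if (!HasSpace(n)) return false;
        *out = AllocateFromExisting(n);
        return true;
      }

     private:
      void* AllocateFromExisting(size_t n) {
        void* ret = ptr_;
        ptr_ += n;  // bump the cursor past the bytes just handed out
        return ret;
      }

      char* ptr_;
      char* limit_;
    };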
@@ -181,6 +182,12 @@ class PROTOBUF_EXPORT SerialArena {
     if (PROTOBUF_PREDICT_FALSE(!HasSpace(n + kCleanupSize))) {
       return AllocateAlignedWithCleanupFallback(n, policy);
     }
+    return AllocateFromExistingWithCleanupFallback(n);
+  }
+
+ private:
+  std::pair<void*, CleanupNode*> AllocateFromExistingWithCleanupFallback(
+      size_t n) {
     void* ret = ptr_;
     ptr_ += n;
     limit_ -= kCleanupSize;
@@ -191,6 +198,7 @@ class PROTOBUF_EXPORT SerialArena {
     return CreatePair(ret, reinterpret_cast<CleanupNode*>(limit_));
   }
 
+ public:
   void AddCleanup(void* elem, void (*cleanup)(void*),
                   const AllocationPolicy* policy) {
     auto res = AllocateAlignedWithCleanup(0, policy);
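The same extraction is applied to the cleanup variant. Worth spelling out what the body does: a block serves two ends at once, handing out object bytes from the bottom (`ptr_` grows upward) and a CleanupNode from the top (`limit_` shrinks downward). A condensed sketch of that two-ended layout; the node layout and class names are simplified stand-ins:

    #include <cstddef>
    #include <utility>

    // Simplified node; the real CleanupNode lives in arena internals.
    struct CleanupNodeSketch {
      void* elem;
      void (*cleanup)(void*);
    };

    class CleanupRegionSketch {
     public:
      CleanupRegionSketch(char* begin, char* end) : ptr_(begin), limit_(end) {}

      // Caller must already have checked HasSpace(n + sizeof(CleanupNodeSketch)),
      // as the real code does before taking this fast path.
      std::pair<void*, CleanupNodeSketch*> AllocateWithCleanup(size_t n) {
        void* ret = ptr_;
        ptr_ += n;                            // object bytes: bottom end, grows up
        limit_ -= sizeof(CleanupNodeSketch);  // cleanup node: top end, grows down
        return {ret, reinterpret_cast<CleanupNodeSketch*>(limit_)};
      }

     private:
      char* ptr_;
      char* limit_;
    };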
@@ -206,13 +214,15 @@ class PROTOBUF_EXPORT SerialArena {
   // Blocks are variable length malloc-ed objects. The following structure
   // describes the common header for all blocks.
   struct Block {
+    Block(Block* next, size_t size) : next(next), size(size), start(nullptr) {}
+
     char* Pointer(size_t n) {
       GOOGLE_DCHECK(n <= size);
       return reinterpret_cast<char*>(this) + n;
     }
 
-    Block* next;
-    size_t size;
+    Block* const next;
+    const size_t size;
     CleanupNode* start;
     // data follows
   };
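Block gains a constructor, and `next` and `size` become const: once a block header is constructed, its list linkage and size never change, and only `start`, the head of the cleanup list, remains mutable. A reduced sketch of the idea; `BlockSketch` and the usage line are illustrative:

    #include <cstddef>
    #include <new>

    // Hypothetical reduced header mirroring the constified fields.
    struct BlockSketch {
      BlockSketch(BlockSketch* next, size_t size) : next(next), size(size) {}
      BlockSketch* const next;  // fixed at construction: position in the list
      const size_t size;        // fixed at construction: bytes in this block
    };

    // Usage sketch: the header is placement-constructed at the front of a
    // freshly malloc'd region, so the const members are set exactly once:
    //   BlockSketch* b = new (raw) BlockSketch(old_head, total_bytes);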
@@ -277,10 +287,10 @@ class PROTOBUF_EXPORT ThreadSafeArena {
   // if it was passed in.
   ~ThreadSafeArena();
 
-  uint64 Reset();
+  uint64_t Reset();
 
-  uint64 SpaceAllocated() const;
-  uint64 SpaceUsed() const;
+  uint64_t SpaceAllocated() const;
+  uint64_t SpaceUsed() const;
 
   void* AllocateAligned(size_t n, const std::type_info* type) {
     SerialArena* arena;
@@ -312,7 +322,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
  private:
   // Unique for each arena. Changes on Reset().
-  uint64 tag_and_id_;
+  uint64_t tag_and_id_;
   // The LSB of tag_and_id_ indicates if allocs in this arena are recorded.
   enum { kRecordAllocs = 1 };
@@ -343,7 +353,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
   inline bool ShouldRecordAlloc() const { return tag_and_id_ & kRecordAllocs; }
 
-  inline uint64 LifeCycleId() const {
+  inline uint64_t LifeCycleId() const {
     return tag_and_id_ & (-kRecordAllocs - 1);
   }
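A note on the masking expression in LifeCycleId(): with `kRecordAllocs == 1`, `-kRecordAllocs - 1` equals `~kRecordAllocs` in two's complement, i.e. all bits set except the LSB, so the AND clears the record-allocs flag and keeps the id bits. A standalone check (the example value is arbitrary):

    #include <cassert>
    #include <cstdint>

    int main() {
      constexpr uint64_t kRecordAllocs = 1;
      uint64_t tag_and_id = 0x2A5;  // arbitrary id with the flag bit set

      // -kRecordAllocs - 1 == ~kRecordAllocs, so the AND strips only the LSB.
      assert((tag_and_id & (-kRecordAllocs - 1)) ==
             (tag_and_id & ~kRecordAllocs));
      assert((tag_and_id & ~kRecordAllocs) == 0x2A4);
      return 0;
    }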
@@ -362,7 +372,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
     hint_.store(serial, std::memory_order_release);
   }
 
-  PROTOBUF_NDEBUG_INLINE bool GetSerialArenaFast(uint64 lifecycle_id,
+  PROTOBUF_NDEBUG_INLINE bool GetSerialArenaFast(uint64_t lifecycle_id,
                                                  SerialArena** arena) {
     if (GetSerialArenaFromThreadCache(lifecycle_id, arena)) return true;
     if (lifecycle_id & kRecordAllocs) return false;
@@ -379,7 +389,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
   }
 
   PROTOBUF_NDEBUG_INLINE bool GetSerialArenaFromThreadCache(
-      uint64 lifecycle_id, SerialArena** arena) {
+      uint64_t lifecycle_id, SerialArena** arena) {
     // If this thread already owns a block in this arena then try to use that.
     // This fast path optimizes the case where multiple threads allocate from
     // the same arena.
@@ -427,10 +437,10 @@ class PROTOBUF_EXPORT ThreadSafeArena {
     static constexpr size_t kPerThreadIds = 256;
     // Next lifecycle ID available to this thread. We need to reserve a new
     // batch, if `next_lifecycle_id & (kPerThreadIds - 1) == 0`.
-    uint64 next_lifecycle_id;
+    uint64_t next_lifecycle_id;
     // The ThreadCache is considered valid as long as this matches the
     // lifecycle_id of the arena being used.
-    uint64 last_lifecycle_id_seen;
+    uint64_t last_lifecycle_id_seen;
     SerialArena* last_serial_arena;
   };
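These ThreadCache fields carry the per-thread fast path: each thread caches the last SerialArena it used along with the lifecycle id it belonged to, and GetSerialArenaFromThreadCache can skip the shared lookup while the cached id still matches; Reset() invalidates stale entries by changing the arena's id. A condensed sketch of the lookup, with types reduced to hypothetical stand-ins and the slow-path fallback elided:

    #include <cstdint>

    struct SerialArenaSketch {};  // stand-in for the real SerialArena

    struct ThreadCacheSketch {
      uint64_t last_lifecycle_id_seen = static_cast<uint64_t>(-1);
      SerialArenaSketch* last_serial_arena = nullptr;
    };

    inline bool GetFromThreadCache(const ThreadCacheSketch& tc,
                                   uint64_t lifecycle_id,
                                   SerialArenaSketch** arena) {
      // The cache is valid only while the stored id matches the arena we are
      // allocating from; a stale id forces the caller onto the slow path.
      if (tc.last_lifecycle_id_seen != lifecycle_id) return false;
      *arena = tc.last_serial_arena;
      return true;
    }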