commit 638f368adf4165ffa456a3b44cd6c46b1a5b27cf
Author: Rex Dieter <rdieter@gmail.com>
Date: Wed Mar 25 10:19:05 2020 -0500
sync patches from fedora
qt5-qtwebengine-freeworld.spec | 17 +-
...d-clock_nanosleep-in-Linux-sandbox-manual.patch | 129 +++++
...ne-everywhere-5.13.2-fix-chromium-headers.patch | 578 +++++++++++++++++++++
3 files changed, 723 insertions(+), 1 deletion(-)
---
diff --git a/qt5-qtwebengine-freeworld.spec b/qt5-qtwebengine-freeworld.spec
index 8f7e23d..460e68c 100644
--- a/qt5-qtwebengine-freeworld.spec
+++ b/qt5-qtwebengine-freeworld.spec
@@ -42,7 +42,7 @@
Summary: Qt5 - QtWebEngine components (freeworld version)
Name: qt5-qtwebengine-freeworld
Version: 5.13.2
-Release: 2%{?dist}
+Release: 3%{?dist}
%global major_minor %(echo %{version} | cut -d. -f-2)
%global major %(echo %{version} | cut -d. -f1)
@@ -82,6 +82,15 @@ Patch24: qtwebengine-everywhere-src-5.11.3-aarch64-new-stat.patch
Patch25: qtwebengine-everywhere-5.13.2-missing-semicolon-in-blink.patch
# Use Python2
Patch26: qtwebengine-everywhere-5.13.2-use-python2.patch
+# Fix missing include in chromium
+Patch27: qtwebengine-everywhere-5.13.2-fix-chromium-headers.patch
+# Fix for clock_nanosleep
+# https://bugreports.qt.io/browse/QTBUG-81313
+# https://codereview.qt-project.org/c/qt/qtwebengine-chromium/+/292352
+# Qt: https://codereview.qt-project.org/gitweb?p=qt/qtwebengine-chromium.git;a=...
+# Chromium: https://chromium.googlesource.com/chromium/src/+/54407b422a9cbf775a68c1d5...
+# Didn't apply cleanly, manually ported
+Patch28: qtwebengine-everywhere-5.13.2-allow-restricted-clock_nanosleep-in-Linux-sandbox-manual.patch
## Upstream patches:
# qtwebengine-chromium
@@ -342,6 +351,9 @@ popd
%patch24 -p1 -b .aarch64-new-stat
%patch25 -p1 -b .missing-semicolon-in-blink
%patch26 -p1 -b .use-python2
+%patch27 -p1 -b .fix-chromium
+
+%patch28 -p0 -b .allow-clock_nanosleep
# the xkbcommon config/feature was renamed in 5.12, so need to adjust QT_CONFIG references
# when building on older Qt releases
@@ -434,6 +446,9 @@ echo "%{_libdir}/%{name}" \
%config(noreplace) %{_sysconfdir}/ld.so.conf.d/%{name}-%{_arch}.conf
%changelog
+* Wed Mar 25 2020 Rex Dieter <rdieter@fedoraproject.org> - 5.13.2-3
+- sync patches from fedora
+
* Wed Feb 05 2020 RPM Fusion Release Engineering <leigh123linux@gmail.com> - 5.13.2-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
diff --git a/qtwebengine-everywhere-5.13.2-allow-restricted-clock_nanosleep-in-Linux-sandbox-manual.patch b/qtwebengine-everywhere-5.13.2-allow-restricted-clock_nanosleep-in-Linux-sandbox-manual.patch
new file mode 100644
index 0000000..2cc4e0a
--- /dev/null
+++ b/qtwebengine-everywhere-5.13.2-allow-restricted-clock_nanosleep-in-Linux-sandbox-manual.patch
@@ -0,0 +1,129 @@
+diff -ur ../qtwebengine-everywhere-src-5.13.2.orig/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc ./src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc
+--- ../qtwebengine-everywhere-src-5.13.2.orig/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc 2020-03-25 12:57:05.214021490 +0000
++++ ./src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc 2020-03-25 12:58:35.813396054 +0000
+@@ -137,7 +137,7 @@
+ return Allow();
+ #endif
+
+- if (sysno == __NR_clock_gettime) {
++ if (sysno == __NR_clock_gettime || sysno == __NR_clock_nanosleep) {
+ return RestrictClockID();
+ }
+
+diff -ur ../qtwebengine-everywhere-src-5.13.2.orig/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc ./src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
+--- ../qtwebengine-everywhere-src-5.13.2.orig/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc 2020-03-25 12:57:05.214021490 +0000
++++ ./src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc 2020-03-25 13:01:05.971702078 +0000
+@@ -393,6 +393,18 @@
+ syscall(SYS_clock_gettime, CLOCK_MONOTONIC_RAW, &ts);
+ }
+
++BPF_DEATH_TEST_C(BaselinePolicy,
++ ClockNanosleepWithDisallowedClockCrashes,
++ DEATH_SEGV_MESSAGE(GetErrorMessageContentForTests()),
++ BaselinePolicy) {
++ struct timespec ts;
++ struct timespec out_ts;
++ ts.tv_sec = 0;
++ ts.tv_nsec = 0;
++ syscall(SYS_clock_nanosleep, (~0) | CLOCKFD, 0, &ts, &out_ts);
++}
++
++
+ #if !defined(GRND_RANDOM)
+ #define GRND_RANDOM 2
+ #endif
+diff -ur ../qtwebengine-everywhere-src-5.13.2.orig/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h ./src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h
+--- ../qtwebengine-everywhere-src-5.13.2.orig/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h 2020-03-25 12:57:05.213021508 +0000
++++ ./src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.h 2020-03-25 13:03:32.058081155 +0000
+@@ -86,12 +86,13 @@
+ // process).
+ SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictGetrusage();
+
+-// Restrict |clk_id| for clock_getres(), clock_gettime() and clock_settime().
+-// We allow accessing only CLOCK_MONOTONIC, CLOCK_PROCESS_CPUTIME_ID,
+-// CLOCK_REALTIME, and CLOCK_THREAD_CPUTIME_ID. In particular, this disallows
+-// access to arbitrary per-{process,thread} CPU-time clock IDs (such as those
+-// returned by {clock,pthread}_getcpuclockid), which can leak information
+-// about the state of the host OS.
++// Restrict |clk_id| for clock_getres(), clock_gettime(), clock_settime(), and
++// clock_nanosleep(). We allow accessing only CLOCK_BOOTTIME,
++// CLOCK_MONOTONIC{,_RAW,_COARSE}, CLOCK_PROCESS_CPUTIME_ID,
++// CLOCK_REALTIME{,_COARSE}, and CLOCK_THREAD_CPUTIME_ID. In particular, on
++// non-Android platforms this disallows access to arbitrary per-{process,thread}
++// CPU-time clock IDs (such as those returned by {clock,pthread}_getcpuclockid),
++// which can leak information about the state of the host OS.
+ SANDBOX_EXPORT bpf_dsl::ResultExpr RestrictClockID();
+
+ // Restrict the flags argument to getrandom() to allow only no flags, or
+diff -ur ../qtwebengine-everywhere-src-5.13.2.orig/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc ./src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
+--- ../qtwebengine-everywhere-src-5.13.2.orig/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc 2020-03-25 12:57:05.213021508 +0000
++++ ./src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc 2020-03-25 13:06:05.643325692 +0000
+@@ -59,6 +59,7 @@
+ switch (sysno) {
+ case __NR_clock_gettime:
+ case __NR_clock_getres:
++ case __NR_clock_nanosleep:
+ return RestrictClockID();
+ default:
+ return Allow();
+@@ -99,6 +100,25 @@
+ #endif
+ }
+
++void CheckClockNanosleep(clockid_t clockid) {
++ struct timespec ts;
++ struct timespec out_ts;
++ ts.tv_sec = 0;
++ ts.tv_nsec = 0;
++ clock_nanosleep(clockid, 0, &ts, &out_ts);
++}
++
++BPF_TEST_C(ParameterRestrictions,
++ clock_nanosleep_allowed,
++ RestrictClockIdPolicy) {
++ CheckClockNanosleep(CLOCK_MONOTONIC);
++ CheckClockNanosleep(CLOCK_MONOTONIC_COARSE);
++ CheckClockNanosleep(CLOCK_MONOTONIC_RAW);
++ CheckClockNanosleep(CLOCK_BOOTTIME);
++ CheckClockNanosleep(CLOCK_REALTIME);
++ CheckClockNanosleep(CLOCK_REALTIME_COARSE);
++}
++
+ BPF_DEATH_TEST_C(ParameterRestrictions,
+ clock_gettime_crash_monotonic_raw,
+ DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
+@@ -107,6 +127,17 @@
+ syscall(SYS_clock_gettime, CLOCK_MONOTONIC_RAW, &ts);
+ }
+
++BPF_DEATH_TEST_C(ParameterRestrictions,
++ clock_nanosleep_crash_clock_fd,
++ DEATH_SEGV_MESSAGE(sandbox::GetErrorMessageContentForTests()),
++ RestrictClockIdPolicy) {
++ struct timespec ts;
++ struct timespec out_ts;
++ ts.tv_sec = 0;
++ ts.tv_nsec = 0;
++ syscall(SYS_clock_nanosleep, (~0) | CLOCKFD, 0, &ts, &out_ts);
++}
++
+ #if !defined(OS_ANDROID)
+ BPF_DEATH_TEST_C(ParameterRestrictions,
+ clock_gettime_crash_cpu_clock,
+diff -ur ../qtwebengine-everywhere-src-5.13.2.orig/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc ./src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
+--- ../qtwebengine-everywhere-src-5.13.2.orig/src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc 2020-03-25 12:57:05.213021508 +0000
++++ ./src/3rdparty/chromium/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc 2020-03-25 13:06:50.881514077 +0000
+@@ -35,9 +35,10 @@
+ return true;
+ case __NR_adjtimex: // Privileged.
+ case __NR_clock_adjtime: // Privileged.
+- case __NR_clock_getres: // Could be allowed.
+- case __NR_clock_gettime:
+- case __NR_clock_nanosleep: // Could be allowed.
++ case __NR_clock_getres: // Allowed only on Android with parameters
++ // filtered by RestrictClockID().
++ case __NR_clock_gettime: // Parameters filtered by RestrictClockID().
++ case __NR_clock_nanosleep: // Parameters filtered by RestrictClockID().
+ case __NR_clock_settime: // Privileged.
+ #if defined(__i386__) || \
+ (defined(ARCH_CPU_MIPS_FAMILY) && defined(ARCH_CPU_32_BITS))
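
In short, the patch above routes __NR_clock_nanosleep through the same RestrictClockID() parameter filter that already guards clock_gettime(), so only the fixed, whitelisted clock IDs are reachable from sandboxed processes; anything else (for example a per-process CPU clock from clock_getcpuclockid()) is killed with SIGSYS, as the added death tests check. The following standalone C++ sketch is not part of the patch and runs outside the sandbox, so nothing is actually blocked; it only mirrors the clock_nanosleep_allowed test to show which clock IDs the patched policy permits:

// Standalone sketch, not from the patch: exercises clock_nanosleep() with the
// clock IDs whitelisted by the patched RestrictClockID(), mirroring the
// clock_nanosleep_allowed test added above. Outside the sandbox every call
// reaches the kernel; under the patched seccomp policy, any other clock ID
// would be rejected with SIGSYS before the kernel sees it.
#include <cstdio>
#include <cstring>
#include <time.h>

static void TrySleep(clockid_t clock_id, const char* name) {
  timespec ts{};   // zero-length relative sleep, as in the unit test
  timespec rem{};
  int rc = clock_nanosleep(clock_id, 0, &ts, &rem);
  // The _RAW/_COARSE clocks are refused by the kernel itself (EOPNOTSUPP);
  // the policy change only decides whether the syscall may be issued at all.
  std::printf("clock_nanosleep(%-22s) -> %s\n", name,
              rc == 0 ? "ok" : std::strerror(rc));
}

int main() {
  TrySleep(CLOCK_MONOTONIC, "CLOCK_MONOTONIC");
  TrySleep(CLOCK_MONOTONIC_COARSE, "CLOCK_MONOTONIC_COARSE");
  TrySleep(CLOCK_MONOTONIC_RAW, "CLOCK_MONOTONIC_RAW");
  TrySleep(CLOCK_BOOTTIME, "CLOCK_BOOTTIME");
  TrySleep(CLOCK_REALTIME, "CLOCK_REALTIME");
  TrySleep(CLOCK_REALTIME_COARSE, "CLOCK_REALTIME_COARSE");
  return 0;
}
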
diff --git a/qtwebengine-everywhere-5.13.2-fix-chromium-headers.patch b/qtwebengine-everywhere-5.13.2-fix-chromium-headers.patch
new file mode 100644
index 0000000..d14f1e0
--- /dev/null
+++ b/qtwebengine-everywhere-5.13.2-fix-chromium-headers.patch
@@ -0,0 +1,578 @@
+From 033c7aa2da1bc78347765d60c15843ece02ef4d8 Mon Sep 17 00:00:00 2001
+From: Troy Dawson <tdawson@redhat.com>
+Date: Tue, 11 Feb 2020 15:43:30 -0800
+Subject: [PATCH] fix chromium headers
+
+---
+ .../chromium/cc/base/list_container_helper.cc | 250 -----------------
+ .../chromium/cc/base/list_container_helper.h | 254 ++++++++++++++++++
+ .../aec3/clockdrift_detector.h | 1 +
+ .../modules/video_coding/decoding_state.h | 1 +
+ 4 files changed, 256 insertions(+), 250 deletions(-)
+
+diff --git a/src/3rdparty/chromium/cc/base/list_container_helper.cc b/src/3rdparty/chromium/cc/base/list_container_helper.cc
+index 380ad3dd1..c4a9245d9 100644
+--- a/src/3rdparty/chromium/cc/base/list_container_helper.cc
++++ b/src/3rdparty/chromium/cc/base/list_container_helper.cc
+@@ -13,258 +13,8 @@
+ #include "base/macros.h"
+ #include "base/memory/aligned_memory.h"
+
+-namespace {
+-const size_t kDefaultNumElementTypesToReserve = 32;
+-} // namespace
+-
+ namespace cc {
+
+-// CharAllocator
+-////////////////////////////////////////////////////
+-// This class deals only with char* and void*. It does allocation and passing
+-// out raw pointers, as well as memory deallocation when being destroyed.
+-class ListContainerHelper::CharAllocator {
+- public:
+- // CharAllocator::InnerList
+- /////////////////////////////////////////////
+- // This class holds the raw memory chunk, as well as information about its
+- // size and availability.
+- struct InnerList {
+- std::unique_ptr<char[], base::AlignedFreeDeleter> data;
+- // The number of elements in total the memory can hold. The difference
+- // between capacity and size is the how many more elements this list can
+- // hold.
+- size_t capacity;
+- // The number of elements have been put into this list.
+- size_t size;
+- // The size of each element is in bytes. This is used to move from between
+- // elements' memory locations.
+- size_t step;
+-
+- InnerList() : capacity(0), size(0), step(0) {}
+-
+- void Erase(char* position) {
+- // Confident that destructor is called by caller of this function. Since
+- // CharAllocator does not handle construction after
+- // allocation, it doesn't handle desctrution before deallocation.
+- DCHECK_LE(position, LastElement());
+- DCHECK_GE(position, Begin());
+- char* start = position + step;
+- std::copy(start, End(), position);
+-
+- --size;
+- // Decrease capacity to avoid creating not full not last InnerList.
+- --capacity;
+- }
+-
+- void InsertBefore(size_t alignment, char** position, size_t count) {
+- DCHECK_LE(*position, LastElement() + step);
+- DCHECK_GE(*position, Begin());
+-
+- // Adjust the size and capacity
+- size_t old_size = size;
+- size += count;
+- capacity = size;
+-
+- // Allocate the new data and update the iterator's pointer.
+- std::unique_ptr<char[], base::AlignedFreeDeleter> new_data(
+- static_cast<char*>(base::AlignedAlloc(size * step, alignment)));
+- size_t position_offset = *position - Begin();
+- *position = new_data.get() + position_offset;
+-
+- // Copy the data before the inserted segment
+- memcpy(new_data.get(), data.get(), position_offset);
+- // Copy the data after the inserted segment.
+- memcpy(new_data.get() + position_offset + count * step,
+- data.get() + position_offset, old_size * step - position_offset);
+- data = std::move(new_data);
+- }
+-
+- bool IsEmpty() const { return !size; }
+- bool IsFull() { return capacity == size; }
+- size_t NumElementsAvailable() const { return capacity - size; }
+-
+- void* AddElement() {
+- DCHECK_LT(size, capacity);
+- ++size;
+- return LastElement();
+- }
+-
+- void RemoveLast() {
+- DCHECK(!IsEmpty());
+- --size;
+- }
+-
+- char* Begin() const { return data.get(); }
+- char* End() const { return data.get() + size * step; }
+- char* LastElement() const { return data.get() + (size - 1) * step; }
+- char* ElementAt(size_t index) const { return data.get() + index * step; }
+-
+- private:
+- DISALLOW_COPY_AND_ASSIGN(InnerList);
+- };
+-
+- CharAllocator(size_t alignment, size_t element_size, size_t element_count)
+- // base::AlignedAlloc does not accept alignment less than sizeof(void*).
+- : alignment_(std::max(sizeof(void*), alignment)),
+- element_size_(element_size),
+- size_(0),
+- last_list_index_(0),
+- last_list_(nullptr) {
+- // If this fails, then alignment of elements after the first could be wrong,
+- // and we need to pad sizes to fix that.
+- DCHECK_EQ(element_size % alignment, 0u);
+- AllocateNewList(element_count > 0 ? element_count
+- : kDefaultNumElementTypesToReserve);
+- last_list_ = storage_[last_list_index_].get();
+- }
+-
+- ~CharAllocator() = default;
+-
+- void* Allocate() {
+- if (last_list_->IsFull()) {
+- // Only allocate a new list if there isn't a spare one still there from
+- // previous usage.
+- if (last_list_index_ + 1 >= storage_.size())
+- AllocateNewList(last_list_->capacity * 2);
+-
+- ++last_list_index_;
+- last_list_ = storage_[last_list_index_].get();
+- }
+-
+- ++size_;
+- return last_list_->AddElement();
+- }
+-
+- size_t alignment() const { return alignment_; }
+- size_t element_size() const { return element_size_; }
+- size_t list_count() const { return storage_.size(); }
+- size_t size() const { return size_; }
+- bool IsEmpty() const { return size() == 0; }
+-
+- size_t Capacity() const {
+- size_t capacity_sum = 0;
+- for (const auto& inner_list : storage_)
+- capacity_sum += inner_list->capacity;
+- return capacity_sum;
+- }
+-
+- void Clear() {
+- // Remove all except for the first InnerList.
+- DCHECK(!storage_.empty());
+- storage_.erase(storage_.begin() + 1, storage_.end());
+- last_list_index_ = 0;
+- last_list_ = storage_[0].get();
+- last_list_->size = 0;
+- size_ = 0;
+- }
+-
+- void RemoveLast() {
+- DCHECK(!IsEmpty());
+- last_list_->RemoveLast();
+- if (last_list_->IsEmpty() && last_list_index_ > 0) {
+- --last_list_index_;
+- last_list_ = storage_[last_list_index_].get();
+-
+- // If there are now two empty inner lists, free one of them.
+- if (last_list_index_ + 2 < storage_.size())
+- storage_.pop_back();
+- }
+- --size_;
+- }
+-
+- void Erase(PositionInCharAllocator* position) {
+- DCHECK_EQ(this, position->ptr_to_container);
+-
+- // Update |position| to point to the element after the erased element.
+- InnerList* list = storage_[position->vector_index].get();
+- char* item_iterator = position->item_iterator;
+- if (item_iterator == list->LastElement())
+- position->Increment();
+-
+- list->Erase(item_iterator);
+- // TODO(weiliangc): Free the InnerList if it is empty.
+- --size_;
+- }
+-
+- void InsertBefore(ListContainerHelper::Iterator* position, size_t count) {
+- if (!count)
+- return;
+-
+- // If |position| is End(), then append |count| elements at the end. This
+- // will happen to not invalidate any iterators or memory.
+- if (!position->item_iterator) {
+- // Set |position| to be the first inserted element.
+- Allocate();
+- position->vector_index = storage_.size() - 1;
+- position->item_iterator = storage_[position->vector_index]->LastElement();
+- // Allocate the rest.
+- for (size_t i = 1; i < count; ++i)
+- Allocate();
+- } else {
+- storage_[position->vector_index]->InsertBefore(
+- alignment_, &position->item_iterator, count);
+- size_ += count;
+- }
+- }
+-
+- InnerList* InnerListById(size_t id) const {
+- DCHECK_LT(id, storage_.size());
+- return storage_[id].get();
+- }
+-
+- size_t FirstInnerListId() const {
+- // |size_| > 0 means that at least one vector in |storage_| will be
+- // non-empty.
+- DCHECK_GT(size_, 0u);
+- size_t id = 0;
+- while (storage_[id]->size == 0)
+- ++id;
+- return id;
+- }
+-
+- size_t LastInnerListId() const {
+- // |size_| > 0 means that at least one vector in |storage_| will be
+- // non-empty.
+- DCHECK_GT(size_, 0u);
+- size_t id = storage_.size() - 1;
+- while (storage_[id]->size == 0)
+- --id;
+- return id;
+- }
+-
+- size_t NumAvailableElementsInLastList() const {
+- return last_list_->NumElementsAvailable();
+- }
+-
+- private:
+- void AllocateNewList(size_t list_size) {
+- std::unique_ptr<InnerList> new_list(new InnerList);
+- new_list->capacity = list_size;
+- new_list->size = 0;
+- new_list->step = element_size_;
+- new_list->data.reset(static_cast<char*>(
+- base::AlignedAlloc(list_size * element_size_, alignment_)));
+- storage_.push_back(std::move(new_list));
+- }
+-
+- std::vector<std::unique_ptr<InnerList>> storage_;
+- const size_t alignment_;
+- const size_t element_size_;
+-
+- // The number of elements in the list.
+- size_t size_;
+-
+- // The index of the last list to have had elements added to it, or the only
+- // list if the container has not had elements added since being cleared.
+- size_t last_list_index_;
+-
+- // This is equivalent to |storage_[last_list_index_]|.
+- InnerList* last_list_;
+-
+- DISALLOW_COPY_AND_ASSIGN(CharAllocator);
+-};
+-
+ // PositionInCharAllocator
+ //////////////////////////////////////////////////////
+ ListContainerHelper::PositionInCharAllocator::PositionInCharAllocator(
+diff --git a/src/3rdparty/chromium/cc/base/list_container_helper.h b/src/3rdparty/chromium/cc/base/list_container_helper.h
+index c79cf1f18..a44ecb4de 100644
+--- a/src/3rdparty/chromium/cc/base/list_container_helper.h
++++ b/src/3rdparty/chromium/cc/base/list_container_helper.h
+@@ -8,10 +8,18 @@
+ #include <stddef.h>
+
+ #include <memory>
++#include <algorithm>
++#include <vector>
+
++#include "base/logging.h"
+ #include "base/macros.h"
++#include "base/memory/aligned_memory.h"
+ #include "cc/base/base_export.h"
+
++namespace {
++const size_t kDefaultNumElementTypesToReserve = 32;
++} // namespace
++
+ namespace cc {
+
+ // Helper class for ListContainer non-templated logic. All methods are private,
+@@ -174,6 +182,252 @@ class CC_BASE_EXPORT ListContainerHelper final {
+ DISALLOW_COPY_AND_ASSIGN(ListContainerHelper);
+ };
+
++// CharAllocator
++////////////////////////////////////////////////////
++// This class deals only with char* and void*. It does allocation and passing
++// out raw pointers, as well as memory deallocation when being destroyed.
++class ListContainerHelper::CharAllocator {
++ public:
++ // CharAllocator::InnerList
++ /////////////////////////////////////////////
++ // This class holds the raw memory chunk, as well as information about its
++ // size and availability.
++ struct InnerList {
++ std::unique_ptr<char[], base::AlignedFreeDeleter> data;
++ // The number of elements in total the memory can hold. The difference
++ // between capacity and size is the how many more elements this list can
++ // hold.
++ size_t capacity;
++ // The number of elements have been put into this list.
++ size_t size;
++ // The size of each element is in bytes. This is used to move from between
++ // elements' memory locations.
++ size_t step;
++
++ InnerList() : capacity(0), size(0), step(0) {}
++
++ void Erase(char* position) {
++ // Confident that destructor is called by caller of this function. Since
++ // CharAllocator does not handle construction after
++ // allocation, it doesn't handle desctrution before deallocation.
++ DCHECK_LE(position, LastElement());
++ DCHECK_GE(position, Begin());
++ char* start = position + step;
++ std::copy(start, End(), position);
++
++ --size;
++ // Decrease capacity to avoid creating not full not last InnerList.
++ --capacity;
++ }
++
++ void InsertBefore(size_t alignment, char** position, size_t count) {
++ DCHECK_LE(*position, LastElement() + step);
++ DCHECK_GE(*position, Begin());
++
++ // Adjust the size and capacity
++ size_t old_size = size;
++ size += count;
++ capacity = size;
++
++ // Allocate the new data and update the iterator's pointer.
++ std::unique_ptr<char[], base::AlignedFreeDeleter> new_data(
++ static_cast<char*>(base::AlignedAlloc(size * step, alignment)));
++ size_t position_offset = *position - Begin();
++ *position = new_data.get() + position_offset;
++
++ // Copy the data before the inserted segment
++ memcpy(new_data.get(), data.get(), position_offset);
++ // Copy the data after the inserted segment.
++ memcpy(new_data.get() + position_offset + count * step,
++ data.get() + position_offset, old_size * step - position_offset);
++ data = std::move(new_data);
++ }
++
++ bool IsEmpty() const { return !size; }
++ bool IsFull() { return capacity == size; }
++ size_t NumElementsAvailable() const { return capacity - size; }
++
++ void* AddElement() {
++ DCHECK_LT(size, capacity);
++ ++size;
++ return LastElement();
++ }
++
++ void RemoveLast() {
++ DCHECK(!IsEmpty());
++ --size;
++ }
++
++ char* Begin() const { return data.get(); }
++ char* End() const { return data.get() + size * step; }
++ char* LastElement() const { return data.get() + (size - 1) * step; }
++ char* ElementAt(size_t index) const { return data.get() + index * step; }
++
++ private:
++ DISALLOW_COPY_AND_ASSIGN(InnerList);
++ };
++
++ CharAllocator(size_t alignment, size_t element_size, size_t element_count)
++ // base::AlignedAlloc does not accept alignment less than sizeof(void*).
++ : alignment_(std::max(sizeof(void*), alignment)),
++ element_size_(element_size),
++ size_(0),
++ last_list_index_(0),
++ last_list_(nullptr) {
++ // If this fails, then alignment of elements after the first could be wrong,
++ // and we need to pad sizes to fix that.
++ DCHECK_EQ(element_size % alignment, 0u);
++ AllocateNewList(element_count > 0 ? element_count
++ : kDefaultNumElementTypesToReserve);
++ last_list_ = storage_[last_list_index_].get();
++ }
++
++ ~CharAllocator() = default;
++
++ void* Allocate() {
++ if (last_list_->IsFull()) {
++ // Only allocate a new list if there isn't a spare one still there from
++ // previous usage.
++ if (last_list_index_ + 1 >= storage_.size())
++ AllocateNewList(last_list_->capacity * 2);
++
++ ++last_list_index_;
++ last_list_ = storage_[last_list_index_].get();
++ }
++
++ ++size_;
++ return last_list_->AddElement();
++ }
++
++ size_t alignment() const { return alignment_; }
++ size_t element_size() const { return element_size_; }
++ size_t list_count() const { return storage_.size(); }
++ size_t size() const { return size_; }
++ bool IsEmpty() const { return size() == 0; }
++
++ size_t Capacity() const {
++ size_t capacity_sum = 0;
++ for (const auto& inner_list : storage_)
++ capacity_sum += inner_list->capacity;
++ return capacity_sum;
++ }
++
++ void Clear() {
++ // Remove all except for the first InnerList.
++ DCHECK(!storage_.empty());
++ storage_.erase(storage_.begin() + 1, storage_.end());
++ last_list_index_ = 0;
++ last_list_ = storage_[0].get();
++ last_list_->size = 0;
++ size_ = 0;
++ }
++
++ void RemoveLast() {
++ DCHECK(!IsEmpty());
++ last_list_->RemoveLast();
++ if (last_list_->IsEmpty() && last_list_index_ > 0) {
++ --last_list_index_;
++ last_list_ = storage_[last_list_index_].get();
++
++ // If there are now two empty inner lists, free one of them.
++ if (last_list_index_ + 2 < storage_.size())
++ storage_.pop_back();
++ }
++ --size_;
++ }
++
++ void Erase(PositionInCharAllocator* position) {
++ DCHECK_EQ(this, position->ptr_to_container);
++
++ // Update |position| to point to the element after the erased element.
++ InnerList* list = storage_[position->vector_index].get();
++ char* item_iterator = position->item_iterator;
++ if (item_iterator == list->LastElement())
++ position->Increment();
++
++ list->Erase(item_iterator);
++ // TODO(weiliangc): Free the InnerList if it is empty.
++ --size_;
++ }
++
++ void InsertBefore(ListContainerHelper::Iterator* position, size_t count) {
++ if (!count)
++ return;
++
++ // If |position| is End(), then append |count| elements at the end. This
++ // will happen to not invalidate any iterators or memory.
++ if (!position->item_iterator) {
++ // Set |position| to be the first inserted element.
++ Allocate();
++ position->vector_index = storage_.size() - 1;
++ position->item_iterator = storage_[position->vector_index]->LastElement();
++ // Allocate the rest.
++ for (size_t i = 1; i < count; ++i)
++ Allocate();
++ } else {
++ storage_[position->vector_index]->InsertBefore(
++ alignment_, &position->item_iterator, count);
++ size_ += count;
++ }
++ }
++
++ InnerList* InnerListById(size_t id) const {
++ DCHECK_LT(id, storage_.size());
++ return storage_[id].get();
++ }
++
++ size_t FirstInnerListId() const {
++ // |size_| > 0 means that at least one vector in |storage_| will be
++ // non-empty.
++ DCHECK_GT(size_, 0u);
++ size_t id = 0;
++ while (storage_[id]->size == 0)
++ ++id;
++ return id;
++ }
++
++ size_t LastInnerListId() const {
++ // |size_| > 0 means that at least one vector in |storage_| will be
++ // non-empty.
++ DCHECK_GT(size_, 0u);
++ size_t id = storage_.size() - 1;
++ while (storage_[id]->size == 0)
++ --id;
++ return id;
++ }
++
++ size_t NumAvailableElementsInLastList() const {
++ return last_list_->NumElementsAvailable();
++ }
++
++ private:
++ void AllocateNewList(size_t list_size) {
++ std::unique_ptr<InnerList> new_list(new InnerList);
++ new_list->capacity = list_size;
++ new_list->size = 0;
++ new_list->step = element_size_;
++ new_list->data.reset(static_cast<char*>(
++ base::AlignedAlloc(list_size * element_size_, alignment_)));
++ storage_.push_back(std::move(new_list));
++ }
++
++ std::vector<std::unique_ptr<InnerList>> storage_;
++ const size_t alignment_;
++ const size_t element_size_;
++
++ // The number of elements in the list.
++ size_t size_;
++
++ // The index of the last list to have had elements added to it, or the only
++ // list if the container has not had elements added since being cleared.
++ size_t last_list_index_;
++
++ // This is equivalent to |storage_[last_list_index_]|.
++ InnerList* last_list_;
++
++ DISALLOW_COPY_AND_ASSIGN(CharAllocator);
++};
++
+ } // namespace cc
+
+ #endif // CC_BASE_LIST_CONTAINER_HELPER_H_
+diff --git a/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/aec3/clockdrift_detector.h b/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/aec3/clockdrift_detector.h
+index 22528c948..69e624e8b 100644
+--- a/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/aec3/clockdrift_detector.h
++++ b/src/3rdparty/chromium/third_party/webrtc/modules/audio_processing/aec3/clockdrift_detector.h
+@@ -12,6 +12,7 @@
+ #define MODULES_AUDIO_PROCESSING_AEC3_CLOCKDRIFT_DETECTOR_H_
+
+ #include <array>
++#include <cstddef>
+
+ namespace webrtc {
+
+diff --git a/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/decoding_state.h b/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/decoding_state.h
+index b87fb2d03..ec972949d 100644
+--- a/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/decoding_state.h
++++ b/src/3rdparty/chromium/third_party/webrtc/modules/video_coding/decoding_state.h
+@@ -11,6 +11,7 @@
+ #ifndef MODULES_VIDEO_CODING_DECODING_STATE_H_
+ #define MODULES_VIDEO_CODING_DECODING_STATE_H_
+
++#include <cstdint>
+ #include <map>
+ #include <set>
+ #include <vector>
+--
+2.24.1
+
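
The header patch, for its part, is the familiar newer-toolchain cleanup: it moves CharAllocator into list_container_helper.h together with the <algorithm>, <vector>, base/logging.h and base/memory/aligned_memory.h includes it needs, and adds <cstddef> to clockdrift_detector.h and <cstdint> to decoding_state.h, two WebRTC headers that had been relying on transitive includes. A tiny self-contained sketch of that failure mode, using hypothetical names rather than code from the patch:

// Hypothetical mini-header in the spirit of the two WebRTC fixes: it uses
// std::array, size_t and uint32_t, so it must include <array>, <cstddef> and
// <cstdint> itself. With older libstdc++ releases these names happened to be
// available transitively through other headers; with newer ones they are not,
// and the resulting compile error is what the patch's one-line additions repair.
#include <array>
#include <cstddef>   // for std::size_t (cf. clockdrift_detector.h)
#include <cstdint>   // for std::uint32_t (cf. decoding_state.h)

struct DriftSample {                   // illustrative type, not from the patch
  std::array<float, 3> confidence{};   // same <array> usage pattern as upstream
  std::size_t frames_seen = 0;
  std::uint32_t rtp_timestamp = 0;
};

int main() {
  DriftSample sample;
  sample.frames_seen = 1;
  return (sample.rtp_timestamp == 0 && sample.frames_seen == 1) ? 0 : 1;
}
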