--- cef_binary_3.3359.1774.gd49d25f_windows32/include/internal/cef_string_wrappers.h.orig 2019-03-09 15:59:59.571378000 +0100
+++ cef_binary_3.3359.1774.gd49d25f_windows32/include/internal/cef_string_wrappers.h 2019-03-09 16:00:08.284090000 +0100
@@ -33,6 +33,7 @@
 
 #include <memory.h>
 #include <string>
+#include <cstring>
 
 #include "include/base/cef_string16.h"
 #include "include/internal/cef_string_types.h"
--- cef_binary_3.3359.1774.gd49d25f_windows32/include/base/cef_bind_helpers.h.orig 2018-05-17 11:46:05.000000000 +0200
+++ cef_binary_3.3359.1774.gd49d25f_windows32/include/base/cef_bind_helpers.h 2019-03-09 16:03:17.830725000 +0100
@@ -272,12 +272,12 @@
   // MSVC warns when you try to use Base if T has a private destructor, the
   // common pattern for refcounted types. It does this even though no attempt to
   // instantiate Base is made. We disable the warning for this definition.
-#if defined(OS_WIN)
+#if defined(OS_WIN) && defined(COMPILER_MSVC)
 #pragma warning(push)
 #pragma warning(disable : 4624)
 #endif
   struct Base : public T, public BaseMixin {};
-#if defined(OS_WIN)
+#if defined(OS_WIN) && defined(COMPILER_MSVC)
 #pragma warning(pop)
 #endif
 
--- cef_binary_3.3359.1774.gd49d25f_windows32/include/base/internal/cef_atomicops_x86_gcc.h.orig 2019-04-02 20:09:10.287478919 +0200
+++ cef_binary_3.3359.1774.gd49d25f_windows32/include/base/internal/cef_atomicops_x86_gcc.h 2019-04-02 20:09:10.287478919 +0200
@@ -0,0 +1,268 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//    * Redistributions of source code must retain the above copyright
+//      notice, this list of conditions and the following disclaimer.
+//    * Redistributions in binary form must reproduce the above
+//      copyright notice, this list of conditions and the following disclaimer
+//      in the documentation and/or other materials provided with the
+//      distribution.
+//    * Neither the name of Google Inc. nor the name Chromium Embedded
+//      Framework nor the names of its contributors may be used to endorse
+//      or promote products derived from this software without specific prior
+//      written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Do not include this header file directly. Use base/cef_atomicops.h
+// instead.
+
+#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_GCC_H_
+#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_GCC_H_
+
+// This struct is not part of the public API of this module; clients may not
+// use it.
+// Features of this x86. Values may not be correct before main() is run,
+// but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
+                             // after acquire compare-and-swap.
+};
+extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+namespace base {
+namespace subtle {
+
+// 32-bit low-level operations on any platform.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+  __asm__ __volatile__("lock; cmpxchgl %1,%2"
+                       : "=a"(prev)
+                       : "q"(new_value), "m"(*ptr), "0"(old_value)
+                       : "memory");
+  return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
+                       : "=r"(new_value)
+                       : "m"(*ptr), "0"(new_value)
+                       : "memory");
+  return new_value;  // Now it's the previous value.
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 temp = increment;
+  __asm__ __volatile__("lock; xaddl %0,%1"
+                       : "+r"(temp), "+m"(*ptr)
+                       :
+                       : "memory");
+  // temp now holds the old value of *ptr
+  return temp + increment;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  Atomic32 temp = increment;
+  __asm__ __volatile__("lock; xaddl %0,%1"
+                       : "+r"(temp), "+m"(*ptr)
+                       :
+                       : "memory");
+  // temp now holds the old value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return temp + increment;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() {
+  __asm__ __volatile__("mfence" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ATOMICOPS_COMPILER_BARRIER();
+  *ptr = value;  // An x86 store acts as a release barrier.
+  // See comments in Atomic64 version of Release_Store(), below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;  // An x86 load acts as a acquire barrier.
+  // See comments in Atomic64 version of Release_Store(), below.
+  ATOMICOPS_COMPILER_BARRIER();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#if defined(__x86_64__)
+
+// 64-bit low-level operations on 64-bit platform.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev;
+  __asm__ __volatile__("lock; cmpxchgq %1,%2"
+                       : "=a"(prev)
+                       : "q"(new_value), "m"(*ptr), "0"(old_value)
+                       : "memory");
+  return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
+                       : "=r"(new_value)
+                       : "m"(*ptr), "0"(new_value)
+                       : "memory");
+  return new_value;  // Now it's the previous value.
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 temp = increment;
+  __asm__ __volatile__("lock; xaddq %0,%1"
+                       : "+r"(temp), "+m"(*ptr)
+                       :
+                       : "memory");
+  // temp now contains the previous value of *ptr
+  return temp + increment;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  Atomic64 temp = increment;
+  __asm__ __volatile__("lock; xaddq %0,%1"
+                       : "+r"(temp), "+m"(*ptr)
+                       :
+                       : "memory");
+  // temp now contains the previous value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return temp + increment;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ATOMICOPS_COMPILER_BARRIER();
+
+  *ptr = value;  // An x86 store acts as a release barrier
+                 // for current AMD/Intel chips as of Jan 2008.
+                 // See also Acquire_Load(), below.
+
+  // When new chips come out, check:
+  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chatper 7: Multiple-processor management,
+  //  Section 7.2, Memory Ordering.
+  // Last seen at:
+  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
+  //
+  // x86 stores/loads fail to act as barriers for a few instructions (clflush
+  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
+  // not generated by the compiler, and are rare. Users of these instructions
+  // need to know about cache behaviour in any case since all of these involve
+  // either flushing cache lines or non-temporal cache hints.
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;  // An x86 load acts as a acquire barrier,
+                          // for current AMD/Intel chips as of Jan 2008.
+                          // See also Release_Store(), above.
+  ATOMICOPS_COMPILER_BARRIER();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+#endif  // defined(__x86_64__)
+
+}  // namespace base::subtle
+}  // namespace base
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif  // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_GCC_H_
--- cef_binary_3.3359.1774.gd49d25f_windows32/cmake/cef_variables.cmake.orig 2019-04-02 19:37:47.332457010 +0200
+++ cef_binary_3.3359.1774.gd49d25f_windows32/cmake/cef_variables.cmake 2019-03-09 14:17:10.333854000 +0100
@@ -70,6 +70,8 @@
 # Linux configuration.
 #
 
+set(OS_LINUX true)
+set(OS_WINDOWS false)
 if(OS_LINUX)
   # Platform-specific compiler/linker flags.
   set(CEF_LIBTYPE SHARED)
@@ -185,21 +187,24 @@
     )
 
   # CEF directory paths.
+  # CEF directory paths.
   set(CEF_RESOURCE_DIR "${_CEF_ROOT}/Resources")
-  set(CEF_BINARY_DIR "${_CEF_ROOT}/${CMAKE_BUILD_TYPE}")
+  set(CEF_BINARY_DIR "${_CEF_ROOT}/$<CONFIGURATION>")
   set(CEF_BINARY_DIR_DEBUG "${_CEF_ROOT}/Debug")
   set(CEF_BINARY_DIR_RELEASE "${_CEF_ROOT}/Release")
 
   # CEF library paths.
-  set(CEF_LIB_DEBUG "${CEF_BINARY_DIR_DEBUG}/libcef.so")
-  set(CEF_LIB_RELEASE "${CEF_BINARY_DIR_RELEASE}/libcef.so")
+  set(CEF_LIB_DEBUG "${CEF_BINARY_DIR_DEBUG}/libcef.lib")
+  set(CEF_LIB_RELEASE "${CEF_BINARY_DIR_RELEASE}/libcef.lib")
 
   # List of CEF binary files.
   set(CEF_BINARY_FILES
-    chrome-sandbox
-    libcef.so
-    libEGL.so
-    libGLESv2.so
+    chrome_elf.dll
+    d3dcompiler_43.dll
+    d3dcompiler_47.dll
+    libcef.dll
+    libEGL.dll
+    libGLESv2.dll
     natives_blob.bin
     snapshot_blob.bin
     v8_context_snapshot.bin
@@ -216,6 +221,25 @@
     icudtl.dat
     locales
     )
+
+  if(USE_SANDBOX)
+    list(APPEND CEF_COMPILER_DEFINES
+      PSAPI_VERSION=1 # Required by cef_sandbox.lib
+      CEF_USE_SANDBOX # Used by apps to test if the sandbox is enabled
+      )
+
+    # Libraries required by cef_sandbox.lib.
+    set(CEF_SANDBOX_STANDARD_LIBS
+      dbghelp.lib
+      psapi.lib
+      version.lib
+      winmm.lib
+      )
+
+    # CEF sandbox library paths.
+    set(CEF_SANDBOX_LIB_DEBUG "${CEF_BINARY_DIR_DEBUG}/cef_sandbox.lib")
+    set(CEF_SANDBOX_LIB_RELEASE "${CEF_BINARY_DIR_RELEASE}/cef_sandbox.lib")
+  endif()
 endif()
 
 