source: docker/msys/cef_binary_3.3359.1774.gd49d25f_windows32.patch@ 971:3f37dc53abb3

Last change on this file since 971:3f37dc53abb3 was 920:42b14124051b, checked in by István Váradi <ivaradi@…>, 6 years ago

Win 10 Docker image for building the new Python3-based distribution

File size: 13.3 KB
  • cef_binary_3.3359.1774.gd49d25f_windows32/include/internal/cef_string_wrappers.h

@@ -33,6 +33,7 @@
 
 #include <memory.h>
 #include <string>
+#include <cstring>
 
 #include "include/base/cef_string16.h"
 #include "include/internal/cef_string_types.h"
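
The added include matters for GCC (MinGW) builds: the string wrappers call memcpy() and strlen(), which MSVC's headers happen to pull in transitively but libstdc++ does not guarantee. A minimal sketch of the dependency (DuplicateCString is a hypothetical helper, not from CEF):

    // Hypothetical helper, not from CEF: under MinGW GCC this compiles
    // only because <cstring> declares std::strlen/std::memcpy.
    #include <cstring>
    #include <string>

    std::string DuplicateCString(const char* src) {
      const std::size_t len = std::strlen(src);  // declared in <cstring>
      std::string copy(len, '\0');
      std::memcpy(&copy[0], src, len);           // declared in <cstring>
      return copy;
    }
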
  • cef_binary_3.3359.1774.gd49d25f_windows32/include/base/cef_bind_helpers.h

@@ -272,12 +272,12 @@
 // MSVC warns when you try to use Base if T has a private destructor, the
 // common pattern for refcounted types. It does this even though no attempt to
 // instantiate Base is made.  We disable the warning for this definition.
-#if defined(OS_WIN)
+#if defined(OS_WIN) && defined(COMPILER_MSVC)
 #pragma warning(push)
 #pragma warning(disable : 4624)
 #endif
   struct Base : public T, public BaseMixin {};
-#if defined(OS_WIN)
+#if defined(OS_WIN) && defined(COMPILER_MSVC)
 #pragma warning(pop)
 #endif
 
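
The tightened guard matters for MinGW cross-builds, where OS_WIN is defined but `#pragma warning` is an MSVC-only extension that GCC merely warns about (-Wunknown-pragmas). A sketch of the resulting pattern, using _MSC_VER as a stand-in for CEF's COMPILER_MSVC define and hypothetical RefCounted/Probe types:

    // Sketch: the pragmas compile only under MSVC; a GCC build skips them.
    class RefCounted {
     public:
      void AddRef() {}
      void Release() {}
     private:
      ~RefCounted() {}  // private dtor, the refcounted pattern noted above
    };

    struct BaseMixin {};

    #if defined(_MSC_VER)  // stand-in for OS_WIN && COMPILER_MSVC
    #pragma warning(push)
    #pragma warning(disable : 4624)  // 'Base': destructor implicitly deleted
    #endif
    template <typename T>
    struct Probe {
      struct Base : public T, public BaseMixin {};  // never instantiated
    };
    #if defined(_MSC_VER)
    #pragma warning(pop)
    #endif
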
  • cef_binary_3.3359.1774.gd49d25f_windows32/include/base/internal/cef_atomicops_x86_gcc.h

new file
@@ -0,0 +1,268 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//    * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//    * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//    * Neither the name of Google Inc. nor the name Chromium Embedded
+// Framework nor the names of its contributors may be used to endorse
+// or promote products derived from this software without specific prior
+// written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Do not include this header file directly. Use base/cef_atomicops.h
+// instead.
+
+#ifndef CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_GCC_H_
+#define CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_GCC_H_
+
+// This struct is not part of the public API of this module; clients may not
+// use it.
+// Features of this x86.  Values may not be correct before main() is run,
+// but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
+                             // after acquire compare-and-swap.
+};
+extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+namespace base {
+namespace subtle {
+
+// 32-bit low-level operations on any platform.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+  __asm__ __volatile__("lock; cmpxchgl %1,%2"
+                       : "=a"(prev)
+                       : "q"(new_value), "m"(*ptr), "0"(old_value)
+                       : "memory");
+  return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
+                       : "=r"(new_value)
+                       : "m"(*ptr), "0"(new_value)
+                       : "memory");
+  return new_value;  // Now it's the previous value.
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 temp = increment;
+  __asm__ __volatile__("lock; xaddl %0,%1"
+                       : "+r"(temp), "+m"(*ptr)
+                       :
+                       : "memory");
+  // temp now holds the old value of *ptr
+  return temp + increment;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  Atomic32 temp = increment;
+  __asm__ __volatile__("lock; xaddl %0,%1"
+                       : "+r"(temp), "+m"(*ptr)
+                       :
+                       : "memory");
+  // temp now holds the old value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return temp + increment;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() {
+  __asm__ __volatile__("mfence" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ATOMICOPS_COMPILER_BARRIER();
+  *ptr = value;  // An x86 store acts as a release barrier.
+  // See comments in Atomic64 version of Release_Store(), below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
+  // See comments in Atomic64 version of Release_Store(), below.
+  ATOMICOPS_COMPILER_BARRIER();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#if defined(__x86_64__)
+
+// 64-bit low-level operations on 64-bit platform.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev;
+  __asm__ __volatile__("lock; cmpxchgq %1,%2"
+                       : "=a"(prev)
+                       : "q"(new_value), "m"(*ptr), "0"(old_value)
+                       : "memory");
+  return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
+                       : "=r"(new_value)
+                       : "m"(*ptr), "0"(new_value)
+                       : "memory");
+  return new_value;  // Now it's the previous value.
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 temp = increment;
+  __asm__ __volatile__("lock; xaddq %0,%1"
+                       : "+r"(temp), "+m"(*ptr)
+                       :
+                       : "memory");
+  // temp now contains the previous value of *ptr
+  return temp + increment;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  Atomic64 temp = increment;
+  __asm__ __volatile__("lock; xaddq %0,%1"
+                       : "+r"(temp), "+m"(*ptr)
+                       :
+                       : "memory");
+  // temp now contains the previous value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return temp + increment;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ATOMICOPS_COMPILER_BARRIER();
+
+  *ptr = value;  // An x86 store acts as a release barrier
+                 // for current AMD/Intel chips as of Jan 2008.
+                 // See also Acquire_Load(), below.
+
+  // When new chips come out, check:
+  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chapter 7: Multiple-processor management,
+  //  Section 7.2, Memory Ordering.
+  // Last seen at:
+  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
+  //
+  // x86 stores/loads fail to act as barriers for a few instructions (clflush
+  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
+  // not generated by the compiler, and are rare.  Users of these instructions
+  // need to know about cache behaviour in any case since all of these involve
+  // either flushing cache lines or non-temporal cache hints.
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
+                          // for current AMD/Intel chips as of Jan 2008.
+                          // See also Release_Store(), above.
+  ATOMICOPS_COMPILER_BARRIER();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+#endif  // defined(__x86_64__)
+
+}  // namespace base::subtle
+}  // namespace base
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif  // CEF_INCLUDE_BASE_INTERNAL_CEF_ATOMICOPS_X86_GCC_H_
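
This header is added presumably so that the MinGW GCC toolchain in the Docker image has x86 atomics, since the windows32 distribution otherwise targets MSVC. A hedged usage sketch of the primitives above, a minimal spinlock (the SpinLock class is illustrative, not part of CEF):

    // Illustrative spinlock built on the primitives above; include the
    // public header rather than this internal one.
    #include "include/base/cef_atomicops.h"

    class SpinLock {
     public:
      // Acquire semantics: reads after Lock() cannot move before the swap.
      void Lock() {
        while (base::subtle::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
          // Busy-wait; a production lock would yield or pause here.
        }
      }
      // Release semantics: writes made before Unlock() are visible to the
      // next thread that acquires the lock.
      void Unlock() { base::subtle::Release_Store(&state_, 0); }

     private:
      volatile base::subtle::Atomic32 state_ = 0;
    };
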
  • cef_binary_3.3359.1774.gd49d25f_windows32/cmake/cef_variables.cmake

@@ -70,6 +70,8 @@
 # Linux configuration.
 #
 
+set(OS_LINUX true)
+set(OS_WINDOWS false)
 if(OS_LINUX)
   # Platform-specific compiler/linker flags.
   set(CEF_LIBTYPE SHARED)
     
@@ -185,21 +187,24 @@
     )
 
   # CEF directory paths.
+  # CEF directory paths.
   set(CEF_RESOURCE_DIR        "${_CEF_ROOT}/Resources")
-  set(CEF_BINARY_DIR          "${_CEF_ROOT}/${CMAKE_BUILD_TYPE}")
+  set(CEF_BINARY_DIR          "${_CEF_ROOT}/$<CONFIGURATION>")
   set(CEF_BINARY_DIR_DEBUG    "${_CEF_ROOT}/Debug")
   set(CEF_BINARY_DIR_RELEASE  "${_CEF_ROOT}/Release")
 
   # CEF library paths.
-  set(CEF_LIB_DEBUG   "${CEF_BINARY_DIR_DEBUG}/libcef.so")
-  set(CEF_LIB_RELEASE "${CEF_BINARY_DIR_RELEASE}/libcef.so")
+  set(CEF_LIB_DEBUG   "${CEF_BINARY_DIR_DEBUG}/libcef.lib")
+  set(CEF_LIB_RELEASE "${CEF_BINARY_DIR_RELEASE}/libcef.lib")
 
   # List of CEF binary files.
   set(CEF_BINARY_FILES
-    chrome-sandbox
-    libcef.so
-    libEGL.so
-    libGLESv2.so
+    chrome_elf.dll
+    d3dcompiler_43.dll
+    d3dcompiler_47.dll
+    libcef.dll
+    libEGL.dll
+    libGLESv2.dll
     natives_blob.bin
     snapshot_blob.bin
     v8_context_snapshot.bin
     
@@ -216,6 +221,25 @@
     icudtl.dat
     locales
     )
+
+  if(USE_SANDBOX)
+    list(APPEND CEF_COMPILER_DEFINES
+      PSAPI_VERSION=1   # Required by cef_sandbox.lib
+      CEF_USE_SANDBOX   # Used by apps to test if the sandbox is enabled
+      )
+
+    # Libraries required by cef_sandbox.lib.
+    set(CEF_SANDBOX_STANDARD_LIBS
+      dbghelp.lib
+      psapi.lib
+      version.lib
+      winmm.lib
+      )
+
+    # CEF sandbox library paths.
+    set(CEF_SANDBOX_LIB_DEBUG "${CEF_BINARY_DIR_DEBUG}/cef_sandbox.lib")
+    set(CEF_SANDBOX_LIB_RELEASE "${CEF_BINARY_DIR_RELEASE}/cef_sandbox.lib")
+  endif()
 endif()
 
 
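The CEF_USE_SANDBOX define added above is consumed by application code. A sketch of the canonical Windows entry point, following CEF's cefsimple sample (error handling and the CefApp implementation trimmed):

    // Sketch after CEF's cefsimple sample: CEF_USE_SANDBOX comes from the
    // CEF_COMPILER_DEFINES list set up in the hunk above.
    #include <windows.h>

    #include "include/cef_app.h"
    #if defined(CEF_USE_SANDBOX)
    #include "include/cef_sandbox_win.h"  // pairs with cef_sandbox.lib
    #endif

    int APIENTRY wWinMain(HINSTANCE hInstance, HINSTANCE, LPWSTR, int) {
      void* sandbox_info = nullptr;
    #if defined(CEF_USE_SANDBOX)
      // Must stay in scope until after CefInitialize().
      CefScopedSandboxInfo scoped_sandbox;
      sandbox_info = scoped_sandbox.sandbox_info();
    #endif

      CefMainArgs main_args(hInstance);
      // Sub-processes exit here; the browser process gets -1 and continues.
      int exit_code = CefExecuteProcess(main_args, nullptr, sandbox_info);
      if (exit_code >= 0)
        return exit_code;

      CefSettings settings;
    #if !defined(CEF_USE_SANDBOX)
      settings.no_sandbox = true;
    #endif
      CefInitialize(main_args, settings, nullptr, sandbox_info);
      CefRunMessageLoop();
      CefShutdown();
      return 0;
    }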