author     Remko Tronçon <git@el-tramo.be>    2012-08-17 21:07:13 (GMT)
committer  Remko Tronçon <git@el-tramo.be>    2012-08-17 21:07:13 (GMT)
commit     4397df6b409ca84f63838fa635fc2abe8af80b71 (patch)
tree       0806d51a1aaa6a1f9ac00df318be72fbee19ec1d /3rdParty/Breakpad/src/client/mac/handler
parent     ead6c91f24d77de3319e77ae1354387407065ef1 (diff)
download   swift-4397df6b409ca84f63838fa635fc2abe8af80b71.zip
           swift-4397df6b409ca84f63838fa635fc2abe8af80b71.tar.bz2
Added Breakpad support for Windows.
Diffstat (limited to '3rdParty/Breakpad/src/client/mac/handler')
-rw-r--r--  3rdParty/Breakpad/src/client/mac/handler/breakpad_nlist_64.cc     413
-rw-r--r--  3rdParty/Breakpad/src/client/mac/handler/breakpad_nlist_64.h       47
-rw-r--r--  3rdParty/Breakpad/src/client/mac/handler/dynamic_images.cc        578
-rw-r--r--  3rdParty/Breakpad/src/client/mac/handler/dynamic_images.h         317
-rw-r--r--  3rdParty/Breakpad/src/client/mac/handler/exception_handler.cc     830
-rw-r--r--  3rdParty/Breakpad/src/client/mac/handler/exception_handler.h      277
-rw-r--r--  3rdParty/Breakpad/src/client/mac/handler/mach_vm_compat.h          49
-rw-r--r--  3rdParty/Breakpad/src/client/mac/handler/minidump_generator.cc   1432
-rw-r--r--  3rdParty/Breakpad/src/client/mac/handler/minidump_generator.h     218
9 files changed, 4161 insertions, 0 deletions
diff --git a/3rdParty/Breakpad/src/client/mac/handler/breakpad_nlist_64.cc b/3rdParty/Breakpad/src/client/mac/handler/breakpad_nlist_64.cc
new file mode 100644
index 0000000..b50aa03
--- /dev/null
+++ b/3rdParty/Breakpad/src/client/mac/handler/breakpad_nlist_64.cc
@@ -0,0 +1,413 @@
+/*
+ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+/*
+ * This file was copied from libc/gen/nlist.c from Darwin's source code
+ * The version of nlist used as a base is from 10.5.2, libc-498
+ * http://www.opensource.apple.com/darwinsource/10.5.2/Libc-498/gen/nlist.c
+ *
+ * The full tarball is at:
+ * http://www.opensource.apple.com/darwinsource/tarballs/apsl/Libc-498.tar.gz
+ *
+ * I've modified it to be compatible with 64-bit images.
+*/
+
+#include "breakpad_nlist_64.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <fcntl.h>
+#include <mach-o/nlist.h>
+#include <mach-o/loader.h>
+#include <mach-o/fat.h>
+#include <mach/mach.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <TargetConditionals.h>
+#include <unistd.h>
+
+/* Stuff lifted from <a.out.h> and <sys/exec.h> since they are gone */
+/*
+ * Header prepended to each a.out file.
+ */
+struct exec {
+ unsigned short a_machtype; /* machine type */
+ unsigned short a_magic; /* magic number */
+ unsigned long a_text; /* size of text segment */
+ unsigned long a_data; /* size of initialized data */
+ unsigned long a_bss; /* size of uninitialized data */
+ unsigned long a_syms; /* size of symbol table */
+ unsigned long a_entry; /* entry point */
+ unsigned long a_trsize; /* size of text relocation */
+ unsigned long a_drsize; /* size of data relocation */
+};
+
+#define OMAGIC 0407 /* old impure format */
+#define NMAGIC 0410 /* read-only text */
+#define ZMAGIC 0413 /* demand load format */
+
+#define N_BADMAG(x) \
+ (((x).a_magic)!=OMAGIC && ((x).a_magic)!=NMAGIC && ((x).a_magic)!=ZMAGIC)
+#define N_TXTOFF(x) \
+ ((x).a_magic==ZMAGIC ? 0 : sizeof (struct exec))
+#define N_SYMOFF(x) \
+ (N_TXTOFF(x) + (x).a_text+(x).a_data + (x).a_trsize+(x).a_drsize)
+
+// Traits structs for specializing function templates to handle
+// 32-bit/64-bit Mach-O files.
+template<typename T>
+struct MachBits {};
+
+typedef struct nlist nlist32;
+typedef struct nlist_64 nlist64;
+
+template<>
+struct MachBits<nlist32> {
+ typedef mach_header mach_header_type;
+ typedef uint32_t word_type;
+ static const uint32_t magic = MH_MAGIC;
+};
+
+template<>
+struct MachBits<nlist64> {
+ typedef mach_header_64 mach_header_type;
+ typedef uint64_t word_type;
+ static const uint32_t magic = MH_MAGIC_64;
+};
+
+template<typename nlist_type>
+int
+__breakpad_fdnlist(int fd, nlist_type *list, const char **symbolNames,
+ cpu_type_t cpu_type);
+
+/*
+ * nlist - retrieve attributes from name list (string table version)
+ */
+
+template <typename nlist_type>
+int breakpad_nlist_common(const char *name,
+ nlist_type *list,
+ const char **symbolNames,
+ cpu_type_t cpu_type) {
+ int fd = open(name, O_RDONLY, 0);
+ if (fd < 0)
+ return -1;
+ int n = __breakpad_fdnlist(fd, list, symbolNames, cpu_type);
+ close(fd);
+ return n;
+}
+
+int breakpad_nlist(const char *name,
+ struct nlist *list,
+ const char **symbolNames,
+ cpu_type_t cpu_type) {
+ return breakpad_nlist_common(name, list, symbolNames, cpu_type);
+}
+
+int breakpad_nlist(const char *name,
+ struct nlist_64 *list,
+ const char **symbolNames,
+ cpu_type_t cpu_type) {
+ return breakpad_nlist_common(name, list, symbolNames, cpu_type);
+}
+
+/* Note: __fdnlist() is called from kvm_nlist in libkvm's kvm.c */
+
+template<typename nlist_type>
+int __breakpad_fdnlist(int fd, nlist_type *list, const char **symbolNames,
+ cpu_type_t cpu_type) {
+ typedef typename MachBits<nlist_type>::mach_header_type mach_header_type;
+ typedef typename MachBits<nlist_type>::word_type word_type;
+
+ const uint32_t magic = MachBits<nlist_type>::magic;
+
+ int maxlen = 500;
+ int nreq = 0;
+ for (nlist_type* q = list;
+ symbolNames[q-list] && symbolNames[q-list][0];
+ q++, nreq++) {
+
+ q->n_type = 0;
+ q->n_value = 0;
+ q->n_desc = 0;
+ q->n_sect = 0;
+ q->n_un.n_strx = 0;
+ }
+
+ struct exec buf;
+ if (read(fd, (char *)&buf, sizeof(buf)) != sizeof(buf) ||
+ (N_BADMAG(buf) && *((uint32_t *)&buf) != magic &&
+ CFSwapInt32BigToHost(*((uint32_t *)&buf)) != FAT_MAGIC &&
+ /* The following is the big-endian ppc64 check */
+ (*((uint32_t*)&buf)) != FAT_MAGIC)) {
+ return -1;
+ }
+
+ /* Deal with fat file if necessary */
+ unsigned arch_offset = 0;
+ if (CFSwapInt32BigToHost(*((uint32_t *)&buf)) == FAT_MAGIC ||
+ /* The following is the big-endian ppc64 check */
+ *((unsigned int *)&buf) == FAT_MAGIC) {
+ /* Get host info */
+ host_t host = mach_host_self();
+ unsigned i = HOST_BASIC_INFO_COUNT;
+ struct host_basic_info hbi;
+ kern_return_t kr;
+ if ((kr = host_info(host, HOST_BASIC_INFO,
+ (host_info_t)(&hbi), &i)) != KERN_SUCCESS) {
+ return -1;
+ }
+ mach_port_deallocate(mach_task_self(), host);
+
+ /* Read in the fat header */
+ struct fat_header fh;
+ if (lseek(fd, 0, SEEK_SET) == -1) {
+ return -1;
+ }
+ if (read(fd, (char *)&fh, sizeof(fh)) != sizeof(fh)) {
+ return -1;
+ }
+
+ /* Convert fat_narchs to host byte order */
+ fh.nfat_arch = CFSwapInt32BigToHost(fh.nfat_arch);
+
+ /* Read in the fat archs */
+ struct fat_arch *fat_archs =
+ (struct fat_arch *)malloc(fh.nfat_arch * sizeof(struct fat_arch));
+ if (fat_archs == NULL) {
+ return -1;
+ }
+ if (read(fd, (char *)fat_archs,
+ sizeof(struct fat_arch) * fh.nfat_arch) !=
+ (ssize_t)(sizeof(struct fat_arch) * fh.nfat_arch)) {
+ free(fat_archs);
+ return -1;
+ }
+
+ /*
+ * Convert archs to host byte ordering (a constraint of
+ * cpusubtype_getbestarch())
+ */
+ for (unsigned i = 0; i < fh.nfat_arch; i++) {
+ fat_archs[i].cputype =
+ CFSwapInt32BigToHost(fat_archs[i].cputype);
+ fat_archs[i].cpusubtype =
+ CFSwapInt32BigToHost(fat_archs[i].cpusubtype);
+ fat_archs[i].offset =
+ CFSwapInt32BigToHost(fat_archs[i].offset);
+ fat_archs[i].size =
+ CFSwapInt32BigToHost(fat_archs[i].size);
+ fat_archs[i].align =
+ CFSwapInt32BigToHost(fat_archs[i].align);
+ }
+
+ struct fat_arch *fap = NULL;
+ for (unsigned i = 0; i < fh.nfat_arch; i++) {
+ if (fat_archs[i].cputype == cpu_type) {
+ fap = &fat_archs[i];
+ break;
+ }
+ }
+
+ if (!fap) {
+ free(fat_archs);
+ return -1;
+ }
+ arch_offset = fap->offset;
+ free(fat_archs);
+
+ /* Read in the beginning of the architecture-specific file */
+ if (lseek(fd, arch_offset, SEEK_SET) == -1) {
+ return -1;
+ }
+ if (read(fd, (char *)&buf, sizeof(buf)) != sizeof(buf)) {
+ return -1;
+ }
+ }
+
+ off_t sa; /* symbol address */
+ off_t ss; /* start of strings */
+ register register_t n;
+ if (*((unsigned int *)&buf) == magic) {
+ if (lseek(fd, arch_offset, SEEK_SET) == -1) {
+ return -1;
+ }
+ mach_header_type mh;
+ if (read(fd, (char *)&mh, sizeof(mh)) != sizeof(mh)) {
+ return -1;
+ }
+
+ struct load_command *load_commands =
+ (struct load_command *)malloc(mh.sizeofcmds);
+ if (load_commands == NULL) {
+ return -1;
+ }
+ if (read(fd, (char *)load_commands, mh.sizeofcmds) !=
+ (ssize_t)mh.sizeofcmds) {
+ free(load_commands);
+ return -1;
+ }
+ struct symtab_command *stp = NULL;
+ struct load_command *lcp = load_commands;
+ // iterate through all load commands, looking for
+ // LC_SYMTAB load command
+ for (uint32_t i = 0; i < mh.ncmds; i++) {
+ if (lcp->cmdsize % sizeof(word_type) != 0 ||
+ lcp->cmdsize <= 0 ||
+ (char *)lcp + lcp->cmdsize >
+ (char *)load_commands + mh.sizeofcmds) {
+ free(load_commands);
+ return -1;
+ }
+ if (lcp->cmd == LC_SYMTAB) {
+ if (lcp->cmdsize !=
+ sizeof(struct symtab_command)) {
+ free(load_commands);
+ return -1;
+ }
+ stp = (struct symtab_command *)lcp;
+ break;
+ }
+ lcp = (struct load_command *)
+ ((char *)lcp + lcp->cmdsize);
+ }
+ if (stp == NULL) {
+ free(load_commands);
+ return -1;
+ }
+ // sa points to the beginning of the symbol table
+ sa = stp->symoff + arch_offset;
+ // ss points to the beginning of the string table
+ ss = stp->stroff + arch_offset;
+ // n is the number of bytes in the symbol table
+ // each symbol table entry is an nlist structure
+ n = stp->nsyms * sizeof(nlist_type);
+ free(load_commands);
+ } else {
+ sa = N_SYMOFF(buf) + arch_offset;
+ ss = sa + buf.a_syms + arch_offset;
+ n = buf.a_syms;
+ }
+
+ if (lseek(fd, sa, SEEK_SET) == -1) {
+ return -1;
+ }
+
+ // The algorithm here is to read the nlist entries in m-sized
+ // chunks into q. q is then iterated over; for each entry in q,
+ // use the string table index (q->n_un.n_strx) to read the symbol
+ // name, then scan the nlist entries passed in by the user (via p)
+ // and look for a match.
+ while (n) {
+ nlist_type space[BUFSIZ/sizeof (nlist_type)];
+ register register_t m = sizeof (space);
+
+ if (n < m)
+ m = n;
+ if (read(fd, (char *)space, m) != m)
+ break;
+ n -= m;
+ long savpos = lseek(fd, 0, SEEK_CUR);
+ if (savpos == -1) {
+ return -1;
+ }
+ for (nlist_type* q = space; (m -= sizeof(nlist_type)) >= 0; q++) {
+ char nambuf[BUFSIZ];
+
+ if (q->n_un.n_strx == 0 || q->n_type & N_STAB)
+ continue;
+
+ // seek to the location in the binary where the symbol
+ // name is stored & read it into memory
+ if (lseek(fd, ss+q->n_un.n_strx, SEEK_SET) == -1) {
+ return -1;
+ }
+ if (read(fd, nambuf, maxlen+1) == -1) {
+ return -1;
+ }
+ const char *s2 = nambuf;
+ for (nlist_type *p = list;
+ symbolNames[p-list] && symbolNames[p-list][0];
+ p++) {
+ // get the symbol name the user has passed in that
+ // corresponds to the nlist entry that we're looking at
+ const char *s1 = symbolNames[p - list];
+ while (*s1) {
+ if (*s1++ != *s2++)
+ goto cont;
+ }
+ if (*s2)
+ goto cont;
+
+ p->n_value = q->n_value;
+ p->n_type = q->n_type;
+ p->n_desc = q->n_desc;
+ p->n_sect = q->n_sect;
+ p->n_un.n_strx = q->n_un.n_strx;
+ if (--nreq == 0)
+ return nreq;
+
+ break;
+ cont: ;
+ }
+ }
+ if (lseek(fd, savpos, SEEK_SET) == -1) {
+ return -1;
+ }
+ }
+ return nreq;
+}
diff --git a/3rdParty/Breakpad/src/client/mac/handler/breakpad_nlist_64.h b/3rdParty/Breakpad/src/client/mac/handler/breakpad_nlist_64.h
new file mode 100644
index 0000000..1d2c639
--- /dev/null
+++ b/3rdParty/Breakpad/src/client/mac/handler/breakpad_nlist_64.h
@@ -0,0 +1,47 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// breakpad_nlist.h
+//
+// This file provides a header for clients of the modified
+// nlist function, implemented to also work with 64-bit Mach-O images.
+
+#ifndef CLIENT_MAC_HANDLER_BREAKPAD_NLIST_H__
+#define CLIENT_MAC_HANDLER_BREAKPAD_NLIST_H__
+
+#include <mach/machine.h>
+
+int breakpad_nlist(const char *name,
+ struct nlist *list,
+ const char **symbolNames,
+ cpu_type_t cpu_type);
+int breakpad_nlist(const char *name,
+ struct nlist_64 *list,
+ const char **symbolNames,
+ cpu_type_t cpu_type);
+
+#endif /* CLIENT_MAC_HANDLER_BREAKPAD_NLIST_H__ */
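For context, a client passes breakpad_nlist() a parallel pair of arrays: symbol names terminated by an empty string, and nlist entries that get filled in for every name that is found; the return value is the number of requested symbols that could not be resolved (see LookupSymbol() in dynamic_images.cc below). A minimal caller sketch, where the helper name AddressOfDyldSymbol is hypothetical and the caller is assumed to include <mach-o/nlist.h> itself:

#include <mach-o/nlist.h>
#include <mach/machine.h>
#include <stdint.h>

#include "breakpad_nlist_64.h"

// Hypothetical helper: return the address of |symbol| inside /usr/lib/dyld
// for the given CPU type, or 0 if the symbol could not be resolved.
static uint64_t AddressOfDyldSymbol(const char* symbol, cpu_type_t cpu_type) {
  struct nlist_64 entries[2] = {};          // one entry per requested name
  const char* names[2] = { symbol, "\0" };  // name list ends at empty string
  int unresolved = breakpad_nlist("/usr/lib/dyld", &entries[0],
                                  names, cpu_type);
  // breakpad_nlist() returns how many requested symbols were left unresolved.
  return unresolved == 0 ? entries[0].n_value : 0;
}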
diff --git a/3rdParty/Breakpad/src/client/mac/handler/dynamic_images.cc b/3rdParty/Breakpad/src/client/mac/handler/dynamic_images.cc
new file mode 100644
index 0000000..ef5743c
--- /dev/null
+++ b/3rdParty/Breakpad/src/client/mac/handler/dynamic_images.cc
@@ -0,0 +1,578 @@
+// Copyright (c) 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "client/mac/handler/dynamic_images.h"
+
+extern "C" { // needed to compile on Leopard
+ #include <mach-o/nlist.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+}
+
+#include <assert.h>
+#include <AvailabilityMacros.h>
+#include <dlfcn.h>
+#include <mach/task_info.h>
+#include <sys/sysctl.h>
+#include <TargetConditionals.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "breakpad_nlist_64.h"
+
+#if !TARGET_OS_IPHONE
+#include <CoreServices/CoreServices.h>
+
+#ifndef MAC_OS_X_VERSION_10_6
+#define MAC_OS_X_VERSION_10_6 1060
+#endif
+
+#if MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6
+
+// Fallback declarations for TASK_DYLD_INFO and friends, introduced in
+// <mach/task_info.h> in the Mac OS X 10.6 SDK.
+#define TASK_DYLD_INFO 17
+struct task_dyld_info {
+ mach_vm_address_t all_image_info_addr;
+ mach_vm_size_t all_image_info_size;
+};
+typedef struct task_dyld_info task_dyld_info_data_t;
+typedef struct task_dyld_info *task_dyld_info_t;
+#define TASK_DYLD_INFO_COUNT (sizeof(task_dyld_info_data_t) / sizeof(natural_t))
+
+#endif
+
+#endif // !TARGET_OS_IPHONE
+
+namespace google_breakpad {
+
+using std::string;
+using std::vector;
+
+//==============================================================================
+// Returns the size of the memory region containing |address| and the
+// number of bytes from |address| to the end of the region.
+// We may extend the size of the original region by the size of the
+// following region, if it is contiguous with the first, in order to
+// handle cases where we're reading strings that straddle two vm regions.
+//
+static mach_vm_size_t GetMemoryRegionSize(task_port_t target_task,
+ const uint64_t address,
+ mach_vm_size_t *size_to_end) {
+ mach_vm_address_t region_base = (mach_vm_address_t)address;
+ mach_vm_size_t region_size;
+ natural_t nesting_level = 0;
+ vm_region_submap_info_64 submap_info;
+ mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+ // Get information about the vm region containing |address|
+ vm_region_recurse_info_t region_info;
+ region_info = reinterpret_cast<vm_region_recurse_info_t>(&submap_info);
+
+ kern_return_t result =
+ mach_vm_region_recurse(target_task,
+ &region_base,
+ &region_size,
+ &nesting_level,
+ region_info,
+ &info_count);
+
+ if (result == KERN_SUCCESS) {
+ // Get distance from |address| to the end of this region
+ *size_to_end = region_base + region_size -(mach_vm_address_t)address;
+
+ // If we want to handle strings as long as 4096 characters we may need
+ // to check if there's a vm region immediately following the first one.
+ // If so, we need to extend |*size_to_end| to go all the way to the end
+ // of the second region.
+ if (*size_to_end < 4096) {
+ // Second region starts where the first one ends
+ mach_vm_address_t region_base2 =
+ (mach_vm_address_t)(region_base + region_size);
+ mach_vm_size_t region_size2;
+
+ // Get information about the following vm region
+ result =
+ mach_vm_region_recurse(target_task,
+ &region_base2,
+ &region_size2,
+ &nesting_level,
+ region_info,
+ &info_count);
+
+ // Extend region_size to go all the way to the end of the 2nd region
+ if (result == KERN_SUCCESS
+ && region_base2 == region_base + region_size) {
+ region_size += region_size2;
+ }
+ }
+
+ *size_to_end = region_base + region_size -(mach_vm_address_t)address;
+ } else {
+ region_size = 0;
+ *size_to_end = 0;
+ }
+
+ return region_size;
+}
+
+#define kMaxStringLength 8192
+//==============================================================================
+// Reads a NULL-terminated string from another task.
+//
+// Warning! This will not read any strings longer than kMaxStringLength-1
+//
+static string ReadTaskString(task_port_t target_task,
+ const uint64_t address) {
+ // The problem is we don't know how much to read until we know how long
+ // the string is. And we don't know how long the string is, until we've read
+ // the memory! So, we'll try to read kMaxStringLength bytes
+ // (or as many bytes as we can until we reach the end of the vm region).
+ mach_vm_size_t size_to_end;
+ GetMemoryRegionSize(target_task, address, &size_to_end);
+
+ if (size_to_end > 0) {
+ mach_vm_size_t size_to_read =
+ size_to_end > kMaxStringLength ? kMaxStringLength : size_to_end;
+
+ vector<uint8_t> bytes;
+ if (ReadTaskMemory(target_task, address, (size_t)size_to_read, bytes) !=
+ KERN_SUCCESS)
+ return string();
+
+ return string(reinterpret_cast<const char*>(&bytes[0]));
+ }
+
+ return string();
+}
+
+//==============================================================================
+// Reads an address range from another task. The bytes read will be returned
+// in bytes, which will be resized as necessary.
+kern_return_t ReadTaskMemory(task_port_t target_task,
+ const uint64_t address,
+ size_t length,
+ vector<uint8_t> &bytes) {
+ int systemPageSize = getpagesize();
+
+ // use the negative of the page size for the mask to find the page address
+ mach_vm_address_t page_address = address & (-systemPageSize);
+
+ mach_vm_address_t last_page_address =
+ (address + length + (systemPageSize - 1)) & (-systemPageSize);
+
+ mach_vm_size_t page_size = last_page_address - page_address;
+ uint8_t* local_start;
+ uint32_t local_length;
+
+ kern_return_t r = mach_vm_read(target_task,
+ page_address,
+ page_size,
+ reinterpret_cast<vm_offset_t*>(&local_start),
+ &local_length);
+
+ if (r != KERN_SUCCESS)
+ return r;
+
+ bytes.resize(length);
+ memcpy(&bytes[0],
+ &local_start[(mach_vm_address_t)address - page_address],
+ length);
+ mach_vm_deallocate(mach_task_self(), (uintptr_t)local_start, local_length);
+ return KERN_SUCCESS;
+}
+
+#pragma mark -
+
+//==============================================================================
+// Traits structs for specializing function templates to handle
+// 32-bit/64-bit Mach-O files.
+struct MachO32 {
+ typedef mach_header mach_header_type;
+ typedef segment_command mach_segment_command_type;
+ typedef dyld_image_info32 dyld_image_info;
+ typedef dyld_all_image_infos32 dyld_all_image_infos;
+ typedef struct nlist nlist_type;
+ static const uint32_t magic = MH_MAGIC;
+ static const uint32_t segment_load_command = LC_SEGMENT;
+};
+
+struct MachO64 {
+ typedef mach_header_64 mach_header_type;
+ typedef segment_command_64 mach_segment_command_type;
+ typedef dyld_image_info64 dyld_image_info;
+ typedef dyld_all_image_infos64 dyld_all_image_infos;
+ typedef struct nlist_64 nlist_type;
+ static const uint32_t magic = MH_MAGIC_64;
+ static const uint32_t segment_load_command = LC_SEGMENT_64;
+};
+
+template<typename MachBits>
+bool FindTextSection(DynamicImage& image) {
+ typedef typename MachBits::mach_header_type mach_header_type;
+ typedef typename MachBits::mach_segment_command_type
+ mach_segment_command_type;
+
+ const mach_header_type* header =
+ reinterpret_cast<const mach_header_type*>(&image.header_[0]);
+
+ if(header->magic != MachBits::magic) {
+ return false;
+ }
+
+ const struct load_command *cmd =
+ reinterpret_cast<const struct load_command *>(header + 1);
+
+ bool found_text_section = false;
+ bool found_dylib_id_command = false;
+ for (unsigned int i = 0; cmd && (i < header->ncmds); ++i) {
+ if (!found_text_section) {
+ if (cmd->cmd == MachBits::segment_load_command) {
+ const mach_segment_command_type *seg =
+ reinterpret_cast<const mach_segment_command_type *>(cmd);
+
+ if (!strcmp(seg->segname, "__TEXT")) {
+ image.vmaddr_ = seg->vmaddr;
+ image.vmsize_ = seg->vmsize;
+ image.slide_ = 0;
+
+ if (seg->fileoff == 0 && seg->filesize != 0) {
+ image.slide_ =
+ (uintptr_t)image.GetLoadAddress() - (uintptr_t)seg->vmaddr;
+ }
+ found_text_section = true;
+ }
+ }
+ }
+
+ if (!found_dylib_id_command) {
+ if (cmd->cmd == LC_ID_DYLIB) {
+ const struct dylib_command *dc =
+ reinterpret_cast<const struct dylib_command *>(cmd);
+
+ image.version_ = dc->dylib.current_version;
+ found_dylib_id_command = true;
+ }
+ }
+
+ if (found_dylib_id_command && found_text_section) {
+ return true;
+ }
+
+ cmd = reinterpret_cast<const struct load_command *>
+ (reinterpret_cast<const char *>(cmd) + cmd->cmdsize);
+ }
+
+ return false;
+}
+
+//==============================================================================
+// Initializes vmaddr_, vmsize_, and slide_
+void DynamicImage::CalculateMemoryAndVersionInfo() {
+ // unless we can process the header, ensure that calls to
+ // IsValid() will return false
+ vmaddr_ = 0;
+ vmsize_ = 0;
+ slide_ = 0;
+ version_ = 0;
+
+ // The function template above does all the real work.
+ if (Is64Bit())
+ FindTextSection<MachO64>(*this);
+ else
+ FindTextSection<MachO32>(*this);
+}
+
+//==============================================================================
+// The helper function template abstracts the 32/64-bit differences.
+template<typename MachBits>
+uint32_t GetFileTypeFromHeader(DynamicImage& image) {
+ typedef typename MachBits::mach_header_type mach_header_type;
+
+ const mach_header_type* header =
+ reinterpret_cast<const mach_header_type*>(&image.header_[0]);
+ return header->filetype;
+}
+
+uint32_t DynamicImage::GetFileType() {
+ if (Is64Bit())
+ return GetFileTypeFromHeader<MachO64>(*this);
+
+ return GetFileTypeFromHeader<MachO32>(*this);
+}
+
+#pragma mark -
+
+//==============================================================================
+// Loads information about dynamically loaded code in the given task.
+DynamicImages::DynamicImages(mach_port_t task)
+ : task_(task),
+ cpu_type_(DetermineTaskCPUType(task)),
+ image_list_() {
+ ReadImageInfoForTask();
+}
+
+template<typename MachBits>
+static uint64_t LookupSymbol(const char* symbol_name,
+ const char* filename,
+ cpu_type_t cpu_type) {
+ typedef typename MachBits::nlist_type nlist_type;
+
+ nlist_type symbol_info[8] = {};
+ const char *symbolNames[2] = { symbol_name, "\0" };
+ nlist_type &list = symbol_info[0];
+ int invalidEntriesCount = breakpad_nlist(filename,
+ &list,
+ symbolNames,
+ cpu_type);
+
+ if(invalidEntriesCount != 0) {
+ return 0;
+ }
+
+ assert(list.n_value);
+ return list.n_value;
+}
+
+#if TARGET_OS_IPHONE
+static bool HasTaskDyldInfo() {
+ return true;
+}
+#else
+static SInt32 GetOSVersionInternal() {
+ SInt32 os_version = 0;
+ Gestalt(gestaltSystemVersion, &os_version);
+ return os_version;
+}
+
+static SInt32 GetOSVersion() {
+ static SInt32 os_version = GetOSVersionInternal();
+ return os_version;
+}
+
+static bool HasTaskDyldInfo() {
+#if MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
+ return true;
+#else
+ return GetOSVersion() >= 0x1060;
+#endif
+}
+#endif // TARGET_OS_IPHONE
+
+uint64_t DynamicImages::GetDyldAllImageInfosPointer() {
+ if (HasTaskDyldInfo()) {
+ task_dyld_info_data_t task_dyld_info;
+ mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;
+ if (task_info(task_, TASK_DYLD_INFO, (task_info_t)&task_dyld_info,
+ &count) != KERN_SUCCESS) {
+ return 0;
+ }
+
+ return (uint64_t)task_dyld_info.all_image_info_addr;
+ } else {
+ const char *imageSymbolName = "_dyld_all_image_infos";
+ const char *dyldPath = "/usr/lib/dyld";
+
+ if (Is64Bit())
+ return LookupSymbol<MachO64>(imageSymbolName, dyldPath, cpu_type_);
+ return LookupSymbol<MachO32>(imageSymbolName, dyldPath, cpu_type_);
+ }
+}
+
+//==============================================================================
+// This code was written using dyld_debug.c (from Darwin) as a guide.
+
+template<typename MachBits>
+void ReadImageInfo(DynamicImages& images,
+ uint64_t image_list_address) {
+ typedef typename MachBits::dyld_image_info dyld_image_info;
+ typedef typename MachBits::dyld_all_image_infos dyld_all_image_infos;
+ typedef typename MachBits::mach_header_type mach_header_type;
+
+ // Read the structure inside of dyld that contains information about
+ // loaded images. We're reading from the desired task's address space.
+
+ // Here we make the assumption that dyld loaded at the same address in
+ // the crashed process vs. this one. This is an assumption made in
+ // "dyld_debug.c" and is said to be nearly always valid.
+ vector<uint8_t> dyld_all_info_bytes;
+ if (ReadTaskMemory(images.task_,
+ image_list_address,
+ sizeof(dyld_all_image_infos),
+ dyld_all_info_bytes) != KERN_SUCCESS)
+ return;
+
+ dyld_all_image_infos *dyldInfo =
+ reinterpret_cast<dyld_all_image_infos*>(&dyld_all_info_bytes[0]);
+
+ // number of loaded images
+ int count = dyldInfo->infoArrayCount;
+
+ // Read an array of dyld_image_info structures each containing
+ // information about a loaded image.
+ vector<uint8_t> dyld_info_array_bytes;
+ if (ReadTaskMemory(images.task_,
+ dyldInfo->infoArray,
+ count * sizeof(dyld_image_info),
+ dyld_info_array_bytes) != KERN_SUCCESS)
+ return;
+
+ dyld_image_info *infoArray =
+ reinterpret_cast<dyld_image_info*>(&dyld_info_array_bytes[0]);
+ images.image_list_.reserve(count);
+
+ for (int i = 0; i < count; ++i) {
+ dyld_image_info &info = infoArray[i];
+
+ // First read just the mach_header from the image in the task.
+ vector<uint8_t> mach_header_bytes;
+ if (ReadTaskMemory(images.task_,
+ info.load_address_,
+ sizeof(mach_header_type),
+ mach_header_bytes) != KERN_SUCCESS)
+ continue; // bail on this dynamic image
+
+ mach_header_type *header =
+ reinterpret_cast<mach_header_type*>(&mach_header_bytes[0]);
+
+ // Now determine the total amount necessary to read the header
+ // plus all of the load commands.
+ size_t header_size =
+ sizeof(mach_header_type) + header->sizeofcmds;
+
+ if (ReadTaskMemory(images.task_,
+ info.load_address_,
+ header_size,
+ mach_header_bytes) != KERN_SUCCESS)
+ continue;
+
+ header = reinterpret_cast<mach_header_type*>(&mach_header_bytes[0]);
+
+ // Read the file name from the task's memory space.
+ string file_path;
+ if (info.file_path_) {
+ // Although we're reading kMaxStringLength bytes, it's copied in the
+ // DynamicImage constructor below with the correct string length,
+ // so it's not really wasting memory.
+ file_path = ReadTaskString(images.task_, info.file_path_);
+ }
+
+ // Create an object representing this image and add it to our list.
+ DynamicImage *new_image;
+ new_image = new DynamicImage(&mach_header_bytes[0],
+ header_size,
+ info.load_address_,
+ file_path,
+ info.file_mod_date_,
+ images.task_,
+ images.cpu_type_);
+
+ if (new_image->IsValid()) {
+ images.image_list_.push_back(DynamicImageRef(new_image));
+ } else {
+ delete new_image;
+ }
+ }
+
+ // sorts based on loading address
+ sort(images.image_list_.begin(), images.image_list_.end());
+ // remove duplicates - this happens in certain strange cases
+ // You can see it in DashboardClient when Google Gadgets plugin
+ // is installed. Apple's crash reporter log and gdb "info shared"
+ // both show the same library multiple times at the same address
+
+ vector<DynamicImageRef>::iterator it = unique(images.image_list_.begin(),
+ images.image_list_.end());
+ images.image_list_.erase(it, images.image_list_.end());
+}
+
+void DynamicImages::ReadImageInfoForTask() {
+ uint64_t imageList = GetDyldAllImageInfosPointer();
+
+ if (imageList) {
+ if (Is64Bit())
+ ReadImageInfo<MachO64>(*this, imageList);
+ else
+ ReadImageInfo<MachO32>(*this, imageList);
+ }
+}
+
+//==============================================================================
+DynamicImage *DynamicImages::GetExecutableImage() {
+ int executable_index = GetExecutableImageIndex();
+
+ if (executable_index >= 0) {
+ return GetImage(executable_index);
+ }
+
+ return NULL;
+}
+
+//==============================================================================
+// returns -1 if failure to find executable
+int DynamicImages::GetExecutableImageIndex() {
+ int image_count = GetImageCount();
+
+ for (int i = 0; i < image_count; ++i) {
+ DynamicImage *image = GetImage(i);
+ if (image->GetFileType() == MH_EXECUTE) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+//==============================================================================
+// static
+cpu_type_t DynamicImages::DetermineTaskCPUType(task_t task) {
+ if (task == mach_task_self())
+ return GetNativeCPUType();
+
+ int mib[CTL_MAXNAME];
+ size_t mibLen = CTL_MAXNAME;
+ int err = sysctlnametomib("sysctl.proc_cputype", mib, &mibLen);
+ if (err == 0) {
+ assert(mibLen < CTL_MAXNAME);
+ pid_for_task(task, &mib[mibLen]);
+ mibLen += 1;
+
+ cpu_type_t cpu_type;
+ size_t cpuTypeSize = sizeof(cpu_type);
+ sysctl(mib, mibLen, &cpu_type, &cpuTypeSize, 0, 0);
+ return cpu_type;
+ }
+
+ return GetNativeCPUType();
+}
+
+} // namespace google_breakpad
diff --git a/3rdParty/Breakpad/src/client/mac/handler/dynamic_images.h b/3rdParty/Breakpad/src/client/mac/handler/dynamic_images.h
new file mode 100644
index 0000000..d039eda
--- /dev/null
+++ b/3rdParty/Breakpad/src/client/mac/handler/dynamic_images.h
@@ -0,0 +1,317 @@
+// Copyright (c) 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// dynamic_images.h
+//
+// Implements most of the functionality of the dyld API, while allowing an
+// arbitrary task to be introspected, unlike the dyld API which
+// only allows operation on the current task. The current implementation
+// is limited to use by 32-bit tasks.
+
+#ifndef CLIENT_MAC_HANDLER_DYNAMIC_IMAGES_H__
+#define CLIENT_MAC_HANDLER_DYNAMIC_IMAGES_H__
+
+#include <mach/mach.h>
+#include <mach-o/dyld.h>
+#include <mach-o/loader.h>
+#include <sys/types.h>
+
+#include <string>
+#include <vector>
+
+#include "mach_vm_compat.h"
+
+namespace google_breakpad {
+
+using std::string;
+using std::vector;
+
+//==============================================================================
+// The memory layout of this struct matches the dyld_image_info struct
+// defined in "dyld_gdb.h" in the darwin source.
+typedef struct dyld_image_info32 {
+ uint32_t load_address_; // struct mach_header*
+ uint32_t file_path_; // char*
+ uint32_t file_mod_date_;
+} dyld_image_info32;
+
+typedef struct dyld_image_info64 {
+ uint64_t load_address_; // struct mach_header*
+ uint64_t file_path_; // char*
+ uint64_t file_mod_date_;
+} dyld_image_info64;
+
+//==============================================================================
+// This is as defined in "dyld_gdb.h" in the darwin source.
+// _dyld_all_image_infos (in dyld) is a structure of this type
+// which will be used to determine which dynamic code has been loaded.
+typedef struct dyld_all_image_infos32 {
+ uint32_t version; // == 1 in Mac OS X 10.4
+ uint32_t infoArrayCount;
+ uint32_t infoArray; // const struct dyld_image_info*
+ uint32_t notification;
+ bool processDetachedFromSharedRegion;
+} dyld_all_image_infos32;
+
+typedef struct dyld_all_image_infos64 {
+ uint32_t version; // == 1 in Mac OS X 10.4
+ uint32_t infoArrayCount;
+ uint64_t infoArray; // const struct dyld_image_info*
+ uint64_t notification;
+ bool processDetachedFromSharedRegion;
+} dyld_all_image_infos64;
+
+// some typedefs to isolate 64/32 bit differences
+#ifdef __LP64__
+typedef mach_header_64 breakpad_mach_header;
+typedef segment_command_64 breakpad_mach_segment_command;
+#else
+typedef mach_header breakpad_mach_header;
+typedef segment_command breakpad_mach_segment_command;
+#endif
+
+// Helper functions to deal with 32-bit/64-bit Mach-O differences.
+class DynamicImage;
+template<typename MachBits>
+bool FindTextSection(DynamicImage& image);
+
+template<typename MachBits>
+uint32_t GetFileTypeFromHeader(DynamicImage& image);
+
+//==============================================================================
+// Represents a single dynamically loaded mach-o image
+class DynamicImage {
+ public:
+ DynamicImage(uint8_t *header, // data is copied
+ size_t header_size, // includes load commands
+ uint64_t load_address,
+ string file_path,
+ uintptr_t image_mod_date,
+ mach_port_t task,
+ cpu_type_t cpu_type)
+ : header_(header, header + header_size),
+ header_size_(header_size),
+ load_address_(load_address),
+ vmaddr_(0),
+ vmsize_(0),
+ slide_(0),
+ version_(0),
+ file_path_(file_path),
+ file_mod_date_(image_mod_date),
+ task_(task),
+ cpu_type_(cpu_type) {
+ CalculateMemoryAndVersionInfo();
+ }
+
+ // Size of mach_header plus load commands
+ size_t GetHeaderSize() const {return header_.size();}
+
+ // Full path to mach-o binary
+ string GetFilePath() {return file_path_;}
+
+ uint64_t GetModDate() const {return file_mod_date_;}
+
+ // Actual address where the image was loaded
+ uint64_t GetLoadAddress() const {return load_address_;}
+
+ // Address where the image should be loaded
+ mach_vm_address_t GetVMAddr() const {return vmaddr_;}
+
+ // Difference between GetLoadAddress() and GetVMAddr()
+ ptrdiff_t GetVMAddrSlide() const {return slide_;}
+
+ // Size of the image
+ mach_vm_size_t GetVMSize() const {return vmsize_;}
+
+ // Task owning this loaded image
+ mach_port_t GetTask() {return task_;}
+
+ // CPU type of the task
+ cpu_type_t GetCPUType() {return cpu_type_;}
+
+ // filetype from the Mach-O header.
+ uint32_t GetFileType();
+
+ // Return true if the task is a 64-bit architecture.
+ bool Is64Bit() { return (GetCPUType() & CPU_ARCH_ABI64) == CPU_ARCH_ABI64; }
+
+ uint32_t GetVersion() {return version_;}
+ // For sorting
+ bool operator<(const DynamicImage &inInfo) {
+ return GetLoadAddress() < inInfo.GetLoadAddress();
+ }
+
+ // Sanity checking
+ bool IsValid() {return GetVMSize() != 0;}
+
+ private:
+ DynamicImage(const DynamicImage &);
+ DynamicImage &operator=(const DynamicImage &);
+
+ friend class DynamicImages;
+ template<typename MachBits>
+ friend bool FindTextSection(DynamicImage& image);
+ template<typename MachBits>
+ friend uint32_t GetFileTypeFromHeader(DynamicImage& image);
+
+ // Initializes vmaddr_, vmsize_, and slide_
+ void CalculateMemoryAndVersionInfo();
+
+ const vector<uint8_t> header_; // our local copy of the header
+ size_t header_size_; // mach_header plus load commands
+ uint64_t load_address_; // base address image is mapped into
+ mach_vm_address_t vmaddr_;
+ mach_vm_size_t vmsize_;
+ ptrdiff_t slide_;
+ uint32_t version_; // Dylib version
+ string file_path_; // path dyld used to load the image
+ uintptr_t file_mod_date_; // time_t of image file
+
+ mach_port_t task_;
+ cpu_type_t cpu_type_; // CPU type of task_
+};
+
+//==============================================================================
+// DynamicImageRef is just a simple wrapper for a pointer to
+// DynamicImage. The reason we use it instead of a simple typedef is so
+// that we can use std::sort() on a vector of DynamicImageRefs,
+// since plain class pointers can't implement operator<().
+//
+class DynamicImageRef {
+ public:
+ explicit DynamicImageRef(DynamicImage *inP) : p(inP) {}
+ // The copy constructor is required by STL
+ DynamicImageRef(const DynamicImageRef &inRef) : p(inRef.p) {}
+
+ bool operator<(const DynamicImageRef &inRef) const {
+ return (*const_cast<DynamicImageRef*>(this)->p)
+ < (*const_cast<DynamicImageRef&>(inRef).p);
+ }
+
+ bool operator==(const DynamicImageRef &inInfo) const {
+ return (*const_cast<DynamicImageRef*>(this)->p).GetLoadAddress() ==
+ (*const_cast<DynamicImageRef&>(inInfo)).GetLoadAddress();
+ }
+
+ // Be just like DynamicImage*
+ DynamicImage *operator->() {return p;}
+ operator DynamicImage*() {return p;}
+
+ private:
+ DynamicImage *p;
+};
+
+// Helper function to deal with 32-bit/64-bit Mach-O differences.
+class DynamicImages;
+template<typename MachBits>
+void ReadImageInfo(DynamicImages& images, uint64_t image_list_address);
+
+//==============================================================================
+// An object of type DynamicImages may be created to allow introspection of
+// an arbitrary task's dynamically loaded mach-o binaries. This makes the
+// assumption that the current task has send rights to the target task.
+class DynamicImages {
+ public:
+ explicit DynamicImages(mach_port_t task);
+
+ ~DynamicImages() {
+ for (int i = 0; i < GetImageCount(); ++i) {
+ delete image_list_[i];
+ }
+ }
+
+ // Returns the number of dynamically loaded mach-o images.
+ int GetImageCount() const {return static_cast<int>(image_list_.size());}
+
+ // Returns an individual image.
+ DynamicImage *GetImage(int i) {
+ if (i < (int)image_list_.size()) {
+ return image_list_[i];
+ }
+ return NULL;
+ }
+
+ // Returns the image corresponding to the main executable.
+ DynamicImage *GetExecutableImage();
+ int GetExecutableImageIndex();
+
+ // Returns the task which we're looking at.
+ mach_port_t GetTask() const {return task_;}
+
+ // CPU type of the task
+ cpu_type_t GetCPUType() {return cpu_type_;}
+
+ // Return true if the task is a 64-bit architecture.
+ bool Is64Bit() { return (GetCPUType() & CPU_ARCH_ABI64) == CPU_ARCH_ABI64; }
+
+ // Determine the CPU type of the task being dumped.
+ static cpu_type_t DetermineTaskCPUType(task_t task);
+
+ // Get the native CPU type of this task.
+ static cpu_type_t GetNativeCPUType() {
+#if defined(__i386__)
+ return CPU_TYPE_I386;
+#elif defined(__x86_64__)
+ return CPU_TYPE_X86_64;
+#elif defined(__ppc__)
+ return CPU_TYPE_POWERPC;
+#elif defined(__ppc64__)
+ return CPU_TYPE_POWERPC64;
+#elif defined(__arm__)
+ return CPU_TYPE_ARM;
+#else
+#error "GetNativeCPUType not implemented for this architecture"
+#endif
+ }
+
+ private:
+ template<typename MachBits>
+ friend void ReadImageInfo(DynamicImages& images, uint64_t image_list_address);
+
+ bool IsOurTask() {return task_ == mach_task_self();}
+
+ // Initialization
+ void ReadImageInfoForTask();
+ uint64_t GetDyldAllImageInfosPointer();
+
+ mach_port_t task_;
+ cpu_type_t cpu_type_; // CPU type of task_
+ vector<DynamicImageRef> image_list_;
+};
+
+// Fill bytes with the contents of memory at a particular
+// location in another task.
+kern_return_t ReadTaskMemory(task_port_t target_task,
+ const uint64_t address,
+ size_t length,
+ vector<uint8_t> &bytes);
+
+} // namespace google_breakpad
+
+#endif // CLIENT_MAC_HANDLER_DYNAMIC_IMAGES_H__
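As a usage note, DynamicImages is constructed directly from a task port (to which the calling process must hold send rights) and exposes the loaded images through GetImageCount() and GetImage(). A minimal sketch, with DumpLoadedImages being a hypothetical name:

#include <cstdio>

#include <mach/mach.h>

#include "client/mac/handler/dynamic_images.h"

// Hypothetical walk-through: list every Mach-O image loaded in |task|,
// printing its load address and the path dyld used to load it.
static void DumpLoadedImages(mach_port_t task) {
  google_breakpad::DynamicImages images(task);
  for (int i = 0; i < images.GetImageCount(); ++i) {
    google_breakpad::DynamicImage* image = images.GetImage(i);
    std::printf("0x%llx %s\n",
                static_cast<unsigned long long>(image->GetLoadAddress()),
                image->GetFilePath().c_str());
  }
}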
diff --git a/3rdParty/Breakpad/src/client/mac/handler/exception_handler.cc b/3rdParty/Breakpad/src/client/mac/handler/exception_handler.cc
new file mode 100644
index 0000000..4043019
--- /dev/null
+++ b/3rdParty/Breakpad/src/client/mac/handler/exception_handler.cc
@@ -0,0 +1,830 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <map>
+#include <mach/exc.h>
+#include <mach/mig.h>
+#include <pthread.h>
+#include <signal.h>
+#include <TargetConditionals.h>
+
+#include "client/mac/handler/exception_handler.h"
+#include "client/mac/handler/minidump_generator.h"
+#include "common/mac/macho_utilities.h"
+#include "common/mac/scoped_task_suspend-inl.h"
+#include "google_breakpad/common/minidump_exception_mac.h"
+
+#ifndef USE_PROTECTED_ALLOCATIONS
+#if TARGET_OS_IPHONE
+#define USE_PROTECTED_ALLOCATIONS 1
+#else
+#define USE_PROTECTED_ALLOCATIONS 0
+#endif
+#endif
+
+// If USE_PROTECTED_ALLOCATIONS is activated then the
+// gBreakpadAllocator needs to be set up in other code
+// ahead of time. Please see ProtectedMemoryAllocator.h
+// for more details.
+#if USE_PROTECTED_ALLOCATIONS
+ #include "protected_memory_allocator.h"
+ extern ProtectedMemoryAllocator *gBreakpadAllocator;
+#endif
+
+namespace google_breakpad {
+
+static union {
+#if USE_PROTECTED_ALLOCATIONS
+ char protected_buffer[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+#endif
+ google_breakpad::ExceptionHandler *handler;
+} gProtectedData;
+
+using std::map;
+
+// These structures and techniques are illustrated in
+// Mac OS X Internals, Amit Singh, ch 9.7
+struct ExceptionMessage {
+ mach_msg_header_t header;
+ mach_msg_body_t body;
+ mach_msg_port_descriptor_t thread;
+ mach_msg_port_descriptor_t task;
+ NDR_record_t ndr;
+ exception_type_t exception;
+ mach_msg_type_number_t code_count;
+ integer_t code[EXCEPTION_CODE_MAX];
+ char padding[512];
+};
+
+struct ExceptionParameters {
+ ExceptionParameters() : count(0) {}
+ mach_msg_type_number_t count;
+ exception_mask_t masks[EXC_TYPES_COUNT];
+ mach_port_t ports[EXC_TYPES_COUNT];
+ exception_behavior_t behaviors[EXC_TYPES_COUNT];
+ thread_state_flavor_t flavors[EXC_TYPES_COUNT];
+};
+
+struct ExceptionReplyMessage {
+ mach_msg_header_t header;
+ NDR_record_t ndr;
+ kern_return_t return_code;
+};
+
+// Only catch these three exceptions. The other ones are nebulously defined
+// and may result in treating a non-fatal exception as fatal.
+exception_mask_t s_exception_mask = EXC_MASK_BAD_ACCESS |
+EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC | EXC_MASK_BREAKPOINT;
+
+#if !TARGET_OS_IPHONE
+extern "C"
+{
+ // Forward declarations for functions that need "C" style compilation
+ boolean_t exc_server(mach_msg_header_t *request,
+ mach_msg_header_t *reply);
+
+ // This symbol must be visible to dlsym() - see
+ // http://code.google.com/p/google-breakpad/issues/detail?id=345 for details.
+ kern_return_t catch_exception_raise(mach_port_t target_port,
+ mach_port_t failed_thread,
+ mach_port_t task,
+ exception_type_t exception,
+ exception_data_t code,
+ mach_msg_type_number_t code_count)
+ __attribute__((visibility("default")));
+}
+#endif
+
+kern_return_t ForwardException(mach_port_t task,
+ mach_port_t failed_thread,
+ exception_type_t exception,
+ exception_data_t code,
+ mach_msg_type_number_t code_count);
+
+#if TARGET_OS_IPHONE
+// Implementation is based on the implementation generated by mig.
+boolean_t breakpad_exc_server(mach_msg_header_t *InHeadP,
+ mach_msg_header_t *OutHeadP) {
+ OutHeadP->msgh_bits =
+ MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(InHeadP->msgh_bits), 0);
+ OutHeadP->msgh_remote_port = InHeadP->msgh_remote_port;
+ /* Minimal size: routine() will update it if different */
+ OutHeadP->msgh_size = (mach_msg_size_t)sizeof(mig_reply_error_t);
+ OutHeadP->msgh_local_port = MACH_PORT_NULL;
+ OutHeadP->msgh_id = InHeadP->msgh_id + 100;
+
+ if (InHeadP->msgh_id != 2401) {
+ ((mig_reply_error_t *)OutHeadP)->NDR = NDR_record;
+ ((mig_reply_error_t *)OutHeadP)->RetCode = MIG_BAD_ID;
+ return FALSE;
+ }
+
+#ifdef __MigPackStructs
+#pragma pack(4)
+#endif
+ typedef struct {
+ mach_msg_header_t Head;
+ /* start of the kernel processed data */
+ mach_msg_body_t msgh_body;
+ mach_msg_port_descriptor_t thread;
+ mach_msg_port_descriptor_t task;
+ /* end of the kernel processed data */
+ NDR_record_t NDR;
+ exception_type_t exception;
+ mach_msg_type_number_t codeCnt;
+ integer_t code[2];
+ mach_msg_trailer_t trailer;
+ } Request;
+
+ typedef struct {
+ mach_msg_header_t Head;
+ NDR_record_t NDR;
+ kern_return_t RetCode;
+ } Reply;
+#ifdef __MigPackStructs
+#pragma pack()
+#endif
+
+ Request *In0P = (Request *)InHeadP;
+ Reply *OutP = (Reply *)OutHeadP;
+
+ if (In0P->task.name != mach_task_self()) {
+ return FALSE;
+ }
+ OutP->RetCode = ForwardException(In0P->task.name,
+ In0P->thread.name,
+ In0P->exception,
+ In0P->code,
+ In0P->codeCnt);
+ OutP->NDR = NDR_record;
+ return TRUE;
+}
+#else
+boolean_t breakpad_exc_server(mach_msg_header_t *request,
+ mach_msg_header_t *reply) {
+ return exc_server(request, reply);
+}
+
+// Callback from exc_server()
+kern_return_t catch_exception_raise(mach_port_t port, mach_port_t failed_thread,
+ mach_port_t task,
+ exception_type_t exception,
+ exception_data_t code,
+ mach_msg_type_number_t code_count) {
+ if (task != mach_task_self()) {
+ return KERN_FAILURE;
+ }
+ return ForwardException(task, failed_thread, exception, code, code_count);
+}
+#endif
+
+ExceptionHandler::ExceptionHandler(const string &dump_path,
+ FilterCallback filter,
+ MinidumpCallback callback,
+ void *callback_context,
+ bool install_handler,
+ const char *port_name)
+ : dump_path_(),
+ filter_(filter),
+ callback_(callback),
+ callback_context_(callback_context),
+ directCallback_(NULL),
+ handler_thread_(NULL),
+ handler_port_(MACH_PORT_NULL),
+ previous_(NULL),
+ installed_exception_handler_(false),
+ is_in_teardown_(false),
+ last_minidump_write_result_(false),
+ use_minidump_write_mutex_(false) {
+ // This will update to the ID and C-string pointers
+ set_dump_path(dump_path);
+ MinidumpGenerator::GatherSystemInformation();
+#if !TARGET_OS_IPHONE
+ if (port_name)
+ crash_generation_client_.reset(new CrashGenerationClient(port_name));
+#endif
+ Setup(install_handler);
+}
+
+// special constructor if we want to bypass minidump writing and
+// simply get a callback with the exception information
+ExceptionHandler::ExceptionHandler(DirectCallback callback,
+ void *callback_context,
+ bool install_handler)
+ : dump_path_(),
+ filter_(NULL),
+ callback_(NULL),
+ callback_context_(callback_context),
+ directCallback_(callback),
+ handler_thread_(NULL),
+ handler_port_(MACH_PORT_NULL),
+ previous_(NULL),
+ installed_exception_handler_(false),
+ is_in_teardown_(false),
+ last_minidump_write_result_(false),
+ use_minidump_write_mutex_(false) {
+ MinidumpGenerator::GatherSystemInformation();
+ Setup(install_handler);
+}
+
+ExceptionHandler::~ExceptionHandler() {
+ Teardown();
+}
+
+bool ExceptionHandler::WriteMinidump(bool write_exception_stream) {
+ // If we're currently writing, just return
+ if (use_minidump_write_mutex_)
+ return false;
+
+ use_minidump_write_mutex_ = true;
+ last_minidump_write_result_ = false;
+
+ // Lock the mutex. Since we just created it, this will return immediately.
+ if (pthread_mutex_lock(&minidump_write_mutex_) == 0) {
+ // Send an empty message to the handle port so that a minidump will
+ // be written
+ SendMessageToHandlerThread(write_exception_stream ?
+ kWriteDumpWithExceptionMessage :
+ kWriteDumpMessage);
+
+ // Wait for the minidump writer to complete its writing. It will unlock
+ // the mutex when completed
+ pthread_mutex_lock(&minidump_write_mutex_);
+ }
+
+ use_minidump_write_mutex_ = false;
+ UpdateNextID();
+ return last_minidump_write_result_;
+}
+
+// static
+bool ExceptionHandler::WriteMinidump(const string &dump_path,
+ bool write_exception_stream,
+ MinidumpCallback callback,
+ void *callback_context) {
+ ExceptionHandler handler(dump_path, NULL, callback, callback_context, false,
+ NULL);
+ return handler.WriteMinidump(write_exception_stream);
+}
+
+// static
+bool ExceptionHandler::WriteMinidumpForChild(mach_port_t child,
+ mach_port_t child_blamed_thread,
+ const string &dump_path,
+ MinidumpCallback callback,
+ void *callback_context) {
+ ScopedTaskSuspend suspend(child);
+
+ MinidumpGenerator generator(child, MACH_PORT_NULL);
+ string dump_id;
+ string dump_filename = generator.UniqueNameInDirectory(dump_path, &dump_id);
+
+ generator.SetExceptionInformation(EXC_BREAKPOINT,
+#if defined (__i386__) || defined(__x86_64__)
+ EXC_I386_BPT,
+#elif defined (__ppc__) || defined (__ppc64__)
+ EXC_PPC_BREAKPOINT,
+#elif defined (__arm__)
+ EXC_ARM_BREAKPOINT,
+#else
+#error architecture not supported
+#endif
+ 0,
+ child_blamed_thread);
+ bool result = generator.Write(dump_filename.c_str());
+
+ if (callback) {
+ return callback(dump_path.c_str(), dump_id.c_str(),
+ callback_context, result);
+ }
+ return result;
+}
+
+bool ExceptionHandler::WriteMinidumpWithException(int exception_type,
+ int exception_code,
+ int exception_subcode,
+ mach_port_t thread_name,
+ bool exit_after_write,
+ bool report_current_thread) {
+ bool result = false;
+
+ if (directCallback_) {
+ if (directCallback_(callback_context_,
+ exception_type,
+ exception_code,
+ exception_subcode,
+ thread_name) ) {
+ if (exit_after_write)
+ _exit(exception_type);
+ }
+#if !TARGET_OS_IPHONE
+ } else if (IsOutOfProcess()) {
+ if (exception_type && exception_code) {
+ // If this is a real exception, give the filter (if any) a chance to
+ // decide if this should be sent.
+ if (filter_ && !filter_(callback_context_))
+ return false;
+ return crash_generation_client_->RequestDumpForException(
+ exception_type,
+ exception_code,
+ exception_subcode,
+ thread_name);
+ }
+#endif
+ } else {
+ string minidump_id;
+
+ // Putting the MinidumpGenerator in its own context will ensure that the
+ // destructor is executed, closing the newly created minidump file.
+ if (!dump_path_.empty()) {
+ MinidumpGenerator md(mach_task_self(),
+ report_current_thread ? MACH_PORT_NULL :
+ mach_thread_self());
+ if (exception_type && exception_code) {
+ // If this is a real exception, give the filter (if any) a chance to
+ // decide if this should be sent.
+ if (filter_ && !filter_(callback_context_))
+ return false;
+
+ md.SetExceptionInformation(exception_type, exception_code,
+ exception_subcode, thread_name);
+ }
+
+ result = md.Write(next_minidump_path_c_);
+ }
+
+ // Call user specified callback (if any)
+ if (callback_) {
+ // If the user callback returned true and we're handling an exception
+ // (rather than just writing out the file), then we should exit without
+ // forwarding the exception to the next handler.
+ if (callback_(dump_path_c_, next_minidump_id_c_, callback_context_,
+ result)) {
+ if (exit_after_write)
+ _exit(exception_type);
+ }
+ }
+ }
+
+ return result;
+}
+
+kern_return_t ForwardException(mach_port_t task, mach_port_t failed_thread,
+ exception_type_t exception,
+ exception_data_t code,
+ mach_msg_type_number_t code_count) {
+ // At this time, we should have called Uninstall() on the exception handler
+ // so that the current exception ports are the ones that we should be
+ // forwarding to.
+ ExceptionParameters current;
+
+ current.count = EXC_TYPES_COUNT;
+ mach_port_t current_task = mach_task_self();
+ task_get_exception_ports(current_task,
+ s_exception_mask,
+ current.masks,
+ &current.count,
+ current.ports,
+ current.behaviors,
+ current.flavors);
+
+ // Find the first exception handler that matches the exception
+ unsigned int found;
+ for (found = 0; found < current.count; ++found) {
+ if (current.masks[found] & (1 << exception)) {
+ break;
+ }
+ }
+
+ // Nothing to forward
+ if (found == current.count) {
+ fprintf(stderr, "** No previous ports for forwarding!! \n");
+ exit(KERN_FAILURE);
+ }
+
+ mach_port_t target_port = current.ports[found];
+ exception_behavior_t target_behavior = current.behaviors[found];
+
+ kern_return_t result;
+ switch (target_behavior) {
+ case EXCEPTION_DEFAULT:
+ result = exception_raise(target_port, failed_thread, task, exception,
+ code, code_count);
+ break;
+
+ default:
+ fprintf(stderr, "** Unknown exception behavior: %d\n", target_behavior);
+ result = KERN_FAILURE;
+ break;
+ }
+
+ return result;
+}
+
+// static
+void *ExceptionHandler::WaitForMessage(void *exception_handler_class) {
+ ExceptionHandler *self =
+ reinterpret_cast<ExceptionHandler *>(exception_handler_class);
+ ExceptionMessage receive;
+
+ // Wait for the exception info
+ while (1) {
+ receive.header.msgh_local_port = self->handler_port_;
+ receive.header.msgh_size = static_cast<mach_msg_size_t>(sizeof(receive));
+ kern_return_t result = mach_msg(&(receive.header),
+ MACH_RCV_MSG | MACH_RCV_LARGE, 0,
+ receive.header.msgh_size,
+ self->handler_port_,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+
+ if (result == KERN_SUCCESS) {
+ // Uninstall our handler so that we don't get in a loop if the process of
+ // writing out a minidump causes an exception. However, if the exception
+ // was caused by a fork'd process, don't uninstall things
+
+ // If the actual exception code is zero, then we're calling this handler
+ // in a way that indicates that we want to either exit this thread or
+ // generate a minidump
+ //
+ // While reporting, all threads (except this one) must be suspended
+ // to avoid misleading stacks. If appropriate they will be resumed
+ // afterwards.
+ if (!receive.exception) {
+ // Don't touch self, since this message could have been sent
+ // from its destructor.
+ if (receive.header.msgh_id == kShutdownMessage)
+ return NULL;
+
+ self->SuspendThreads();
+
+#if USE_PROTECTED_ALLOCATIONS
+ if (gBreakpadAllocator)
+ gBreakpadAllocator->Unprotect();
+#endif
+
+ mach_port_t thread = MACH_PORT_NULL;
+ int exception_type = 0;
+ int exception_code = 0;
+ if (receive.header.msgh_id == kWriteDumpWithExceptionMessage) {
+ thread = receive.thread.name;
+ exception_type = EXC_BREAKPOINT;
+#if defined (__i386__) || defined(__x86_64__)
+ exception_code = EXC_I386_BPT;
+#elif defined (__ppc__) || defined (__ppc64__)
+ exception_code = EXC_PPC_BREAKPOINT;
+#elif defined (__arm__)
+ exception_code = EXC_ARM_BREAKPOINT;
+#else
+#error architecture not supported
+#endif
+ }
+
+ // Write out the dump and save the result for later retrieval
+ self->last_minidump_write_result_ =
+ self->WriteMinidumpWithException(exception_type, exception_code,
+ 0, thread,
+ false, false);
+
+#if USE_PROTECTED_ALLOCATIONS
+ if (gBreakpadAllocator)
+ gBreakpadAllocator->Protect();
+#endif
+
+ self->ResumeThreads();
+
+ if (self->use_minidump_write_mutex_)
+ pthread_mutex_unlock(&self->minidump_write_mutex_);
+ } else {
+ // When forking a child process with the exception handler installed,
+ // if the child crashes, it will send the exception back to the parent
+ // process. The check for task == self_task() ensures that only
+ // exceptions that occur in the parent process are caught and
+ // processed. If the exception was not caused by this task, we
+ // still need to call into the exception server and have it return
+ // KERN_FAILURE (see catch_exception_raise) in order for the kernel
+ // to move onto the host exception handler for the child task
+ if (receive.task.name == mach_task_self()) {
+ self->SuspendThreads();
+
+#if USE_PROTECTED_ALLOCATIONS
+ if (gBreakpadAllocator)
+ gBreakpadAllocator->Unprotect();
+#endif
+
+ int subcode = 0;
+ if (receive.exception == EXC_BAD_ACCESS && receive.code_count > 1)
+ subcode = receive.code[1];
+
+ // Generate the minidump with the exception data.
+ self->WriteMinidumpWithException(receive.exception, receive.code[0],
+ subcode, receive.thread.name, true,
+ false);
+
+#if USE_PROTECTED_ALLOCATIONS
+ // This may have become protected again within
+ // WriteMinidumpWithException, but it needs to be unprotected for
+ // UninstallHandler.
+ if (gBreakpadAllocator)
+ gBreakpadAllocator->Unprotect();
+#endif
+
+ self->UninstallHandler(true);
+
+#if USE_PROTECTED_ALLOCATIONS
+ if (gBreakpadAllocator)
+ gBreakpadAllocator->Protect();
+#endif
+ }
+ // Pass along the exception to the server, which will setup the
+ // message and call catch_exception_raise() and put the return
+ // code into the reply.
+ ExceptionReplyMessage reply;
+ if (!breakpad_exc_server(&receive.header, &reply.header))
+ exit(1);
+
+ // Send a reply and exit
+ mach_msg(&(reply.header), MACH_SEND_MSG,
+ reply.header.msgh_size, 0, MACH_PORT_NULL,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+ }
+ }
+ }
+
+ return NULL;
+}
+
+// static
+void ExceptionHandler::SignalHandler(int sig, siginfo_t* info, void* uc) {
+#if USE_PROTECTED_ALLOCATIONS
+ if (gBreakpadAllocator)
+ gBreakpadAllocator->Unprotect();
+#endif
+ gProtectedData.handler->WriteMinidumpWithException(
+ EXC_SOFTWARE,
+ MD_EXCEPTION_CODE_MAC_ABORT,
+ 0,
+ mach_thread_self(),
+ true,
+ true);
+#if USE_PROTECTED_ALLOCATIONS
+ if (gBreakpadAllocator)
+ gBreakpadAllocator->Protect();
+#endif
+}
+
+bool ExceptionHandler::InstallHandler() {
+ // If a handler is already installed, something is really wrong.
+ if (gProtectedData.handler != NULL) {
+ return false;
+ }
+#if TARGET_OS_IPHONE
+ if (!IsOutOfProcess()) {
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sigemptyset(&sa.sa_mask);
+ sigaddset(&sa.sa_mask, SIGABRT);
+ sa.sa_sigaction = ExceptionHandler::SignalHandler;
+ sa.sa_flags = SA_SIGINFO;
+
+ scoped_ptr<struct sigaction> old(new struct sigaction);
+ if (sigaction(SIGABRT, &sa, old.get()) == -1) {
+ return false;
+ }
+ old_handler_.swap(old);
+ gProtectedData.handler = this;
+#if USE_PROTECTED_ALLOCATIONS
+ assert(((size_t)(gProtectedData.protected_buffer) & PAGE_MASK) == 0);
+ mprotect(gProtectedData.protected_buffer, PAGE_SIZE, PROT_READ);
+#endif
+ }
+#endif
+
+ try {
+#if USE_PROTECTED_ALLOCATIONS
+ previous_ = new (gBreakpadAllocator->Allocate(sizeof(ExceptionParameters)) )
+ ExceptionParameters();
+#else
+ previous_ = new ExceptionParameters();
+#endif
+
+ }
+  catch (const std::bad_alloc &) {
+ return false;
+ }
+
+ // Save the current exception ports so that we can forward to them
+ previous_->count = EXC_TYPES_COUNT;
+ mach_port_t current_task = mach_task_self();
+ kern_return_t result = task_get_exception_ports(current_task,
+ s_exception_mask,
+ previous_->masks,
+ &previous_->count,
+ previous_->ports,
+ previous_->behaviors,
+ previous_->flavors);
+
+ // Setup the exception ports on this task
+ if (result == KERN_SUCCESS)
+ result = task_set_exception_ports(current_task, s_exception_mask,
+ handler_port_, EXCEPTION_DEFAULT,
+ THREAD_STATE_NONE);
+
+ installed_exception_handler_ = (result == KERN_SUCCESS);
+
+ return installed_exception_handler_;
+}
+
+bool ExceptionHandler::UninstallHandler(bool in_exception) {
+ kern_return_t result = KERN_SUCCESS;
+
+ if (old_handler_.get()) {
+ sigaction(SIGABRT, old_handler_.get(), NULL);
+#if USE_PROTECTED_ALLOCATIONS
+ mprotect(gProtectedData.protected_buffer, PAGE_SIZE,
+ PROT_READ | PROT_WRITE);
+#endif
+ old_handler_.reset();
+ gProtectedData.handler = NULL;
+ }
+
+ if (installed_exception_handler_) {
+ mach_port_t current_task = mach_task_self();
+
+ // Restore the previous ports
+ for (unsigned int i = 0; i < previous_->count; ++i) {
+ result = task_set_exception_ports(current_task, previous_->masks[i],
+ previous_->ports[i],
+ previous_->behaviors[i],
+ previous_->flavors[i]);
+ if (result != KERN_SUCCESS)
+ return false;
+ }
+
+ // this delete should NOT happen if an exception just occurred!
+ if (!in_exception) {
+#if USE_PROTECTED_ALLOCATIONS
+ previous_->~ExceptionParameters();
+#else
+ delete previous_;
+#endif
+ }
+
+ previous_ = NULL;
+ installed_exception_handler_ = false;
+ }
+
+ return result == KERN_SUCCESS;
+}
+
+bool ExceptionHandler::Setup(bool install_handler) {
+ if (pthread_mutex_init(&minidump_write_mutex_, NULL))
+ return false;
+
+ // Create a receive right
+ mach_port_t current_task = mach_task_self();
+ kern_return_t result = mach_port_allocate(current_task,
+ MACH_PORT_RIGHT_RECEIVE,
+ &handler_port_);
+ // Add send right
+ if (result == KERN_SUCCESS)
+ result = mach_port_insert_right(current_task, handler_port_, handler_port_,
+ MACH_MSG_TYPE_MAKE_SEND);
+
+ if (install_handler && result == KERN_SUCCESS)
+ if (!InstallHandler())
+ return false;
+
+ if (result == KERN_SUCCESS) {
+ // Install the handler in its own thread, detached as we won't be joining.
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+ int thread_create_result = pthread_create(&handler_thread_, &attr,
+ &WaitForMessage, this);
+ pthread_attr_destroy(&attr);
+ result = thread_create_result ? KERN_FAILURE : KERN_SUCCESS;
+ }
+
+  return result == KERN_SUCCESS;
+}
+
+bool ExceptionHandler::Teardown() {
+ kern_return_t result = KERN_SUCCESS;
+ is_in_teardown_ = true;
+
+ if (!UninstallHandler(false))
+ return false;
+
+ // Send an empty message so that the handler_thread exits
+ if (SendMessageToHandlerThread(kShutdownMessage)) {
+ mach_port_t current_task = mach_task_self();
+ result = mach_port_deallocate(current_task, handler_port_);
+ if (result != KERN_SUCCESS)
+ return false;
+ } else {
+ return false;
+ }
+
+ handler_thread_ = NULL;
+ handler_port_ = MACH_PORT_NULL;
+ pthread_mutex_destroy(&minidump_write_mutex_);
+
+ return result == KERN_SUCCESS;
+}
+
+bool ExceptionHandler::SendMessageToHandlerThread(
+ HandlerThreadMessage message_id) {
+ ExceptionMessage msg;
+ memset(&msg, 0, sizeof(msg));
+ msg.header.msgh_id = message_id;
+ if (message_id == kWriteDumpMessage ||
+ message_id == kWriteDumpWithExceptionMessage) {
+ // Include this thread's port.
+ msg.thread.name = mach_thread_self();
+ msg.thread.disposition = MACH_MSG_TYPE_PORT_SEND;
+ msg.thread.type = MACH_MSG_PORT_DESCRIPTOR;
+ }
+ msg.header.msgh_size = sizeof(msg) - sizeof(msg.padding);
+ msg.header.msgh_remote_port = handler_port_;
+ msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE);
+ kern_return_t result = mach_msg(&(msg.header),
+ MACH_SEND_MSG | MACH_SEND_TIMEOUT,
+ msg.header.msgh_size, 0, 0,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+
+ return result == KERN_SUCCESS;
+}
+
+void ExceptionHandler::UpdateNextID() {
+ next_minidump_path_ =
+ (MinidumpGenerator::UniqueNameInDirectory(dump_path_, &next_minidump_id_));
+
+ next_minidump_path_c_ = next_minidump_path_.c_str();
+ next_minidump_id_c_ = next_minidump_id_.c_str();
+}
+
+bool ExceptionHandler::SuspendThreads() {
+ thread_act_port_array_t threads_for_task;
+ mach_msg_type_number_t thread_count;
+
+ if (task_threads(mach_task_self(), &threads_for_task, &thread_count))
+ return false;
+
+ // suspend all of the threads except for this one
+ for (unsigned int i = 0; i < thread_count; ++i) {
+ if (threads_for_task[i] != mach_thread_self()) {
+ if (thread_suspend(threads_for_task[i]))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool ExceptionHandler::ResumeThreads() {
+ thread_act_port_array_t threads_for_task;
+ mach_msg_type_number_t thread_count;
+
+ if (task_threads(mach_task_self(), &threads_for_task, &thread_count))
+ return false;
+
+ // resume all of the threads except for this one
+ for (unsigned int i = 0; i < thread_count; ++i) {
+ if (threads_for_task[i] != mach_thread_self()) {
+ if (thread_resume(threads_for_task[i]))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // namespace google_breakpad
diff --git a/3rdParty/Breakpad/src/client/mac/handler/exception_handler.h b/3rdParty/Breakpad/src/client/mac/handler/exception_handler.h
new file mode 100644
index 0000000..ec09134
--- /dev/null
+++ b/3rdParty/Breakpad/src/client/mac/handler/exception_handler.h
@@ -0,0 +1,277 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// exception_handler.h: MacOS exception handler
+// This class can install a Mach exception port handler to trap most common
+// programming errors. If an exception occurs, a minidump file will be
+// generated which contains detailed information about the process and the
+// exception.
+
+#ifndef CLIENT_MAC_HANDLER_EXCEPTION_HANDLER_H__
+#define CLIENT_MAC_HANDLER_EXCEPTION_HANDLER_H__
+
+#include <mach/mach.h>
+#include <TargetConditionals.h>
+
+#include <string>
+
+#include "processor/scoped_ptr.h"
+
+#if !TARGET_OS_IPHONE
+#include "client/mac/crash_generation/crash_generation_client.h"
+#endif
+
+namespace google_breakpad {
+
+using std::string;
+
+struct ExceptionParameters;
+
+enum HandlerThreadMessage {
+ // Message ID telling the handler thread to write a dump.
+ kWriteDumpMessage = 0,
+ // Message ID telling the handler thread to write a dump and include
+ // an exception stream.
+ kWriteDumpWithExceptionMessage = 1,
+ // Message ID telling the handler thread to quit.
+ kShutdownMessage = 2
+};
+
+class ExceptionHandler {
+ public:
+ // A callback function to run before Breakpad performs any substantial
+ // processing of an exception. A FilterCallback is called before writing
+ // a minidump. context is the parameter supplied by the user as
+ // callback_context when the handler was created.
+ //
+ // If a FilterCallback returns true, Breakpad will continue processing,
+ // attempting to write a minidump. If a FilterCallback returns false, Breakpad
+ // will immediately report the exception as unhandled without writing a
+ // minidump, allowing another handler the opportunity to handle it.
+ typedef bool (*FilterCallback)(void *context);
+
+ // A callback function to run after the minidump has been written.
+ // |minidump_id| is a unique id for the dump, so the minidump
+ // file is <dump_dir>/<minidump_id>.dmp.
+ // |context| is the value passed into the constructor.
+ // |succeeded| indicates whether a minidump file was successfully written.
+ // Return true if the exception was fully handled and breakpad should exit.
+ // Return false to allow any other exception handlers to process the
+ // exception.
+ typedef bool (*MinidumpCallback)(const char *dump_dir,
+ const char *minidump_id,
+ void *context, bool succeeded);
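+  // As a rough sketch (DumpCallback and the log format are hypothetical,
+  // and <cstdio> is assumed for fprintf), a MinidumpCallback could look
+  // like:
+  //
+  //   static bool DumpCallback(const char *dump_dir, const char *minidump_id,
+  //                            void *context, bool succeeded) {
+  //     fprintf(stderr, "Minidump %s/%s.dmp %s\n", dump_dir, minidump_id,
+  //             succeeded ? "written" : "failed");
+  //     return succeeded;
+  //   }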
+
+ // A callback function which will be called directly if an exception occurs.
+ // This bypasses the minidump file writing and simply gives the client
+ // the exception information.
+ typedef bool (*DirectCallback)( void *context,
+ int exception_type,
+ int exception_code,
+ int exception_subcode,
+ mach_port_t thread_name);
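+  // For illustration only (HandleException is a hypothetical name), a
+  // DirectCallback might hand the raw exception data to custom reporting
+  // code and return true, which lets Breakpad exit the process when a real
+  // exception is being handled (see WriteMinidumpWithException):
+  //
+  //   static bool HandleException(void *context, int exception_type,
+  //                               int exception_code, int exception_subcode,
+  //                               mach_port_t thread_name) {
+  //     // Record or forward the exception information here.
+  //     return true;
+  //   }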
+
+ // Creates a new ExceptionHandler instance to handle writing minidumps.
+ // Minidump files will be written to dump_path, and the optional callback
+ // is called after writing the dump file, as described above.
+ // If install_handler is true, then a minidump will be written whenever
+ // an unhandled exception occurs. If it is false, minidumps will only
+ // be written when WriteMinidump is called.
+  // If port_name is non-NULL, attempt to perform out-of-process dump generation.
+ // If port_name is NULL, in-process dump generation will be used.
+ ExceptionHandler(const string &dump_path,
+ FilterCallback filter, MinidumpCallback callback,
+ void *callback_context, bool install_handler,
+ const char *port_name);
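+  // For example, an in-process handler that writes a dump to /tmp for any
+  // unhandled exception could be created as follows (DumpCallback being a
+  // hypothetical MinidumpCallback such as the sketch above):
+  //
+  //   google_breakpad::ExceptionHandler handler("/tmp", NULL, DumpCallback,
+  //                                             NULL, true, NULL);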
+
+ // A special constructor if we want to bypass minidump writing and
+ // simply get a callback with the exception information.
+ ExceptionHandler(DirectCallback callback,
+ void *callback_context,
+ bool install_handler);
+
+ ~ExceptionHandler();
+
+ // Get and set the minidump path.
+ string dump_path() const { return dump_path_; }
+ void set_dump_path(const string &dump_path) {
+ dump_path_ = dump_path;
+ dump_path_c_ = dump_path_.c_str();
+ UpdateNextID(); // Necessary to put dump_path_ in next_minidump_path_.
+ }
+
+ // Writes a minidump immediately. This can be used to capture the
+ // execution state independently of a crash. Returns true on success.
+ bool WriteMinidump() {
+ return WriteMinidump(false);
+ }
+
+ bool WriteMinidump(bool write_exception_stream);
+
+ // Convenience form of WriteMinidump which does not require an
+ // ExceptionHandler instance.
+ static bool WriteMinidump(const string &dump_path, MinidumpCallback callback,
+ void *callback_context) {
+ return WriteMinidump(dump_path, false, callback, callback_context);
+ }
+
+ static bool WriteMinidump(const string &dump_path,
+ bool write_exception_stream,
+ MinidumpCallback callback,
+ void *callback_context);
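+  // e.g. ExceptionHandler::WriteMinidump("/tmp", false, DumpCallback, NULL)
+  // writes a single dump on demand without leaving a handler installed
+  // (pass NULL for the callback if none is needed).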
+
+ // Write a minidump of child immediately. This can be used to capture
+ // the execution state of a child process independently of a crash.
+ static bool WriteMinidumpForChild(mach_port_t child,
+ mach_port_t child_blamed_thread,
+ const std::string &dump_path,
+ MinidumpCallback callback,
+ void *callback_context);
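+  // e.g. ExceptionHandler::WriteMinidumpForChild(child, child_thread, "/tmp",
+  //                                              NULL, NULL);
+  // (child and child_thread are placeholder ports). The child task is
+  // suspended while the dump is written, and the blamed thread is recorded
+  // as the faulting thread in the dump's exception stream.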
+
+ // Returns whether out-of-process dump generation is used or not.
+ bool IsOutOfProcess() const {
+#if TARGET_OS_IPHONE
+ return false;
+#else
+ return crash_generation_client_.get() != NULL;
+#endif
+ }
+
+ private:
+ // Install the mach exception handler
+ bool InstallHandler();
+
+ // Uninstall the mach exception handler (if any)
+ bool UninstallHandler(bool in_exception);
+
+ // Setup the handler thread, and if |install_handler| is true, install the
+ // mach exception port handler
+ bool Setup(bool install_handler);
+
+ // Uninstall the mach exception handler (if any) and terminate the helper
+ // thread
+ bool Teardown();
+
+ // Send a mach message to the exception handler. Return true on
+ // success, false otherwise.
+ bool SendMessageToHandlerThread(HandlerThreadMessage message_id);
+
+ // All minidump writing goes through this one routine
+ bool WriteMinidumpWithException(int exception_type,
+ int exception_code,
+ int exception_subcode,
+ mach_port_t thread_name,
+ bool exit_after_write,
+ bool report_current_thread);
+
+  // When installed, this static function will be called from a newly created
+ // pthread with |this| as the argument
+ static void *WaitForMessage(void *exception_handler_class);
+
+ // Signal handler for SIGABRT.
+ static void SignalHandler(int sig, siginfo_t* info, void* uc);
+
+ // disallow copy ctor and operator=
+ explicit ExceptionHandler(const ExceptionHandler &);
+ void operator=(const ExceptionHandler &);
+
+ // Generates a new ID and stores it in next_minidump_id_, and stores the
+ // path of the next minidump to be written in next_minidump_path_.
+ void UpdateNextID();
+
+ // These functions will suspend/resume all threads except for the
+ // reporting thread
+ bool SuspendThreads();
+ bool ResumeThreads();
+
+ // The destination directory for the minidump
+ string dump_path_;
+
+ // The basename of the next minidump w/o extension
+ string next_minidump_id_;
+
+ // The full path to the next minidump to be written, including extension
+ string next_minidump_path_;
+
+ // Pointers to the UTF-8 versions of above
+ const char *dump_path_c_;
+ const char *next_minidump_id_c_;
+ const char *next_minidump_path_c_;
+
+ // The callback function and pointer to be passed back after the minidump
+ // has been written
+ FilterCallback filter_;
+ MinidumpCallback callback_;
+ void *callback_context_;
+
+ // The callback function to be passed back when we don't want a minidump
+ // file to be written
+ DirectCallback directCallback_;
+
+ // The thread that is created for the handler
+ pthread_t handler_thread_;
+
+ // The port that is waiting on an exception message to be sent, if the
+ // handler is installed
+ mach_port_t handler_port_;
+
+ // These variables save the previous exception handler's data so that it
+ // can be re-installed when this handler is uninstalled
+ ExceptionParameters *previous_;
+
+ // True, if we've installed the exception handler
+ bool installed_exception_handler_;
+
+ // True, if we're in the process of uninstalling the exception handler and
+ // the thread.
+ bool is_in_teardown_;
+
+ // Save the last result of the last minidump
+ bool last_minidump_write_result_;
+
+ // A mutex for use when writing out a minidump that was requested on a
+ // thread other than the exception handler.
+ pthread_mutex_t minidump_write_mutex_;
+
+  // True, if we're using the mutex to indicate when minidump writing occurs
+ bool use_minidump_write_mutex_;
+
+ // Old signal handler for SIGABRT. Used to be able to restore it when
+ // uninstalling.
+ scoped_ptr<struct sigaction> old_handler_;
+
+#if !TARGET_OS_IPHONE
+ // Client for out-of-process dump generation.
+ scoped_ptr<CrashGenerationClient> crash_generation_client_;
+#endif
+};
+
+} // namespace google_breakpad
+
+#endif // CLIENT_MAC_HANDLER_EXCEPTION_HANDLER_H__
diff --git a/3rdParty/Breakpad/src/client/mac/handler/mach_vm_compat.h b/3rdParty/Breakpad/src/client/mac/handler/mach_vm_compat.h
new file mode 100644
index 0000000..e0459be
--- /dev/null
+++ b/3rdParty/Breakpad/src/client/mac/handler/mach_vm_compat.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2011, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef CLIENT_MAC_GENERATOR_MACH_VM_COMPAT_H_
+#define CLIENT_MAC_GENERATOR_MACH_VM_COMPAT_H_
+
+#include <TargetConditionals.h>
+
+// On iOS 5, mach/mach_vm.h is not supported anymore. As the architecture is 32
+// bits, we can use the simple vm_ functions instead of the mach_vm_ ones.
+#if TARGET_OS_IPHONE
+#include <mach/vm_map.h>
+#define mach_vm_address_t vm_address_t
+#define mach_vm_deallocate vm_deallocate
+#define mach_vm_read vm_read
+#define mach_vm_region vm_region
+#define mach_vm_region_recurse vm_region_recurse
+#define mach_vm_size_t vm_size_t
+#else
+#include <mach/mach_vm.h>
+#endif // TARGET_OS_IPHONE
+
+#endif // CLIENT_MAC_GENERATOR_MACH_VM_COMPAT_H_
diff --git a/3rdParty/Breakpad/src/client/mac/handler/minidump_generator.cc b/3rdParty/Breakpad/src/client/mac/handler/minidump_generator.cc
new file mode 100644
index 0000000..b1d429c
--- /dev/null
+++ b/3rdParty/Breakpad/src/client/mac/handler/minidump_generator.cc
@@ -0,0 +1,1432 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <algorithm>
+#include <cstdio>
+
+#include <mach/host_info.h>
+#include <mach/vm_statistics.h>
+#include <mach-o/dyld.h>
+#include <mach-o/loader.h>
+#include <sys/sysctl.h>
+#include <sys/resource.h>
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "client/mac/handler/minidump_generator.h"
+
+#ifdef HAS_ARM_SUPPORT
+#include <mach/arm/thread_status.h>
+#endif
+#ifdef HAS_PPC_SUPPORT
+#include <mach/ppc/thread_status.h>
+#endif
+#ifdef HAS_X86_SUPPORT
+#include <mach/i386/thread_status.h>
+#endif
+
+#include "client/minidump_file_writer-inl.h"
+#include "common/mac/file_id.h"
+#include "common/mac/macho_id.h"
+#include "common/mac/string_utilities.h"
+
+using MacStringUtils::ConvertToString;
+using MacStringUtils::IntegerValueAtIndex;
+
+namespace google_breakpad {
+
+#if __LP64__
+#define LC_SEGMENT_ARCH LC_SEGMENT_64
+#else
+#define LC_SEGMENT_ARCH LC_SEGMENT
+#endif
+
+// constructor when generating from within the crashed process
+MinidumpGenerator::MinidumpGenerator()
+ : writer_(),
+ exception_type_(0),
+ exception_code_(0),
+ exception_subcode_(0),
+ exception_thread_(0),
+ crashing_task_(mach_task_self()),
+ handler_thread_(mach_thread_self()),
+ cpu_type_(DynamicImages::GetNativeCPUType()),
+ dynamic_images_(NULL),
+ memory_blocks_(&allocator_) {
+ GatherSystemInformation();
+}
+
+// constructor when generating from a different process than the
+// crashed process
+MinidumpGenerator::MinidumpGenerator(mach_port_t crashing_task,
+ mach_port_t handler_thread)
+ : writer_(),
+ exception_type_(0),
+ exception_code_(0),
+ exception_subcode_(0),
+ exception_thread_(0),
+ crashing_task_(crashing_task),
+ handler_thread_(handler_thread),
+ cpu_type_(DynamicImages::GetNativeCPUType()),
+ dynamic_images_(NULL),
+ memory_blocks_(&allocator_) {
+ if (crashing_task != mach_task_self()) {
+ dynamic_images_ = new DynamicImages(crashing_task_);
+ cpu_type_ = dynamic_images_->GetCPUType();
+ } else {
+ dynamic_images_ = NULL;
+ cpu_type_ = DynamicImages::GetNativeCPUType();
+ }
+
+ GatherSystemInformation();
+}
+
+MinidumpGenerator::~MinidumpGenerator() {
+ delete dynamic_images_;
+}
+
+char MinidumpGenerator::build_string_[16];
+int MinidumpGenerator::os_major_version_ = 0;
+int MinidumpGenerator::os_minor_version_ = 0;
+int MinidumpGenerator::os_build_number_ = 0;
+
+// static
+void MinidumpGenerator::GatherSystemInformation() {
+ // If this is non-zero, then we've already gathered the information
+ if (os_major_version_)
+ return;
+
+ // This code extracts the version and build information from the OS
+ CFStringRef vers_path =
+ CFSTR("/System/Library/CoreServices/SystemVersion.plist");
+ CFURLRef sys_vers =
+ CFURLCreateWithFileSystemPath(NULL,
+ vers_path,
+ kCFURLPOSIXPathStyle,
+ false);
+ CFDataRef data;
+ SInt32 error;
+ CFURLCreateDataAndPropertiesFromResource(NULL, sys_vers, &data, NULL, NULL,
+ &error);
+
+ if (!data) {
+ CFRelease(sys_vers);
+ return;
+ }
+
+ CFDictionaryRef list = static_cast<CFDictionaryRef>
+ (CFPropertyListCreateFromXMLData(NULL, data, kCFPropertyListImmutable,
+ NULL));
+ if (!list) {
+ CFRelease(sys_vers);
+ CFRelease(data);
+ return;
+ }
+
+ CFStringRef build_version = static_cast<CFStringRef>
+ (CFDictionaryGetValue(list, CFSTR("ProductBuildVersion")));
+ CFStringRef product_version = static_cast<CFStringRef>
+ (CFDictionaryGetValue(list, CFSTR("ProductVersion")));
+ string build_str = ConvertToString(build_version);
+ string product_str = ConvertToString(product_version);
+
+ CFRelease(list);
+ CFRelease(sys_vers);
+ CFRelease(data);
+
+ strlcpy(build_string_, build_str.c_str(), sizeof(build_string_));
+
+ // Parse the string that looks like "10.4.8"
+ os_major_version_ = IntegerValueAtIndex(product_str, 0);
+ os_minor_version_ = IntegerValueAtIndex(product_str, 1);
+ os_build_number_ = IntegerValueAtIndex(product_str, 2);
+}
+
+string MinidumpGenerator::UniqueNameInDirectory(const string &dir,
+ string *unique_name) {
+ CFUUIDRef uuid = CFUUIDCreate(NULL);
+ CFStringRef uuid_cfstr = CFUUIDCreateString(NULL, uuid);
+ CFRelease(uuid);
+ string file_name(ConvertToString(uuid_cfstr));
+ CFRelease(uuid_cfstr);
+ string path(dir);
+
+ // Ensure that the directory (if non-empty) has a trailing slash so that
+ // we can append the file name and have a valid pathname.
+ if (!dir.empty()) {
+ if (dir.at(dir.size() - 1) != '/')
+ path.append(1, '/');
+ }
+
+ path.append(file_name);
+ path.append(".dmp");
+
+ if (unique_name)
+ *unique_name = file_name;
+
+ return path;
+}
+
+bool MinidumpGenerator::Write(const char *path) {
+ WriteStreamFN writers[] = {
+ &MinidumpGenerator::WriteThreadListStream,
+ &MinidumpGenerator::WriteMemoryListStream,
+ &MinidumpGenerator::WriteSystemInfoStream,
+ &MinidumpGenerator::WriteModuleListStream,
+ &MinidumpGenerator::WriteMiscInfoStream,
+ &MinidumpGenerator::WriteBreakpadInfoStream,
+ // Exception stream needs to be the last entry in this array as it may
+ // be omitted in the case where the minidump is written without an
+ // exception.
+ &MinidumpGenerator::WriteExceptionStream,
+ };
+ bool result = false;
+
+ // If opening was successful, create the header, directory, and call each
+ // writer. The destructor for the TypedMDRVAs will cause the data to be
+ // flushed. The destructor for the MinidumpFileWriter will close the file.
+ if (writer_.Open(path)) {
+ TypedMDRVA<MDRawHeader> header(&writer_);
+ TypedMDRVA<MDRawDirectory> dir(&writer_);
+
+ if (!header.Allocate())
+ return false;
+
+ int writer_count = static_cast<int>(sizeof(writers) / sizeof(writers[0]));
+
+ // If we don't have exception information, don't write out the
+ // exception stream
+ if (!exception_thread_ && !exception_type_)
+ --writer_count;
+
+ // Add space for all writers
+ if (!dir.AllocateArray(writer_count))
+ return false;
+
+ MDRawHeader *header_ptr = header.get();
+ header_ptr->signature = MD_HEADER_SIGNATURE;
+ header_ptr->version = MD_HEADER_VERSION;
+ time(reinterpret_cast<time_t *>(&(header_ptr->time_date_stamp)));
+ header_ptr->stream_count = writer_count;
+ header_ptr->stream_directory_rva = dir.position();
+
+ MDRawDirectory local_dir;
+ result = true;
+ for (int i = 0; (result) && (i < writer_count); ++i) {
+ result = (this->*writers[i])(&local_dir);
+
+ if (result)
+ dir.CopyIndex(i, &local_dir);
+ }
+ }
+ return result;
+}
+
+size_t MinidumpGenerator::CalculateStackSize(mach_vm_address_t start_addr) {
+ mach_vm_address_t stack_region_base = start_addr;
+ mach_vm_size_t stack_region_size;
+ natural_t nesting_level = 0;
+ vm_region_submap_info_64 submap_info;
+ mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+ vm_region_recurse_info_t region_info;
+ region_info = reinterpret_cast<vm_region_recurse_info_t>(&submap_info);
+
+ if (start_addr == 0) {
+ return 0;
+ }
+
+ kern_return_t result =
+ mach_vm_region_recurse(crashing_task_, &stack_region_base,
+ &stack_region_size, &nesting_level,
+ region_info, &info_count);
+
+ if (result != KERN_SUCCESS || start_addr < stack_region_base) {
+ // Failure or stack corruption, since mach_vm_region had to go
+ // higher in the process address space to find a valid region.
+ return 0;
+ }
+
+ unsigned int tag = submap_info.user_tag;
+
+ // If the user tag is VM_MEMORY_STACK, look for more readable regions with
+ // the same tag placed immediately above the computed stack region. Under
+ // some circumstances, the stack for thread 0 winds up broken up into
+ // multiple distinct abutting regions. This can happen for several reasons,
+ // including user code that calls setrlimit(RLIMIT_STACK, ...) or changes
+ // the access on stack pages by calling mprotect.
+ if (tag == VM_MEMORY_STACK) {
+ while (true) {
+ mach_vm_address_t next_region_base = stack_region_base +
+ stack_region_size;
+ mach_vm_address_t proposed_next_region_base = next_region_base;
+ mach_vm_size_t next_region_size;
+ nesting_level = 0;
+ mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
+ result = mach_vm_region_recurse(crashing_task_, &next_region_base,
+ &next_region_size, &nesting_level,
+ region_info, &info_count);
+ if (result != KERN_SUCCESS ||
+ next_region_base != proposed_next_region_base ||
+ submap_info.user_tag != tag ||
+ (submap_info.protection & VM_PROT_READ) == 0) {
+ break;
+ }
+
+ stack_region_size += next_region_size;
+ }
+ }
+
+ return stack_region_base + stack_region_size - start_addr;
+}
+
+bool MinidumpGenerator::WriteStackFromStartAddress(
+ mach_vm_address_t start_addr,
+ MDMemoryDescriptor *stack_location) {
+ UntypedMDRVA memory(&writer_);
+
+ bool result = false;
+ size_t size = CalculateStackSize(start_addr);
+
+ if (size == 0) {
+ // In some situations the stack address for the thread can come back 0.
+ // In these cases we skip over the threads in question and stuff the
+ // stack with a clearly borked value.
+ start_addr = 0xDEADBEEF;
+ size = 16;
+ if (!memory.Allocate(size))
+ return false;
+
+ unsigned long long dummy_stack[2]; // Fill dummy stack with 16 bytes of
+ // junk.
+ dummy_stack[0] = 0xDEADBEEF;
+ dummy_stack[1] = 0xDEADBEEF;
+
+ result = memory.Copy(dummy_stack, size);
+ } else {
+
+ if (!memory.Allocate(size))
+ return false;
+
+ if (dynamic_images_) {
+ vector<uint8_t> stack_memory;
+ if (ReadTaskMemory(crashing_task_,
+ start_addr,
+ size,
+ stack_memory) != KERN_SUCCESS) {
+ return false;
+ }
+
+ result = memory.Copy(&stack_memory[0], size);
+ } else {
+ result = memory.Copy(reinterpret_cast<const void *>(start_addr), size);
+ }
+ }
+
+ stack_location->start_of_memory_range = start_addr;
+ stack_location->memory = memory.location();
+
+ return result;
+}
+
+bool MinidumpGenerator::WriteStack(breakpad_thread_state_data_t state,
+ MDMemoryDescriptor *stack_location) {
+ switch (cpu_type_) {
+#ifdef HAS_ARM_SUPPORT
+ case CPU_TYPE_ARM:
+ return WriteStackARM(state, stack_location);
+#endif
+#ifdef HAS_PPC_SUPPORT
+ case CPU_TYPE_POWERPC:
+ return WriteStackPPC(state, stack_location);
+ case CPU_TYPE_POWERPC64:
+ return WriteStackPPC64(state, stack_location);
+#endif
+#ifdef HAS_X86_SUPPORT
+ case CPU_TYPE_I386:
+ return WriteStackX86(state, stack_location);
+ case CPU_TYPE_X86_64:
+ return WriteStackX86_64(state, stack_location);
+#endif
+ default:
+ return false;
+ }
+}
+
+bool MinidumpGenerator::WriteContext(breakpad_thread_state_data_t state,
+ MDLocationDescriptor *register_location) {
+ switch (cpu_type_) {
+#ifdef HAS_ARM_SUPPORT
+ case CPU_TYPE_ARM:
+ return WriteContextARM(state, register_location);
+#endif
+#ifdef HAS_PPC_SUPPORT
+ case CPU_TYPE_POWERPC:
+ return WriteContextPPC(state, register_location);
+ case CPU_TYPE_POWERPC64:
+ return WriteContextPPC64(state, register_location);
+#endif
+#ifdef HAS_X86_SUPPORT
+ case CPU_TYPE_I386:
+ return WriteContextX86(state, register_location);
+ case CPU_TYPE_X86_64:
+ return WriteContextX86_64(state, register_location);
+#endif
+ default:
+ return false;
+ }
+}
+
+u_int64_t MinidumpGenerator::CurrentPCForStack(
+ breakpad_thread_state_data_t state) {
+ switch (cpu_type_) {
+#ifdef HAS_ARM_SUPPORT
+ case CPU_TYPE_ARM:
+ return CurrentPCForStackARM(state);
+#endif
+#ifdef HAS_PPC_SUPPORT
+ case CPU_TYPE_POWERPC:
+ return CurrentPCForStackPPC(state);
+ case CPU_TYPE_POWERPC64:
+ return CurrentPCForStackPPC64(state);
+#endif
+#ifdef HAS_X86_SUPPORT
+ case CPU_TYPE_I386:
+ return CurrentPCForStackX86(state);
+ case CPU_TYPE_X86_64:
+ return CurrentPCForStackX86_64(state);
+#endif
+ default:
+      assert(0 && "Unknown CPU type!");
+ return 0;
+ }
+}
+
+#ifdef HAS_ARM_SUPPORT
+bool MinidumpGenerator::WriteStackARM(breakpad_thread_state_data_t state,
+ MDMemoryDescriptor *stack_location) {
+ arm_thread_state_t *machine_state =
+ reinterpret_cast<arm_thread_state_t *>(state);
+ mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, sp);
+ return WriteStackFromStartAddress(start_addr, stack_location);
+}
+
+u_int64_t
+MinidumpGenerator::CurrentPCForStackARM(breakpad_thread_state_data_t state) {
+ arm_thread_state_t *machine_state =
+ reinterpret_cast<arm_thread_state_t *>(state);
+
+ return REGISTER_FROM_THREADSTATE(machine_state, pc);
+}
+
+bool MinidumpGenerator::WriteContextARM(breakpad_thread_state_data_t state,
+ MDLocationDescriptor *register_location)
+{
+ TypedMDRVA<MDRawContextARM> context(&writer_);
+ arm_thread_state_t *machine_state =
+ reinterpret_cast<arm_thread_state_t *>(state);
+
+ if (!context.Allocate())
+ return false;
+
+ *register_location = context.location();
+ MDRawContextARM *context_ptr = context.get();
+ context_ptr->context_flags = MD_CONTEXT_ARM_FULL;
+
+#define AddGPR(a) context_ptr->iregs[a] = REGISTER_FROM_THREADSTATE(machine_state, r[a])
+
+ context_ptr->iregs[13] = REGISTER_FROM_THREADSTATE(machine_state, sp);
+ context_ptr->iregs[14] = REGISTER_FROM_THREADSTATE(machine_state, lr);
+ context_ptr->iregs[15] = REGISTER_FROM_THREADSTATE(machine_state, pc);
+ context_ptr->cpsr = REGISTER_FROM_THREADSTATE(machine_state, cpsr);
+
+ AddGPR(0);
+ AddGPR(1);
+ AddGPR(2);
+ AddGPR(3);
+ AddGPR(4);
+ AddGPR(5);
+ AddGPR(6);
+ AddGPR(7);
+ AddGPR(8);
+ AddGPR(9);
+ AddGPR(10);
+ AddGPR(11);
+ AddGPR(12);
+#undef AddReg
+#undef AddGPR
+
+ return true;
+}
+#endif
+
+#ifdef HAS_PPC_SUPPORT
+bool MinidumpGenerator::WriteStackPPC(breakpad_thread_state_data_t state,
+ MDMemoryDescriptor *stack_location) {
+ ppc_thread_state_t *machine_state =
+ reinterpret_cast<ppc_thread_state_t *>(state);
+ mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, r1);
+ return WriteStackFromStartAddress(start_addr, stack_location);
+}
+
+bool MinidumpGenerator::WriteStackPPC64(breakpad_thread_state_data_t state,
+ MDMemoryDescriptor *stack_location) {
+ ppc_thread_state64_t *machine_state =
+ reinterpret_cast<ppc_thread_state64_t *>(state);
+ mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, r1);
+ return WriteStackFromStartAddress(start_addr, stack_location);
+}
+
+u_int64_t
+MinidumpGenerator::CurrentPCForStackPPC(breakpad_thread_state_data_t state) {
+ ppc_thread_state_t *machine_state =
+ reinterpret_cast<ppc_thread_state_t *>(state);
+
+ return REGISTER_FROM_THREADSTATE(machine_state, srr0);
+}
+
+u_int64_t
+MinidumpGenerator::CurrentPCForStackPPC64(breakpad_thread_state_data_t state) {
+ ppc_thread_state64_t *machine_state =
+ reinterpret_cast<ppc_thread_state64_t *>(state);
+
+ return REGISTER_FROM_THREADSTATE(machine_state, srr0);
+}
+
+bool MinidumpGenerator::WriteContextPPC(breakpad_thread_state_data_t state,
+ MDLocationDescriptor *register_location)
+{
+ TypedMDRVA<MDRawContextPPC> context(&writer_);
+ ppc_thread_state_t *machine_state =
+ reinterpret_cast<ppc_thread_state_t *>(state);
+
+ if (!context.Allocate())
+ return false;
+
+ *register_location = context.location();
+ MDRawContextPPC *context_ptr = context.get();
+ context_ptr->context_flags = MD_CONTEXT_PPC_BASE;
+
+#define AddReg(a) context_ptr->a = REGISTER_FROM_THREADSTATE(machine_state, a)
+#define AddGPR(a) context_ptr->gpr[a] = REGISTER_FROM_THREADSTATE(machine_state, r ## a)
+
+ AddReg(srr0);
+ AddReg(cr);
+ AddReg(xer);
+ AddReg(ctr);
+ AddReg(lr);
+ AddReg(vrsave);
+
+ AddGPR(0);
+ AddGPR(1);
+ AddGPR(2);
+ AddGPR(3);
+ AddGPR(4);
+ AddGPR(5);
+ AddGPR(6);
+ AddGPR(7);
+ AddGPR(8);
+ AddGPR(9);
+ AddGPR(10);
+ AddGPR(11);
+ AddGPR(12);
+ AddGPR(13);
+ AddGPR(14);
+ AddGPR(15);
+ AddGPR(16);
+ AddGPR(17);
+ AddGPR(18);
+ AddGPR(19);
+ AddGPR(20);
+ AddGPR(21);
+ AddGPR(22);
+ AddGPR(23);
+ AddGPR(24);
+ AddGPR(25);
+ AddGPR(26);
+ AddGPR(27);
+ AddGPR(28);
+ AddGPR(29);
+ AddGPR(30);
+ AddGPR(31);
+ AddReg(mq);
+#undef AddReg
+#undef AddGPR
+
+ return true;
+}
+
+bool MinidumpGenerator::WriteContextPPC64(
+ breakpad_thread_state_data_t state,
+ MDLocationDescriptor *register_location) {
+ TypedMDRVA<MDRawContextPPC64> context(&writer_);
+ ppc_thread_state64_t *machine_state =
+ reinterpret_cast<ppc_thread_state64_t *>(state);
+
+ if (!context.Allocate())
+ return false;
+
+ *register_location = context.location();
+ MDRawContextPPC64 *context_ptr = context.get();
+ context_ptr->context_flags = MD_CONTEXT_PPC_BASE;
+
+#define AddReg(a) context_ptr->a = REGISTER_FROM_THREADSTATE(machine_state, a)
+#define AddGPR(a) context_ptr->gpr[a] = REGISTER_FROM_THREADSTATE(machine_state, r ## a)
+
+ AddReg(srr0);
+ AddReg(cr);
+ AddReg(xer);
+ AddReg(ctr);
+ AddReg(lr);
+ AddReg(vrsave);
+
+ AddGPR(0);
+ AddGPR(1);
+ AddGPR(2);
+ AddGPR(3);
+ AddGPR(4);
+ AddGPR(5);
+ AddGPR(6);
+ AddGPR(7);
+ AddGPR(8);
+ AddGPR(9);
+ AddGPR(10);
+ AddGPR(11);
+ AddGPR(12);
+ AddGPR(13);
+ AddGPR(14);
+ AddGPR(15);
+ AddGPR(16);
+ AddGPR(17);
+ AddGPR(18);
+ AddGPR(19);
+ AddGPR(20);
+ AddGPR(21);
+ AddGPR(22);
+ AddGPR(23);
+ AddGPR(24);
+ AddGPR(25);
+ AddGPR(26);
+ AddGPR(27);
+ AddGPR(28);
+ AddGPR(29);
+ AddGPR(30);
+ AddGPR(31);
+#undef AddReg
+#undef AddGPR
+
+ return true;
+}
+
+#endif
+
+#ifdef HAS_X86_SUPPORT
+bool MinidumpGenerator::WriteStackX86(breakpad_thread_state_data_t state,
+ MDMemoryDescriptor *stack_location) {
+ i386_thread_state_t *machine_state =
+ reinterpret_cast<i386_thread_state_t *>(state);
+
+ mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, esp);
+ return WriteStackFromStartAddress(start_addr, stack_location);
+}
+
+bool MinidumpGenerator::WriteStackX86_64(breakpad_thread_state_data_t state,
+ MDMemoryDescriptor *stack_location) {
+ x86_thread_state64_t *machine_state =
+ reinterpret_cast<x86_thread_state64_t *>(state);
+
+ mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, rsp);
+ return WriteStackFromStartAddress(start_addr, stack_location);
+}
+
+u_int64_t
+MinidumpGenerator::CurrentPCForStackX86(breakpad_thread_state_data_t state) {
+ i386_thread_state_t *machine_state =
+ reinterpret_cast<i386_thread_state_t *>(state);
+
+ return REGISTER_FROM_THREADSTATE(machine_state, eip);
+}
+
+u_int64_t
+MinidumpGenerator::CurrentPCForStackX86_64(breakpad_thread_state_data_t state) {
+ x86_thread_state64_t *machine_state =
+ reinterpret_cast<x86_thread_state64_t *>(state);
+
+ return REGISTER_FROM_THREADSTATE(machine_state, rip);
+}
+
+bool MinidumpGenerator::WriteContextX86(breakpad_thread_state_data_t state,
+ MDLocationDescriptor *register_location)
+{
+ TypedMDRVA<MDRawContextX86> context(&writer_);
+ i386_thread_state_t *machine_state =
+ reinterpret_cast<i386_thread_state_t *>(state);
+
+ if (!context.Allocate())
+ return false;
+
+ *register_location = context.location();
+ MDRawContextX86 *context_ptr = context.get();
+
+#define AddReg(a) context_ptr->a = REGISTER_FROM_THREADSTATE(machine_state, a)
+
+ context_ptr->context_flags = MD_CONTEXT_X86;
+ AddReg(eax);
+ AddReg(ebx);
+ AddReg(ecx);
+ AddReg(edx);
+ AddReg(esi);
+ AddReg(edi);
+ AddReg(ebp);
+ AddReg(esp);
+
+ AddReg(cs);
+ AddReg(ds);
+ AddReg(ss);
+ AddReg(es);
+ AddReg(fs);
+ AddReg(gs);
+ AddReg(eflags);
+
+ AddReg(eip);
+#undef AddReg
+
+ return true;
+}
+
+bool MinidumpGenerator::WriteContextX86_64(
+ breakpad_thread_state_data_t state,
+ MDLocationDescriptor *register_location) {
+ TypedMDRVA<MDRawContextAMD64> context(&writer_);
+ x86_thread_state64_t *machine_state =
+ reinterpret_cast<x86_thread_state64_t *>(state);
+
+ if (!context.Allocate())
+ return false;
+
+ *register_location = context.location();
+ MDRawContextAMD64 *context_ptr = context.get();
+
+#define AddReg(a) context_ptr->a = REGISTER_FROM_THREADSTATE(machine_state, a)
+
+ context_ptr->context_flags = MD_CONTEXT_AMD64;
+ AddReg(rax);
+ AddReg(rbx);
+ AddReg(rcx);
+ AddReg(rdx);
+ AddReg(rdi);
+ AddReg(rsi);
+ AddReg(rbp);
+ AddReg(rsp);
+ AddReg(r8);
+ AddReg(r9);
+ AddReg(r10);
+ AddReg(r11);
+ AddReg(r12);
+ AddReg(r13);
+ AddReg(r14);
+ AddReg(r15);
+ AddReg(rip);
+ // according to AMD's software developer guide, bits above 18 are
+ // not used in the flags register. Since the minidump format
+ // specifies 32 bits for the flags register, we can truncate safely
+ // with no loss.
+ context_ptr->eflags = static_cast<u_int32_t>(REGISTER_FROM_THREADSTATE(machine_state, rflags));
+ AddReg(cs);
+ AddReg(fs);
+ AddReg(gs);
+#undef AddReg
+
+ return true;
+}
+#endif
+
+bool MinidumpGenerator::GetThreadState(thread_act_t target_thread,
+ thread_state_t state,
+ mach_msg_type_number_t *count) {
+ thread_state_flavor_t flavor;
+ switch (cpu_type_) {
+#ifdef HAS_ARM_SUPPORT
+ case CPU_TYPE_ARM:
+ flavor = ARM_THREAD_STATE;
+ break;
+#endif
+#ifdef HAS_PPC_SUPPORT
+ case CPU_TYPE_POWERPC:
+ flavor = PPC_THREAD_STATE;
+ break;
+ case CPU_TYPE_POWERPC64:
+ flavor = PPC_THREAD_STATE64;
+ break;
+#endif
+#ifdef HAS_X86_SUPPORT
+ case CPU_TYPE_I386:
+ flavor = i386_THREAD_STATE;
+ break;
+ case CPU_TYPE_X86_64:
+ flavor = x86_THREAD_STATE64;
+ break;
+#endif
+ default:
+ return false;
+ }
+ return thread_get_state(target_thread, flavor,
+ state, count) == KERN_SUCCESS;
+}
+
+bool MinidumpGenerator::WriteThreadStream(mach_port_t thread_id,
+ MDRawThread *thread) {
+ breakpad_thread_state_data_t state;
+ mach_msg_type_number_t state_count
+ = static_cast<mach_msg_type_number_t>(sizeof(state));
+
+ if (GetThreadState(thread_id, state, &state_count)) {
+ if (!WriteStack(state, &thread->stack))
+ return false;
+
+ memory_blocks_.push_back(thread->stack);
+
+ if (!WriteContext(state, &thread->thread_context))
+ return false;
+
+ thread->thread_id = thread_id;
+ } else {
+ return false;
+ }
+
+ return true;
+}
+
+bool MinidumpGenerator::WriteThreadListStream(
+ MDRawDirectory *thread_list_stream) {
+ TypedMDRVA<MDRawThreadList> list(&writer_);
+ thread_act_port_array_t threads_for_task;
+ mach_msg_type_number_t thread_count;
+ int non_generator_thread_count;
+
+ if (task_threads(crashing_task_, &threads_for_task, &thread_count))
+ return false;
+
+ // Don't include the generator thread
+ if (handler_thread_ != MACH_PORT_NULL)
+ non_generator_thread_count = thread_count - 1;
+ else
+ non_generator_thread_count = thread_count;
+ if (!list.AllocateObjectAndArray(non_generator_thread_count,
+ sizeof(MDRawThread)))
+ return false;
+
+ thread_list_stream->stream_type = MD_THREAD_LIST_STREAM;
+ thread_list_stream->location = list.location();
+
+ list.get()->number_of_threads = non_generator_thread_count;
+
+ MDRawThread thread;
+ int thread_idx = 0;
+
+ for (unsigned int i = 0; i < thread_count; ++i) {
+ memset(&thread, 0, sizeof(MDRawThread));
+
+ if (threads_for_task[i] != handler_thread_) {
+ if (!WriteThreadStream(threads_for_task[i], &thread))
+ return false;
+
+ list.CopyIndexAfterObject(thread_idx++, &thread, sizeof(MDRawThread));
+ }
+ }
+
+ return true;
+}
+
+bool MinidumpGenerator::WriteMemoryListStream(
+ MDRawDirectory *memory_list_stream) {
+ TypedMDRVA<MDRawMemoryList> list(&writer_);
+
+ // If the dump has an exception, include some memory around the
+ // instruction pointer.
+ const size_t kIPMemorySize = 256; // bytes
+ bool have_ip_memory = false;
+ MDMemoryDescriptor ip_memory_d;
+ if (exception_thread_ && exception_type_) {
+ breakpad_thread_state_data_t state;
+ mach_msg_type_number_t stateCount
+ = static_cast<mach_msg_type_number_t>(sizeof(state));
+
+ if (thread_get_state(exception_thread_,
+ BREAKPAD_MACHINE_THREAD_STATE,
+ state,
+ &stateCount) == KERN_SUCCESS) {
+ u_int64_t ip = CurrentPCForStack(state);
+ // Bound it to the upper and lower bounds of the region
+ // it's contained within. If it's not in a known memory region,
+ // don't bother trying to write it.
+ mach_vm_address_t addr = ip;
+ mach_vm_size_t size;
+ natural_t nesting_level = 0;
+ vm_region_submap_info_64 info;
+ mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+ kern_return_t ret =
+ mach_vm_region_recurse(crashing_task_,
+ &addr,
+ &size,
+ &nesting_level,
+ (vm_region_recurse_info_t)&info,
+ &info_count);
+ if (ret == KERN_SUCCESS && ip >= addr && ip < (addr + size)) {
+ // Try to get 128 bytes before and after the IP, but
+ // settle for whatever's available.
+ ip_memory_d.start_of_memory_range =
+ std::max(uintptr_t(addr),
+ uintptr_t(ip - (kIPMemorySize / 2)));
+ uintptr_t end_of_range =
+ std::min(uintptr_t(ip + (kIPMemorySize / 2)),
+ uintptr_t(addr + size));
+ ip_memory_d.memory.data_size =
+ end_of_range - ip_memory_d.start_of_memory_range;
+ have_ip_memory = true;
+ // This needs to get appended to the list even though
+ // the memory bytes aren't filled in yet so the entire
+ // list can be written first. The memory bytes will get filled
+ // in after the memory list is written.
+ memory_blocks_.push_back(ip_memory_d);
+ }
+ }
+ }
+
+ // Now fill in the memory list and write it.
+ unsigned memory_count = memory_blocks_.size();
+ if (!list.AllocateObjectAndArray(memory_count,
+ sizeof(MDMemoryDescriptor)))
+ return false;
+
+ memory_list_stream->stream_type = MD_MEMORY_LIST_STREAM;
+ memory_list_stream->location = list.location();
+
+ list.get()->number_of_memory_ranges = memory_count;
+
+ unsigned int i;
+ for (i = 0; i < memory_count; ++i) {
+ list.CopyIndexAfterObject(i, &memory_blocks_[i],
+ sizeof(MDMemoryDescriptor));
+ }
+
+ if (have_ip_memory) {
+ // Now read the memory around the instruction pointer.
+ UntypedMDRVA ip_memory(&writer_);
+ if (!ip_memory.Allocate(ip_memory_d.memory.data_size))
+ return false;
+
+ if (dynamic_images_) {
+ // Out-of-process.
+ vector<uint8_t> memory;
+ if (ReadTaskMemory(crashing_task_,
+ ip_memory_d.start_of_memory_range,
+ ip_memory_d.memory.data_size,
+ memory) != KERN_SUCCESS) {
+ return false;
+ }
+
+ ip_memory.Copy(&memory[0], ip_memory_d.memory.data_size);
+ } else {
+ // In-process, just copy from local memory.
+ ip_memory.Copy(
+ reinterpret_cast<const void *>(ip_memory_d.start_of_memory_range),
+ ip_memory_d.memory.data_size);
+ }
+
+ ip_memory_d.memory = ip_memory.location();
+ // Write this again now that the data location is filled in.
+ list.CopyIndexAfterObject(i - 1, &ip_memory_d,
+ sizeof(MDMemoryDescriptor));
+ }
+
+ return true;
+}
+
+bool
+MinidumpGenerator::WriteExceptionStream(MDRawDirectory *exception_stream) {
+ TypedMDRVA<MDRawExceptionStream> exception(&writer_);
+
+ if (!exception.Allocate())
+ return false;
+
+ exception_stream->stream_type = MD_EXCEPTION_STREAM;
+ exception_stream->location = exception.location();
+ MDRawExceptionStream *exception_ptr = exception.get();
+ exception_ptr->thread_id = exception_thread_;
+
+ // This naming is confusing, but it is the proper translation from
+ // mach naming to minidump naming.
+ exception_ptr->exception_record.exception_code = exception_type_;
+ exception_ptr->exception_record.exception_flags = exception_code_;
+
+ breakpad_thread_state_data_t state;
+ mach_msg_type_number_t state_count
+ = static_cast<mach_msg_type_number_t>(sizeof(state));
+
+ if (!GetThreadState(exception_thread_, state, &state_count))
+ return false;
+
+ if (!WriteContext(state, &exception_ptr->thread_context))
+ return false;
+
+ if (exception_type_ == EXC_BAD_ACCESS)
+ exception_ptr->exception_record.exception_address = exception_subcode_;
+ else
+ exception_ptr->exception_record.exception_address = CurrentPCForStack(state);
+
+ return true;
+}
+
+bool MinidumpGenerator::WriteSystemInfoStream(
+ MDRawDirectory *system_info_stream) {
+ TypedMDRVA<MDRawSystemInfo> info(&writer_);
+
+ if (!info.Allocate())
+ return false;
+
+ system_info_stream->stream_type = MD_SYSTEM_INFO_STREAM;
+ system_info_stream->location = info.location();
+
+ // CPU Information
+ uint32_t number_of_processors;
+ size_t len = sizeof(number_of_processors);
+ sysctlbyname("hw.ncpu", &number_of_processors, &len, NULL, 0);
+ MDRawSystemInfo *info_ptr = info.get();
+
+ switch (cpu_type_) {
+#ifdef HAS_ARM_SUPPORT
+ case CPU_TYPE_ARM:
+ info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_ARM;
+ break;
+#endif
+#ifdef HAS_PPC_SUPPORT
+ case CPU_TYPE_POWERPC:
+ case CPU_TYPE_POWERPC64:
+ info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_PPC;
+ break;
+#endif
+#ifdef HAS_X86_SUPPORT
+ case CPU_TYPE_I386:
+ case CPU_TYPE_X86_64:
+ if (cpu_type_ == CPU_TYPE_I386)
+ info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_X86;
+ else
+ info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_AMD64;
+#ifdef __i386__
+ // ebx is used for PIC code, so we need
+ // to preserve it.
+#define cpuid(op,eax,ebx,ecx,edx) \
+ asm ("pushl %%ebx \n\t" \
+ "cpuid \n\t" \
+ "movl %%ebx,%1 \n\t" \
+ "popl %%ebx" \
+ : "=a" (eax), \
+ "=g" (ebx), \
+ "=c" (ecx), \
+ "=d" (edx) \
+ : "0" (op))
+#elif defined(__x86_64__)
+
+#define cpuid(op,eax,ebx,ecx,edx) \
+ asm ("cpuid \n\t" \
+ : "=a" (eax), \
+ "=b" (ebx), \
+ "=c" (ecx), \
+ "=d" (edx) \
+ : "0" (op))
+#endif
+
+#if defined(__i386__) || defined(__x86_64__)
+ int unused, unused2;
+ // get vendor id
+ cpuid(0, unused, info_ptr->cpu.x86_cpu_info.vendor_id[0],
+ info_ptr->cpu.x86_cpu_info.vendor_id[2],
+ info_ptr->cpu.x86_cpu_info.vendor_id[1]);
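+  // CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order
+  // (e.g. "Genu", "ineI", "ntel"); the swapped vendor_id[2]/vendor_id[1]
+  // arguments above store it as vendor_id[0..2] in reading order.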
+ // get version and feature info
+ cpuid(1, info_ptr->cpu.x86_cpu_info.version_information, unused, unused2,
+ info_ptr->cpu.x86_cpu_info.feature_information);
+
+ // family
+ info_ptr->processor_level =
+ (info_ptr->cpu.x86_cpu_info.version_information & 0xF00) >> 8;
+ // 0xMMSS (Model, Stepping)
+ info_ptr->processor_revision =
+ (info_ptr->cpu.x86_cpu_info.version_information & 0xF) |
+ ((info_ptr->cpu.x86_cpu_info.version_information & 0xF0) << 4);
+
+ // decode extended model info
+ if (info_ptr->processor_level == 0xF ||
+ info_ptr->processor_level == 0x6) {
+ info_ptr->processor_revision |=
+ ((info_ptr->cpu.x86_cpu_info.version_information & 0xF0000) >> 4);
+ }
+
+ // decode extended family info
+ if (info_ptr->processor_level == 0xF) {
+ info_ptr->processor_level +=
+ ((info_ptr->cpu.x86_cpu_info.version_information & 0xFF00000) >> 20);
+ }
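+
+  // Worked example (illustrative CPUID value): version_information
+  // 0x000306A9 decodes to processor_level (family) 6 and
+  // processor_revision 0x3A09, i.e. model 0x3A, stepping 9.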
+
+#endif // __i386__ || __x86_64__
+ break;
+#endif // HAS_X86_SUPPORT
+ default:
+ info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_UNKNOWN;
+ break;
+ }
+
+ info_ptr->number_of_processors = number_of_processors;
+#if TARGET_OS_IPHONE
+ info_ptr->platform_id = MD_OS_IOS;
+#else
+ info_ptr->platform_id = MD_OS_MAC_OS_X;
+#endif // TARGET_OS_IPHONE
+
+ MDLocationDescriptor build_string_loc;
+
+ if (!writer_.WriteString(build_string_, 0,
+ &build_string_loc))
+ return false;
+
+ info_ptr->csd_version_rva = build_string_loc.rva;
+ info_ptr->major_version = os_major_version_;
+ info_ptr->minor_version = os_minor_version_;
+ info_ptr->build_number = os_build_number_;
+
+ return true;
+}
+
+bool MinidumpGenerator::WriteModuleStream(unsigned int index,
+ MDRawModule *module) {
+ if (dynamic_images_) {
+    // We're in a different process from the crashed process.
+ DynamicImage *image = dynamic_images_->GetImage(index);
+
+ if (!image)
+ return false;
+
+ memset(module, 0, sizeof(MDRawModule));
+
+ MDLocationDescriptor string_location;
+
+ string name = image->GetFilePath();
+ if (!writer_.WriteString(name.c_str(), 0, &string_location))
+ return false;
+
+ module->base_of_image = image->GetVMAddr() + image->GetVMAddrSlide();
+ module->size_of_image = static_cast<u_int32_t>(image->GetVMSize());
+ module->module_name_rva = string_location.rva;
+
+    // We'll skip the executable module, because it doesn't have an
+    // LC_ID_DYLIB load command, and the crash processing server gets
+    // version information from the plist file anyway.
+ if (index != (uint32_t)FindExecutableModule()) {
+ module->version_info.signature = MD_VSFIXEDFILEINFO_SIGNATURE;
+ module->version_info.struct_version |= MD_VSFIXEDFILEINFO_VERSION;
+      // Convert the Mac dylib version format, which is a 32-bit number, to
+      // the format used by minidump. The Mac format is
+      // <16 bits>.<8 bits>.<8 bits>, so it fits nicely into the Windows
+      // version with some massaging. The mapping is:
+      // 1) The upper 16 bits of the Mac version go to the lower 16 bits of
+      //    product HI.
+      // 2) The next most significant 8 bits go to the upper 16 bits of
+      //    product LO.
+      // 3) The least significant 8 bits go to the lower 16 bits of
+      //    product LO.
+ uint32_t modVersion = image->GetVersion();
+      module->version_info.file_version_hi = modVersion >> 16;
+ module->version_info.file_version_lo |= (modVersion & 0xff00) << 8;
+ module->version_info.file_version_lo |= (modVersion & 0xff);
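+      // Worked example (illustrative value): a dylib version of
+      // 0x00010203 (1.2.3) yields file_version_hi = 0x0001 and
+      // file_version_lo = 0x00020003, which reads as 0.1.2.3 in the
+      // Windows HIWORD.LOWORD notation.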
+ }
+
+ if (!WriteCVRecord(module, image->GetCPUType(), name.c_str(), false)) {
+ return false;
+ }
+ } else {
+ // Getting module info in the crashed process
+ const breakpad_mach_header *header;
+ header = (breakpad_mach_header*)_dyld_get_image_header(index);
+ if (!header)
+ return false;
+
+#ifdef __LP64__
+ assert(header->magic == MH_MAGIC_64);
+
+    if (header->magic != MH_MAGIC_64)
+ return false;
+#else
+ assert(header->magic == MH_MAGIC);
+
+    if (header->magic != MH_MAGIC)
+ return false;
+#endif
+
+ int cpu_type = header->cputype;
+ unsigned long slide = _dyld_get_image_vmaddr_slide(index);
+ const char* name = _dyld_get_image_name(index);
+ const struct load_command *cmd =
+ reinterpret_cast<const struct load_command *>(header + 1);
+
+ memset(module, 0, sizeof(MDRawModule));
+
+ for (unsigned int i = 0; cmd && (i < header->ncmds); i++) {
+ if (cmd->cmd == LC_SEGMENT_ARCH) {
+
+ const breakpad_mach_segment_command *seg =
+ reinterpret_cast<const breakpad_mach_segment_command *>(cmd);
+
+ if (!strcmp(seg->segname, "__TEXT")) {
+ MDLocationDescriptor string_location;
+
+ if (!writer_.WriteString(name, 0, &string_location))
+ return false;
+
+ module->base_of_image = seg->vmaddr + slide;
+ module->size_of_image = static_cast<u_int32_t>(seg->vmsize);
+ module->module_name_rva = string_location.rva;
+
+ bool in_memory = false;
+#if TARGET_OS_IPHONE
+ in_memory = true;
+#endif
+ if (!WriteCVRecord(module, cpu_type, name, in_memory))
+ return false;
+
+ return true;
+ }
+ }
+
+ cmd = reinterpret_cast<struct load_command*>((char *)cmd + cmd->cmdsize);
+ }
+ }
+
+ return true;
+}
+
+int MinidumpGenerator::FindExecutableModule() {
+ if (dynamic_images_) {
+ int index = dynamic_images_->GetExecutableImageIndex();
+
+ if (index >= 0) {
+ return index;
+ }
+ } else {
+ int image_count = _dyld_image_count();
+ const struct mach_header *header;
+
+ for (int index = 0; index < image_count; ++index) {
+ header = _dyld_get_image_header(index);
+
+ if (header->filetype == MH_EXECUTE)
+ return index;
+ }
+ }
+
+ // failed - just use the first image
+ return 0;
+}
+
+bool MinidumpGenerator::WriteCVRecord(MDRawModule *module, int cpu_type,
+ const char *module_path, bool in_memory) {
+ TypedMDRVA<MDCVInfoPDB70> cv(&writer_);
+
+  // Only use the last path component of the full module path
+ const char *module_name = strrchr(module_path, '/');
+
+ // Increment past the slash
+ if (module_name)
+ ++module_name;
+ else
+ module_name = "<Unknown>";
+
+ size_t module_name_length = strlen(module_name);
+
+ if (!cv.AllocateObjectAndArray(module_name_length + 1, sizeof(u_int8_t)))
+ return false;
+
+ if (!cv.CopyIndexAfterObject(0, module_name, module_name_length))
+ return false;
+
+ module->cv_record = cv.location();
+ MDCVInfoPDB70 *cv_ptr = cv.get();
+ cv_ptr->cv_signature = MD_CVINFOPDB70_SIGNATURE;
+ cv_ptr->age = 0;
+
+ // Get the module identifier
+ unsigned char identifier[16];
+ bool result = false;
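+  // When in_memory is set (TARGET_OS_IPHONE in WriteModuleStream), derive
+  // the identifier from the mapped image; otherwise fall back to reading
+  // the module file on disk below.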
+ if (in_memory) {
+ MacFileUtilities::MachoID macho(module_path,
+ reinterpret_cast<void *>(module->base_of_image),
+ static_cast<size_t>(module->size_of_image));
+ result = macho.UUIDCommand(cpu_type, identifier);
+ if (!result)
+ result = macho.MD5(cpu_type, identifier);
+ }
+
+ if (!result) {
+ FileID file_id(module_path);
+ result = file_id.MachoIdentifier(cpu_type, identifier);
+ }
+
+ if (result) {
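+    // Pack the 16-byte Mach-O identifier into the PDB70 GUID fields,
+    // treating the leading bytes as big-endian data1/data2/data3.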
+ cv_ptr->signature.data1 = (uint32_t)identifier[0] << 24 |
+ (uint32_t)identifier[1] << 16 | (uint32_t)identifier[2] << 8 |
+ (uint32_t)identifier[3];
+ cv_ptr->signature.data2 = (uint32_t)identifier[4] << 8 | identifier[5];
+ cv_ptr->signature.data3 = (uint32_t)identifier[6] << 8 | identifier[7];
+ cv_ptr->signature.data4[0] = identifier[8];
+ cv_ptr->signature.data4[1] = identifier[9];
+ cv_ptr->signature.data4[2] = identifier[10];
+ cv_ptr->signature.data4[3] = identifier[11];
+ cv_ptr->signature.data4[4] = identifier[12];
+ cv_ptr->signature.data4[5] = identifier[13];
+ cv_ptr->signature.data4[6] = identifier[14];
+ cv_ptr->signature.data4[7] = identifier[15];
+ }
+
+ return true;
+}
+
+bool MinidumpGenerator::WriteModuleListStream(
+ MDRawDirectory *module_list_stream) {
+ TypedMDRVA<MDRawModuleList> list(&writer_);
+
+ size_t image_count = dynamic_images_ ?
+ static_cast<size_t>(dynamic_images_->GetImageCount()) :
+ _dyld_image_count();
+
+ if (!list.AllocateObjectAndArray(image_count, MD_MODULE_SIZE))
+ return false;
+
+ module_list_stream->stream_type = MD_MODULE_LIST_STREAM;
+ module_list_stream->location = list.location();
+ list.get()->number_of_modules = image_count;
+
+ // Write out the executable module as the first one
+ MDRawModule module;
+ size_t executableIndex = FindExecutableModule();
+
+ if (!WriteModuleStream(executableIndex, &module)) {
+ return false;
+ }
+
+ list.CopyIndexAfterObject(0, &module, MD_MODULE_SIZE);
+ int destinationIndex = 1; // Write all other modules after this one
+
+ for (size_t i = 0; i < image_count; ++i) {
+ if (i != executableIndex) {
+ if (!WriteModuleStream(i, &module)) {
+ return false;
+ }
+
+ list.CopyIndexAfterObject(destinationIndex++, &module, MD_MODULE_SIZE);
+ }
+ }
+
+ return true;
+}
+
+bool MinidumpGenerator::WriteMiscInfoStream(MDRawDirectory *misc_info_stream) {
+ TypedMDRVA<MDRawMiscInfo> info(&writer_);
+
+ if (!info.Allocate())
+ return false;
+
+ misc_info_stream->stream_type = MD_MISC_INFO_STREAM;
+ misc_info_stream->location = info.location();
+
+ MDRawMiscInfo *info_ptr = info.get();
+ info_ptr->size_of_info = static_cast<u_int32_t>(sizeof(MDRawMiscInfo));
+ info_ptr->flags1 = MD_MISCINFO_FLAGS1_PROCESS_ID |
+ MD_MISCINFO_FLAGS1_PROCESS_TIMES |
+ MD_MISCINFO_FLAGS1_PROCESSOR_POWER_INFO;
+
+ // Process ID
+ info_ptr->process_id = getpid();
+
+ // Times
+ struct rusage usage;
+ if (getrusage(RUSAGE_SELF, &usage) != -1) {
+ // Omit the fractional time since the MDRawMiscInfo only wants seconds
+ info_ptr->process_user_time =
+ static_cast<u_int32_t>(usage.ru_utime.tv_sec);
+ info_ptr->process_kernel_time =
+ static_cast<u_int32_t>(usage.ru_stime.tv_sec);
+ }
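+
+  // Process start time, read from the kernel's process table via the
+  // kern.proc.pid sysctl.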
+ int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID,
+ static_cast<int>(info_ptr->process_id) };
+ u_int mibsize = static_cast<u_int>(sizeof(mib) / sizeof(mib[0]));
+ struct kinfo_proc proc;
+ size_t size = sizeof(proc);
+ if (sysctl(mib, mibsize, &proc, &size, NULL, 0) == 0) {
+ info_ptr->process_create_time =
+ static_cast<u_int32_t>(proc.kp_proc.p_starttime.tv_sec);
+ }
+
+  // CPU speed. The hw.cpufrequency sysctls report Hz, converted to MHz below.
+ uint64_t speed;
+ const uint64_t kOneMillion = 1000 * 1000;
+ size = sizeof(speed);
+ sysctlbyname("hw.cpufrequency_max", &speed, &size, NULL, 0);
+ info_ptr->processor_max_mhz = static_cast<u_int32_t>(speed / kOneMillion);
+ info_ptr->processor_mhz_limit = static_cast<u_int32_t>(speed / kOneMillion);
+ size = sizeof(speed);
+ sysctlbyname("hw.cpufrequency", &speed, &size, NULL, 0);
+ info_ptr->processor_current_mhz = static_cast<u_int32_t>(speed / kOneMillion);
+
+ return true;
+}
+
+bool MinidumpGenerator::WriteBreakpadInfoStream(
+ MDRawDirectory *breakpad_info_stream) {
+ TypedMDRVA<MDRawBreakpadInfo> info(&writer_);
+
+ if (!info.Allocate())
+ return false;
+
+ breakpad_info_stream->stream_type = MD_BREAKPAD_INFO_STREAM;
+ breakpad_info_stream->location = info.location();
+ MDRawBreakpadInfo *info_ptr = info.get();
+
+ if (exception_thread_ && exception_type_) {
+ info_ptr->validity = MD_BREAKPAD_INFO_VALID_DUMP_THREAD_ID |
+ MD_BREAKPAD_INFO_VALID_REQUESTING_THREAD_ID;
+ info_ptr->dump_thread_id = handler_thread_;
+ info_ptr->requesting_thread_id = exception_thread_;
+ } else {
+ info_ptr->validity = MD_BREAKPAD_INFO_VALID_DUMP_THREAD_ID;
+ info_ptr->dump_thread_id = handler_thread_;
+ info_ptr->requesting_thread_id = 0;
+ }
+
+ return true;
+}
+
+} // namespace google_breakpad
diff --git a/3rdParty/Breakpad/src/client/mac/handler/minidump_generator.h b/3rdParty/Breakpad/src/client/mac/handler/minidump_generator.h
new file mode 100644
index 0000000..8394ce6
--- /dev/null
+++ b/3rdParty/Breakpad/src/client/mac/handler/minidump_generator.h
@@ -0,0 +1,218 @@
+// Copyright (c) 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// minidump_generator.h: Create a minidump of the current MacOS process.
+
+#ifndef CLIENT_MAC_GENERATOR_MINIDUMP_GENERATOR_H__
+#define CLIENT_MAC_GENERATOR_MINIDUMP_GENERATOR_H__
+
+#include <mach/mach.h>
+#include <TargetConditionals.h>
+
+#include <string>
+
+#include "client/minidump_file_writer.h"
+#include "common/memory.h"
+#include "common/mac/macho_utilities.h"
+#include "google_breakpad/common/minidump_format.h"
+
+#include "dynamic_images.h"
+#include "mach_vm_compat.h"
+
+#if !TARGET_OS_IPHONE && (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7)
+ #define HAS_PPC_SUPPORT
+#endif
+#if defined(__arm__)
+ #define HAS_ARM_SUPPORT
+#elif defined(__i386__) || defined(__x86_64__)
+ #define HAS_X86_SUPPORT
+#endif
+
+namespace google_breakpad {
+
+using std::string;
+
+// Use the REGISTER_FROM_THREADSTATE macro to access a register by name in
+// the breakpad_thread_state_t structure.
+#if __DARWIN_UNIX03 || TARGET_CPU_X86_64 || TARGET_CPU_PPC64 || TARGET_CPU_ARM
+// In the 10.5 SDK headers, Apple prepended __ to the variable names in the
+// i386_thread_state_t structure. There's no good way to tell which version
+// of the SDK we're compiling against, so we just toggle on the same
+// preprocessor symbol Apple's headers use.
+#define REGISTER_FROM_THREADSTATE(a, b) ((a)->__ ## b)
+#else
+#define REGISTER_FROM_THREADSTATE(a, b) (a->b)
+#endif
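+// For example, REGISTER_FROM_THREADSTATE(machine, eip) expands to
+// ((machine)->__eip) when the __-prefixed names are in use, and to
+// (machine->eip) otherwise.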
+
+// Creates a minidump file of the current process. If there is exception data,
+// use SetExceptionInformation() to add this to the minidump. The minidump
+// file is generated by the Write() function.
+// Usage:
+//   MinidumpGenerator minidump;
+// minidump.Write("/tmp/minidump");
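+// When exception data is available (an illustrative sketch; the constants
+// and thread shown are placeholders):
+//   MinidumpGenerator minidump;
+//   minidump.SetExceptionInformation(EXC_BAD_ACCESS, KERN_INVALID_ADDRESS,
+//                                    0, mach_thread_self());
+//   minidump.Write("/tmp/minidump");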
+//
+class MinidumpGenerator {
+ public:
+ MinidumpGenerator();
+ MinidumpGenerator(mach_port_t crashing_task, mach_port_t handler_thread);
+
+ virtual ~MinidumpGenerator();
+
+ // Return <dir>/<unique_name>.dmp
+ // Sets |unique_name| (if requested) to the unique name for the minidump
+ static string UniqueNameInDirectory(const string &dir, string *unique_name);
+
+ // Write out the minidump into |path|
+ // All of the components of |path| must exist and be writable
+ // Return true if successful, false otherwise
+ bool Write(const char *path);
+
+ // Specify some exception information, if applicable
+ void SetExceptionInformation(int type, int code, int subcode,
+ mach_port_t thread_name) {
+ exception_type_ = type;
+ exception_code_ = code;
+ exception_subcode_ = subcode;
+ exception_thread_ = thread_name;
+ }
+
+  // Gather system information. This should be called at least once before
+  // using the MinidumpGenerator class.
+ static void GatherSystemInformation();
+
+ protected:
+ // Overridable Stream writers
+ virtual bool WriteExceptionStream(MDRawDirectory *exception_stream);
+
+ // Overridable Helper
+ virtual bool WriteThreadStream(mach_port_t thread_id, MDRawThread *thread);
+
+ private:
+ typedef bool (MinidumpGenerator::*WriteStreamFN)(MDRawDirectory *);
+
+ // Stream writers
+ bool WriteThreadListStream(MDRawDirectory *thread_list_stream);
+ bool WriteMemoryListStream(MDRawDirectory *memory_list_stream);
+ bool WriteSystemInfoStream(MDRawDirectory *system_info_stream);
+ bool WriteModuleListStream(MDRawDirectory *module_list_stream);
+ bool WriteMiscInfoStream(MDRawDirectory *misc_info_stream);
+ bool WriteBreakpadInfoStream(MDRawDirectory *breakpad_info_stream);
+
+ // Helpers
+ u_int64_t CurrentPCForStack(breakpad_thread_state_data_t state);
+ bool GetThreadState(thread_act_t target_thread, thread_state_t state,
+ mach_msg_type_number_t *count);
+ bool WriteStackFromStartAddress(mach_vm_address_t start_addr,
+ MDMemoryDescriptor *stack_location);
+ bool WriteStack(breakpad_thread_state_data_t state,
+ MDMemoryDescriptor *stack_location);
+ bool WriteContext(breakpad_thread_state_data_t state,
+ MDLocationDescriptor *register_location);
+ bool WriteCVRecord(MDRawModule *module, int cpu_type,
+ const char *module_path, bool in_memory);
+ bool WriteModuleStream(unsigned int index, MDRawModule *module);
+ size_t CalculateStackSize(mach_vm_address_t start_addr);
+ int FindExecutableModule();
+
+ // Per-CPU implementations of these methods
+#ifdef HAS_ARM_SUPPORT
+ bool WriteStackARM(breakpad_thread_state_data_t state,
+ MDMemoryDescriptor *stack_location);
+ bool WriteContextARM(breakpad_thread_state_data_t state,
+ MDLocationDescriptor *register_location);
+ u_int64_t CurrentPCForStackARM(breakpad_thread_state_data_t state);
+#endif
+#ifdef HAS_PPC_SUPPORT
+ bool WriteStackPPC(breakpad_thread_state_data_t state,
+ MDMemoryDescriptor *stack_location);
+ bool WriteContextPPC(breakpad_thread_state_data_t state,
+ MDLocationDescriptor *register_location);
+ u_int64_t CurrentPCForStackPPC(breakpad_thread_state_data_t state);
+ bool WriteStackPPC64(breakpad_thread_state_data_t state,
+ MDMemoryDescriptor *stack_location);
+ bool WriteContextPPC64(breakpad_thread_state_data_t state,
+ MDLocationDescriptor *register_location);
+ u_int64_t CurrentPCForStackPPC64(breakpad_thread_state_data_t state);
+#endif
+#ifdef HAS_X86_SUPPORT
+ bool WriteStackX86(breakpad_thread_state_data_t state,
+ MDMemoryDescriptor *stack_location);
+ bool WriteContextX86(breakpad_thread_state_data_t state,
+ MDLocationDescriptor *register_location);
+ u_int64_t CurrentPCForStackX86(breakpad_thread_state_data_t state);
+ bool WriteStackX86_64(breakpad_thread_state_data_t state,
+ MDMemoryDescriptor *stack_location);
+ bool WriteContextX86_64(breakpad_thread_state_data_t state,
+ MDLocationDescriptor *register_location);
+ u_int64_t CurrentPCForStackX86_64(breakpad_thread_state_data_t state);
+#endif
+
+ // disallow copy ctor and operator=
+ explicit MinidumpGenerator(const MinidumpGenerator &);
+ void operator=(const MinidumpGenerator &);
+
+ protected:
+ // Use this writer to put the data to disk
+ MinidumpFileWriter writer_;
+
+ private:
+ // Exception information
+ int exception_type_;
+ int exception_code_;
+ int exception_subcode_;
+ mach_port_t exception_thread_;
+ mach_port_t crashing_task_;
+ mach_port_t handler_thread_;
+
+ // CPU type of the task being dumped.
+ cpu_type_t cpu_type_;
+
+ // System information
+ static char build_string_[16];
+ static int os_major_version_;
+ static int os_minor_version_;
+ static int os_build_number_;
+
+ // Information about dynamically loaded code
+ DynamicImages *dynamic_images_;
+
+ // PageAllocator makes it possible to allocate memory
+ // directly from the system, even while handling an exception.
+ mutable PageAllocator allocator_;
+
+ protected:
+ // Blocks of memory written to the dump. These are all currently
+ // written while writing the thread list stream, but saved here
+ // so a memory list stream can be written afterwards.
+ wasteful_vector<MDMemoryDescriptor> memory_blocks_;
+};
+
+} // namespace google_breakpad
+
+#endif // CLIENT_MAC_GENERATOR_MINIDUMP_GENERATOR_H__